feat(bindless): Build command buffers for pipelines

This commit is contained in:
PancakeTAS 2026-04-25 20:19:26 +02:00
parent 5c32cb2173
commit f8097eddb9
No known key found for this signature in database
4 changed files with 301 additions and 0 deletions

View file

@ -571,5 +571,217 @@ Pipeline::Pipeline(
LOG_DEBUG(" Built " << this->m_stages.size() << " pipeline stages")
// Transition all images into general layout
this->m_pool = vkhelper::createCommandPool(
dld,
device,
queueFamilyIndex
);
std::vector<vk::ImageMemoryBarrier2KHR> barriers;
for (const auto& image : this->m_images) {
for (const auto& subimage : image.subimages) {
barriers.push_back({
.newLayout = vk::ImageLayout::eGeneral,
.image = *subimage.image,
.subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.levelCount = 1,
.layerCount = image.subimages.size() == 1 ? image.signature.count : 1
}
});
}
}
const auto layoutCmdbuf{
vkhelper::createCommandBuffer(dld, device, *this->m_pool)
};
layoutCmdbuf->begin({ .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit }, dld);
layoutCmdbuf->pipelineBarrier2KHR({
.imageMemoryBarrierCount = static_cast<uint32_t>(barriers.size()),
.pImageMemoryBarriers = barriers.data()
}, dld);
layoutCmdbuf->end(dld);
const auto fence{device.createFenceUnique({}, nullptr, dld)};
queue.submit(
{{
.commandBufferCount = 1,
.pCommandBuffers = &*layoutCmdbuf
}},
*fence,
dld
);
if (device.waitForFences(*fence, VK_TRUE, 50'000'000, dld) != vk::Result::eSuccess) {
throw std::runtime_error("Failed to wait for image layout transition fence");
}
LOG_DEBUG(" Transitioned all " << this->m_images.size() << " images into general layout")
for (size_t i = 0; i < signature.splitIndices.size() + 1; i++) {
auto& cmdbuf{this->m_cmdbufs.emplace_back()};
cmdbuf = vkhelper::createCommandBuffer(dld, device, *this->m_pool);
cmdbuf->begin({ .flags = vk::CommandBufferUsageFlagBits::eSimultaneousUse }, dld);
cmdbuf->bindDescriptorSets(
vk::PipelineBindPoint::eCompute,
*this->m_layout.pipelineLayout,
0,
this->m_descriptorSet.set,
{},
dld
);
}
size_t currentStageIndex{0};
size_t currentStageBound{
signature.splitIndices.empty() ? signature.passes.size() : signature.splitIndices.front()
};
std::vector<vk::ImageMemoryBarrier2KHR> barrierVector;
barrierVector.reserve(16);
std::unordered_map<VkImage, vk::ImageMemoryBarrier2KHR> stageBarriers;
for (size_t i = 0; i < this->m_stages.size(); i++) {
if (i == currentStageBound) {
currentStageIndex++;
currentStageBound = currentStageIndex < signature.splitIndices.size() ?
signature.splitIndices.at(currentStageIndex) : signature.passes.size();
}
const auto& stage{this->m_stages.at(i)};
const auto& cmdbuf{this->m_cmdbufs.at(currentStageIndex)};
// Append barriers for this stage
for (const auto& sampledImage : stage.sampledImages) {
const auto& image = this->m_images.at(sampledImage);
for (const auto& subimage : image.subimages) {
const vk::Image& imageHandle{*subimage.image};
if (stageBarriers.contains(imageHandle)) {
stageBarriers[imageHandle].dstAccessMask = vk::AccessFlagBits2::eShaderRead;
continue;
}
stageBarriers[imageHandle] = {
.srcStageMask = vk::PipelineStageFlagBits2::eComputeShader,
.srcAccessMask = vk::AccessFlagBits2::eNone,
.dstStageMask = vk::PipelineStageFlagBits2::eComputeShader,
.dstAccessMask = vk::AccessFlagBits2::eShaderRead,
.image = *subimage.image,
.subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.levelCount = 1,
.layerCount = image.subimages.size() == 1 ? image.signature.count : 1
}
};
}
}
for (const auto& storageImage : stage.storageImages) {
const auto& image = this->m_images.at(storageImage);
for (const auto& subimage : image.subimages) {
const vk::Image& imageHandle{*subimage.image};
if (stageBarriers.contains(imageHandle)) {
stageBarriers[imageHandle].dstAccessMask = vk::AccessFlagBits2::eShaderWrite;
continue;
}
stageBarriers[imageHandle] = {
.srcStageMask = vk::PipelineStageFlagBits2::eComputeShader,
.srcAccessMask = vk::AccessFlagBits2::eNone,
.dstStageMask = vk::PipelineStageFlagBits2::eComputeShader,
.dstAccessMask = vk::AccessFlagBits2::eShaderWrite,
.image = *subimage.image,
.subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.levelCount = 1,
.layerCount = image.subimages.size() == 1 ? image.signature.count : 1
}
};
}
}
barrierVector.clear();
for (const auto& [_, barrier] : stageBarriers) // NOLINT (nondeterministic order)
barrierVector.push_back(barrier);
stageBarriers.clear();
cmdbuf->pipelineBarrier2KHR({
.imageMemoryBarrierCount = static_cast<uint32_t>(barrierVector.size()),
.pImageMemoryBarriers = barrierVector.data()
}, dld);
for (const auto& substage : stage.substages) {
// Bind shader pipeline for this stage
const auto& pipeline = this->m_pipelines.at(substage.pipeline);
cmdbuf->bindPipeline(vk::PipelineBindPoint::eCompute, *pipeline, dld);
// Dispatch all subiterations for this stage
for (const auto& subiteration : substage.subiterations) {
const PushConstants pushConstants{
.specialFlag = subiteration.isSpecial ? 1U : 0U,
.subiteration = subiteration.iterationIndex
};
cmdbuf->pushConstants(
*this->m_layout.pipelineLayout,
vk::ShaderStageFlagBits::eCompute,
0,
sizeof(PushConstants),
&pushConstants,
dld
);
const auto& dispatch{subiteration.dispatch};
cmdbuf->dispatch(dispatch.width, dispatch.height, 1, dld);
}
}
// Append barriers for next stage
for (const auto& sampledImage : stage.sampledImages) {
const auto& image = this->m_images.at(sampledImage);
for (const auto& subimage : image.subimages) {
stageBarriers[*subimage.image] = {
.srcStageMask = vk::PipelineStageFlagBits2::eComputeShader,
.srcAccessMask = vk::AccessFlagBits2::eShaderRead,
.dstStageMask = vk::PipelineStageFlagBits2::eComputeShader,
.dstAccessMask = vk::AccessFlagBits2::eShaderRead,
.image = *subimage.image,
.subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.levelCount = 1,
.layerCount = image.subimages.size() == 1 ? image.signature.count : 1
}
};
}
}
for (const auto& storageImage : stage.storageImages) {
const auto& image = this->m_images.at(storageImage);
for (const auto& subimage : image.subimages) {
stageBarriers[*subimage.image] = {
.srcStageMask = vk::PipelineStageFlagBits2::eComputeShader,
.srcAccessMask = vk::AccessFlagBits2::eShaderWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eComputeShader,
.dstAccessMask = vk::AccessFlagBits2::eShaderRead,
.image = *subimage.image,
.subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.levelCount = 1,
.layerCount = image.subimages.size() == 1 ? image.signature.count : 1
}
};
}
}
// Skip barriers on switch between passes
if (i + 1 == currentStageBound) {
stageBarriers.clear();
}
}
for (auto& cmdbuf : this->m_cmdbufs) {
cmdbuf->end(dld);
}
LOG_DEBUG(" Recorded command buffers for pipeline execution")
LOG_DEBUG("Finished building pipeline")
}

View file

@ -109,6 +109,15 @@ namespace lsfgvk::pipeline {
return *this->m_descriptorSet.mappedBuffer.get();
}
///
/// Get all command buffers
///
/// @return List of command buffers
///
[[nodiscard]] auto& getCmdbufs() const {
return this->m_cmdbufs;
}
private:
/// Vulkan descriptor set & pipeline layout
struct Layout {
@ -191,6 +200,9 @@ namespace lsfgvk::pipeline {
std::vector<size_t> storageImages;
};
std::vector<Stage> m_stages;
vk::UniqueCommandPool m_pool;
std::vector<vk::UniqueCommandBuffer> m_cmdbufs;
};
}

View file

@ -445,6 +445,38 @@ vk::UniqueImageView vkhelper::createImageView(
return device.createImageViewUnique(viewInfo, nullptr, dld);
}
/* Command buffers */
vk::UniqueCommandPool vkhelper::createCommandPool(
    const vk::detail::DispatchLoaderDynamic& dld,
    const vk::Device& device,
    uint32_t qfi
) {
    // Pool with default flags on the given queue family; ownership is
    // returned to the caller via the RAII unique handle.
    return device.createCommandPoolUnique(
        vk::CommandPoolCreateInfo{ .queueFamilyIndex = qfi },
        nullptr, dld
    );
}
vk::UniqueCommandBuffer vkhelper::createCommandBuffer(
    const vk::detail::DispatchLoaderDynamic& dld,
    const vk::Device& device,
    const vk::CommandPool& cmdpool
) {
    // Allocate exactly one command buffer (primary level by default)
    // from the given pool and hand it out as a unique handle.
    const vk::CommandBufferAllocateInfo allocInfo{
        .commandPool = cmdpool,
        .commandBufferCount = 1
    };
    auto buffers{device.allocateCommandBuffersUnique(allocInfo, dld)};
    return std::move(buffers.front());
}
vk::UniqueFence vkhelper::createFence(
    const vk::detail::DispatchLoaderDynamic& dld,
    const vk::Device& device
) {
    // Unsignaled fence with default creation flags.
    const vk::FenceCreateInfo fenceInfo{};
    return device.createFenceUnique(fenceInfo, nullptr, dld);
}
/* External memory */
std::pair<vk::UniqueImage, vk::UniqueDeviceMemory> vkhelper::createExternalImage(

View file

@ -306,6 +306,51 @@ namespace vkhelper {
uint32_t layers
);
/* Command buffers */
///
/// Create a Vulkan command pool for lsfg-vk
///
/// @param dld Dynamic dispatch loader
/// @param device Vulkan device
/// @param qfi Queue family index
/// @return RAII-wrapped Vulkan command pool
/// @throws std::runtime_error on failure
///
vk::UniqueCommandPool createCommandPool(
const vk::detail::DispatchLoaderDynamic& dld,
const vk::Device& device,
uint32_t qfi
);
///
/// Create a Vulkan command buffer for lsfg-vk
///
/// @param dld Dynamic dispatch loader
/// @param device Vulkan device
/// @param cmdpool Vulkan command pool
/// @return RAII-wrapped Vulkan command buffer
/// @throws std::runtime_error on failure
///
vk::UniqueCommandBuffer createCommandBuffer(
const vk::detail::DispatchLoaderDynamic& dld,
const vk::Device& device,
const vk::CommandPool& cmdpool
);
///
/// Create a fence
///
/// @param dld Dynamic dispatch loader
/// @param device Vulkan device
/// @return RAII-wrapped Vulkan fence
/// @throws std::runtime_error on failure
///
vk::UniqueFence createFence(
const vk::detail::DispatchLoaderDynamic& dld,
const vk::Device& device
);
/* External memory */
///