Merge pull request from Wunkolo/vulkan-framebuffer

[Vulkan] Implement framebuffer management
wheremyfoodat 2023-08-27 13:41:43 +03:00 committed by GitHub
commit 80cdf0354f
19 changed files with 1986 additions and 198 deletions

File diff suppressed because it is too large

@@ -0,0 +1,3 @@
#include "renderer_vk/vk_api.hpp"
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE;

@@ -0,0 +1,119 @@
#include "renderer_vk/vk_descriptor_heap.hpp"
#include <algorithm>
#include <optional>
#include <unordered_map>
namespace Vulkan {
DescriptorHeap::DescriptorHeap(vk::Device device) : device(device) {}
std::optional<vk::DescriptorSet> DescriptorHeap::allocateDescriptorSet() {
// Find a free slot
const auto freeSlot = std::find(allocationMap.begin(), allocationMap.end(), false);
// If there is no free slot, return
if (freeSlot == allocationMap.end()) {
return std::nullopt;
}
// Mark the slot as allocated
*freeSlot = true;
const u16 index = static_cast<u16>(std::distance(allocationMap.begin(), freeSlot));
vk::UniqueDescriptorSet& newDescriptorSet = descriptorSets[index];
if (!newDescriptorSet) {
// Descriptor set doesn't exist yet. Allocate a new one
vk::DescriptorSetAllocateInfo allocateInfo = {};
allocateInfo.descriptorPool = descriptorPool.get();
allocateInfo.pSetLayouts = &descriptorSetLayout.get();
allocateInfo.descriptorSetCount = 1;
if (auto allocateResult = device.allocateDescriptorSetsUnique(allocateInfo); allocateResult.result == vk::Result::eSuccess) {
newDescriptorSet = std::move(allocateResult.value[0]);
} else {
// Error allocating descriptor set
return std::nullopt;
}
}
return newDescriptorSet.get();
}
bool DescriptorHeap::freeDescriptorSet(vk::DescriptorSet set) {
// Find the descriptor set
const auto found =
std::find_if(descriptorSets.begin(), descriptorSets.end(), [&set](const auto& curSet) -> bool { return curSet.get() == set; });
// If the descriptor set is not found, return
if (found == descriptorSets.end()) {
return false;
}
// Mark the slot as free
const u16 index = static_cast<u16>(std::distance(descriptorSets.begin(), found));
allocationMap[index] = false;
return true;
}
std::optional<DescriptorHeap> DescriptorHeap::create(
vk::Device device, std::span<const vk::DescriptorSetLayoutBinding> bindings, u16 descriptorHeapCount
) {
DescriptorHeap newDescriptorHeap(device);
// Create a histogram of each of the descriptor types and how many of each
// the pool should have
// Todo: maybe keep this around as a hash table to do more dynamic
// allocations of descriptor sets rather than allocating them all up-front
std::vector<vk::DescriptorPoolSize> poolSizes;
{
std::unordered_map<vk::DescriptorType, u16> descriptorTypeCounts;
for (const auto& binding : bindings) {
descriptorTypeCounts[binding.descriptorType] += binding.descriptorCount;
}
for (const auto& descriptorTypeCount : descriptorTypeCounts) {
poolSizes.push_back(vk::DescriptorPoolSize(descriptorTypeCount.first, descriptorTypeCount.second * descriptorHeapCount));
}
}
// Create descriptor pool
{
vk::DescriptorPoolCreateInfo poolInfo;
poolInfo.flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet;
poolInfo.maxSets = descriptorHeapCount;
poolInfo.pPoolSizes = poolSizes.data();
poolInfo.poolSizeCount = poolSizes.size();
if (auto createResult = device.createDescriptorPoolUnique(poolInfo); createResult.result == vk::Result::eSuccess) {
newDescriptorHeap.descriptorPool = std::move(createResult.value);
} else {
return std::nullopt;
}
}
// Create descriptor set layout
{
vk::DescriptorSetLayoutCreateInfo layoutInfo;
layoutInfo.pBindings = bindings.data();
layoutInfo.bindingCount = bindings.size();
if (auto createResult = device.createDescriptorSetLayoutUnique(layoutInfo); createResult.result == vk::Result::eSuccess) {
newDescriptorHeap.descriptorSetLayout = std::move(createResult.value);
} else {
return std::nullopt;
}
}
newDescriptorHeap.descriptorSets.resize(descriptorHeapCount);
newDescriptorHeap.allocationMap.resize(descriptorHeapCount);
newDescriptorHeap.bindings.assign(bindings.begin(), bindings.end());
return {std::move(newDescriptorHeap)};
}
} // namespace Vulkan
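
For reference, a minimal caller-side sketch of the heap above. The binding layout, heap size, and handle names are illustrative assumptions, not part of this commit; `device` is assumed to be an already-created vk::Device.

// Hypothetical usage sketch (requires <array> and <optional>)
const std::array<vk::DescriptorSetLayoutBinding, 1> bindings = {
    vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment),
};
std::optional<Vulkan::DescriptorHeap> heap = Vulkan::DescriptorHeap::create(device, bindings, 64);
if (heap.has_value()) {
    if (std::optional<vk::DescriptorSet> set = heap->allocateDescriptorSet(); set.has_value()) {
        // ... write descriptors into *set and bind it ...
        heap->freeDescriptorSet(*set);
    }
}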

@@ -0,0 +1,98 @@
#include "renderer_vk/vk_descriptor_update_batch.hpp"
#include <memory>
#include <span>
namespace Vulkan {
void DescriptorUpdateBatch::flush() {
device.updateDescriptorSets({std::span(descriptorWrites.get(), descriptorWriteEnd)}, {std::span(descriptorCopies.get(), descriptorCopyEnd)});
descriptorWriteEnd = 0;
descriptorCopyEnd = 0;
}
void DescriptorUpdateBatch::addImage(vk::DescriptorSet targetDescriptor, u8 targetBinding, vk::ImageView imageView, vk::ImageLayout imageLayout) {
if (descriptorWriteEnd >= descriptorWriteMax) {
flush();
}
const auto& imageInfo = descriptorInfos[descriptorWriteEnd].emplace<vk::DescriptorImageInfo>(vk::Sampler(), imageView, imageLayout);
descriptorWrites[descriptorWriteEnd] =
vk::WriteDescriptorSet(targetDescriptor, targetBinding, 0, 1, vk::DescriptorType::eSampledImage, &imageInfo, nullptr, nullptr);
++descriptorWriteEnd;
}
void DescriptorUpdateBatch::addSampler(vk::DescriptorSet targetDescriptor, u8 targetBinding, vk::Sampler sampler) {
if (descriptorWriteEnd >= descriptorWriteMax) {
flush();
}
const auto& imageInfo = descriptorInfos[descriptorWriteEnd].emplace<vk::DescriptorImageInfo>(sampler, vk::ImageView(), vk::ImageLayout());
descriptorWrites[descriptorWriteEnd] =
vk::WriteDescriptorSet(targetDescriptor, targetBinding, 0, 1, vk::DescriptorType::eSampler, &imageInfo, nullptr, nullptr);
++descriptorWriteEnd;
}
void DescriptorUpdateBatch::addImageSampler(
vk::DescriptorSet targetDescriptor, u8 targetBinding, vk::ImageView imageView, vk::Sampler sampler, vk::ImageLayout imageLayout
) {
if (descriptorWriteEnd >= descriptorWriteMax) {
flush();
}
const auto& imageInfo = descriptorInfos[descriptorWriteEnd].emplace<vk::DescriptorImageInfo>(sampler, imageView, imageLayout);
descriptorWrites[descriptorWriteEnd] =
vk::WriteDescriptorSet(targetDescriptor, targetBinding, 0, 1, vk::DescriptorType::eCombinedImageSampler, &imageInfo, nullptr, nullptr);
++descriptorWriteEnd;
}
void DescriptorUpdateBatch::addBuffer(
vk::DescriptorSet targetDescriptor, u8 targetBinding, vk::Buffer buffer, vk::DeviceSize offset, vk::DeviceSize size
) {
if (descriptorWriteEnd >= descriptorWriteMax) {
flush();
}
const auto& bufferInfo = descriptorInfos[descriptorWriteEnd].emplace<vk::DescriptorBufferInfo>(buffer, offset, size);
// Buffer writes go through pBufferInfo, so the descriptor type must be a buffer type
descriptorWrites[descriptorWriteEnd] =
vk::WriteDescriptorSet(targetDescriptor, targetBinding, 0, 1, vk::DescriptorType::eStorageBuffer, nullptr, &bufferInfo, nullptr);
++descriptorWriteEnd;
}
void DescriptorUpdateBatch::copyBinding(
vk::DescriptorSet sourceDescriptor, vk::DescriptorSet targetDescriptor, u8 sourceBinding, u8 targetBinding, u8 sourceArrayElement,
u8 targetArrayElement, u8 descriptorCount
) {
if (descriptorCopyEnd >= descriptorCopyMax) {
flush();
}
descriptorCopies[descriptorCopyEnd] = vk::CopyDescriptorSet(
sourceDescriptor, sourceBinding, sourceArrayElement, targetDescriptor, targetBinding, targetArrayElement, descriptorCount
);
++descriptorCopyEnd;
}
std::optional<DescriptorUpdateBatch> DescriptorUpdateBatch::create(vk::Device device, usize descriptorWriteMax, usize descriptorCopyMax) {
DescriptorUpdateBatch newDescriptorUpdateBatch(device, descriptorWriteMax, descriptorCopyMax);
newDescriptorUpdateBatch.descriptorInfos = std::make_unique<DescriptorInfoUnion[]>(descriptorWriteMax);
newDescriptorUpdateBatch.descriptorWrites = std::make_unique<vk::WriteDescriptorSet[]>(descriptorWriteMax);
newDescriptorUpdateBatch.descriptorCopies = std::make_unique<vk::CopyDescriptorSet[]>(descriptorCopyMax);
return {std::move(newDescriptorUpdateBatch)};
}
} // namespace Vulkan
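
As context, a small sketch of how the batch might be driven. The capacities, descriptor set, and resource handles (`descriptorSet`, `imageView`, `sampler`, `uniformBuffer`) are assumptions for illustration only.

// Hypothetical usage sketch
auto batch = Vulkan::DescriptorUpdateBatch::create(device, 256, 256);
if (batch.has_value()) {
    batch->addImageSampler(descriptorSet, 0, imageView, sampler, vk::ImageLayout::eShaderReadOnlyOptimal);
    batch->addBuffer(descriptorSet, 1, uniformBuffer, 0, 256);
    batch->flush();  // issues a single updateDescriptorSets call for everything queued so far
}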

@@ -0,0 +1,174 @@
#include "renderer_vk/vk_memory.hpp"
namespace Vulkan {
static constexpr vk::DeviceSize alignUp(vk::DeviceSize value, std::size_t size) {
const vk::DeviceSize mod = static_cast<vk::DeviceSize>(value % size);
value -= mod;
return static_cast<vk::DeviceSize>(mod == vk::DeviceSize{0} ? value : value + size);
}
// Given a speculative heap-allocation, defined by its current size and
// memory-type bits, appends a memory requirements structure to it, updating
// both the size and the required memory-type bits. Returns the offset within
// the heap for the current memory requirements.
// Todo (Sun Apr 23 13:28:25 PDT 2023): Rather than using a running size of the
// heap, look at all of the memory requests and create an optimal packing for
// all of the offset and alignment requirements, such as by satisfying the
// largest alignments first and then the smallest, to reduce padding.
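// Worked example (assumed values, not from this diff): with curHeapEnd = 200,
// an alignment of 256, a size of 130, and sizeAlignment = 1024, the request is
// placed at offset 256 and curHeapEnd advances to 256 + alignUp(130, 1024) = 1280.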
static vk::DeviceSize commitMemoryRequestToHeap(
const vk::MemoryRequirements& curMemoryRequirements, vk::DeviceSize& curHeapEnd, u32& curMemoryTypeBits, vk::DeviceSize sizeAlignment
) {
// Accumulate a mask of all the memory types that satisfies each of the
// handles
curMemoryTypeBits &= curMemoryRequirements.memoryTypeBits;
// Pad up the memory sizes so they are not considered aliasing
const vk::DeviceSize curMemoryOffset = alignUp(curHeapEnd, curMemoryRequirements.alignment);
// Pad the size by the required size-alignment.
// Intended for BufferImageGranularity
const vk::DeviceSize curMemorySize = alignUp(curMemoryRequirements.size, sizeAlignment);
curHeapEnd = (curMemoryOffset + curMemorySize);
return curMemoryOffset;
}
s32 findMemoryTypeIndex(
vk::PhysicalDevice physicalDevice, u32 memoryTypeMask, vk::MemoryPropertyFlags memoryProperties,
vk::MemoryPropertyFlags memoryExcludeProperties
) {
const vk::PhysicalDeviceMemoryProperties deviceMemoryProperties = physicalDevice.getMemoryProperties();
// Iterate the physical device's memory types until we find a match
for (std::size_t i = 0; i < deviceMemoryProperties.memoryTypeCount; i++) {
const vk::MemoryPropertyFlags propertyFlags = deviceMemoryProperties.memoryTypes[i].propertyFlags;
if (
// Is within the memory type mask
(((memoryTypeMask >> i) & 0b1) == 0b1) &&
// Has all of the requested property flags
((propertyFlags & memoryProperties) == memoryProperties) &&
// None of the excluded properties are enabled
!(propertyFlags & memoryExcludeProperties)
) {
return static_cast<s32>(i);
}
}
return -1;
}
std::tuple<vk::Result, vk::UniqueDeviceMemory> commitImageHeap(
vk::Device device, vk::PhysicalDevice physicalDevice, const std::span<const vk::Image> images, vk::MemoryPropertyFlags memoryProperties,
vk::MemoryPropertyFlags memoryExcludeProperties
) {
vk::MemoryAllocateInfo imageHeapAllocInfo = {};
u32 imageHeapMemoryTypeBits = 0xFFFFFFFF;
std::vector<vk::BindImageMemoryInfo> imageHeapBinds;
const vk::DeviceSize bufferImageGranularity = physicalDevice.getProperties().limits.bufferImageGranularity;
for (const vk::Image& curImage : images) {
const vk::DeviceSize curBindOffset = commitMemoryRequestToHeap(
device.getImageMemoryRequirements(curImage), imageHeapAllocInfo.allocationSize, imageHeapMemoryTypeBits, bufferImageGranularity
);
if (imageHeapMemoryTypeBits == 0) {
// No possible memory heap for all of the images to share
return std::make_tuple(vk::Result::eErrorOutOfDeviceMemory, vk::UniqueDeviceMemory());
}
// Put nullptr for the device memory for now
imageHeapBinds.emplace_back(vk::BindImageMemoryInfo{curImage, nullptr, curBindOffset});
}
const s32 memoryTypeIndex = findMemoryTypeIndex(physicalDevice, imageHeapMemoryTypeBits, memoryProperties, memoryExcludeProperties);
if (memoryTypeIndex < 0) {
// Unable to find a memory heap that satisfies all the images
return std::make_tuple(vk::Result::eErrorOutOfDeviceMemory, vk::UniqueDeviceMemory());
}
imageHeapAllocInfo.memoryTypeIndex = memoryTypeIndex;
vk::UniqueDeviceMemory imageHeapMemory = {};
if (auto allocResult = device.allocateMemoryUnique(imageHeapAllocInfo); allocResult.result == vk::Result::eSuccess) {
imageHeapMemory = std::move(allocResult.value);
} else {
return std::make_tuple(allocResult.result, vk::UniqueDeviceMemory());
}
// Assign the device memory to the bindings
for (vk::BindImageMemoryInfo& curBind : imageHeapBinds) {
curBind.memory = imageHeapMemory.get();
}
// Now bind them all in one call
if (const vk::Result bindResult = device.bindImageMemory2(imageHeapBinds); bindResult == vk::Result::eSuccess) {
// Binding memory succeeded
} else {
return std::make_tuple(bindResult, vk::UniqueDeviceMemory());
}
return std::make_tuple(vk::Result::eSuccess, std::move(imageHeapMemory));
}
std::tuple<vk::Result, vk::UniqueDeviceMemory> commitBufferHeap(
vk::Device device, vk::PhysicalDevice physicalDevice, const std::span<const vk::Buffer> buffers, vk::MemoryPropertyFlags memoryProperties,
vk::MemoryPropertyFlags memoryExcludeProperties
) {
vk::MemoryAllocateInfo bufferHeapAllocInfo = {};
u32 bufferHeapMemoryTypeBits = 0xFFFFFFFF;
std::vector<vk::BindBufferMemoryInfo> bufferHeapBinds;
const vk::DeviceSize bufferImageGranularity = physicalDevice.getProperties().limits.bufferImageGranularity;
for (const vk::Buffer& curBuffer : buffers) {
const vk::DeviceSize curBindOffset = commitMemoryRequestToHeap(
device.getBufferMemoryRequirements(curBuffer), bufferHeapAllocInfo.allocationSize, bufferHeapMemoryTypeBits, bufferImageGranularity
);
if (bufferHeapMemoryTypeBits == 0) {
// No possible memory heap for all of the buffers to share
return std::make_tuple(vk::Result::eErrorOutOfDeviceMemory, vk::UniqueDeviceMemory());
}
// Put nullptr for the device memory for now
bufferHeapBinds.emplace_back(vk::BindBufferMemoryInfo{curBuffer, nullptr, curBindOffset});
}
const s32 memoryTypeIndex = findMemoryTypeIndex(physicalDevice, bufferHeapMemoryTypeBits, memoryProperties, memoryExcludeProperties);
if (memoryTypeIndex < 0) {
// Unable to find a memory heap that satisfies all the buffers
return std::make_tuple(vk::Result::eErrorOutOfDeviceMemory, vk::UniqueDeviceMemory());
}
bufferHeapAllocInfo.memoryTypeIndex = memoryTypeIndex;
vk::UniqueDeviceMemory bufferHeapMemory = {};
if (auto allocResult = device.allocateMemoryUnique(bufferHeapAllocInfo); allocResult.result == vk::Result::eSuccess) {
bufferHeapMemory = std::move(allocResult.value);
} else {
return std::make_tuple(allocResult.result, vk::UniqueDeviceMemory());
}
// Assign the device memory to the bindings
for (vk::BindBufferMemoryInfo& curBind : bufferHeapBinds) {
curBind.memory = bufferHeapMemory.get();
}
// Now bind them all in one call
if (const vk::Result bindResult = device.bindBufferMemory2(bufferHeapBinds); bindResult == vk::Result::eSuccess) {
// Binding memory succeeded
} else {
return std::make_tuple(bindResult, vk::UniqueDeviceMemory());
}
return std::make_tuple(vk::Result::eSuccess, std::move(bufferHeapMemory));
}
} // namespace Vulkan
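
A brief caller-side sketch of commitImageHeap; the image handles and property flags are assumptions, and commitBufferHeap is used the same way with vk::Buffer handles.

// Hypothetical usage sketch: `colorImage` and `depthImage` are previously created vk::UniqueImage objects
const std::array<vk::Image, 2> images = {colorImage.get(), depthImage.get()};
auto [result, imageMemory] = Vulkan::commitImageHeap(device, physicalDevice, images, vk::MemoryPropertyFlagBits::eDeviceLocal, {});
if (result == vk::Result::eSuccess) {
    // `imageMemory` now backs both images; they were already bound via bindImageMemory2
}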

@@ -0,0 +1,39 @@
#include "renderer_vk/vk_pica.hpp"
namespace Vulkan {
vk::Format colorFormatToVulkan(PICA::ColorFmt colorFormat) {
switch (colorFormat) {
case PICA::ColorFmt::RGBA8: return vk::Format::eR8G8B8A8Unorm;
// VK_FORMAT_R8G8B8A8_UNORM is mandated by the Vulkan specification
// VK_FORMAT_R8G8B8_UNORM may not be supported
// TODO: Detect this!
// case PICA::ColorFmt::RGB8: return vk::Format::eR8G8B8Unorm;
case PICA::ColorFmt::RGB8: return vk::Format::eR8G8B8A8Unorm;
case PICA::ColorFmt::RGBA5551: return vk::Format::eR5G5B5A1UnormPack16;
case PICA::ColorFmt::RGB565: return vk::Format::eR5G6B5UnormPack16;
case PICA::ColorFmt::RGBA4: return vk::Format::eR4G4B4A4UnormPack16;
}
return vk::Format::eUndefined;
}
vk::Format depthFormatToVulkan(PICA::DepthFmt depthFormat) {
switch (depthFormat) {
// VK_FORMAT_D16_UNORM is mandated by the Vulkan specification
case PICA::DepthFmt::Depth16: return vk::Format::eD16Unorm;
case PICA::DepthFmt::Unknown1: return vk::Format::eUndefined;
// The GPU may _not_ support these formats natively
// Only one of:
// VK_FORMAT_X8_D24_UNORM_PACK32 and VK_FORMAT_D32_SFLOAT
// and one of:
// VK_FORMAT_D24_UNORM_S8_UINT and VK_FORMAT_D32_SFLOAT_S8_UINT
// will be supported
// TODO: Detect this!
// case PICA::DepthFmt::Depth24: return vk::Format::eX8D24UnormPack32;
// case PICA::DepthFmt::Depth24Stencil8: return vk::Format::eD24UnormS8Uint;
case PICA::DepthFmt::Depth24: return vk::Format::eD32Sfloat;
case PICA::DepthFmt::Depth24Stencil8: return vk::Format::eD32SfloatS8Uint;
}
return vk::Format::eUndefined;
}
} // namespace Vulkan
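
One possible direction for the "Detect this!" TODOs above, sketched under the assumption that format support is probed once at renderer initialization; the helper name is hypothetical and not part of this commit.

// Hypothetical helper: returns true when `format` supports `features` for optimal-tiling images
static bool supportsFormatFeatures(vk::PhysicalDevice physicalDevice, vk::Format format, vk::FormatFeatureFlags features) {
    const vk::FormatProperties properties = physicalDevice.getFormatProperties(format);
    return (properties.optimalTilingFeatures & features) == features;
}
// e.g. prefer eX8D24UnormPack32 for Depth24 when it reports eDepthStencilAttachment,
// otherwise fall back to eD32Sfloat as done above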

@@ -0,0 +1,31 @@
#include "renderer_vk/vk_sampler_cache.hpp"
#include <vulkan/vulkan_hash.hpp>
#include "helpers.hpp"
namespace Vulkan {
SamplerCache::SamplerCache(vk::Device device) : device(device) {}
const vk::Sampler& SamplerCache::getSampler(const vk::SamplerCreateInfo& samplerInfo) {
const std::size_t samplerHash = std::hash<vk::SamplerCreateInfo>()(samplerInfo);
// Cache hit
if (samplerMap.contains(samplerHash)) {
return samplerMap.at(samplerHash).get();
}
if (auto createResult = device.createSamplerUnique(samplerInfo); createResult.result == vk::Result::eSuccess) {
return (samplerMap[samplerHash] = std::move(createResult.value)).get();
} else {
Helpers::panic("Error creating sampler: %s\n", vk::to_string(createResult.result).c_str());
}
}
std::optional<SamplerCache> SamplerCache::create(vk::Device device) {
SamplerCache newSamplerCache(device);
return {std::move(newSamplerCache)};
}
} // namespace Vulkan
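
For reference, a minimal sketch of the cache in use; the sampler parameters are illustrative only.

// Hypothetical usage sketch: repeated calls with an identical create-info return the cached handle
auto samplerCache = Vulkan::SamplerCache::create(device);
vk::SamplerCreateInfo samplerInfo;
samplerInfo.magFilter = vk::Filter::eLinear;
samplerInfo.minFilter = vk::Filter::eLinear;
samplerInfo.addressModeU = vk::SamplerAddressMode::eClampToEdge;
samplerInfo.addressModeV = vk::SamplerAddressMode::eClampToEdge;
const vk::Sampler& sampler = samplerCache->getSampler(samplerInfo);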

@@ -1,3 +0,0 @@
#include "renderer_vk/vulkan_api.hpp"
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE;