#pragma once

#include <cstdint>
#include <cstring>
#include <functional>
#include <vector>

#include "Common/Log.h"
#include "Common/GPU/Vulkan/VulkanContext.h"

// Forward declaration
VK_DEFINE_HANDLE(VmaAllocation);

// VulkanMemory
//
// Vulkan memory management utils.

enum class PushBufferType {
	CPU_TO_GPU,
	GPU_ONLY,
};

// Just an abstract thing to get debug information.
class VulkanMemoryManager {
public:
	virtual ~VulkanMemoryManager() {}

	virtual void GetDebugString(char *buffer, size_t bufSize) const = 0;
	virtual const char *Name() const = 0;  // for sorting
};

// VulkanPushBuffer
// Simple incrementing allocator.
// Use these to push vertex, index and uniform data. Generally you'll have two of these
// and alternate on each frame. Make sure not to reset until the fence from the last time you used it
// has completed.
//
// TODO: Make it possible to suballocate pushbuffers from a large DeviceMemory block.
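//
// Typical per-frame pattern (illustrative sketch; "pushBuffers", "frameIndex", "cmd", the vertex data
// and the fence wait are assumptions, not part of this API):
//   VulkanPushBuffer *push = pushBuffers[frameIndex & 1];  // two buffers, alternating
//   // ... wait for the fence guarding this buffer's previous use ...
//   push->Begin(vulkan);
//   VkBuffer vkbuf;
//   VkDeviceSize vbOffset = push->Push(vertexData, vertexBytes, &vkbuf);
//   vkCmdBindVertexBuffers(cmd, 0, 1, &vkbuf, &vbOffset);
//   push->End();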
class VulkanPushBuffer : public VulkanMemoryManager {
	struct BufInfo {
		VkBuffer buffer;
		VmaAllocation allocation;
	};

public:
	// NOTE: If you create a push buffer with PushBufferType::GPU_ONLY,
	// then you can't use any of the push functions as pointers will not be reachable from the CPU.
	// You must in this case use Allocate() only, and pass the returned offset and the VkBuffer to Vulkan APIs.
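	//
	// For example (illustrative sketch; assumes the buffer was created with transfer-dst usage and that
	// "gpuOnlyPush", "cmd", "srcBuf", "srcOffset" and "numBytes" exist in the calling code):
	//   VkBuffer dstBuf;
	//   VkDeviceSize dstOffset = gpuOnlyPush->Allocate(numBytes, &dstBuf);
	//   VkBufferCopy region{ srcOffset, dstOffset, numBytes };
	//   vkCmdCopyBuffer(cmd, srcBuf, dstBuf, 1, &region);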
	VulkanPushBuffer(VulkanContext *vulkan, const char *name, size_t size, VkBufferUsageFlags usage, PushBufferType type);
	~VulkanPushBuffer();

	void Destroy(VulkanContext *vulkan);

	void Reset() { offset_ = 0; }

	void GetDebugString(char *buffer, size_t bufSize) const override;
	const char *Name() const override {
		return name_;
	}

	// Needs context in case of defragment.
	void Begin(VulkanContext *vulkan) {
		buf_ = 0;
		offset_ = 0;
		// Note: we must defrag because some buffers may be smaller than size_.
		Defragment(vulkan);
		if (type_ == PushBufferType::CPU_TO_GPU)
			Map();
	}

	void BeginNoReset() {
		if (type_ == PushBufferType::CPU_TO_GPU)
			Map();
	}

	void End() {
		if (type_ == PushBufferType::CPU_TO_GPU)
			Unmap();
	}

	void Map();
	void Unmap();

	// When using the returned memory, make sure to bind the returned vkbuf.
	// This will later allow for handling overflow correctly.
	size_t Allocate(size_t numBytes, VkBuffer *vkbuf) {
		size_t out = offset_;
		offset_ += (numBytes + 3) & ~3;  // Round up to 4 bytes.
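		// If this overflows the current buffer, switch to the next one (NextBuffer) and
		// redo the allocation there.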
		if (offset_ >= size_) {
			NextBuffer(numBytes);
			out = offset_;
			offset_ += (numBytes + 3) & ~3;
		}
		*vkbuf = buffers_[buf_].buffer;
		return out;
	}

	// Returns the offset that should be used when binding this buffer to get this data.
	size_t Push(const void *data, size_t size, VkBuffer *vkbuf) {
		_dbg_assert_(writePtr_);
		size_t off = Allocate(size, vkbuf);
		memcpy(writePtr_ + off, data, size);
		return off;
	}

	uint32_t PushAligned(const void *data, size_t size, int align, VkBuffer *vkbuf) {
		_dbg_assert_(writePtr_);
		offset_ = (offset_ + align - 1) & ~(align - 1);
		size_t off = Allocate(size, vkbuf);
		memcpy(writePtr_ + off, data, size);
		return (uint32_t)off;
	}

	size_t GetOffset() const {
		return offset_;
	}

	// "Zero-copy" variant - you can write the data directly as you compute it.
	// Recommended.
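	// Illustrative sketch ("Vertex", "numVerts", "ComputeVertex" and "cmd" are assumptions):
	//   VkBuffer vkbuf;
	//   uint32_t bindOffset;
	//   Vertex *verts = (Vertex *)pushBuffer->Push(numVerts * sizeof(Vertex), &bindOffset, &vkbuf);
	//   for (int i = 0; i < numVerts; i++)
	//     verts[i] = ComputeVertex(i);  // written straight into the mapped push buffer
	//   VkDeviceSize vbOffset = bindOffset;
	//   vkCmdBindVertexBuffers(cmd, 0, 1, &vkbuf, &vbOffset);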
	void *Push(size_t size, uint32_t *bindOffset, VkBuffer *vkbuf) {
		_dbg_assert_(writePtr_);
		size_t off = Allocate(size, vkbuf);
		*bindOffset = (uint32_t)off;
		return writePtr_ + off;
	}
	void *PushAligned(size_t size, uint32_t *bindOffset, VkBuffer *vkbuf, int align) {
		_dbg_assert_(writePtr_);
		offset_ = (offset_ + align - 1) & ~(align - 1);
		size_t off = Allocate(size, vkbuf);
		*bindOffset = (uint32_t)off;
		return writePtr_ + off;
	}
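	// Pushes a uniform block at the device's minimum UBO offset alignment and fills in a
	// VkDescriptorBufferInfo describing it. Usage sketch (the "UB" struct and the descriptor write are assumptions):
	//   UB ub{ ... };
	//   VkDescriptorBufferInfo bufInfo;
	//   pushBuffer->PushUBOData(ub, &bufInfo);
	//   // point a VkWriteDescriptorSet's pBufferInfo at bufInfo.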
	template<class T>
	void PushUBOData(const T &data, VkDescriptorBufferInfo *info) {
		uint32_t bindOffset;
		void *ptr = PushAligned(sizeof(T), &bindOffset, &info->buffer, vulkan_->GetPhysicalDeviceProperties().properties.limits.minUniformBufferOffsetAlignment);
		memcpy(ptr, &data, sizeof(T));
		info->offset = bindOffset;
		info->range = sizeof(T);
	}

	size_t GetTotalSize() const;

private:
	bool AddBuffer();
	void NextBuffer(size_t minSize);
	void Defragment(VulkanContext *vulkan);

	VulkanContext *vulkan_;
	PushBufferType type_;

	std::vector<BufInfo> buffers_;
	size_t buf_ = 0;
	size_t offset_ = 0;
	size_t size_ = 0;
	uint8_t *writePtr_ = nullptr;
	VkBufferUsageFlags usage_;
	const char *name_;
};

// Simple memory pushbuffer pool that can share blocks between the "frames", to reduce the impact of push memory spikes -
// a later frame can gobble up redundant buffers from an earlier frame even if they don't share frame index.
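//
// Typical per-frame flow (illustrative sketch; "pool", "cmd", "uboData" and "uboAlignment" are assumptions):
//   pool.BeginFrame();
//   VkBuffer vkbuf;
//   VkDeviceSize uboOffset = pool.Push(&uboData, sizeof(uboData), (int)uboAlignment, &vkbuf);
//   // ... bind vkbuf at uboOffset via a descriptor or dynamic offset ...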
class VulkanPushPool : public VulkanMemoryManager {
public:
	VulkanPushPool(VulkanContext *vulkan, const char *name, size_t originalBlockSize, VkBufferUsageFlags usage);
	~VulkanPushPool();

	void Destroy();
	void BeginFrame();

	const char *Name() const override {
		return name_;
	}
	void GetDebugString(char *buffer, size_t bufSize) const override;

	// When using the returned memory, make sure to bind the returned vkbuf.
	uint8_t *Allocate(VkDeviceSize numBytes, VkDeviceSize alignment, VkBuffer *vkbuf, uint32_t *bindOffset) {
		_dbg_assert_(curBlockIndex_ >= 0);

		Block &block = blocks_[curBlockIndex_];

		VkDeviceSize offset = (block.used + (alignment - 1)) & ~(alignment - 1);
		if (offset + numBytes <= block.size) {
			block.used = offset + numBytes;
			*vkbuf = block.buffer;
			*bindOffset = (uint32_t)offset;
			return block.writePtr + offset;
		}
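		// No room in the current block - move on to (or create) one that fits this allocation.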
		NextBlock(numBytes);

		*vkbuf = blocks_[curBlockIndex_].buffer;
		*bindOffset = 0;  // Newly allocated buffer will start at 0.
		return blocks_[curBlockIndex_].writePtr;
	}

	VkDeviceSize Push(const void *data, VkDeviceSize numBytes, int alignment, VkBuffer *vkbuf) {
		uint32_t bindOffset;
		uint8_t *ptr = Allocate(numBytes, alignment, vkbuf, &bindOffset);
		memcpy(ptr, data, numBytes);
		return bindOffset;
	}

private:
	void NextBlock(VkDeviceSize allocationSize);

	struct Block {
		~Block();
		VkBuffer buffer;
		VmaAllocation allocation;

		VkDeviceSize size;
		VkDeviceSize used;

		int frameIndex;
		bool original;  // these blocks aren't garbage collected.
		double lastUsed;

		uint8_t *writePtr;

		void Destroy(VulkanContext *vulkan);
	};

	Block CreateBlock(size_t sz);

	VulkanContext *vulkan_;
	VkDeviceSize originalBlockSize_;
	std::vector<Block> blocks_;
	VkBufferUsageFlags usage_;
	int curBlockIndex_ = -1;
	const char *name_;
};

// Only appropriate for use in a per-frame pool.
class VulkanDescSetPool {
public:
	VulkanDescSetPool(const char *tag, bool grow) : tag_(tag), grow_(grow) {}
	~VulkanDescSetPool();

	// Must call this before use: defines how to clear cache of ANY returned values from Allocate().
	void Setup(const std::function<void()> &clear) {
		clear_ = clear;
	}
	void Create(VulkanContext *vulkan, const VkDescriptorPoolCreateInfo &info, const std::vector<VkDescriptorPoolSize> &sizes);

	// Allocate a new set, which may resize and empty the current sets.
	// Use only for the current frame, unless in a cache cleared by clear_.
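	// Usage sketch (the layout handle and tag string are assumptions):
	//   VkDescriptorSet ds = descPool.Allocate(1, &descSetLayout, "frame-descs");
	//   // fill it with vkUpdateDescriptorSets() and use it for this frame only.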
	VkDescriptorSet Allocate(int n, const VkDescriptorSetLayout *layouts, const char *tag);
	void Reset();
	void Destroy();

private:
	VkResult Recreate(bool grow);

	const char *tag_;
	VulkanContext *vulkan_ = nullptr;
	VkDescriptorPool descPool_ = VK_NULL_HANDLE;
	VkDescriptorPoolCreateInfo info_{};
	std::vector<VkDescriptorPoolSize> sizes_;
	std::function<void()> clear_;
	uint32_t usage_ = 0;
	bool grow_;
};

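// Lists the currently active memory managers so callers (e.g. debug overlays) can query
// GetDebugString() on each.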
std::vector<VulkanMemoryManager *> GetActiveVulkanMemoryManagers();