2016-03-20 09:35:10 +01:00
|
|
|
#pragma once
|
|
|
|
|
2016-03-20 15:15:13 -07:00
|
|
|
#include <vector>
|
2016-03-23 00:40:41 -07:00
|
|
|
#include <unordered_map>
|
2016-03-20 09:35:10 +01:00
|
|
|
#include "Common/Vulkan/VulkanContext.h"
|
|
|
|
|
|
|
|
// VulkanMemory
|
|
|
|
//
|
|
|
|
// Vulkan memory management utils.
|
|
|
|
|
|
|
|
// VulkanPushBuffer
|
|
|
|
// Simple incrementing allocator.
|
|
|
|
// Use these to push vertex, index and uniform data. Generally you'll have two of these
|
|
|
|
// and alternate on each frame. Make sure not to reset until the fence from the last time you used it
|
|
|
|
// has completed.
|
|
|
|
//
|
|
|
|
// TODO: Make it possible to suballocate pushbuffers from a large DeviceMemory block.
|
|
|
|
class VulkanPushBuffer {
	// One link in the buffer chain: a VkBuffer plus the device memory backing
	// it. A new entry is appended when the current buffer overflows.
	struct BufInfo {
		VkBuffer buffer;
		VkDeviceMemory deviceMemory;
	};

public:
	// size is the target size of each buffer in the chain (allocation details
	// live in the .cpp.)
	VulkanPushBuffer(VulkanContext *vulkan, size_t size);
	~VulkanPushBuffer();

	// Releases all buffers and their device memory.
	void Destroy(VulkanContext *vulkan);

	// Rewinds the write position; does not touch the buffer chain.
	void Reset() { offset_ = 0; }

	// Needs context in case of defragment.
	void Begin(VulkanContext *vulkan) {
		buf_ = 0;
		offset_ = 0;
		// Note: we must defrag because some buffers may be smaller than size_.
		Defragment(vulkan);
		Map();
	}

	// Maps for writing without rewinding - continues from wherever the write
	// position currently is.
	void BeginNoReset() {
		Map();
	}

	// Unmaps; no pushes are valid again until the next Begin*().
	void End() {
		Unmap();
	}

	void Map();

	void Unmap();

	// When using the returned memory, make sure to bind the returned vkbuf.
	// This will later allow for handling overflow correctly.
	size_t Allocate(size_t numBytes, VkBuffer *vkbuf) {
		size_t out = offset_;
		offset_ += (numBytes + 3) & ~3;  // Round up to 4 bytes.

		if (offset_ >= size_) {
			// Overran the current buffer - switch to (or create) the next one
			// and allocate from there instead.
			NextBuffer(numBytes);
			out = offset_;
			offset_ += (numBytes + 3) & ~3;
		}
		*vkbuf = buffers_[buf_].buffer;
		return out;
	}

	// Returns the offset that should be used when binding this buffer to get this data.
	size_t Push(const void *data, size_t size, VkBuffer *vkbuf) {
		assert(writePtr_);  // Must be mapped (Begin/BeginNoReset) before pushing.
		size_t off = Allocate(size, vkbuf);
		memcpy(writePtr_ + off, data, size);
		return off;
	}

	// Copying push with explicit alignment. align must be a power of two -
	// the mask arithmetic below relies on it.
	uint32_t PushAligned(const void *data, size_t size, int align, VkBuffer *vkbuf) {
		assert(writePtr_);
		offset_ = (offset_ + align - 1) & ~(align - 1);
		size_t off = Allocate(size, vkbuf);
		memcpy(writePtr_ + off, data, size);
		return (uint32_t)off;
	}

	// Current write position within the active buffer.
	size_t GetOffset() const {
		return offset_;
	}

	// "Zero-copy" variant - you can write the data directly as you compute it.
	// Recommended.
	void *Push(size_t size, uint32_t *bindOffset, VkBuffer *vkbuf) {
		assert(writePtr_);
		size_t off = Allocate(size, vkbuf);
		*bindOffset = (uint32_t)off;
		return writePtr_ + off;
	}

	// Zero-copy variant with explicit alignment (align must be a power of two.)
	void *PushAligned(size_t size, uint32_t *bindOffset, VkBuffer *vkbuf, int align) {
		assert(writePtr_);
		offset_ = (offset_ + align - 1) & ~(align - 1);
		size_t off = Allocate(size, vkbuf);
		*bindOffset = (uint32_t)off;
		return writePtr_ + off;
	}

	// Combined size of all buffers in the chain (see .cpp).
	size_t GetTotalSize() const;

private:
	bool AddBuffer();
	void NextBuffer(size_t minSize);
	void Defragment(VulkanContext *vulkan);

	VkDevice device_;
	std::vector<BufInfo> buffers_;
	size_t buf_;       // Index into buffers_ of the buffer currently being written.
	size_t offset_;    // Write position within the current buffer.
	size_t size_;      // Target size of each buffer (some may be smaller, see Begin.)
	uint32_t memoryTypeIndex_;
	uint8_t *writePtr_;  // Non-null while mapped; asserted by the Push variants.
};
|
2016-03-23 00:40:41 -07:00
|
|
|
|
2016-03-25 23:48:39 -07:00
|
|
|
// VulkanDeviceAllocator
|
|
|
|
//
|
|
|
|
// Implements a slab based allocator that manages suballocations inside the slabs.
|
|
|
|
// Bitmaps are used to handle allocation state, with a 1KB grain.
|
2016-03-23 00:40:41 -07:00
|
|
|
class VulkanDeviceAllocator {
public:
	// Slab sizes start at minSlabSize and double until maxSlabSize.
	// Total slab count is unlimited, as long as there's free memory.
	VulkanDeviceAllocator(VulkanContext *vulkan, size_t minSlabSize, size_t maxSlabSize);
	~VulkanDeviceAllocator();

	// Requires all memory be free beforehand (including all pending deletes.)
	void Destroy();

	// Frame start: processes frees queued via DispatchFree (see Decimate in .cpp.)
	void Begin() {
		Decimate();
	}

	// Currently a no-op; kept for API symmetry with Begin().
	void End() {
	}

	// May return ALLOCATE_FAILED if the allocation fails.
	size_t Allocate(const VkMemoryRequirements &reqs, VkDeviceMemory *deviceMemory);

	// Crashes on a double or misfree.
	void Free(VkDeviceMemory deviceMemory, size_t offset);

	static const size_t ALLOCATE_FAILED = -1;

	// Stats accessors (mainly for debug overlays/diagnostics.)
	int GetBlockCount() const { return (int)slabs_.size(); }
	int GetMinSlabSize() const { return (int)minSlabSize_; }
	int GetMaxSlabSize() const { return (int)maxSlabSize_; }

	int ComputeUsagePercent() const;

private:
	// Allocation state is tracked at 1KB granularity (one usage entry per grain.)
	static const size_t SLAB_GRAIN_SIZE = 1024;
	static const uint8_t SLAB_GRAIN_SHIFT = 10;
	static const uint32_t UNDEFINED_MEMORY_TYPE = -1;

	// One VkDeviceMemory block, suballocated in SLAB_GRAIN_SIZE grains.
	struct Slab {
		VkDeviceMemory deviceMemory;
		std::vector<uint8_t> usage;  // One entry per grain; value semantics live in the .cpp.
		// NOTE(review): presumably maps allocation start (in grains) to its
		// size - confirm against the .cpp before relying on this.
		std::unordered_map<size_t, size_t> allocSizes;
		size_t nextFree;

		size_t Size() {
			return usage.size() * SLAB_GRAIN_SIZE;
		}
	};

	// Record for a deferred free, heap-allocated and handed to DispatchFree
	// as an opaque userdata pointer.
	struct FreeInfo {
		explicit FreeInfo(VulkanDeviceAllocator *a, VkDeviceMemory d, size_t o)
			: allocator(a), deviceMemory(d), offset(o) {
		}

		VulkanDeviceAllocator *allocator;
		VkDeviceMemory deviceMemory;
		size_t offset;
	};

	// C-style trampoline: unwraps the FreeInfo and forwards to the owning
	// allocator's ExecuteFree.
	static void DispatchFree(void *userdata) {
		auto freeInfo = static_cast<FreeInfo *>(userdata);
		freeInfo->allocator->ExecuteFree(freeInfo);  // this deletes freeInfo
	}

	bool AllocateSlab(VkDeviceSize minBytes);
	bool AllocateFromSlab(Slab &slab, size_t &start, size_t blocks);
	void Decimate();
	void ExecuteFree(FreeInfo *userdata);

	VulkanContext *const vulkan_;
	std::vector<Slab> slabs_;
	// NOTE(review): looks like a search hint (last slab touched) - confirm in .cpp.
	size_t lastSlab_ = 0;
	size_t minSlabSize_;
	const size_t maxSlabSize_;
	uint32_t memoryTypeIndex_ = UNDEFINED_MEMORY_TYPE;
	bool destroyed_ = false;  // Set by Destroy(); guards against use-after-destroy (see .cpp.)
};
|