2016-03-20 09:35:10 +01:00
|
|
|
#pragma once
|
|
|
|
|
|
|
|
#include <cassert>
#include <cstdint>
#include <cstring>

#include "Common/Vulkan/VulkanContext.h"
|
|
|
|
|
|
|
|
// VulkanMemory
|
|
|
|
//
|
|
|
|
// Vulkan memory management utils.
|
|
|
|
|
|
|
|
// VulkanPushBuffer
|
|
|
|
// Simple incrementing allocator.
|
|
|
|
// Use these to push vertex, index and uniform data. Generally you'll have two of these
|
|
|
|
// and alternate on each frame. Make sure not to reset until the fence from the last time you used it
|
|
|
|
// has completed.
|
|
|
|
//
|
|
|
|
// TODO: Make it possible to suballocate pushbuffers from a large DeviceMemory block.
|
|
|
|
// TODO: Make this auto-grow and shrink. Need to be careful about returning and using the new
|
|
|
|
// buffer handle on overflow.
|
|
|
|
class VulkanPushBuffer {
|
|
|
|
public:
|
|
|
|
VulkanPushBuffer(VulkanContext *vulkan, size_t size);
|
|
|
|
|
|
|
|
~VulkanPushBuffer() {
|
|
|
|
assert(buffer_ == VK_NULL_HANDLE);
|
|
|
|
assert(deviceMemory_ == VK_NULL_HANDLE);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Destroy(VulkanContext *vulkan) {
|
|
|
|
vulkan->Delete().QueueDeleteBuffer(buffer_);
|
|
|
|
vulkan->Delete().QueueDeleteDeviceMemory(deviceMemory_);
|
|
|
|
buffer_ = VK_NULL_HANDLE;
|
|
|
|
deviceMemory_ = VK_NULL_HANDLE;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Reset() { offset_ = 0; }
|
|
|
|
|
|
|
|
void Begin(VkDevice device) {
|
|
|
|
offset_ = 0;
|
|
|
|
VkResult res = vkMapMemory(device, deviceMemory_, 0, size_, 0, (void **)(&writePtr_));
|
|
|
|
assert(VK_SUCCESS == res);
|
|
|
|
}
|
|
|
|
|
|
|
|
void End(VkDevice device) {
|
2016-03-20 14:50:58 +01:00
|
|
|
/*
|
|
|
|
VkMappedMemoryRange range = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
|
|
|
|
range.offset = 0;
|
|
|
|
range.size = offset_;
|
|
|
|
range.memory = deviceMemory_;
|
|
|
|
vkFlushMappedMemoryRanges(device, 1, &range);
|
|
|
|
*/
|
2016-03-20 09:35:10 +01:00
|
|
|
vkUnmapMemory(device, deviceMemory_);
|
|
|
|
writePtr_ = nullptr;
|
|
|
|
}
|
|
|
|
|
2016-03-20 09:52:13 +01:00
|
|
|
// When using the returned memory, make sure to bind the returned vkbuf.
|
|
|
|
// This will later allow for handling overflow correctly.
|
|
|
|
size_t Allocate(size_t numBytes, VkBuffer *vkbuf) {
|
2016-03-20 09:35:10 +01:00
|
|
|
size_t out = offset_;
|
|
|
|
offset_ += (numBytes + 3) & ~3; // Round up to 4 bytes.
|
2016-03-20 16:33:34 +01:00
|
|
|
|
2016-03-20 09:35:10 +01:00
|
|
|
if (offset_ >= size_) {
|
|
|
|
// TODO: Allocate a second buffer, then combine them on the next frame.
|
|
|
|
#ifdef _WIN32
|
|
|
|
DebugBreak();
|
|
|
|
#endif
|
|
|
|
}
|
2016-03-20 09:52:13 +01:00
|
|
|
*vkbuf = buffer_;
|
2016-03-20 09:35:10 +01:00
|
|
|
return out;
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: Add alignment support?
|
|
|
|
// Returns the offset that should be used when binding this buffer to get this data.
|
2016-03-20 09:52:13 +01:00
|
|
|
size_t Push(const void *data, size_t size, VkBuffer *vkbuf) {
|
|
|
|
size_t off = Allocate(size, vkbuf);
|
2016-03-20 09:35:10 +01:00
|
|
|
memcpy(writePtr_ + off, data, size);
|
|
|
|
return off;
|
|
|
|
}
|
|
|
|
|
2016-03-20 09:52:13 +01:00
|
|
|
uint32_t PushAligned(const void *data, size_t size, int align, VkBuffer *vkbuf) {
|
2016-03-20 09:35:10 +01:00
|
|
|
offset_ = (offset_ + align - 1) & ~(align - 1);
|
2016-03-20 09:52:13 +01:00
|
|
|
size_t off = Allocate(size, vkbuf);
|
2016-03-20 09:35:10 +01:00
|
|
|
memcpy(writePtr_ + off, data, size);
|
|
|
|
return (uint32_t)off;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t GetOffset() const {
|
|
|
|
return offset_;
|
|
|
|
}
|
|
|
|
|
|
|
|
// "Zero-copy" variant - you can write the data directly as you compute it.
|
2016-03-20 16:06:11 +01:00
|
|
|
void *Push(size_t size, uint32_t *bindOffset, VkBuffer *vkbuf) {
|
2016-03-20 09:52:13 +01:00
|
|
|
size_t off = Allocate(size, vkbuf);
|
2016-03-20 16:06:11 +01:00
|
|
|
*bindOffset = (uint32_t)off;
|
2016-03-20 09:35:10 +01:00
|
|
|
return writePtr_ + off;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
VkDeviceMemory deviceMemory_;
|
|
|
|
VkBuffer buffer_;
|
|
|
|
size_t offset_;
|
|
|
|
size_t size_;
|
|
|
|
uint8_t *writePtr_;
|
|
|
|
};
|