// Copyright (c) 2012- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include <cassert>

#include "base/logging.h"
#include "base/timeutil.h"
#include "math/dataconv.h"
#include "profiler/profiler.h"

#include "thin3d/VulkanRenderManager.h"

#include "Common/MemoryUtil.h"
#include "Core/MemMap.h"
#include "Core/Host.h"
#include "Core/System.h"
#include "Core/Reporting.h"
#include "Core/Config.h"
#include "Core/CoreTiming.h"

#include "GPU/Math3D.h"
#include "GPU/GPUState.h"
#include "GPU/ge_constants.h"

#include "Common/Vulkan/VulkanContext.h"
#include "Common/Vulkan/VulkanMemory.h"

#include "GPU/Common/TextureDecoder.h"
#include "GPU/Common/SplineCommon.h"
#include "GPU/Common/TransformCommon.h"
#include "GPU/Common/VertexDecoderCommon.h"
#include "GPU/Common/SoftwareTransformCommon.h"
#include "GPU/Common/DrawEngineCommon.h"
#include "GPU/Vulkan/DrawEngineVulkan.h"
#include "GPU/Vulkan/TextureCacheVulkan.h"
#include "GPU/Vulkan/ShaderManagerVulkan.h"
#include "GPU/Vulkan/PipelineManagerVulkan.h"
#include "GPU/Vulkan/FramebufferVulkan.h"
#include "GPU/Vulkan/GPU_Vulkan.h"
enum {
    VERTEX_CACHE_SIZE = 8192 * 1024
};

#define VERTEXCACHE_DECIMATION_INTERVAL 17
#define DESCRIPTORSET_DECIMATION_INTERVAL 1  // Temporarily cut to 1. Handle reuse breaks this when textures get deleted.

enum { VAI_KILL_AGE = 120, VAI_UNRELIABLE_KILL_AGE = 240, VAI_UNRELIABLE_KILL_MAX = 4 };

enum {
    DRAW_BINDING_TEXTURE = 0,
    DRAW_BINDING_2ND_TEXTURE = 1,
    DRAW_BINDING_DYNUBO_BASE = 2,
    DRAW_BINDING_DYNUBO_LIGHT = 3,
    DRAW_BINDING_DYNUBO_BONE = 4,
    DRAW_BINDING_TESS_STORAGE_BUF = 5,
};
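
// Note: these binding indices must stay in sync with the VkDescriptorSetLayoutBinding setup in
// InitDeviceObjects() below, and with the binding numbers declared in the generated shaders.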

enum {
    TRANSFORMED_VERTEX_BUFFER_SIZE = VERTEX_BUFFER_MAX * sizeof(TransformedVertex)
};

DrawEngineVulkan::DrawEngineVulkan(VulkanContext *vulkan, Draw::DrawContext *draw)
    : vulkan_(vulkan),
        draw_(draw),
        stats_{},
        vai_(1024) {
    decOptions_.expandAllWeightsToFloat = false;
    decOptions_.expand8BitNormalsToFloat = false;

    // Allocate nicely aligned memory. Maybe graphics drivers will appreciate it.
    // All this is a LOT of memory, need to see if we can cut down somehow.
    decoded = (u8 *)AllocateMemoryPages(DECODED_VERTEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
    decIndex = (u16 *)AllocateMemoryPages(DECODED_INDEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
    splineBuffer = (u8 *)AllocateMemoryPages(SPLINE_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);

    indexGen.Setup(decIndex);

    InitDeviceObjects();
}
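
// Creates all the Vulkan objects this engine owns that don't depend on per-draw state: the
// descriptor set layout, the per-frame descriptor pools and push buffers, the pipeline layout,
// the samplers, the vertex cache push buffer and the tessellation data transfer object.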
void DrawEngineVulkan::InitDeviceObjects() {
    // All resources we need for PSP drawing. Usually only bindings 0 and 2-4 are populated.
    VkDescriptorSetLayoutBinding bindings[6];
    bindings[0].descriptorCount = 1;
    bindings[0].pImmutableSamplers = nullptr;
    bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    bindings[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    bindings[0].binding = DRAW_BINDING_TEXTURE;
    bindings[1].descriptorCount = 1;
    bindings[1].pImmutableSamplers = nullptr;
    bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    bindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    bindings[1].binding = DRAW_BINDING_2ND_TEXTURE;
    bindings[2].descriptorCount = 1;
    bindings[2].pImmutableSamplers = nullptr;
    bindings[2].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    bindings[2].stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    bindings[2].binding = DRAW_BINDING_DYNUBO_BASE;
    bindings[3].descriptorCount = 1;
    bindings[3].pImmutableSamplers = nullptr;
    bindings[3].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    bindings[3].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    bindings[3].binding = DRAW_BINDING_DYNUBO_LIGHT;
    bindings[4].descriptorCount = 1;
    bindings[4].pImmutableSamplers = nullptr;
    bindings[4].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    bindings[4].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    bindings[4].binding = DRAW_BINDING_DYNUBO_BONE;
    // Used only for hardware tessellation.
    bindings[5].descriptorCount = 1;
    bindings[5].pImmutableSamplers = nullptr;
    bindings[5].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    bindings[5].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    bindings[5].binding = DRAW_BINDING_TESS_STORAGE_BUF;

    VkDevice device = vulkan_->GetDevice();

    VkDescriptorSetLayoutCreateInfo dsl = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO };
    dsl.bindingCount = ARRAY_SIZE(bindings);
    dsl.pBindings = bindings;
    VkResult res = vkCreateDescriptorSetLayout(device, &dsl, nullptr, &descriptorSetLayout_);
    assert(VK_SUCCESS == res);

    VkDescriptorPoolSize dpTypes[3];
    dpTypes[0].descriptorCount = 8192;
    dpTypes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    dpTypes[1].descriptorCount = 8192 + 4096;  // Due to the tess stuff, we need a LOT of these. Most will be empty...
    dpTypes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    dpTypes[2].descriptorCount = 2048;
    dpTypes[2].type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

    VkDescriptorPoolCreateInfo dp = { VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO };
    dp.pNext = nullptr;
    dp.flags = 0;  // Don't want to mess around with individually freeing these.
                   // We zap the whole pool every few frames.
    dp.maxSets = 2048;
    dp.pPoolSizes = dpTypes;
    dp.poolSizeCount = ARRAY_SIZE(dpTypes);

    // We are going to use one-shot descriptors in the initial implementation. Might look into caching them
    // if creating and updating them turns out to be expensive.
    for (int i = 0; i < VulkanContext::MAX_INFLIGHT_FRAMES; i++) {
        // If we run out of memory, try with less descriptors.
        for (int tries = 0; tries < 3; ++tries) {
            VkResult res = vkCreateDescriptorPool(vulkan_->GetDevice(), &dp, nullptr, &frame_[i].descPool);
            if (res == VK_SUCCESS) {
                break;
            }
            // Let's try to reduce the counts.
            assert(res == VK_ERROR_OUT_OF_HOST_MEMORY || res == VK_ERROR_OUT_OF_DEVICE_MEMORY);
            dpTypes[0].descriptorCount /= 2;
            dpTypes[1].descriptorCount /= 2;
        }
        frame_[i].pushUBO = new VulkanPushBuffer(vulkan_, 8 * 1024 * 1024);
        frame_[i].pushVertex = new VulkanPushBuffer(vulkan_, 2 * 1024 * 1024);
        frame_[i].pushIndex = new VulkanPushBuffer(vulkan_, 1 * 1024 * 1024);
    }
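
    // Note: each in-flight frame gets its own descriptor pool and push buffers above, so the CPU can
    // build the next frame's descriptors and data while the GPU is still consuming the previous one.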

    VkPipelineLayoutCreateInfo pl = { VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO };
    pl.pPushConstantRanges = nullptr;
    pl.pushConstantRangeCount = 0;
    pl.setLayoutCount = 1;
    pl.pSetLayouts = &descriptorSetLayout_;
    pl.flags = 0;
    res = vkCreatePipelineLayout(device, &pl, nullptr, &pipelineLayout_);
    assert(VK_SUCCESS == res);

    VkSamplerCreateInfo samp = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
    samp.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    samp.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    samp.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    samp.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
    samp.flags = 0;
    samp.magFilter = VK_FILTER_NEAREST;
    samp.minFilter = VK_FILTER_NEAREST;
    res = vkCreateSampler(device, &samp, nullptr, &samplerSecondary_);
    res = vkCreateSampler(device, &samp, nullptr, &nullSampler_);
    assert(VK_SUCCESS == res);

    vertexCache_ = new VulkanPushBuffer(vulkan_, VERTEX_CACHE_SIZE);

    tessDataTransfer = new TessellationDataTransferVulkan(vulkan_, draw_);
}

DrawEngineVulkan::~DrawEngineVulkan() {
    FreeMemoryPages(decoded, DECODED_VERTEX_BUFFER_SIZE);
    FreeMemoryPages(decIndex, DECODED_INDEX_BUFFER_SIZE);
    FreeMemoryPages(splineBuffer, SPLINE_BUFFER_SIZE);

    DestroyDeviceObjects();
}

void DrawEngineVulkan::FrameData::Destroy(VulkanContext *vulkan) {
    if (descPool != VK_NULL_HANDLE) {
        vulkan->Delete().QueueDeleteDescriptorPool(descPool);
    }
    if (pushUBO) {
        pushUBO->Destroy(vulkan);
        delete pushUBO;
        pushUBO = nullptr;
    }
    if (pushVertex) {
        pushVertex->Destroy(vulkan);
        delete pushVertex;
        pushVertex = nullptr;
    }
    if (pushIndex) {
        pushIndex->Destroy(vulkan);
        delete pushIndex;
        pushIndex = nullptr;
    }
}
void DrawEngineVulkan::DestroyDeviceObjects() {
    delete tessDataTransfer;
    tessDataTransfer = nullptr;

    for (int i = 0; i < VulkanContext::MAX_INFLIGHT_FRAMES; i++) {
        frame_[i].Destroy(vulkan_);
    }
    if (samplerSecondary_ != VK_NULL_HANDLE)
        vulkan_->Delete().QueueDeleteSampler(samplerSecondary_);
    if (nullSampler_ != VK_NULL_HANDLE)
        vulkan_->Delete().QueueDeleteSampler(nullSampler_);
    if (pipelineLayout_ != VK_NULL_HANDLE)
        vulkan_->Delete().QueueDeletePipelineLayout(pipelineLayout_);
    if (descriptorSetLayout_ != VK_NULL_HANDLE)
        vulkan_->Delete().QueueDeleteDescriptorSetLayout(descriptorSetLayout_);
    if (nullTexture_) {
        nullTexture_->Destroy();
        delete nullTexture_;
        nullTexture_ = nullptr;
    }
    if (vertexCache_) {
        vertexCache_->Destroy(vulkan_);
        delete vertexCache_;
        vertexCache_ = nullptr;
    }
    // Need to clear this to get rid of all remaining references to the dead buffers.
    vai_.Iterate([](uint32_t hash, VertexArrayInfoVulkan *vai) {
        delete vai;
    });
    vai_.Clear();
}

void DrawEngineVulkan::DeviceLost() {
    DestroyDeviceObjects();
    DirtyAllUBOs();
}

void DrawEngineVulkan::DeviceRestore(VulkanContext *vulkan, Draw::DrawContext *draw) {
    vulkan_ = vulkan;
    draw_ = draw;

    InitDeviceObjects();
}

void DrawEngineVulkan::BeginFrame() {
    lastPipeline_ = nullptr;

    int curFrame = vulkan_->GetCurFrame();
    FrameData *frame = &frame_[curFrame];

    // First reset all buffers, then begin. This is so that Reset can free memory and Begin can allocate it,
    // if growing the buffer is needed. Doing it this way will reduce fragmentation if more than one buffer
    // needs to grow in the same frame. The state where many buffers are reset can also be used to
    // defragment memory.
    frame->pushUBO->Reset();
    frame->pushVertex->Reset();
    frame->pushIndex->Reset();

    frame->pushUBO->Begin(vulkan_);
    frame->pushVertex->Begin(vulkan_);
    frame->pushIndex->Begin(vulkan_);

    // TODO: How can we make this nicer...
    ((TessellationDataTransferVulkan *)tessDataTransfer)->SetPushBuffer(frame->pushUBO);

    // TODO: Find a better place to do this.
    if (!nullTexture_) {
        ILOG("INIT: Creating null texture");
        VkCommandBuffer cmdInit = (VkCommandBuffer)draw_->GetNativeObject(Draw::NativeObject::INIT_COMMANDBUFFER);
        nullTexture_ = new VulkanTexture(vulkan_);
        int w = 8;
        int h = 8;
        nullTexture_->CreateDirect(cmdInit, w, h, 1, VK_FORMAT_A8B8G8R8_UNORM_PACK32, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
        uint32_t bindOffset;
        VkBuffer bindBuf;
        uint32_t *data = (uint32_t *)frame->pushUBO->Push(w * h * 4, &bindOffset, &bindBuf);
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                // data[y*w + x] = ((x ^ y) & 1) ? 0xFF808080 : 0xFF000000;  // gray/black checkerboard
                data[y * w + x] = 0;  // black
            }
        }
        nullTexture_->UploadMip(cmdInit, 0, w, h, bindBuf, bindOffset, w);
        nullTexture_->EndCreate(cmdInit);
    }

    DirtyAllUBOs();

    // Wipe the vertex cache if it's grown too large.
    if (vertexCache_->GetTotalSize() > VERTEX_CACHE_SIZE) {
        vertexCache_->Destroy(vulkan_);
        delete vertexCache_;  // orphans the buffers, they'll get deleted once no longer used by an in-flight frame.
        vertexCache_ = new VulkanPushBuffer(vulkan_, VERTEX_CACHE_SIZE);
        vai_.Clear();
    }

    vertexCache_->BeginNoReset();

    // TODO: Need a better way to keep the number of descriptors under control.
    if (--descDecimationCounter_ <= 0 || frame->descSets.size() > 1024) {
        vkResetDescriptorPool(vulkan_->GetDevice(), frame->descPool, 0);
        frame->descSets.Clear();
        descDecimationCounter_ = DESCRIPTORSET_DECIMATION_INTERVAL;
    }

    if (--decimationCounter_ <= 0) {
        decimationCounter_ = VERTEXCACHE_DECIMATION_INTERVAL;
        const int threshold = gpuStats.numFlips - VAI_KILL_AGE;
        const int unreliableThreshold = gpuStats.numFlips - VAI_UNRELIABLE_KILL_AGE;
        int unreliableLeft = VAI_UNRELIABLE_KILL_MAX;
        vai_.Iterate([&](uint32_t hash, VertexArrayInfoVulkan *vai) {
            bool kill;
            if (vai->status == VertexArrayInfoVulkan::VAI_UNRELIABLE) {
                // We limit killing unreliable so we don't rehash too often.
                kill = vai->lastFrame < unreliableThreshold && --unreliableLeft >= 0;
            } else {
                kill = vai->lastFrame < threshold;
            }
            if (kill) {
                // This is actually quite safe.
                vai_.Remove(hash);
                delete vai;
            }
        });
    }
}
void DrawEngineVulkan::EndFrame() {
    FrameData *frame = &frame_[vulkan_->GetCurFrame()];
    stats_.pushUBOSpaceUsed = (int)frame->pushUBO->GetOffset();
    stats_.pushVertexSpaceUsed = (int)frame->pushVertex->GetOffset();
    stats_.pushIndexSpaceUsed = (int)frame->pushIndex->GetOffset();
    frame->pushUBO->End();
    frame->pushVertex->End();
    frame->pushIndex->End();
    vertexCache_->End();
}
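
// Note: EndFrame just records push-buffer usage stats and closes out the per-frame push buffers;
// the draws queued during the frame are submitted by VulkanRenderManager, not here.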

void DrawEngineVulkan::SubmitPrim(void *verts, void *inds, GEPrimitiveType prim, int vertexCount, u32 vertType, int *bytesRead) {
    if (!indexGen.PrimCompatible(prevPrim_, prim) || numDrawCalls >= MAX_DEFERRED_DRAW_CALLS || vertexCountInDrawCalls_ + vertexCount > VERTEX_BUFFER_MAX) {
        Flush();
    }

    // TODO: Is this the right thing to do?
    if (prim == GE_PRIM_KEEP_PREVIOUS) {
        prim = prevPrim_ != GE_PRIM_INVALID ? prevPrim_ : GE_PRIM_POINTS;
    } else {
        prevPrim_ = prim;
    }

    SetupVertexDecoder(vertType);

    *bytesRead = vertexCount * dec_->VertexSize();

    if ((vertexCount < 2 && prim > 0) || (vertexCount < 3 && prim > 2 && prim != GE_PRIM_RECTANGLES))
        return;

    DeferredDrawCall &dc = drawCalls[numDrawCalls];
    dc.verts = verts;
    dc.inds = inds;
    dc.vertType = vertType;
    dc.indexType = (vertType & GE_VTYPE_IDX_MASK) >> GE_VTYPE_IDX_SHIFT;
    dc.prim = prim;
    dc.vertexCount = vertexCount;

    if (g_Config.bVertexCache) {
        u32 dhash = dcid_;
        dhash ^= (u32)(uintptr_t)verts;
        dhash = __rotl(dhash, 13);
        dhash ^= (u32)(uintptr_t)inds;
        dhash = __rotl(dhash, 13);
        dhash ^= (u32)vertType;
        dhash = __rotl(dhash, 13);
        dhash ^= (u32)vertexCount;
        dhash = __rotl(dhash, 13);
        dhash ^= (u32)prim;
        dcid_ = dhash;
    }
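
    // Note: dcid_ accumulates a rolling hash over every deferred draw call's pointers and parameters;
    // DoFlush() uses it (xored with the UV gen mode) as the lookup key into the vertex cache (vai_).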

    if (inds) {
        GetIndexBounds(inds, vertexCount, vertType, &dc.indexLowerBound, &dc.indexUpperBound);
    } else {
        dc.indexLowerBound = 0;
        dc.indexUpperBound = vertexCount - 1;
    }

    uvScale[numDrawCalls] = gstate_c.uv;

    numDrawCalls++;
    vertexCountInDrawCalls_ += vertexCount;

    if (g_Config.bSoftwareSkinning && (vertType & GE_VTYPE_WEIGHT_MASK)) {
        DecodeVertsStep(decoded, decodeCounter_, decodedVerts_);
        decodeCounter_++;
    }

    if (prim == GE_PRIM_RECTANGLES && (gstate.getTextureAddress(0) & 0x3FFFFFFF) == (gstate.getFrameBufAddress() & 0x3FFFFFFF)) {
        // Rendertarget == texture?
        if (!g_Config.bDisableSlowFramebufEffects) {
            gstate_c.Dirty(DIRTY_TEXTURE_PARAMS);
            Flush();
        }
    }
}

void DrawEngineVulkan::DecodeVertsToPushBuffer(VulkanPushBuffer *push, uint32_t *bindOffset, VkBuffer *vkbuf) {
    u8 *dest = decoded;

    // Figure out how much pushbuffer space we need to allocate.
    if (push) {
        int vertsToDecode = ComputeNumVertsToDecode();
        dest = (u8 *)push->Push(vertsToDecode * dec_->GetDecVtxFmt().stride, bindOffset, vkbuf);
    }
    DecodeVerts(dest);
}

void DrawEngineVulkan::SetLineWidth(float lineWidth) {
    pipelineManager_->SetLineWidth(lineWidth);
}

VkDescriptorSet DrawEngineVulkan::GetOrCreateDescriptorSet(VkImageView imageView, VkSampler sampler, VkBuffer base, VkBuffer light, VkBuffer bone, bool tess) {
    DescriptorSetKey key;
    key.imageView_ = imageView;
    key.sampler_ = sampler;
    key.secondaryImageView_ = boundSecondary_;
    key.base_ = base;
    key.light_ = light;
    key.bone_ = bone;

    _dbg_assert_(G3D, base != VK_NULL_HANDLE);
    _dbg_assert_(G3D, light != VK_NULL_HANDLE);
    _dbg_assert_(G3D, bone != VK_NULL_HANDLE);

    FrameData *frame = &frame_[vulkan_->GetCurFrame()];

    // See if we already have this descriptor set cached.
    if (!tess) {  // Don't cache descriptors for HW tessellation.
        VkDescriptorSet d = frame->descSets.Get(key);
        if (d != VK_NULL_HANDLE)
            return d;
    }

    // Didn't find one in the frame descriptor set cache, let's make a new one.
    // We wipe the cache on every frame.
    VkDescriptorSet desc;
    VkDescriptorSetAllocateInfo descAlloc = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO };
    descAlloc.pNext = nullptr;
    descAlloc.pSetLayouts = &descriptorSetLayout_;
    descAlloc.descriptorPool = frame->descPool;
    descAlloc.descriptorSetCount = 1;
    VkResult result = vkAllocateDescriptorSets(vulkan_->GetDevice(), &descAlloc, &desc);

    // Even in release mode, this is bad.
    _assert_msg_(G3D, result == VK_SUCCESS, "Ran out of descriptor space in pool. sz=%d res=%d", (int)frame->descSets.size(), (int)result);

    // We just don't write to the slots we don't care about.
    // We need 8 now that we support secondary texture bindings.
    VkWriteDescriptorSet writes[8]{};
    // Main texture
    int n = 0;
    VkDescriptorImageInfo tex[2]{};
    if (imageView) {
        // TODO: Also support LAYOUT_GENERAL to be able to texture from framebuffers without transitioning them?
        tex[0].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        tex[0].imageView = imageView;
        tex[0].sampler = sampler;
        writes[n].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writes[n].pNext = nullptr;
        writes[n].dstBinding = DRAW_BINDING_TEXTURE;
        writes[n].pImageInfo = &tex[0];
        writes[n].descriptorCount = 1;
        writes[n].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        writes[n].dstSet = desc;
        n++;
    }

    if (boundSecondary_) {
        // TODO: Also support LAYOUT_GENERAL to be able to texture from framebuffers without transitioning them?
        tex[1].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        tex[1].imageView = boundSecondary_;
        tex[1].sampler = samplerSecondary_;
        writes[n].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writes[n].pNext = nullptr;
        writes[n].dstBinding = DRAW_BINDING_2ND_TEXTURE;
        writes[n].pImageInfo = &tex[1];
        writes[n].descriptorCount = 1;
        writes[n].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        writes[n].dstSet = desc;
        n++;
    }

    // Tessellation data buffer. Make sure this is declared outside the if to avoid optimizer
    // shenanigans.
    VkDescriptorBufferInfo tess_buf{};
    if (tess) {
        VkBuffer buf;
        VkDeviceSize offset;
        VkDeviceSize range;
        ((TessellationDataTransferVulkan *)tessDataTransfer)->GetBufferAndOffset(&buf, &offset, &range);
        assert(buf);
        tess_buf.buffer = buf;
        tess_buf.offset = offset;
        tess_buf.range = range;
        tessOffset_ = offset;
        writes[n].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writes[n].pNext = nullptr;
        writes[n].dstBinding = DRAW_BINDING_TESS_STORAGE_BUF;
        writes[n].pBufferInfo = &tess_buf;
        writes[n].descriptorCount = 1;
        writes[n].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
        writes[n].dstSet = desc;
        n++;
    }

    // Uniform buffer objects
    VkDescriptorBufferInfo buf[3]{};
    int count = 0;
    buf[count].buffer = base;
    buf[count].offset = 0;
    buf[count].range = sizeof(UB_VS_FS_Base);
    count++;
    buf[count].buffer = light;
    buf[count].offset = 0;
    buf[count].range = sizeof(UB_VS_Lights);
    count++;
    buf[count].buffer = bone;
    buf[count].offset = 0;
    buf[count].range = sizeof(UB_VS_Bones);
    count++;
    for (int i = 0; i < count; i++) {
        writes[n].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writes[n].pNext = nullptr;
        writes[n].dstBinding = DRAW_BINDING_DYNUBO_BASE + i;
        writes[n].dstArrayElement = 0;
        writes[n].pBufferInfo = &buf[i];
        writes[n].dstSet = desc;
        writes[n].descriptorCount = 1;
        writes[n].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
        n++;
    }
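
    // Note: the offsets above stay at 0 because these are DYNAMIC uniform buffers; the actual offsets
    // into the per-frame push buffer are supplied at bind time (the dynamicUBOOffsets passed to
    // DrawIndexed/Draw in DoFlush()).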

    vkUpdateDescriptorSets(vulkan_->GetDevice(), n, writes, 0, nullptr);

    if (!tess)  // Again, avoid caching when HW tessellation.
        frame->descSets.Insert(key, desc);
    return desc;
}

void DrawEngineVulkan::DirtyAllUBOs() {
    baseUBOOffset = 0;
    lightUBOOffset = 0;
    boneUBOOffset = 0;
    baseBuf = VK_NULL_HANDLE;
    lightBuf = VK_NULL_HANDLE;
    boneBuf = VK_NULL_HANDLE;
    dirtyUniforms_ = DIRTY_BASE_UNIFORMS | DIRTY_LIGHT_UNIFORMS | DIRTY_BONE_UNIFORMS;
    imageView = VK_NULL_HANDLE;
    sampler = VK_NULL_HANDLE;
    gstate_c.Dirty(DIRTY_TEXTURE_IMAGE);
}

void MarkUnreliable(VertexArrayInfoVulkan *vai) {
    vai->status = VertexArrayInfoVulkan::VAI_UNRELIABLE;
    // TODO: If we change to a real allocator, free the data here.
    // For now we just leave it in the pushbuffer.
}

// The inline wrapper in the header checks for numDrawCalls == 0
void DrawEngineVulkan::DoFlush() {
    PROFILE_THIS_SCOPE("Flush");
    gpuStats.numFlushes++;
    // TODO: Should be enough to update this once per frame?
    gpuStats.numTrackedVertexArrays = (int)vai_.size();

    VulkanRenderManager *renderManager = (VulkanRenderManager *)draw_->GetNativeObject(Draw::NativeObject::RENDER_MANAGER);

    // HACK: These two lines should only execute if we started on a new render pass. Can't tell from in here though...
    lastPipeline_ = nullptr;
    // Since we have a new cmdbuf, dirty our dynamic state so it gets re-set.
    // gstate_c.Dirty(DIRTY_VIEWPORTSCISSOR_STATE|DIRTY_DEPTHSTENCIL_STATE|DIRTY_BLEND_STATE);

    FrameData *frame = &frame_[vulkan_->GetCurFrame()];

    bool tess = gstate_c.bezier || gstate_c.spline;

    bool textureNeedsApply = false;
    if (gstate_c.IsDirty(DIRTY_TEXTURE_IMAGE | DIRTY_TEXTURE_PARAMS) && !gstate.isModeClear() && gstate.isTextureMapEnabled()) {
        textureCache_->SetTexture();
        gstate_c.Clean(DIRTY_TEXTURE_IMAGE | DIRTY_TEXTURE_PARAMS);
        textureNeedsApply = true;
        if (gstate_c.needShaderTexClamp) {
            // We will rarely need to set this, so let's do it every time on use rather than in runloop.
            // Most of the time non-framebuffer textures will be used which can be clamped themselves.
            gstate_c.Dirty(DIRTY_TEXCLAMP);
        }
    }

    GEPrimitiveType prim = prevPrim_;
    bool useHWTransform = CanUseHardwareTransform(prim);

    VulkanVertexShader *vshader = nullptr;
    VulkanFragmentShader *fshader = nullptr;

    uint32_t ibOffset;
    uint32_t vbOffset;

    if (useHWTransform) {
        // We don't detect clears in this path, so here we can switch framebuffers if necessary.
        int vertexCount = 0;
        int maxIndex;
        bool useElements = true;

        // Cannot cache vertex data with morph enabled.
        bool useCache = g_Config.bVertexCache && !(lastVType_ & GE_VTYPE_MORPHCOUNT_MASK);
        // Also avoid caching when software skinning.
        VkBuffer vbuf = VK_NULL_HANDLE;
        VkBuffer ibuf = VK_NULL_HANDLE;
        if (g_Config.bSoftwareSkinning && (lastVType_ & GE_VTYPE_WEIGHT_MASK)) {
            useCache = false;
        }

        if (useCache) {
            PROFILE_THIS_SCOPE("vcache");
            u32 id = dcid_ ^ gstate.getUVGenMode();  // This can have an effect on which UV decoder we need to use! And hence what the decoded data will look like. See #9263
            VertexArrayInfoVulkan *vai = vai_.Get(id);
            if (!vai) {
                vai = new VertexArrayInfoVulkan();
                vai_.Insert(id, vai);
            }
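
            // VAI state machine: entries start as VAI_NEW, are verified over a few frames in
            // VAI_HASHING (reusing the copy uploaded to vertexCache_ while the hashes hold), and
            // get demoted to VAI_UNRELIABLE (re-decode every draw) if the data keeps changing.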
            switch (vai->status) {
            case VertexArrayInfoVulkan::VAI_NEW:
            {
                // Haven't seen this one before. We don't actually upload the vertex data yet.
                ReliableHashType dataHash = ComputeHash();
                vai->hash = dataHash;
                vai->minihash = ComputeMiniHash();
                vai->status = VertexArrayInfoVulkan::VAI_HASHING;
                vai->drawsUntilNextFullHash = 0;
                DecodeVertsToPushBuffer(frame->pushVertex, &vbOffset, &vbuf);  // writes to indexGen
                vai->numVerts = indexGen.VertexCount();
                vai->prim = indexGen.Prim();
                vai->maxIndex = indexGen.MaxIndex();
                vai->flags = gstate_c.vertexFullAlpha ? VAIVULKAN_FLAG_VERTEXFULLALPHA : 0;
                goto rotateVBO;
            }

            // Hashing - still gaining confidence about the buffer.
            // But if we get this far it's likely to be worth uploading the data.
            case VertexArrayInfoVulkan::VAI_HASHING:
            {
                PROFILE_THIS_SCOPE("vcachehash");
                vai->numDraws++;
                if (vai->lastFrame != gpuStats.numFlips) {
                    vai->numFrames++;
                }
                if (vai->drawsUntilNextFullHash == 0) {
                    // Let's try to skip a full hash if mini would fail.
                    const u32 newMiniHash = ComputeMiniHash();
                    ReliableHashType newHash = vai->hash;
                    if (newMiniHash == vai->minihash) {
                        newHash = ComputeHash();
                    }
                    if (newMiniHash != vai->minihash || newHash != vai->hash) {
                        MarkUnreliable(vai);
                        DecodeVertsToPushBuffer(frame->pushVertex, &vbOffset, &vbuf);
                        goto rotateVBO;
                    }
                    if (vai->numVerts > 64) {
                        // exponential backoff up to 16 draws, then every 24
                        vai->drawsUntilNextFullHash = std::min(24, vai->numFrames);
                    } else {
                        // Lower numbers seem much more likely to change.
                        vai->drawsUntilNextFullHash = 0;
                    }
                    // TODO: tweak
                    //if (vai->numFrames > 1000) {
                    //	vai->status = VertexArrayInfo::VAI_RELIABLE;
                    //}
                } else {
                    vai->drawsUntilNextFullHash--;
                    u32 newMiniHash = ComputeMiniHash();
                    if (newMiniHash != vai->minihash) {
                        MarkUnreliable(vai);
                        DecodeVertsToPushBuffer(frame->pushVertex, &vbOffset, &vbuf);
                        goto rotateVBO;
                    }
                }

                if (!vai->vb) {
                    // Directly push to the vertex cache.
                    DecodeVertsToPushBuffer(vertexCache_, &vai->vbOffset, &vai->vb);
                    _dbg_assert_msg_(G3D, gstate_c.vertBounds.minV >= gstate_c.vertBounds.maxV, "Should not have checked UVs when caching.");
                    vai->numVerts = indexGen.VertexCount();
                    vai->prim = indexGen.Prim();
                    vai->maxIndex = indexGen.MaxIndex();
                    vai->flags = gstate_c.vertexFullAlpha ? VAIVULKAN_FLAG_VERTEXFULLALPHA : 0;
                    useElements = !indexGen.SeenOnlyPurePrims();
                    if (!useElements && indexGen.PureCount()) {
                        vai->numVerts = indexGen.PureCount();
                    }
                    if (useElements) {
                        u32 size = sizeof(uint16_t) * indexGen.VertexCount();
                        void *dest = vertexCache_->Push(size, &vai->ibOffset, &vai->ib);
                        memcpy(dest, decIndex, size);
                    } else {
                        vai->ib = VK_NULL_HANDLE;
                        vai->ibOffset = 0;
                    }
                } else {
                    gpuStats.numCachedDrawCalls++;
                    useElements = vai->ib ? true : false;
                    gpuStats.numCachedVertsDrawn += vai->numVerts;
                    gstate_c.vertexFullAlpha = vai->flags & VAIVULKAN_FLAG_VERTEXFULLALPHA;
                }
                vbuf = vai->vb;
                ibuf = vai->ib;
                vbOffset = vai->vbOffset;
                ibOffset = vai->ibOffset;
                vertexCount = vai->numVerts;
                maxIndex = vai->maxIndex;
                prim = static_cast<GEPrimitiveType>(vai->prim);
                break;
            }

            // Reliable - we don't even bother hashing anymore. Right now we don't go here until after a very long time.
            case VertexArrayInfoVulkan::VAI_RELIABLE:
            {
                vai->numDraws++;
                if (vai->lastFrame != gpuStats.numFlips) {
                    vai->numFrames++;
                }
                gpuStats.numCachedDrawCalls++;
                gpuStats.numCachedVertsDrawn += vai->numVerts;
                vbuf = vai->vb;
                ibuf = vai->ib;
                vbOffset = vai->vbOffset;
                ibOffset = vai->ibOffset;
                vertexCount = vai->numVerts;
                maxIndex = vai->maxIndex;
                prim = static_cast<GEPrimitiveType>(vai->prim);
                gstate_c.vertexFullAlpha = vai->flags & VAIVULKAN_FLAG_VERTEXFULLALPHA;
                break;
            }

            case VertexArrayInfoVulkan::VAI_UNRELIABLE:
            {
                vai->numDraws++;
                if (vai->lastFrame != gpuStats.numFlips) {
                    vai->numFrames++;
                }
                DecodeVertsToPushBuffer(frame->pushVertex, &vbOffset, &vbuf);
                goto rotateVBO;
            }
            default:
                break;
            }
        } else {
            if (g_Config.bSoftwareSkinning && (lastVType_ & GE_VTYPE_WEIGHT_MASK)) {
                // If software skinning, we've already predecoded into "decoded". So push that content.
                VkDeviceSize size = decodedVerts_ * dec_->GetDecVtxFmt().stride;
                u8 *dest = (u8 *)frame->pushVertex->Push(size, &vbOffset, &vbuf);
                memcpy(dest, decoded, size);
            } else {
                // Decode directly into the pushbuffer
                DecodeVertsToPushBuffer(frame->pushVertex, &vbOffset, &vbuf);
            }

    rotateVBO:
            gpuStats.numUncachedVertsDrawn += indexGen.VertexCount();
            useElements = !indexGen.SeenOnlyPurePrims();
            vertexCount = indexGen.VertexCount();
            if (!useElements && indexGen.PureCount()) {
                vertexCount = indexGen.PureCount();
            }
            prim = indexGen.Prim();
        }

        bool hasColor = (lastVType_ & GE_VTYPE_COL_MASK) != GE_VTYPE_COL_NONE;
        if (gstate.isModeThrough()) {
            gstate_c.vertexFullAlpha = gstate_c.vertexFullAlpha && (hasColor || gstate.getMaterialAmbientA() == 255);
        } else {
            gstate_c.vertexFullAlpha = gstate_c.vertexFullAlpha && ((hasColor && (gstate.materialupdate & 1)) || gstate.getMaterialAmbientA() == 255) && (!gstate.isLightingEnabled() || gstate.getAmbientA() == 255);
        }

        PROFILE_THIS_SCOPE("updatestate");
        if (textureNeedsApply) {
            textureCache_->ApplyTexture();
            textureCache_->GetVulkanHandles(imageView, sampler);
            if (imageView == VK_NULL_HANDLE)
                imageView = nullTexture_->GetImageView();
            if (sampler == VK_NULL_HANDLE)
                sampler = nullSampler_;
        }

        if (!lastPipeline_ || gstate_c.IsDirty(DIRTY_BLEND_STATE | DIRTY_VIEWPORTSCISSOR_STATE | DIRTY_RASTER_STATE | DIRTY_DEPTHSTENCIL_STATE | DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE) || prim != lastPrim_) {
            shaderManager_->GetShaders(prim, lastVType_, &vshader, &fshader, true);  // usehwtransform
            _dbg_assert_msg_(G3D, vshader->UseHWTransform(), "Bad vshader");

            if (prim != lastPrim_ || gstate_c.IsDirty(DIRTY_BLEND_STATE | DIRTY_VIEWPORTSCISSOR_STATE | DIRTY_RASTER_STATE | DIRTY_DEPTHSTENCIL_STATE)) {
                ConvertStateToVulkanKey(*framebufferManager_, shaderManager_, prim, pipelineKey_, dynState_);
            }

            Draw::NativeObject object = g_Config.iRenderingMode != 0 ? Draw::NativeObject::FRAMEBUFFER_RENDERPASS : Draw::NativeObject::BACKBUFFER_RENDERPASS;
            VkRenderPass renderPass = (VkRenderPass)draw_->GetNativeObject(object);
            VulkanPipeline *pipeline = pipelineManager_->GetOrCreatePipeline(pipelineLayout_, renderPass, pipelineKey_, &dec_->decFmt, vshader, fshader, true);
            if (!pipeline) {
                // Already logged, let's bail out.
                return;
            }
            BindShaderBlendTex();  // This might cause copies so important to do before BindPipeline.
            renderManager->BindPipeline(pipeline->pipeline);
            if (pipeline != lastPipeline_) {
                if (lastPipeline_ && !lastPipeline_->useBlendConstant && pipeline->useBlendConstant) {
                    gstate_c.Dirty(DIRTY_BLEND_STATE);
                }
                lastPipeline_ = pipeline;
            }
            ApplyDrawStateLate(renderManager, false, 0, pipeline->useBlendConstant);
            gstate_c.Clean(DIRTY_BLEND_STATE | DIRTY_DEPTHSTENCIL_STATE | DIRTY_RASTER_STATE | DIRTY_VIEWPORTSCISSOR_STATE);
            lastPipeline_ = pipeline;
        }
        lastPrim_ = prim;

        dirtyUniforms_ |= shaderManager_->UpdateUniforms();

        UpdateUBOs(frame);

        VkDescriptorSet ds = GetOrCreateDescriptorSet(imageView, sampler, baseBuf, lightBuf, boneBuf, tess);

        {
            PROFILE_THIS_SCOPE("renderman_q");

            const uint32_t dynamicUBOOffsets[3] = {
                baseUBOOffset, lightUBOOffset, boneUBOOffset,
            };
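
            // The dynamic offsets must be listed in the same order as the dynamic UBO bindings in the
            // descriptor set layout: base, light, bone.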

            int stride = dec_->GetDecVtxFmt().stride;

            if (useElements) {
                if (!ibuf)
                    ibOffset = (uint32_t)frame->pushIndex->Push(decIndex, sizeof(uint16_t) * indexGen.VertexCount(), &ibuf);
                int numInstances = tess ? numPatches : 1;
                renderManager->DrawIndexed(pipelineLayout_, ds, 3, dynamicUBOOffsets, vbuf, vbOffset, ibuf, ibOffset, vertexCount, numInstances, VK_INDEX_TYPE_UINT16);
            } else {
                renderManager->Draw(pipelineLayout_, ds, 3, dynamicUBOOffsets, vbuf, vbOffset, vertexCount);
            }
        }
    } else {
        PROFILE_THIS_SCOPE("soft");
        // Decode to "decoded"
        DecodeVertsToPushBuffer(nullptr, nullptr, nullptr);

        bool hasColor = (lastVType_ & GE_VTYPE_COL_MASK) != GE_VTYPE_COL_NONE;
        if (gstate.isModeThrough()) {
            gstate_c.vertexFullAlpha = gstate_c.vertexFullAlpha && (hasColor || gstate.getMaterialAmbientA() == 255);
        } else {
            gstate_c.vertexFullAlpha = gstate_c.vertexFullAlpha && ((hasColor && (gstate.materialupdate & 1)) || gstate.getMaterialAmbientA() == 255) && (!gstate.isLightingEnabled() || gstate.getAmbientA() == 255);
        }

        gpuStats.numUncachedVertsDrawn += indexGen.VertexCount();
        prim = indexGen.Prim();
        // Undo the strip optimization, not supported by the SW code yet.
        if (prim == GE_PRIM_TRIANGLE_STRIP)
            prim = GE_PRIM_TRIANGLES;
        VERBOSE_LOG(G3D, "Flush prim %i SW! %i verts in one go", prim, indexGen.VertexCount());

        int numTrans = 0;
        bool drawIndexed = false;
        u16 *inds = decIndex;
        TransformedVertex *drawBuffer = NULL;
        SoftwareTransformResult result;
        memset(&result, 0, sizeof(result));

        SoftwareTransformParams params;
        memset(&params, 0, sizeof(params));
        params.decoded = decoded;
        params.transformed = transformed;
        params.transformedExpanded = transformedExpanded;
        params.fbman = framebufferManager_;
        params.texCache = textureCache_;
        params.allowSeparateAlphaClear = false;

        int maxIndex = indexGen.MaxIndex();
        SoftwareTransform(
            prim, indexGen.VertexCount(),
            dec_->VertexType(), inds, GE_VTYPE_IDX_16BIT, dec_->GetDecVtxFmt(),
            maxIndex, drawBuffer, numTrans, drawIndexed, &params, &result);
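
        // SoftwareTransform fills in drawBuffer (TransformedVertex), numTrans and drawIndexed, and
        // reports in result.action whether we should draw the primitives or treat this as a clear.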

        // Only here, where we know whether to clear or to draw primitives, should we actually set the current framebuffer! Because that gives us the opportunity
        // to use a "pre-clear" render pass, for high efficiency on tilers.
        if (result.action == SW_DRAW_PRIMITIVES) {
            if (textureNeedsApply) {
                textureCache_->ApplyTexture();
                textureCache_->GetVulkanHandles(imageView, sampler);
                if (imageView == VK_NULL_HANDLE)
                    imageView = nullTexture_->GetImageView();
                if (sampler == VK_NULL_HANDLE)
                    sampler = nullSampler_;
            }
            if (!lastPipeline_ || gstate_c.IsDirty(DIRTY_BLEND_STATE | DIRTY_VIEWPORTSCISSOR_STATE | DIRTY_RASTER_STATE | DIRTY_DEPTHSTENCIL_STATE | DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE) || prim != lastPrim_) {
                shaderManager_->GetShaders(prim, lastVType_, &vshader, &fshader, false);  // usehwtransform
                _dbg_assert_msg_(G3D, !vshader->UseHWTransform(), "Bad vshader");

                if (prim != lastPrim_ || gstate_c.IsDirty(DIRTY_BLEND_STATE | DIRTY_VIEWPORTSCISSOR_STATE | DIRTY_RASTER_STATE | DIRTY_DEPTHSTENCIL_STATE)) {
                    ConvertStateToVulkanKey(*framebufferManager_, shaderManager_, prim, pipelineKey_, dynState_);
                }

                Draw::NativeObject object = g_Config.iRenderingMode != 0 ? Draw::NativeObject::FRAMEBUFFER_RENDERPASS : Draw::NativeObject::BACKBUFFER_RENDERPASS;
                VkRenderPass renderPass = (VkRenderPass)draw_->GetNativeObject(object);
                VulkanPipeline *pipeline = pipelineManager_->GetOrCreatePipeline(pipelineLayout_, renderPass, pipelineKey_, &dec_->decFmt, vshader, fshader, false);
                if (!pipeline) {
                    // Already logged, let's bail out.
                    return;
                }
                BindShaderBlendTex();  // This might cause copies so super important to do before BindPipeline.
                renderManager->BindPipeline(pipeline->pipeline);
                if (pipeline != lastPipeline_) {
                    if (lastPipeline_ && !lastPipeline_->useBlendConstant && pipeline->useBlendConstant) {
                        gstate_c.Dirty(DIRTY_BLEND_STATE);
                    }
                    lastPipeline_ = pipeline;
                }
                ApplyDrawStateLate(renderManager, result.setStencil, result.stencilValue, pipeline->useBlendConstant);
                gstate_c.Clean(DIRTY_BLEND_STATE | DIRTY_DEPTHSTENCIL_STATE | DIRTY_RASTER_STATE | DIRTY_VIEWPORTSCISSOR_STATE);
                lastPipeline_ = pipeline;
            }
            lastPrim_ = prim;

            dirtyUniforms_ |= shaderManager_->UpdateUniforms();

            // Even if the first draw is through-mode, make sure we at least have one copy of these uniforms buffered
            UpdateUBOs(frame);

            VkDescriptorSet ds = GetOrCreateDescriptorSet(imageView, sampler, baseBuf, lightBuf, boneBuf, tess);
            const uint32_t dynamicUBOOffsets[3] = {
                baseUBOOffset, lightUBOOffset, boneUBOOffset,
            };

            PROFILE_THIS_SCOPE("renderman_q");
            if (drawIndexed) {
                VkBuffer vbuf, ibuf;
                vbOffset = (uint32_t)frame->pushVertex->Push(drawBuffer, maxIndex * sizeof(TransformedVertex), &vbuf);
                ibOffset = (uint32_t)frame->pushIndex->Push(inds, sizeof(short) * numTrans, &ibuf);
                VkDeviceSize offsets[1] = { vbOffset };
                renderManager->DrawIndexed(pipelineLayout_, ds, 3, dynamicUBOOffsets, vbuf, vbOffset, ibuf, ibOffset, numTrans, 1, VK_INDEX_TYPE_UINT16);
            } else {
                VkBuffer vbuf;
                vbOffset = (uint32_t)frame->pushVertex->Push(drawBuffer, numTrans * sizeof(TransformedVertex), &vbuf);
                VkDeviceSize offsets[1] = { vbOffset };
                renderManager->Draw(pipelineLayout_, ds, 3, dynamicUBOOffsets, vbuf, vbOffset, numTrans);
            }
        } else if (result.action == SW_CLEAR) {
            // Note: we won't get here if the clear is alpha but not color, or color but not alpha.

            // We let the framebuffer manager handle the clear. It can use renderpasses to optimize on tilers.
            // If non-buffered though, it'll just do a plain clear.
            framebufferManager_->NotifyClear(gstate.isClearModeColorMask(), gstate.isClearModeAlphaMask(), gstate.isClearModeDepthMask(), result.color, result.depth);

            int scissorX1 = gstate.getScissorX1();
            int scissorY1 = gstate.getScissorY1();
            int scissorX2 = gstate.getScissorX2() + 1;
            int scissorY2 = gstate.getScissorY2() + 1;
            framebufferManager_->SetSafeSize(scissorX2, scissorY2);

            if (g_Config.bBlockTransferGPU && (gstate_c.featureFlags & GPU_USE_CLEAR_RAM_HACK) && gstate.isClearModeColorMask() && (gstate.isClearModeAlphaMask() || gstate.FrameBufFormat() == GE_FORMAT_565)) {
                framebufferManager_->ApplyClearToMemory(scissorX1, scissorY1, scissorX2, scissorY2, result.color);
            }
        }
    }

    gpuStats.numDrawCalls += numDrawCalls;
    gpuStats.numVertsSubmitted += vertexCountInDrawCalls_;

    indexGen.Reset();
    decodedVerts_ = 0;
    numDrawCalls = 0;
    vertexCountInDrawCalls_ = 0;
    decodeCounter_ = 0;
    dcid_ = 0;
    prevPrim_ = GE_PRIM_INVALID;
    gstate_c.vertexFullAlpha = true;
    framebufferManager_->SetColorUpdated(gstate_c.skipDrawReason);

    // Now seems as good a time as any to reset the min/max coords, which we may examine later.
    gstate_c.vertBounds.minU = 512;
    gstate_c.vertBounds.minV = 512;
    gstate_c.vertBounds.maxU = 0;
    gstate_c.vertBounds.maxV = 0;

    host->GPUNotifyDraw();
}

void DrawEngineVulkan::UpdateUBOs(FrameData *frame) {
    if ((dirtyUniforms_ & DIRTY_BASE_UNIFORMS) || baseBuf == VK_NULL_HANDLE) {
        baseUBOOffset = shaderManager_->PushBaseBuffer(frame->pushUBO, &baseBuf);
        dirtyUniforms_ &= ~DIRTY_BASE_UNIFORMS;
    }
    if ((dirtyUniforms_ & DIRTY_LIGHT_UNIFORMS) || lightBuf == VK_NULL_HANDLE) {
        lightUBOOffset = shaderManager_->PushLightBuffer(frame->pushUBO, &lightBuf);
        dirtyUniforms_ &= ~DIRTY_LIGHT_UNIFORMS;
    }
    if ((dirtyUniforms_ & DIRTY_BONE_UNIFORMS) || boneBuf == VK_NULL_HANDLE) {
        boneUBOOffset = shaderManager_->PushBoneBuffer(frame->pushUBO, &boneBuf);
        dirtyUniforms_ &= ~DIRTY_BONE_UNIFORMS;
    }
}
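
// Note: only the uniform blocks that are actually dirty get re-pushed into the per-frame pushUBO;
// clean blocks keep the buffer/offset recorded from an earlier push, which is what the dynamic
// offsets bound at draw time point at.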

DrawEngineVulkan::TessellationDataTransferVulkan::TessellationDataTransferVulkan(VulkanContext *vulkan, Draw::DrawContext *draw)
    : TessellationDataTransfer(), vulkan_(vulkan), draw_(draw) {
}

DrawEngineVulkan::TessellationDataTransferVulkan::~TessellationDataTransferVulkan() {
}

// TODO: Consolidate the three textures into one, with height 3.
// This can be done for all the backends.
// TODO: Actually, even better, avoid the usage of textures altogether and just use shader storage buffers from the current pushbuffer.
void DrawEngineVulkan::TessellationDataTransferVulkan::PrepareBuffers(float *&pos, float *&tex, float *&col, int &posStride, int &texStride, int &colStride, int size, bool hasColor, bool hasTexCoords) {
    colStride = 4;

    // TODO: This SHOULD work without padding but I can't get it to work on nvidia, so had
    // to expand to vec4. Driver bug?
    struct TessData {
        float pos[3]; float pad1;
        float uv[2]; float pad2[2];
        float color[4];
    };

    int ssboAlignment = vulkan_->GetPhysicalDeviceProperties().limits.minStorageBufferOffsetAlignment;
    uint8_t *data = (uint8_t *)push_->PushAligned(size * sizeof(TessData), &offset_, &buf_, ssboAlignment);
    range_ = size * sizeof(TessData);

    pos = (float *)(data);
    tex = (float *)(data + offsetof(TessData, uv));
    col = (float *)(data + offsetof(TessData, color));
    posStride = sizeof(TessData) / sizeof(float);
    colStride = hasColor ? (sizeof(TessData) / sizeof(float)) : 0;
    texStride = sizeof(TessData) / sizeof(float);
}
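
// Note: PrepareBuffers writes the control point positions, UVs and colors interleaved as an array of
// TessData structs into one SSBO allocation from the per-frame push buffer; GetOrCreateDescriptorSet()
// later fetches the same buffer/offset/range via GetBufferAndOffset() and binds it at
// DRAW_BINDING_TESS_STORAGE_BUF.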

void DrawEngineVulkan::TessellationDataTransferVulkan::SendDataToShader(const float *pos, const float *tex, const float *col, int size, bool hasColor, bool hasTexCoords) {
    // Nothing to do here!
}