More progress

Henrik Rydgard 2016-01-02 02:08:05 +01:00
parent 4ddca8607f
commit cfbecf5071
18 changed files with 320 additions and 426 deletions

View file

@ -249,7 +249,6 @@
<ClInclude Include="Vulkan\FramebufferVulkan.h" /> <ClInclude Include="Vulkan\FramebufferVulkan.h" />
<ClInclude Include="Vulkan\GPU_Vulkan.h" /> <ClInclude Include="Vulkan\GPU_Vulkan.h" />
<ClInclude Include="Vulkan\PipelineManagerVulkan.h" /> <ClInclude Include="Vulkan\PipelineManagerVulkan.h" />
<ClInclude Include="Vulkan\ShaderCompiler.h" />
<ClInclude Include="Vulkan\ShaderManagerVulkan.h" /> <ClInclude Include="Vulkan\ShaderManagerVulkan.h" />
<ClInclude Include="Vulkan\TextureCacheVulkan.h" /> <ClInclude Include="Vulkan\TextureCacheVulkan.h" />
<ClInclude Include="Vulkan\VertexShaderGeneratorVulkan.h" /> <ClInclude Include="Vulkan\VertexShaderGeneratorVulkan.h" />
@ -337,7 +336,6 @@
<ClCompile Include="Vulkan\FramebufferVulkan.cpp" /> <ClCompile Include="Vulkan\FramebufferVulkan.cpp" />
<ClCompile Include="Vulkan\GPU_Vulkan.cpp" /> <ClCompile Include="Vulkan\GPU_Vulkan.cpp" />
<ClCompile Include="Vulkan\PipelineManagerVulkan.cpp" /> <ClCompile Include="Vulkan\PipelineManagerVulkan.cpp" />
<ClCompile Include="Vulkan\ShaderCompiler.cpp" />
<ClCompile Include="Vulkan\ShaderManagerVulkan.cpp" /> <ClCompile Include="Vulkan\ShaderManagerVulkan.cpp" />
<ClCompile Include="Vulkan\StateMappingVulkan.cpp" /> <ClCompile Include="Vulkan\StateMappingVulkan.cpp" />
<ClCompile Include="Vulkan\VertexShaderGeneratorVulkan.cpp" /> <ClCompile Include="Vulkan\VertexShaderGeneratorVulkan.cpp" />

View file

@ -210,7 +210,6 @@
<ClInclude Include="Vulkan\VertexShaderGeneratorVulkan.h" /> <ClInclude Include="Vulkan\VertexShaderGeneratorVulkan.h" />
<ClInclude Include="Vulkan\FragmentShaderGeneratorVulkan.h" /> <ClInclude Include="Vulkan\FragmentShaderGeneratorVulkan.h" />
<ClInclude Include="Vulkan\ShaderManagerVulkan.h" /> <ClInclude Include="Vulkan\ShaderManagerVulkan.h" />
<ClInclude Include="Vulkan\ShaderCompiler.h" />
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<ClCompile Include="Math3D.cpp"> <ClCompile Include="Math3D.cpp">
@ -404,6 +403,5 @@
<ClCompile Include="Vulkan\FramebufferVulkan.cpp" /> <ClCompile Include="Vulkan\FramebufferVulkan.cpp" />
<ClCompile Include="Vulkan\FragmentShaderGeneratorVulkan.cpp" /> <ClCompile Include="Vulkan\FragmentShaderGeneratorVulkan.cpp" />
<ClCompile Include="Vulkan\ShaderManagerVulkan.cpp" /> <ClCompile Include="Vulkan\ShaderManagerVulkan.cpp" />
<ClCompile Include="Vulkan\ShaderCompiler.cpp" />
</ItemGroup> </ItemGroup>
</Project> </Project>

View file

@ -1,23 +0,0 @@
// Copyright (c) 2015- PPSSPP Project.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.
// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
#include "GPU/Vulkan/VulkanUtil.h"
#include "GPU/Vulkan/ShaderCompiler.h"
bool CompileGLSLVulkan(const char *code, std::vector<uint32_t> &spirv, std::string &errorMessage) {
return false;
}

View file

@ -1,27 +0,0 @@
// Copyright (c) 2015- PPSSPP Project.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.
// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
#pragma once
#include <vector>
#include "GPU/Vulkan/VulkanUtil.h"
// Wrapper around the GLSL compiler library. Compiles GLSL into SPIR-V consumable by Vulkan.
bool CompileGLSLVulkan(const char *code, std::vector<uint32_t> &spirv, std::string &errorMessage);
// TODO: Compute shaders

View file

@ -37,7 +37,6 @@
#include "GPU/Vulkan/ShaderManagerVulkan.h" #include "GPU/Vulkan/ShaderManagerVulkan.h"
#include "GPU/Vulkan/DrawEngineVulkan.h" #include "GPU/Vulkan/DrawEngineVulkan.h"
#include "GPU/Vulkan/FramebufferVulkan.h" #include "GPU/Vulkan/FramebufferVulkan.h"
#include "GPU/Vulkan/ShaderCompiler.h"
#include "GPU/Vulkan/FragmentShaderGeneratorVulkan.h" #include "GPU/Vulkan/FragmentShaderGeneratorVulkan.h"
#include "GPU/Vulkan/VertexShaderGeneratorVulkan.h" #include "GPU/Vulkan/VertexShaderGeneratorVulkan.h"
#include "UI/OnScreenDisplay.h" #include "UI/OnScreenDisplay.h"
@ -51,7 +50,8 @@ VulkanFragmentShader::VulkanFragmentShader(VkDevice device, ShaderID id, const c
std::string errorMessage; std::string errorMessage;
std::vector<uint32_t> spirv; std::vector<uint32_t> spirv;
bool success = CompileGLSLVulkan(code, spirv, errorMessage);
bool success = GLSLtoSPV(VK_SHADER_STAGE_FRAGMENT_BIT, code, spirv, &errorMessage);
if (!errorMessage.empty()) { if (!errorMessage.empty()) {
if (success) { if (success) {
ERROR_LOG(G3D, "Warnings in shader compilation!"); ERROR_LOG(G3D, "Warnings in shader compilation!");
@ -100,7 +100,7 @@ VulkanVertexShader::VulkanVertexShader(VkDevice device, ShaderID id, const char
#endif #endif
std::string errorMessage; std::string errorMessage;
std::vector<uint32_t> spirv; std::vector<uint32_t> spirv;
bool success = CompileGLSLVulkan(code, spirv, errorMessage); bool success = GLSLtoSPV(VK_SHADER_STAGE_VERTEX_BIT, code, spirv, &errorMessage);
if (!errorMessage.empty()) { if (!errorMessage.empty()) {
if (success) { if (success) {
ERROR_LOG(G3D, "Warnings in shader compilation!"); ERROR_LOG(G3D, "Warnings in shader compilation!");
@ -142,33 +142,12 @@ std::string VulkanVertexShader::GetShaderString(DebugShaderStringType type) cons
} }
} }
/*
// Utility
void ShaderManagerVulkan::VSSetMatrix4x3(int creg, const float *m4x3) {
float m4x4[16];
ConvertMatrix4x3To4x4Transposed(m4x4, m4x3);
pD3Ddevice->SetVertexShaderConstantF(creg, m4x4, 4);
}
void ShaderManagerVulkan::VSSetMatrix4x3_3(int creg, const float *m4x3) {
float m3x4[16];
ConvertMatrix4x3To3x4Transposed(m3x4, m4x3);
pD3Ddevice->SetVertexShaderConstantF(creg, m3x4, 3);
}
void ShaderManagerVulkan::VSSetMatrix(int creg, const float* pMatrix) {
float transp[16];
Transpose4x4(transp, pMatrix);
pD3Ddevice->SetVertexShaderConstantF(creg, transp, 4);
}
*/
// Depth in ogl is between -1;1 we need between 0;1 and optionally reverse it // Depth in ogl is between -1;1 we need between 0;1 and optionally reverse it
static void ConvertProjMatrixToVulkan(Matrix4x4 &in, bool invertedX, bool invertedY, bool invertedZ) { static void ConvertProjMatrixToVulkan(Matrix4x4 &in, bool invertedX, bool invertedY, bool invertedZ) {
// Half pixel offset hack // Half pixel offset hack
float xoff = 0.5f / gstate_c.curRTRenderWidth; float xoff = 0.5f / gstate_c.curRTRenderWidth;
xoff = gstate_c.vpXOffset + (invertedX ? xoff : -xoff); xoff = gstate_c.vpXOffset + (invertedX ? xoff : -xoff);
float yoff = -0.5f / gstate_c.curRTRenderHeight; float yoff = 0.5f / gstate_c.curRTRenderHeight;
yoff = gstate_c.vpYOffset + (invertedY ? yoff : -yoff); yoff = gstate_c.vpYOffset + (invertedY ? yoff : -yoff);
if (invertedX) if (invertedX)
@ -180,9 +159,7 @@ static void ConvertProjMatrixToVulkan(Matrix4x4 &in, bool invertedX, bool invert
} }
static void ConvertProjMatrixToVulkanThrough(Matrix4x4 &in) { static void ConvertProjMatrixToVulkanThrough(Matrix4x4 &in) {
float xoff = -0.5f / gstate_c.curRTRenderWidth; in.translateAndScale(Vec3(0.0f, 0.0f, 0.5f), Vec3(1.0f, 1.0f, 0.5f));
float yoff = 0.5f / gstate_c.curRTRenderHeight;
in.translateAndScale(Vec3(xoff, yoff, 0.5f), Vec3(1.0f, 1.0f, 0.5f));
} }
void ShaderManagerVulkan::PSUpdateUniforms(int dirtyUniforms) { void ShaderManagerVulkan::PSUpdateUniforms(int dirtyUniforms) {
@ -402,20 +379,20 @@ void ShaderManagerVulkan::VSUpdateUniforms(int dirtyUniforms) {
// Lighting // Lighting
if (dirtyUniforms & DIRTY_AMBIENT) { if (dirtyUniforms & DIRTY_AMBIENT) {
Uint8x3ToFloat4_AlphaUint8(ub_lightGlobal.ambientColor, gstate.ambientcolor, gstate.getAmbientA()); Uint8x3ToFloat4_AlphaUint8(ub_lights.ambientColor, gstate.ambientcolor, gstate.getAmbientA());
} }
if (dirtyUniforms & DIRTY_MATAMBIENTALPHA) { if (dirtyUniforms & DIRTY_MATAMBIENTALPHA) {
// Note - this one is not in lighting but in transformCommon as it has uses beyond lighting // Note - this one is not in lighting but in transformCommon as it has uses beyond lighting
Uint8x3ToFloat4_AlphaUint8(ub_transformCommon.matAmbient, gstate.materialambient, gstate.getMaterialAmbientA()); Uint8x3ToFloat4_AlphaUint8(ub_transformCommon.matAmbient, gstate.materialambient, gstate.getMaterialAmbientA());
} }
if (dirtyUniforms & DIRTY_MATDIFFUSE) { if (dirtyUniforms & DIRTY_MATDIFFUSE) {
Uint8x3ToFloat4(ub_lightGlobal.materialDiffuse, gstate.materialdiffuse); Uint8x3ToFloat4(ub_lights.materialDiffuse, gstate.materialdiffuse);
} }
if (dirtyUniforms & DIRTY_MATEMISSIVE) { if (dirtyUniforms & DIRTY_MATEMISSIVE) {
Uint8x3ToFloat4(ub_lightGlobal.materialEmissive, gstate.materialemissive); Uint8x3ToFloat4(ub_lights.materialEmissive, gstate.materialemissive);
} }
if (dirtyUniforms & DIRTY_MATSPECULAR) { if (dirtyUniforms & DIRTY_MATSPECULAR) {
Uint8x3ToFloat4_Alpha(ub_lightGlobal.materialEmissive, gstate.materialspecular, getFloat24(gstate.materialspecularcoef)); Uint8x3ToFloat4_Alpha(ub_lights.materialEmissive, gstate.materialspecular, getFloat24(gstate.materialspecularcoef));
} }
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
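
Note on the change above: the stub CompileGLSLVulkan() wrapper (deleted earlier in this commit) is replaced by GLSLtoSPV(), which now takes the shader stage and an optional error-message out-parameter. A minimal sketch of how a caller can use the new signature follows; the CompileToModule helper, the device parameter and the vkCreateShaderModule step are illustrative assumptions and not part of this commit, while GLSLtoSPV and ERROR_LOG are used with the signatures shown in the diff. Assumes the includes already present in ShaderManagerVulkan.cpp.

// Hypothetical helper, for illustration only: compile GLSL source for one stage
// and wrap the resulting SPIR-V in a shader module.
VkShaderModule CompileToModule(VkDevice device, VkShaderStageFlagBits stage, const char *code) {
	std::string errorMessage;
	std::vector<uint32_t> spirv;
	bool success = GLSLtoSPV(stage, code, spirv, &errorMessage);
	if (!errorMessage.empty()) {
		// glslang's info logs end up here; warnings can be present even on success.
		ERROR_LOG(G3D, "%s", errorMessage.c_str());
	}
	if (!success)
		return VK_NULL_HANDLE;
	VkShaderModuleCreateInfo sm = { VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO };
	sm.codeSize = spirv.size() * sizeof(uint32_t);
	sm.pCode = spirv.data();
	VkShaderModule module = VK_NULL_HANDLE;
	if (vkCreateShaderModule(device, &sm, nullptr, &module) != VK_SUCCESS)
		return VK_NULL_HANDLE;
	return module;
}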

View file

@ -96,6 +96,10 @@ R"(matrix4x4 proj;
)"; )";
struct UB_VS_Lights { struct UB_VS_Lights {
float ambientColor[4];
float materialDiffuse[4];
float materialSpecular[4];
float materialEmissive[4];
float lpos[4][4]; float lpos[4][4];
float ldir[4][4]; float ldir[4][4];
float latt[4][4]; float latt[4][4];
@ -107,7 +111,11 @@ struct UB_VS_Lights {
}; };
static const char *ub_vs_lightsStr = static const char *ub_vs_lightsStr =
R"(vec3 lpos[4]; R"(vec3 ambientColor;
vec3 materialDiffuse;
vec4 materialSpecular;
vec3 materialEmissive;
vec3 lpos[4];
vec3 ldir[4]; vec3 ldir[4];
vec3 latt[4]; vec3 latt[4];
float lightAngle[4]; float lightAngle[4];
@ -117,26 +125,12 @@ R"(vec3 lpos[4];
vec3 lightSpecular[4]; vec3 lightSpecular[4];
)"; )";
struct UB_VS_LightGlobal {
float ambientColor[4];
float materialDiffuse[4];
float materialSpecular[4];
float materialEmissive[4];
};
static const char *ub_vs_lightsGlobalStr =
R"(vec3 ambientColor;
vec3 materialDiffuse;
vec4 materialSpecular;
vec3 materialEmissive;
)";
struct UB_VS_Bones { struct UB_VS_Bones {
float bones[8][16]; float bones[8][16];
}; };
static const char *ub_vs_bonesStr = static const char *ub_vs_bonesStr =
R"(matrix4x4 bone[8]; R"(matrix4x4 m[8];
)"; )";
// Let's not bother splitting this, we'll just upload the lot for every draw call. // Let's not bother splitting this, we'll just upload the lot for every draw call.
@ -242,7 +236,6 @@ private:
// Uniform block scratchpad. These (the relevant ones) are copied to the current pushbuffer at draw time. // Uniform block scratchpad. These (the relevant ones) are copied to the current pushbuffer at draw time.
UB_VS_TransformCommon ub_transformCommon; UB_VS_TransformCommon ub_transformCommon;
UB_VS_LightGlobal ub_lightGlobal;
UB_VS_Lights ub_lights; UB_VS_Lights ub_lights;
UB_VS_Bones ub_bones; UB_VS_Bones ub_bones;
UB_FS_All ub_fragment; UB_FS_All ub_fragment;

View file

@ -176,22 +176,24 @@ void ConvertStateToVulkanKey(FramebufferManagerVulkan &fbManager, int prim, Vulk
bool alphaMask = gstate.isClearModeAlphaMask(); bool alphaMask = gstate.isClearModeAlphaMask();
key.colorWriteMask = (colorMask ? (VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_A_BIT) : 0) | (alphaMask ? VK_COLOR_COMPONENT_A_BIT : 0); key.colorWriteMask = (colorMask ? (VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_A_BIT) : 0) | (alphaMask ? VK_COLOR_COMPONENT_A_BIT : 0);
GenericStencilFuncState stencilState;
ConvertStencilFuncState(stencilState);
// Stencil Test // Stencil Test
if (alphaMask) { if (stencilState.enabled) {
key.stencilTestEnable = true; key.stencilTestEnable = true;
key.stencilCompareOp = VK_COMPARE_OP_ALWAYS; key.stencilCompareOp = compareOps[stencilState.testFunc];
key.stencilPassOp = VK_STENCIL_OP_REPLACE; key.stencilPassOp = stencilOps[stencilState.zPass];
key.stencilFailOp = VK_STENCIL_OP_REPLACE; key.stencilFailOp = stencilOps[stencilState.sFail];
key.stencilDepthFailOp = VK_STENCIL_OP_REPLACE; key.stencilDepthFailOp = stencilOps[stencilState.zFail];
// TODO: Are these right?
dynState.useStencil = true; dynState.useStencil = true;
dynState.stencilRef = 0xFF; dynState.stencilRef = stencilState.testRef;
dynState.stencilCompareMask = 0xFF; dynState.stencilCompareMask = stencilState.testMask;
dynState.stencilWriteMask = 0xFF; dynState.stencilWriteMask = stencilState.writeMask;
} else { } else {
key.stencilTestEnable = false; key.stencilTestEnable = false;
dynState.useStencil = false;
} }
} else { } else {
// Set cull // Set cull
bool wantCull = !gstate.isModeThrough() && prim != GE_PRIM_RECTANGLES && gstate.isCullEnabled(); bool wantCull = !gstate.isModeThrough() && prim != GE_PRIM_RECTANGLES && gstate.isCullEnabled();
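
Note on the clear-mode stencil change above: instead of hard-coding VK_STENCIL_OP_REPLACE with a 0xFF reference and mask, the state is now derived from ConvertStencilFuncState() and translated through the compareOps/stencilOps lookup tables used elsewhere in this file. Those tables are not shown in this hunk; a plausible shape for them, assuming the usual GE enum ordering in PPSSPP, would be:

// Illustrative only -- the real tables live elsewhere in StateMappingVulkan.cpp.
// Assumed GE ordering: NEVER, ALWAYS, EQUAL, NOTEQUAL, LESS, LEQUAL, GREATER, GEQUAL
// for comparisons, and KEEP, ZERO, REPLACE, INVERT, INCR, DECR for stencil ops.
static const VkCompareOp compareOps[] = {
	VK_COMPARE_OP_NEVER, VK_COMPARE_OP_ALWAYS,
	VK_COMPARE_OP_EQUAL, VK_COMPARE_OP_NOT_EQUAL,
	VK_COMPARE_OP_LESS, VK_COMPARE_OP_LESS_OR_EQUAL,
	VK_COMPARE_OP_GREATER, VK_COMPARE_OP_GREATER_OR_EQUAL,
};
static const VkStencilOp stencilOps[] = {
	VK_STENCIL_OP_KEEP, VK_STENCIL_OP_ZERO, VK_STENCIL_OP_REPLACE,
	VK_STENCIL_OP_INVERT, VK_STENCIL_OP_INCREMENT_AND_CLAMP, VK_STENCIL_OP_DECREMENT_AND_CLAMP,
};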

View file

@ -32,6 +32,7 @@
#include "GPU/Common/VertexDecoderCommon.h" #include "GPU/Common/VertexDecoderCommon.h"
#include "GPU/Vulkan/VertexShaderGeneratorVulkan.h" #include "GPU/Vulkan/VertexShaderGeneratorVulkan.h"
#include "GPU/Vulkan/PipelineManagerVulkan.h" #include "GPU/Vulkan/PipelineManagerVulkan.h"
#include "GPU/Vulkan/ShaderManagerVulkan.h"
// "Varying" layout - must match fragment shader // "Varying" layout - must match fragment shader
// color0 = 0 // color0 = 0
@ -70,42 +71,6 @@ enum DoLightComputation {
LIGHT_FULL, LIGHT_FULL,
}; };
const char *vulkan_base_uniforms = R"(
layout(set=0, binding=4) uniform base {
mat4 proj;
mat4 world;
mat4 view;
mat4 texmtx;
vec4 uvscaleoffset;
vec2 fogcoef;
vec4 depthRange;
vec4 matambientalpha;
};
)";
const char *vulkan_light_uniforms = R"(
layout(set=0, binding=5) uniform light {
vec4 ambient;
vec3 matdiffuse;
vec4 matspecular;
vec3 matemissive;
vec3 pos[4];
// TODO: Make lighttype/comp uniforms too
vec3 att[4];
vec3 dir[4];
float angle[4];
float spotCoef[4];
vec3 ambient[4];
vec3 diffuse[4];
vec3 specular[4];
)";
const char *vulkan_bone_uniforms = R"(
layout(set=0, binding=6) uniform bone {
mat4 m[8];
)";
// Depth range and viewport // Depth range and viewport
// //
// After the multiplication with the projection matrix, we have a 4D vector in clip space. // After the multiplication with the projection matrix, we have a 4D vector in clip space.
@ -213,12 +178,11 @@ bool GenerateVulkanGLSLVertexShader(const ShaderID &id, char *buffer) {
// We will memcpy the parts into place in a big buffer so we can be quite dynamic about what parts // We will memcpy the parts into place in a big buffer so we can be quite dynamic about what parts
// are present and what parts aren't, but we will not be ultra detailed about it. // are present and what parts aren't, but we will not be ultra detailed about it.
WRITE(p, "%s", vulkan_base_uniforms); WRITE(p, "layout (binding=3) uniform base {\n%s\n}\n", ub_vs_transformCommonStr);
if (enableLighting) if (enableLighting)
WRITE(p, "%s", vulkan_light_uniforms); WRITE(p, "layout (binding=4) uniform light {\n%s\n}\n", ub_vs_lightsStr);
if (enableBones) if (enableBones)
WRITE(p, "%s", vulkan_bone_uniforms); WRITE(p, "layout (binding=5) uniform bone {\n%s\n}\n", ub_vs_bonesStr);
bool prescale = false; bool prescale = false;
@ -409,10 +373,10 @@ bool GenerateVulkanGLSLVertexShader(const ShaderID &id, char *buffer) {
if (poweredDiffuse) { if (poweredDiffuse) {
// pow(0.0, 0.0) may be undefined, but the PSP seems to treat it as 1.0. // pow(0.0, 0.0) may be undefined, but the PSP seems to treat it as 1.0.
// Seen in Tales of the World: Radiant Mythology (#2424.) // Seen in Tales of the World: Radiant Mythology (#2424.)
WRITE(p, " if (dot%i == 0.0 && u_matspecular.a == 0.0) {\n", i); WRITE(p, " if (dot%i == 0.0 && light.matspecular.a == 0.0) {\n", i);
WRITE(p, " dot%i = 1.0;\n", i); WRITE(p, " dot%i = 1.0;\n", i);
WRITE(p, " } else {\n"); WRITE(p, " } else {\n");
WRITE(p, " dot%i = pow(dot[%i], u_matspecular.a);\n", i, i); WRITE(p, " dot%i = pow(dot[%i], light.matspecular.a);\n", i, i);
WRITE(p, " }\n"); WRITE(p, " }\n");
} }
@ -444,7 +408,7 @@ bool GenerateVulkanGLSLVertexShader(const ShaderID &id, char *buffer) {
if (doSpecular) { if (doSpecular) {
WRITE(p, " dot[%i] = dot(normalize(toLight + vec3(0.0, 0.0, 1.0)), worldnormal);\n", i); WRITE(p, " dot[%i] = dot(normalize(toLight + vec3(0.0, 0.0, 1.0)), worldnormal);\n", i);
WRITE(p, " if (dot[%i] > 0.0)\n", i); WRITE(p, " if (dot[%i] > 0.0)\n", i);
WRITE(p, " lightSum1 += light.specular[%i] * %s * (pow(dot[%i], u_matspecular.a) %s);\n", i, specularStr, i, timesLightScale); WRITE(p, " lightSum1 += light.specular[%i] * %s * (pow(dot[%i], light.matspecular.a) %s);\n", i, specularStr, i, timesLightScale);
} }
WRITE(p, " lightSum0.rgb += (light.ambient[%i] * %s.rgb + diffuse)%s;\n", i, ambientStr, timesLightScale); WRITE(p, " lightSum0.rgb += (light.ambient[%i] * %s.rgb + diffuse)%s;\n", i, ambientStr, timesLightScale);
} }
@ -471,7 +435,7 @@ bool GenerateVulkanGLSLVertexShader(const ShaderID &id, char *buffer) {
if (hasColor) { if (hasColor) {
WRITE(p, " v_color0 = color0;\n"); WRITE(p, " v_color0 = color0;\n");
} else { } else {
WRITE(p, " v_color0 = u_matambientalpha;\n"); WRITE(p, " v_color0 = base.matambientalpha;\n");
} }
if (lmode) if (lmode)
WRITE(p, " v_color1 = vec3(0.0);\n"); WRITE(p, " v_color1 = vec3(0.0);\n");
@ -490,9 +454,9 @@ bool GenerateVulkanGLSLVertexShader(const ShaderID &id, char *buffer) {
} }
} else { } else {
if (hasTexcoord) { if (hasTexcoord) {
WRITE(p, " v_texcoord = texcoord * u_uvscaleoffset.xy + u_uvscaleoffset.zw;\n"); WRITE(p, " v_texcoord = texcoord * base.uvscaleoffset.xy + base.uvscaleoffset.zw;\n");
} else { } else {
WRITE(p, " v_texcoord = u_uvscaleoffset.zw;\n"); WRITE(p, " v_texcoord = base.uvscaleoffset.zw;\n");
} }
} }
break; break;
@ -546,7 +510,7 @@ bool GenerateVulkanGLSLVertexShader(const ShaderID &id, char *buffer) {
// Compute fogdepth // Compute fogdepth
if (enableFog) if (enableFog)
WRITE(p, " v_fogdepth = (viewPos.z + u_fogcoef.x) * u_fogcoef.y;\n"); WRITE(p, " v_fogdepth = (viewPos.z + base.fogcoef.x) * base.fogcoef.y;\n");
} }
WRITE(p, "}\n"); WRITE(p, "}\n");
return true; return true;
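
Note on the generator change above: the hand-written vulkan_base_uniforms / vulkan_light_uniforms / vulkan_bone_uniforms strings are removed, and the generator now wraps the shared ub_vs_*Str member lists in layout blocks at bindings 3, 4 and 5, with members referenced as base.*, light.* and so on. For orientation, here is roughly the shape the emitted "base" block is aiming at, reconstructed from the deleted vulkan_base_uniforms string; the instance name and trailing semicolon are shown because the base.* accesses need them, but the exact text this work-in-progress commit emits may differ:

// Illustration only; not emitted verbatim by this commit.
static const char *example_emitted_base_block = R"(
layout (binding=3) uniform Base {
	mat4 proj;
	mat4 world;
	mat4 view;
	mat4 texmtx;
	vec4 uvscaleoffset;
	vec2 fogcoef;
	vec4 depthRange;
	vec4 matambientalpha;
} base;
)";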

View file

@ -2,4 +2,5 @@
#define VK_PROTOTYPES #define VK_PROTOTYPES
#include "ext/vulkan/vulkan.h" #include "ext/vulkan/vulkan.h"
#include "thin3d/vulkan_utils.h"

View file

@ -100,7 +100,7 @@ const char *ObjTypeToString(VkDebugReportObjectTypeEXT type) {
} }
static VkBool32 VKAPI_CALL Vulkan_Dbg(VkDebugReportFlagsEXT msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject, size_t location, int32_t msgCode, const char* pLayerPrefix, const char* pMsg, void *pUserData) { static VkBool32 VKAPI_CALL Vulkan_Dbg(VkDebugReportFlagsEXT msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject, size_t location, int32_t msgCode, const char* pLayerPrefix, const char* pMsg, void *pUserData) {
VulkanLogOptions *options = (VulkanLogOptions *)pUserData; const VulkanLogOptions *options = (const VulkanLogOptions *)pUserData;
std::ostringstream message; std::ostringstream message;
if (msgFlags & VK_DEBUG_REPORT_ERROR_BIT_EXT) { if (msgFlags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
@ -167,16 +167,10 @@ bool WindowsVulkanContext::Init(HINSTANCE hInst, HWND hWnd, std::string *error_m
_CrtCheckMemory(); _CrtCheckMemory();
VkClearValue clearVal[2];
memset(clearVal, 0, sizeof(clearVal));
clearVal[0].color.float32[0] = 0.5f;
g_Vulkan->BeginSurfaceRenderPass(clearVal);
return true; return true;
} }
void WindowsVulkanContext::Shutdown() { void WindowsVulkanContext::Shutdown() {
g_Vulkan->EndSurfaceRenderPass();
g_Vulkan->DestroyObjects(); g_Vulkan->DestroyObjects();
g_Vulkan->DestroyDebugMsgCallback(); g_Vulkan->DestroyDebugMsgCallback();
g_Vulkan->DestroyDevice(); g_Vulkan->DestroyDevice();
@ -191,12 +185,6 @@ Thin3DContext *WindowsVulkanContext::CreateThin3DContext() {
} }
void WindowsVulkanContext::SwapBuffers() { void WindowsVulkanContext::SwapBuffers() {
g_Vulkan->EndSurfaceRenderPass();
VkClearValue clearVal[2];
memset(clearVal, 0, sizeof(clearVal));
clearVal[0].color.float32[0] = 0.5f;
g_Vulkan->BeginSurfaceRenderPass(clearVal);
} }
void WindowsVulkanContext::Resize() { void WindowsVulkanContext::Resize() {

View file

@ -187,6 +187,7 @@ void WindowsHost::SetDebugMode(bool mode) {
} }
void WindowsHost::PollControllers(InputState &input_state) { void WindowsHost::PollControllers(InputState &input_state) {
return;
bool doPad = true; bool doPad = true;
for (auto iter = this->input.begin(); iter != this->input.end(); iter++) for (auto iter = this->input.begin(); iter != this->input.end(); iter++)
{ {

View file

@ -42,7 +42,7 @@ VulkanContext::VulkanContext(const char *app_name, uint32_t flags)
: device_(nullptr), : device_(nullptr),
gfx_queue_(nullptr), gfx_queue_(nullptr),
connection(nullptr), connection(nullptr),
gfx_queue_family_index_(-1), graphics_queue_family_index_(-1),
surface(nullptr), surface(nullptr),
window(nullptr), window(nullptr),
prepared(false), prepared(false),
@ -50,6 +50,7 @@ VulkanContext::VulkanContext(const char *app_name, uint32_t flags)
instance_(nullptr), instance_(nullptr),
width(0), width(0),
height(0), height(0),
flags_(flags),
swapchain_format(VK_FORMAT_UNDEFINED), swapchain_format(VK_FORMAT_UNDEFINED),
swapchainImageCount(0), swapchainImageCount(0),
swap_chain_(nullptr), swap_chain_(nullptr),
@ -58,7 +59,8 @@ VulkanContext::VulkanContext(const char *app_name, uint32_t flags)
cmdInitActive_(false), cmdInitActive_(false),
dbgCreateMsgCallback(nullptr), dbgCreateMsgCallback(nullptr),
dbgDestroyMsgCallback(nullptr), dbgDestroyMsgCallback(nullptr),
queue_count(0) queue_count(0),
curFrame_(0)
{ {
// List extensions to try to enable. // List extensions to try to enable.
instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
@ -117,14 +119,14 @@ VulkanContext::VulkanContext(const char *app_name, uint32_t flags)
res = vkEnumeratePhysicalDevices(instance_, &gpu_count, physical_devices_.data()); res = vkEnumeratePhysicalDevices(instance_, &gpu_count, physical_devices_.data());
assert(!res); assert(!res);
init_global_layer_properties(); InitGlobalLayerProperties();
init_global_extension_properties(); InitGlobalExtensionProperties();
if (!CheckLayers(instance_layer_properties, instance_layer_names)) { if (!CheckLayers(instance_layer_properties, instance_layer_names)) {
exit(1); exit(1);
} }
init_device_layer_properties(); InitDeviceLayerProperties();
if (!CheckLayers(device_layer_properties, device_layer_names)) { if (!CheckLayers(device_layer_properties, device_layer_names)) {
exit(1); exit(1);
} }
@ -134,7 +136,7 @@ VulkanContext::~VulkanContext() {
vkDestroyInstance(instance_, NULL); vkDestroyInstance(instance_, NULL);
} }
void vk_transition_to_present(VkCommandBuffer cmd, VkImage image) { void TransitionToPresent(VkCommandBuffer cmd, VkImage image) {
VkImageMemoryBarrier prePresentBarrier = {}; VkImageMemoryBarrier prePresentBarrier = {};
prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
prePresentBarrier.pNext = NULL; prePresentBarrier.pNext = NULL;
@ -154,45 +156,39 @@ void vk_transition_to_present(VkCommandBuffer cmd, VkImage image) {
0, 0, nullptr, 0, nullptr, 1, &prePresentBarrier); 0, 0, nullptr, 0, nullptr, 1, &prePresentBarrier);
} }
void vk_transition_from_present(VkCommandBuffer cmd, VkImage image) { void TransitionFromPresent(VkCommandBuffer cmd, VkImage image) {
VkImageMemoryBarrier postPresentBarrier = {}; VkImageMemoryBarrier prePresentBarrier = {};
postPresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
postPresentBarrier.pNext = NULL; prePresentBarrier.pNext = NULL;
postPresentBarrier.srcAccessMask = 0; prePresentBarrier.srcAccessMask = 0;
postPresentBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
postPresentBarrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
postPresentBarrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
postPresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
postPresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
postPresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
postPresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.baseMipLevel = 0;
postPresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.levelCount = 1;
postPresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.baseArrayLayer = 0;
postPresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.subresourceRange.layerCount = 1;
postPresentBarrier.image = image; prePresentBarrier.image = image;
vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
0, 0, nullptr, 0, nullptr, 1, &postPresentBarrier); 0, 0, nullptr, 0, nullptr, 1, &prePresentBarrier);
} }
VkCommandBuffer VulkanContext::BeginSurfaceRenderPass(VkClearValue clear_values[2]) { VkCommandBuffer VulkanContext::BeginSurfaceRenderPass(VkClearValue clear_values[2]) {
VkSemaphoreCreateInfo acquireSemaphoreCreateInfo; FrameData *frame = &frame_[curFrame_];
acquireSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
acquireSemaphoreCreateInfo.pNext = NULL;
acquireSemaphoreCreateInfo.flags = 0;
VkResult res = vkCreateSemaphore(device_, // Make sure the command buffer from the frame before the previous has been fully executed.
&acquireSemaphoreCreateInfo, WaitAndResetFence(frame->fence);
NULL,
&acquireSemaphore);
assert(res == VK_SUCCESS);
// Get the index of the next available swapchain image, and a semaphore to block on. // Get the index of the next available swapchain image, and a semaphore to block command buffer execution on.
res = fpAcquireNextImageKHR(device_, swap_chain_, // Now, I wonder if we should do this early in the frame or late?
VkResult res = fpAcquireNextImageKHR(device_, swap_chain_,
UINT64_MAX, UINT64_MAX,
acquireSemaphore, acquireSemaphore,
NULL, NULL,
&current_buffer); &current_buffer);
// TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR
// return codes // return codes
assert(res == VK_SUCCESS); assert(res == VK_SUCCESS);
@ -202,9 +198,9 @@ VkCommandBuffer VulkanContext::BeginSurfaceRenderPass(VkClearValue clear_values[
begin.pNext = NULL; begin.pNext = NULL;
begin.flags = 0; begin.flags = 0;
begin.pInheritanceInfo = nullptr; begin.pInheritanceInfo = nullptr;
vkBeginCommandBuffer(cmd_, &begin); res = vkBeginCommandBuffer(cmd_, &begin);
vk_transition_from_present(cmd_, swapChainBuffers[current_buffer].image); TransitionFromPresent(frame->cmdBuf, swapChainBuffers[current_buffer].image);
VkRenderPassBeginInfo rp_begin; VkRenderPassBeginInfo rp_begin;
rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
@ -218,8 +214,8 @@ VkCommandBuffer VulkanContext::BeginSurfaceRenderPass(VkClearValue clear_values[
rp_begin.clearValueCount = 2; rp_begin.clearValueCount = 2;
rp_begin.pClearValues = clear_values; rp_begin.pClearValues = clear_values;
vkCmdBeginRenderPass(cmd_, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBeginRenderPass(frame->cmdBuf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
return cmd_; return frame->cmdBuf;
} }
void VulkanContext::WaitUntilQueueIdle() { void VulkanContext::WaitUntilQueueIdle() {
@ -274,6 +270,47 @@ void vk_submit_sync(VkDevice device, VkSemaphore waitForSemaphore, VkQueue queue
vkDestroyFence(device, drawFence, NULL); vkDestroyFence(device, drawFence, NULL);
} }
void VulkanContext::EndSurfaceRenderPass() {
FrameData *frame = &frame_[curFrame_];
vkCmdEndRenderPass(frame->cmdBuf);
TransitionToPresent(frame->cmdBuf, swapChainBuffers[current_buffer].image);
VkResult res = vkEndCommandBuffer(frame->cmdBuf);
assert(res == VK_SUCCESS);
VkSubmitInfo submit_info = {};
submit_info.pNext = NULL;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &acquireSemaphore;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &frame->cmdBuf;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
res = vkQueueSubmit(gfx_queue_, 1, &submit_info, frame->fence);
assert(res == VK_SUCCESS);
// At this point we are certain that acquireSemaphore is of no further use and can be destroyed.
VkPresentInfoKHR present;
present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present.pNext = NULL;
present.swapchainCount = 1;
present.pSwapchains = &swap_chain_;
present.pImageIndices = &current_buffer;
present.pWaitSemaphores = NULL;
present.waitSemaphoreCount = 0;
present.pResults = NULL;
res = fpQueuePresentKHR(gfx_queue_, &present);
// TODO: Deal with the VK_SUBOPTIMAL_WSI and VK_ERROR_OUT_OF_DATE_WSI
// return codes
assert(!res);
curFrame_ ^= 1;
}
void VulkanContext::BeginInitCommandBuffer() { void VulkanContext::BeginInitCommandBuffer() {
assert(!cmdInitActive_); assert(!cmdInitActive_);
VulkanBeginCommandBuffer(cmd_); VulkanBeginCommandBuffer(cmd_);
@ -300,9 +337,33 @@ void VulkanContext::InitObjects(HINSTANCE hInstance, HWND hWnd, bool depthPresen
InitSurfaceRenderPass(depthPresent, true); InitSurfaceRenderPass(depthPresent, true);
InitFramebuffers(depthPresent); InitFramebuffers(depthPresent);
SubmitInitCommandBufferSync(); SubmitInitCommandBufferSync();
// Create frame data
VkCommandBufferAllocateInfo cmd = {};
cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd.pNext = NULL;
cmd.commandPool = cmd_pool_;
cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd.commandBufferCount = 2;
VkCommandBuffer cmdBuf[2];
VkResult res = vkAllocateCommandBuffers(device_, &cmd, cmdBuf);
assert(res == VK_SUCCESS);
frame_[0].cmdBuf = cmdBuf[0];
frame_[0].fence = CreateFence(true); // So it can be instantly waited on
frame_[1].cmdBuf = cmdBuf[1];
frame_[1].fence = CreateFence(true);
} }
void VulkanContext::DestroyObjects() { void VulkanContext::DestroyObjects() {
VkCommandBuffer cmdBuf[2] = { frame_[0].cmdBuf, frame_[1].cmdBuf };
vkFreeCommandBuffers(device_, cmd_pool_, 2, cmdBuf);
vkDestroyFence(device_, frame_[0].fence, nullptr);
vkDestroyFence(device_, frame_[1].fence, nullptr);
DestroyFramebuffers(); DestroyFramebuffers();
DestroySurfaceRenderPass(); DestroySurfaceRenderPass();
DestroyDepthStencilBuffer(); DestroyDepthStencilBuffer();
@ -311,39 +372,7 @@ void VulkanContext::DestroyObjects() {
DestroyCommandPool(); DestroyCommandPool();
} }
void VulkanContext::EndSurfaceRenderPass() { VkResult VulkanContext::InitLayerExtensionProperties(layer_properties &layer_props) {
vkCmdEndRenderPass(cmd_);
vk_transition_to_present(cmd_, swapChainBuffers[current_buffer].image);
VkResult res = vkEndCommandBuffer(cmd_);
assert(res == VK_SUCCESS);
/* Make sure command buffer is finished before presenting */
vk_submit_sync(device_, acquireSemaphore, gfx_queue_, cmd_);
// At this point we are certain that acquireSemaphore is of no further use and can be destroyed.
/* Now present the image in the window */
VkPresentInfoKHR present;
present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present.pNext = NULL;
present.swapchainCount = 1;
present.pSwapchains = &swap_chain_;
present.pImageIndices = &current_buffer;
present.pWaitSemaphores = NULL;
present.waitSemaphoreCount = 0;
present.pResults = NULL;
res = fpQueuePresentKHR(gfx_queue_, &present);
// TODO: Deal with the VK_SUBOPTIMAL_WSI and VK_ERROR_OUT_OF_DATE_WSI
// return codes
assert(!res);
vkDestroySemaphore(device_, acquireSemaphore, NULL);
}
VkResult VulkanContext::init_layer_extension_properties(layer_properties &layer_props) {
VkExtensionProperties *instance_extensions; VkExtensionProperties *instance_extensions;
uint32_t instance_extension_count; uint32_t instance_extension_count;
VkResult res; VkResult res;
@ -371,7 +400,7 @@ VkResult VulkanContext::init_layer_extension_properties(layer_properties &layer_
return res; return res;
} }
VkResult VulkanContext::init_global_extension_properties() { VkResult VulkanContext::InitGlobalExtensionProperties() {
uint32_t instance_extension_count; uint32_t instance_extension_count;
VkResult res; VkResult res;
@ -394,7 +423,7 @@ VkResult VulkanContext::init_global_extension_properties() {
return res; return res;
} }
VkResult VulkanContext::init_global_layer_properties() { VkResult VulkanContext::InitGlobalLayerProperties() {
uint32_t instance_layer_count; uint32_t instance_layer_count;
VkLayerProperties *vk_props = NULL; VkLayerProperties *vk_props = NULL;
VkResult res; VkResult res;
@ -429,7 +458,7 @@ VkResult VulkanContext::init_global_layer_properties() {
for (uint32_t i = 0; i < instance_layer_count; i++) { for (uint32_t i = 0; i < instance_layer_count; i++) {
layer_properties layer_props; layer_properties layer_props;
layer_props.properties = vk_props[i]; layer_props.properties = vk_props[i];
res = init_layer_extension_properties(layer_props); res = InitLayerExtensionProperties(layer_props);
if (res) if (res)
return res; return res;
instance_layer_properties.push_back(layer_props); instance_layer_properties.push_back(layer_props);
@ -439,7 +468,7 @@ VkResult VulkanContext::init_global_layer_properties() {
return res; return res;
} }
VkResult VulkanContext::init_device_extension_properties(layer_properties &layer_props) { VkResult VulkanContext::InitDeviceExtensionProperties(layer_properties &layer_props) {
VkExtensionProperties *device_extensions; VkExtensionProperties *device_extensions;
uint32_t device_extension_count; uint32_t device_extension_count;
VkResult res; VkResult res;
@ -469,10 +498,7 @@ VkResult VulkanContext::init_device_extension_properties(layer_properties &layer
return res; return res;
} }
/* VkResult VulkanContext::InitDeviceLayerProperties() {
* TODO: function description here
*/
VkResult VulkanContext::init_device_layer_properties() {
uint32_t device_layer_count; uint32_t device_layer_count;
VkLayerProperties *vk_props = NULL; VkLayerProperties *vk_props = NULL;
VkResult res; VkResult res;
@ -509,7 +535,7 @@ VkResult VulkanContext::init_device_layer_properties() {
for (uint32_t i = 0; i < device_layer_count; i++) { for (uint32_t i = 0; i < device_layer_count; i++) {
layer_properties layer_props; layer_properties layer_props;
layer_props.properties = vk_props[i]; layer_props.properties = vk_props[i];
res = init_device_extension_properties(layer_props); res = InitDeviceExtensionProperties(layer_props);
if (res) if (res)
return res; return res;
device_layer_properties.push_back(layer_props); device_layer_properties.push_back(layer_props);
@ -607,7 +633,6 @@ VkResult VulkanContext::InitDebugMsgCallback(PFN_vkDebugReportCallbackEXT dbgFun
std::cout << "GetInstanceProcAddr: Unable to find vkDbgCreateMsgCallback function." << std::endl; std::cout << "GetInstanceProcAddr: Unable to find vkDbgCreateMsgCallback function." << std::endl;
return VK_ERROR_INITIALIZATION_FAILED; return VK_ERROR_INITIALIZATION_FAILED;
} }
std::cout << "Got dbgCreateMsgCallback function\n";
dbgDestroyMsgCallback = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(instance_, "vkDestroyDebugReportCallbackEXT"); dbgDestroyMsgCallback = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(instance_, "vkDestroyDebugReportCallbackEXT");
if (!dbgDestroyMsgCallback) { if (!dbgDestroyMsgCallback) {
@ -624,7 +649,6 @@ VkResult VulkanContext::InitDebugMsgCallback(PFN_vkDebugReportCallbackEXT dbgFun
res = dbgCreateMsgCallback(instance_, &cb, nullptr, &msg_callback); res = dbgCreateMsgCallback(instance_, &cb, nullptr, &msg_callback);
switch (res) { switch (res) {
case VK_SUCCESS: case VK_SUCCESS:
puts("Successfully created message callback object\n");
msg_callbacks.push_back(msg_callback); msg_callbacks.push_back(msg_callback);
break; break;
case VK_ERROR_OUT_OF_HOST_MEMORY: case VK_ERROR_OUT_OF_HOST_MEMORY:
@ -805,7 +829,7 @@ void VulkanContext::InitSurfaceAndQueue(HINSTANCE conn, HWND wnd) {
exit(-1); exit(-1);
} }
gfx_queue_family_index_ = graphicsQueueNodeIndex; graphics_queue_family_index_ = graphicsQueueNodeIndex;
// Get the list of VkFormats that are supported: // Get the list of VkFormats that are supported:
uint32_t formatCount; uint32_t formatCount;
@ -829,7 +853,18 @@ void VulkanContext::InitSurfaceAndQueue(HINSTANCE conn, HWND wnd) {
} }
delete[] surfFormats; delete[] surfFormats;
vkGetDeviceQueue(device_, gfx_queue_family_index_, 0, &gfx_queue_); vkGetDeviceQueue(device_, graphics_queue_family_index_, 0, &gfx_queue_);
VkSemaphoreCreateInfo acquireSemaphoreCreateInfo;
acquireSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
acquireSemaphoreCreateInfo.pNext = NULL;
acquireSemaphoreCreateInfo.flags = 0;
res = vkCreateSemaphore(device_,
&acquireSemaphoreCreateInfo,
NULL,
&acquireSemaphore);
assert(res == VK_SUCCESS);
} }
void VulkanContext::InitSwapchain() { void VulkanContext::InitSwapchain() {
@ -874,13 +909,13 @@ void VulkanContext::InitSwapchain() {
// always available. // always available.
VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR; VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR;
for (size_t i = 0; i < presentModeCount; i++) { for (size_t i = 0; i < presentModeCount; i++) {
if (presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR) { if ((flags_ & VULKAN_FLAG_PRESENT_MAILBOX) && presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR) {
swapchainPresentMode = VK_PRESENT_MODE_MAILBOX_KHR; swapchainPresentMode = VK_PRESENT_MODE_MAILBOX_KHR;
break; break;
} }
if ((swapchainPresentMode != VK_PRESENT_MODE_MAILBOX_KHR) && if ((flags_ & VULKAN_FLAG_PRESENT_IMMEDIATE) && presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR) {
(presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)) {
swapchainPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR; swapchainPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR;
break;
} }
} }
@ -1063,7 +1098,7 @@ void VulkanContext::InitCommandPool() {
VkCommandPoolCreateInfo cmd_pool_info = {}; VkCommandPoolCreateInfo cmd_pool_info = {};
cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
cmd_pool_info.pNext = NULL; cmd_pool_info.pNext = NULL;
cmd_pool_info.queueFamilyIndex = gfx_queue_family_index_; cmd_pool_info.queueFamilyIndex = graphics_queue_family_index_;
cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT | VK_COMMAND_POOL_CREATE_TRANSIENT_BIT; cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT | VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
res = vkCreateCommandPool(device_, &cmd_pool_info, NULL, &cmd_pool_); res = vkCreateCommandPool(device_, &cmd_pool_info, NULL, &cmd_pool_);
@ -1295,7 +1330,7 @@ void VulkanTexture::Unlock(VulkanContext *vulkan) {
// that could be avoided with smarter code - for example we can check the fence the next // that could be avoided with smarter code - for example we can check the fence the next
// time the texture is used and discard the staging image then. // time the texture is used and discard the staging image then.
VkFence fence = vulkan->CreateFence(); VkFence fence = vulkan->CreateFence(false);
res = vkQueueSubmit(vulkan->gfx_queue_, 1, submit_info, fence); res = vkQueueSubmit(vulkan->gfx_queue_, 1, submit_info, fence);
assert(res == VK_SUCCESS); assert(res == VK_SUCCESS);
@ -1307,7 +1342,7 @@ void VulkanTexture::Unlock(VulkanContext *vulkan) {
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
imageLayout); imageLayout);
vulkan->WaitForFence(fence); vulkan->WaitAndResetFence(fence);
/* Release the resources for the staging image */ /* Release the resources for the staging image */
vkFreeMemory(vulkan->device_, mappableMemory, NULL); vkFreeMemory(vulkan->device_, mappableMemory, NULL);
@ -1346,18 +1381,21 @@ void VulkanTexture::Destroy(VulkanContext *vulkan) {
mem = NULL; mem = NULL;
} }
VkFence VulkanContext::CreateFence() { VkFence VulkanContext::CreateFence(bool presignalled) {
VkFence fence; VkFence fence;
VkFenceCreateInfo fenceInfo; VkFenceCreateInfo fenceInfo;
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.pNext = NULL; fenceInfo.pNext = NULL;
fenceInfo.flags = 0; fenceInfo.flags = presignalled ? VK_FENCE_CREATE_SIGNALED_BIT : 0;
vkCreateFence(device_, &fenceInfo, NULL, &fence); vkCreateFence(device_, &fenceInfo, NULL, &fence);
return fence; return fence;
} }
void VulkanContext::WaitForFence(VkFence fence) { void VulkanContext::WaitAndResetFence(VkFence fence) {
vkWaitForFences(device_, 1, &fence, true, 0); VkResult res = vkWaitForFences(device_, 1, &fence, true, UINT64_MAX);
assert(!res);
res = vkResetFences(device_, 1, &fence);
assert(!res);
} }
void VulkanContext::DestroyCommandBuffer() { void VulkanContext::DestroyCommandBuffer() {
@ -1387,6 +1425,7 @@ void VulkanContext::DestroySwapChain() {
fpDestroySwapchainKHR(device_, swap_chain_, NULL); fpDestroySwapchainKHR(device_, swap_chain_, NULL);
swap_chain_ = nullptr; swap_chain_ = nullptr;
swapChainBuffers.clear(); swapChainBuffers.clear();
vkDestroySemaphore(device_, acquireSemaphore, NULL);
} }
void VulkanContext::DestroyFramebuffers() { void VulkanContext::DestroyFramebuffers() {
@ -1426,7 +1465,6 @@ void TransitionImageLayout(
image_memory_barrier.subresourceRange.baseMipLevel = 0; image_memory_barrier.subresourceRange.baseMipLevel = 0;
image_memory_barrier.subresourceRange.levelCount = 1; image_memory_barrier.subresourceRange.levelCount = 1;
image_memory_barrier.subresourceRange.layerCount = 1; image_memory_barrier.subresourceRange.layerCount = 1;
if (old_image_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) { if (old_image_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
image_memory_barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT; image_memory_barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT;
} }
@ -1442,7 +1480,9 @@ void TransitionImageLayout(
if (new_image_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { if (new_image_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
/* Make sure any Copy or CPU writes to image are flushed */ /* Make sure any Copy or CPU writes to image are flushed */
image_memory_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT; if (old_image_layout != VK_IMAGE_LAYOUT_UNDEFINED) {
image_memory_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
}
image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
} }
@ -1580,13 +1620,11 @@ EShLanguage FindLanguage(const VkShaderStageFlagBits shader_type) {
} }
} }
//
// Compile a given string containing GLSL into SPV for use by VK // Compile a given string containing GLSL into SPV for use by VK
// Return value of false means an error was encountered. // Return value of false means an error was encountered.
//
bool GLSLtoSPV(const VkShaderStageFlagBits shader_type, bool GLSLtoSPV(const VkShaderStageFlagBits shader_type,
const char *pshader, const char *pshader,
std::vector<unsigned int> &spirv) { std::vector<unsigned int> &spirv, std::string *errorMessage) {
glslang::TProgram& program = *new glslang::TProgram; glslang::TProgram& program = *new glslang::TProgram;
const char *shaderStrings[1]; const char *shaderStrings[1];
@ -1605,6 +1643,10 @@ bool GLSLtoSPV(const VkShaderStageFlagBits shader_type,
if (!shader->parse(&Resources, 100, false, messages)) { if (!shader->parse(&Resources, 100, false, messages)) {
puts(shader->getInfoLog()); puts(shader->getInfoLog());
puts(shader->getInfoDebugLog()); puts(shader->getInfoDebugLog());
if (errorMessage) {
*errorMessage = shader->getInfoLog();
(*errorMessage) += shader->getInfoDebugLog();
}
return false; // something didn't work return false; // something didn't work
} }
@ -1617,11 +1659,15 @@ bool GLSLtoSPV(const VkShaderStageFlagBits shader_type,
if (!program.link(messages)) { if (!program.link(messages)) {
puts(shader->getInfoLog()); puts(shader->getInfoLog());
puts(shader->getInfoDebugLog()); puts(shader->getInfoDebugLog());
if (errorMessage) {
*errorMessage = shader->getInfoLog();
(*errorMessage) += shader->getInfoDebugLog();
}
return false; return false;
} }
// Can't fail, parsing worked, "linking" worked.
glslang::GlslangToSpv(*program.getIntermediate(stage), spirv); glslang::GlslangToSpv(*program.getIntermediate(stage), spirv);
return true; return true;
} }
@ -1663,5 +1709,4 @@ const char *VulkanResultToString(VkResult res) {
void VulkanAssertImpl(VkResult check, const char *function, const char *file, int line) { void VulkanAssertImpl(VkResult check, const char *function, const char *file, int line) {
const char *error = "(none)"; const char *error = "(none)";
} }
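
Note on the VulkanContext changes above: the surface render pass now records into one of two per-frame command buffers, each paired with a fence created pre-signalled, so BeginSurfaceRenderPass() only blocks once the CPU gets two frames ahead; submission and present moved into EndSurfaceRenderPass(), and the acquire semaphore is created once in InitSurfaceAndQueue() and destroyed with the swapchain. From the caller's side the per-frame flow looks roughly like this sketch (the clear values and the draw recording are placeholders):

void RenderOneFrame(VulkanContext *vulkan) {
	VkClearValue clearVal[2] = {};
	clearVal[0].color.float32[0] = 0.5f;  // same placeholder clear color the old Windows code used

	// Waits on and resets this frame's fence (pre-signalled on first use),
	// acquires the next swapchain image and begins the surface render pass.
	VkCommandBuffer cmd = vulkan->BeginSurfaceRenderPass(clearVal);

	// ... record draw commands into cmd here ...

	// Ends the pass, submits with this frame's fence, presents, and flips
	// between the two FrameData slots.
	vulkan->EndSurfaceRenderPass();
}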

View file

@ -54,6 +54,8 @@
enum { enum {
VULKAN_FLAG_VALIDATE = 1, VULKAN_FLAG_VALIDATE = 1,
VULKAN_FLAG_PRESENT_MAILBOX = 2,
VULKAN_FLAG_PRESENT_IMMEDIATE = 4,
}; };
// A layer can expose extensions, keep track of those extensions here. // A layer can expose extensions, keep track of those extensions here.
@ -103,8 +105,8 @@ public:
void WaitUntilQueueIdle(); void WaitUntilQueueIdle();
// Utility functions for shorter code // Utility functions for shorter code
VkFence CreateFence(); VkFence CreateFence(bool presignalled);
void WaitForFence(VkFence fence); void WaitAndResetFence(VkFence fence);
int GetWidth() { return width; } int GetWidth() { return width; }
int GetHeight() { return height; } int GetHeight() { return height; }
@ -120,32 +122,33 @@ public:
VkResult InitDebugMsgCallback(PFN_vkDebugReportCallbackEXT dbgFunc, int bits, void *userdata = nullptr); VkResult InitDebugMsgCallback(PFN_vkDebugReportCallbackEXT dbgFunc, int bits, void *userdata = nullptr);
void DestroyDebugMsgCallback(); void DestroyDebugMsgCallback();
VkSemaphore acquireSemaphore; VkRenderPass GetSurfaceRenderPass() const {
VkRenderPass GetSurfaceRenderPass() {
return surface_render_pass_; return surface_render_pass_;
} }
VkPhysicalDevice GetPhysicalDevice() { VkPhysicalDevice GetPhysicalDevice() const {
return physical_devices_[0]; return physical_devices_[0];
} }
VkQueue GetGraphicsQueue() { VkQueue GetGraphicsQueue() const {
return gfx_queue_; return gfx_queue_;
} }
int GetGraphicsQueueFamilyIndex() { int GetGraphicsQueueFamilyIndex() const {
return gfx_queue_family_index_; return graphics_queue_family_index_;
} }
VkResult init_global_extension_properties();
VkResult init_layer_extension_properties(layer_properties &layer_props);
VkResult init_global_layer_properties(); VkResult InitGlobalExtensionProperties();
VkResult InitLayerExtensionProperties(layer_properties &layer_props);
VkResult init_device_extension_properties(layer_properties &layer_props); VkResult InitGlobalLayerProperties();
VkResult init_device_layer_properties();
VkResult InitDeviceExtensionProperties(layer_properties &layer_props);
VkResult InitDeviceLayerProperties();
VkSemaphore acquireSemaphore;
#ifdef _WIN32 #ifdef _WIN32
#define APP_NAME_STR_LEN 80 #define APP_NAME_STR_LEN 80
@ -182,7 +185,7 @@ public:
std::vector<VkExtensionProperties> device_extension_properties; std::vector<VkExtensionProperties> device_extension_properties;
std::vector<VkPhysicalDevice> physical_devices_; std::vector<VkPhysicalDevice> physical_devices_;
uint32_t gfx_queue_family_index_; uint32_t graphics_queue_family_index_;
VkPhysicalDeviceProperties gpu_props; VkPhysicalDeviceProperties gpu_props;
std::vector<VkQueueFamilyProperties> queue_props; std::vector<VkQueueFamilyProperties> queue_props;
VkPhysicalDeviceMemoryProperties memory_properties; VkPhysicalDeviceMemoryProperties memory_properties;
@ -195,12 +198,21 @@ private:
// Swap chain // Swap chain
int width, height; int width, height;
int flags_;
VkFormat swapchain_format; VkFormat swapchain_format;
std::vector<VkFramebuffer> framebuffers_; std::vector<VkFramebuffer> framebuffers_;
uint32_t swapchainImageCount; uint32_t swapchainImageCount;
VkSwapchainKHR swap_chain_; VkSwapchainKHR swap_chain_;
std::vector<swap_chain_buffer> swapChainBuffers; std::vector<swap_chain_buffer> swapChainBuffers;
// Manages flipping command buffers for the backbuffer render pass.
// It is recommended to do the same for other rendering passes.
struct FrameData {
VkFence fence;
VkCommandBuffer cmdBuf;
};
FrameData frame_[2];
int curFrame_;
// Simple loader for the WSI extension. // Simple loader for the WSI extension.
PFN_vkGetPhysicalDeviceSurfaceSupportKHR fpGetPhysicalDeviceSurfaceSupportKHR; PFN_vkGetPhysicalDeviceSurfaceSupportKHR fpGetPhysicalDeviceSurfaceSupportKHR;
@ -265,7 +277,7 @@ void vk_submit_sync(VkDevice device, VkSemaphore waitForSemaphore, VkQueue queue
void init_glslang(); void init_glslang();
void finalize_glslang(); void finalize_glslang();
bool GLSLtoSPV(const VkShaderStageFlagBits shader_type, const char *pshader, std::vector<unsigned int> &spirv); bool GLSLtoSPV(const VkShaderStageFlagBits shader_type, const char *pshader, std::vector<uint32_t> &spirv, std::string *errorMessage = nullptr);
void TransitionImageLayout( void TransitionImageLayout(
VkCommandBuffer cmd, VkCommandBuffer cmd,
@ -274,10 +286,5 @@ void TransitionImageLayout(
VkImageLayout old_image_layout, VkImageLayout old_image_layout,
VkImageLayout new_image_layout); VkImageLayout new_image_layout);
void VulkanAssertImpl(VkResult check, const char *function, const char *file, int line);
// DO NOT call vulkan functions within this! Instead, store the result in a variable and check that.
#define VulkanAssert(x) if ((x) != VK_SUCCESS) VulkanAssertImpl((x), __FUNCTION__, __FILE__, __LINE__);
#endif // UTIL_INIT #endif // UTIL_INIT
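
Note on the header changes above: present-mode selection is now opt-in through the new creation flags, the init_* helpers are renamed to Init*, and CreateFence() takes a presignalled argument to pair with WaitAndResetFence(). Typical usage, with an arbitrary app name string:

// With neither present flag set, InitSwapchain() keeps VK_PRESENT_MODE_FIFO_KHR (vsync).
VulkanContext *g_Vulkan = new VulkanContext("PPSSPP", VULKAN_FLAG_VALIDATE | VULKAN_FLAG_PRESENT_MAILBOX);

// Created signalled, so the very first WaitAndResetFence() returns immediately.
VkFence frameFence = g_Vulkan->CreateFence(true);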

View file

@ -34,7 +34,7 @@ static const char * const vulkan_fsTexCol =
"layout(location = 0) in vec4 oColor0;\n" "layout(location = 0) in vec4 oColor0;\n"
"layout(location = 1) in vec2 oTexCoord0;\n" "layout(location = 1) in vec2 oTexCoord0;\n"
"layout(location = 0) out vec4 fragColor0\n;" "layout(location = 0) out vec4 fragColor0\n;"
"layout(binding = 2) uniform sampler2D Sampler0;\n" "layout(set = 0, binding = 1) uniform sampler2D Sampler0;\n"
"void main() { fragColor0 = texture(Sampler0, oTexCoord0) * oColor0; }\n"; "void main() { fragColor0 = texture(Sampler0, oTexCoord0) * oColor0; }\n";
static const char * const glsl_fsCol = static const char * const glsl_fsCol =
@ -87,7 +87,7 @@ static const char * const vulkan_vsCol =
"#version 140\n" "#version 140\n"
"#extension GL_ARB_separate_shader_objects : enable\n" "#extension GL_ARB_separate_shader_objects : enable\n"
"#extension GL_ARB_shading_language_420pack : enable\n" "#extension GL_ARB_shading_language_420pack : enable\n"
"layout (std140, binding = 1) uniform bufferVals {\n" "layout (std140, set = 0, binding = 0) uniform bufferVals {\n"
" mat4 WorldViewProj;\n" " mat4 WorldViewProj;\n"
"} myBufferVals;\n" "} myBufferVals;\n"
"layout (location = 0) in vec4 pos;\n" "layout (location = 0) in vec4 pos;\n"
@ -128,7 +128,7 @@ static const char * const vulkan_vsTexCol =
"#version 140\n" "#version 140\n"
"#extension GL_ARB_separate_shader_objects : enable\n" "#extension GL_ARB_separate_shader_objects : enable\n"
"#extension GL_ARB_shading_language_420pack : enable\n" "#extension GL_ARB_shading_language_420pack : enable\n"
"layout (std140, binding = 1) uniform bufferVals {\n" "layout (std140, set = 0, binding = 0) uniform bufferVals {\n"
" mat4 WorldViewProj;\n" " mat4 WorldViewProj;\n"
"} myBufferVals;\n" "} myBufferVals;\n"
"layout (location = 0) in vec4 pos;\n" "layout (location = 0) in vec4 pos;\n"

View file

@ -33,9 +33,10 @@
#include "thin3d/VulkanContext.h" #include "thin3d/VulkanContext.h"
// We use a simple descriptor set for all rendering: 1 sampler, 1 texture, 1 UBO binding point. // We use a simple descriptor set for all rendering: 1 sampler, 1 texture, 1 UBO binding point.
// binding 0 - vertex data // binding 0 - uniform data
// binding 1 - uniform data // binding 1 - sampler
// binding 2 - sampler //
// Vertex data lives in a separate namespace (location = 0, 1, etc)
#define VK_PROTOTYPES #define VK_PROTOTYPES
#include "ext/vulkan/vulkan.h" #include "ext/vulkan/vulkan.h"
@ -383,6 +384,10 @@ public:
void SetVector(const char *name, float *value, int n) override; void SetVector(const char *name, float *value, int n) override;
void SetMatrix4x4(const char *name, const float value[16]) override; void SetMatrix4x4(const char *name, const float value[16]) override;
int GetUBOSize() const {
return uboSize_;
}
Thin3DVKShader *vshader; Thin3DVKShader *vshader;
Thin3DVKShader *fshader; Thin3DVKShader *fshader;
@ -426,6 +431,7 @@ struct DescriptorSetKey {
if (texture_ < other.texture_) return true; else if (texture_ > other.texture_) return false; if (texture_ < other.texture_) return true; else if (texture_ > other.texture_) return false;
if (vertexFormat_ < other.vertexFormat_) return true; else if (vertexFormat_ > other.vertexFormat_) return false; if (vertexFormat_ < other.vertexFormat_) return true; else if (vertexFormat_ > other.vertexFormat_) return false;
if (sampler_ < other.sampler_) return true; else if (sampler_ > other.sampler_) return false; if (sampler_ < other.sampler_) return true; else if (sampler_ > other.sampler_) return false;
if (frame < other.frame) return true; else if (frame > other.frame) return false;
return false; return false;
} }
}; };
@ -507,7 +513,6 @@ private:
void DirtyDynamicState(); void DirtyDynamicState();
void BeginInitCommands(); void BeginInitCommands();
void EndInitCommands();
VulkanContext *vulkan_; VulkanContext *vulkan_;
@ -527,17 +532,13 @@ private:
std::map<DescriptorSetKey, VkDescriptorSet> descSets_; std::map<DescriptorSetKey, VkDescriptorSet> descSets_;
VkDescriptorPool descriptorPool_; VkDescriptorPool descriptorPool_;
VkDescriptorSet descriptorSet_;
VkDescriptorSetLayout descriptorSetLayout_; VkDescriptorSetLayout descriptorSetLayout_;
VkPipelineLayout pipelineLayout_; VkPipelineLayout pipelineLayout_;
VkPipelineCache pipelineCache_; VkPipelineCache pipelineCache_;
VkCommandPool cmdPool_; VkCommandPool cmdPool_;
VkInstance instance_;
VkPhysicalDevice physicalDevice_;
VkDevice device_; VkDevice device_;
VkQueue queue_; VkQueue queue_;
VkRenderPass renderPass_;
int queueFamilyIndex_; int queueFamilyIndex_;
// State to apply at the next draw call if viewportDirty or scissorDirty are true. // State to apply at the next draw call if viewportDirty or scissorDirty are true.
@ -557,20 +558,11 @@ private:
VkCommandBuffer initCmd_; VkCommandBuffer initCmd_;
bool hasInitCommands_; bool hasInitCommands_;
VkFence initFence_; VkFence initFence_;
bool pendingInitFence_;
// TODO: Transpose this into a struct FrameObject[2]. // TODO: Transpose this into a struct FrameObject[2].
// We write to one, while we wait for the draws from the other to complete.
// Then, at the end of the frame, they switch roles.
// cmdBuf_ for commands, pushBuffer_ for data. cmd_ will often refer to push_.
VkCommandBuffer cmdBuffer_[2];
VkCommandBuffer cmd_; // The current one VkCommandBuffer cmd_; // The current one
VkFence cmdFences_[2];
VkFence cmdFence_;
VulkanPushBuffer *pushBuffer_[2]; VulkanPushBuffer *pushBuffer_[2];
int frameNum_; int frameNum_;
VulkanPushBuffer *push_; VulkanPushBuffer *push_;
@ -674,7 +666,6 @@ private:
VulkanImage staging_; VulkanImage staging_;
VkImageView view_; VkImageView view_;
int32_t width_, height_, depth_;
int mipLevels_; int mipLevels_;
T3DImageFormat format_; T3DImageFormat format_;
@ -682,14 +673,22 @@ private:
}; };
Thin3DVKContext::Thin3DVKContext(VulkanContext *vulkan) Thin3DVKContext::Thin3DVKContext(VulkanContext *vulkan)
: viewportDirty_(false), scissorDirty_(false), vulkan_(vulkan) { : viewportDirty_(false), scissorDirty_(false), vulkan_(vulkan), frameNum_(0) {
device_ = vulkan->GetDevice(); device_ = vulkan->GetDevice();
queue_ = vulkan->GetGraphicsQueue();
queueFamilyIndex_ = vulkan->GetGraphicsQueueFamilyIndex();
noScissor_.offset.x = 0; noScissor_.offset.x = 0;
noScissor_.offset.y = 0; noScissor_.offset.y = 0;
noScissor_.extent.width = pixel_xres; noScissor_.extent.width = pixel_xres;
noScissor_.extent.height = pixel_yres; noScissor_.extent.height = pixel_yres;
scissor_ = noScissor_;
viewport_.x = 0;
viewport_.y = 0;
viewport_.width = pixel_xres;
viewport_.height = pixel_yres;
viewport_.minDepth = 0.0f;
viewport_.maxDepth = 1.0f;  // a [0,0] depth range collapses all depths to zero; default to the standard [0,1]
memset(boundTextures_, 0, sizeof(boundTextures_)); memset(boundTextures_, 0, sizeof(boundTextures_));
CreatePresets(); CreatePresets();
@ -703,18 +702,16 @@ Thin3DVKContext::Thin3DVKContext(VulkanContext *vulkan)
VkResult res = vkCreateCommandPool(device_, &p, nullptr, &cmdPool_); VkResult res = vkCreateCommandPool(device_, &p, nullptr, &cmdPool_);
assert(VK_SUCCESS == res); assert(VK_SUCCESS == res);
VkDescriptorPoolSize dpTypes[3]; VkDescriptorPoolSize dpTypes[2];
dpTypes[0].descriptorCount = 200; dpTypes[0].descriptorCount = 200;
dpTypes[0].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dpTypes[0].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dpTypes[1].descriptorCount = 1; dpTypes[1].descriptorCount = 2;
dpTypes[1].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dpTypes[1].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
dpTypes[2].descriptorCount = 1;
dpTypes[2].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
VkDescriptorPoolCreateInfo dp; VkDescriptorPoolCreateInfo dp;
dp.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; dp.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
dp.pNext = nullptr; dp.pNext = nullptr;
dp.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // We want to individually alloc and free descriptor sets. (do we?) dp.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // We want to individually alloc and free descriptor sets. (do we? or one per "frame"?)
dp.maxSets = 200; // One set for every texture available... sigh dp.maxSets = 200; // One set for every texture available... sigh
dp.pPoolSizes = dpTypes; dp.pPoolSizes = dpTypes;
dp.poolSizeCount = ARRAY_SIZE(dpTypes); dp.poolSizeCount = ARRAY_SIZE(dpTypes);
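The pool flag comment above wonders whether to free sets individually or go per frame. The per-frame variant would drop VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, keep one pool per in-flight frame, and wipe it once that frame's fence has signalled. A sketch only, not what this commit does (it reuses the DescriptorSetKey map from above):

#include <map>

void ResetFrameDescriptors(VkDevice device, VkDescriptorPool framePool,
                           std::map<DescriptorSetKey, VkDescriptorSet> &cache) {
	vkResetDescriptorPool(device, framePool, 0);  // returns every set allocated from this pool
	cache.clear();                                // the cached handles are now invalid, drop them
}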
@ -723,31 +720,25 @@ Thin3DVKContext::Thin3DVKContext(VulkanContext *vulkan)
pushBuffer_[0] = new VulkanPushBuffer(device_, vulkan_, 1024 * 1024); pushBuffer_[0] = new VulkanPushBuffer(device_, vulkan_, 1024 * 1024);
pushBuffer_[1] = new VulkanPushBuffer(device_, vulkan_, 1024 * 1024); pushBuffer_[1] = new VulkanPushBuffer(device_, vulkan_, 1024 * 1024);
// binding 0 - vertex data // binding 0 - uniform data
// binding 1 - uniform data // binding 1 - sampler
// binding 2 - sampler // binding 2 - image
// binding 3 - image VkDescriptorSetLayoutBinding bindings[2];
VkDescriptorSetLayoutBinding bindings[4];
bindings[0].descriptorCount = 1; bindings[0].descriptorCount = 1;
bindings[0].pImmutableSamplers = nullptr; bindings[0].pImmutableSamplers = nullptr;
bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
bindings[0].binding = 0; bindings[0].binding = 0;
bindings[1].descriptorCount = 1; bindings[1].descriptorCount = 1;
bindings[1].pImmutableSamplers = nullptr; bindings[1].pImmutableSamplers = nullptr;
bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[1].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; bindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[1].binding = 1; bindings[1].binding = 1;
bindings[2].descriptorCount = 1;
bindings[2].pImmutableSamplers = nullptr;
bindings[2].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
bindings[2].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
bindings[2].binding = 2;
VkDescriptorSetLayoutCreateInfo dsl; VkDescriptorSetLayoutCreateInfo dsl;
dsl.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; dsl.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
dsl.pNext = nullptr; dsl.pNext = nullptr;
dsl.bindingCount = 3; dsl.bindingCount = 2;
dsl.pBindings = bindings; dsl.pBindings = bindings;
res = vkCreateDescriptorSetLayout(device_, &dsl, nullptr, &descriptorSetLayout_); res = vkCreateDescriptorSetLayout(device_, &dsl, nullptr, &descriptorSetLayout_);
assert(VK_SUCCESS == res); assert(VK_SUCCESS == res);
@ -768,10 +759,6 @@ Thin3DVKContext::Thin3DVKContext(VulkanContext *vulkan)
cb.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; cb.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cb.commandPool = cmdPool_; cb.commandPool = cmdPool_;
cb.commandBufferCount = 1; cb.commandBufferCount = 1;
res = vkAllocateCommandBuffers(device_, &cb, &cmdBuffer_[0]);
assert(VK_SUCCESS == res);
res = vkAllocateCommandBuffers(device_, &cb, &cmdBuffer_[1]);
assert(VK_SUCCESS == res);
res = vkAllocateCommandBuffers(device_, &cb, &initCmd_); res = vkAllocateCommandBuffers(device_, &cb, &initCmd_);
assert(VK_SUCCESS == res); assert(VK_SUCCESS == res);
hasInitCommands_ = false; hasInitCommands_ = false;
@ -780,16 +767,7 @@ Thin3DVKContext::Thin3DVKContext(VulkanContext *vulkan)
f.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; f.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
f.pNext = nullptr; f.pNext = nullptr;
f.flags = 0; f.flags = 0;
res = vkCreateFence(device_, &f, nullptr, &cmdFences_[0]); vkCreateFence(device_, &f, nullptr, &initFence_);
assert(VK_SUCCESS == res);
f.flags = VK_FENCE_CREATE_SIGNALED_BIT;
res = vkCreateFence(device_, &f, nullptr, &cmdFences_[1]);
assert(VK_SUCCESS == res);
// Create as already signalled, so we can wait for it the first time.
res = vkCreateFence(device_, &f, nullptr, &initFence_);
assert(VK_SUCCESS == res);
pendingInitFence_ = false;
VkPipelineCacheCreateInfo pc; VkPipelineCacheCreateInfo pc;
pc.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; pc.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
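The init fence created above is now waited on through VulkanContext::WaitAndResetFence() (see End() below). That helper's body is not part of this diff; a minimal version would look roughly like:

void WaitAndResetFenceSketch(VkDevice device, VkFence fence) {
	vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // block until the submit that uses it completes
	vkResetFences(device, 1, &fence);                         // back to unsignalled for the next submit
}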
@ -799,18 +777,12 @@ Thin3DVKContext::Thin3DVKContext(VulkanContext *vulkan)
pc.flags = 0; pc.flags = 0;
res = vkCreatePipelineCache(device_, &pc, nullptr, &pipelineCache_); res = vkCreatePipelineCache(device_, &pc, nullptr, &pipelineCache_);
assert(VK_SUCCESS == res); assert(VK_SUCCESS == res);
push_ = pushBuffer_[0];
cmd_ = cmdBuffer_[0];
cmdFence_ = cmdFences_[0];
} }
Thin3DVKContext::~Thin3DVKContext() { Thin3DVKContext::~Thin3DVKContext() {
for (auto x : pipelines_) { for (auto x : pipelines_) {
vkDestroyPipeline(device_, x.second, nullptr); vkDestroyPipeline(device_, x.second, nullptr);
} }
vkFreeCommandBuffers(device_, cmdPool_, 2, cmdBuffer_);
vkFreeCommandBuffers(device_, cmdPool_, 1, &cmd_);
vkDestroyCommandPool(device_, cmdPool_, nullptr); vkDestroyCommandPool(device_, cmdPool_, nullptr);
// This also destroys all descriptor sets. // This also destroys all descriptor sets.
vkDestroyDescriptorPool(device_, descriptorPool_, nullptr); vkDestroyDescriptorPool(device_, descriptorPool_, nullptr);
@ -820,24 +792,28 @@ Thin3DVKContext::~Thin3DVKContext() {
} }
void Thin3DVKContext::Begin(bool clear, uint32_t colorval, float depthVal, int stencilVal) { void Thin3DVKContext::Begin(bool clear, uint32_t colorval, float depthVal, int stencilVal) {
VkClearValue clearVal[2]; VkClearValue clearVal[2] = {};
Uint8x4ToFloat4(colorval, clearVal[0].color.float32); Uint8x4ToFloat4(colorval, clearVal[0].color.float32);
clearVal[0].color.float32[2] = 1.0f;
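// Debug aid (a guess at intent): tint the clear color blue on odd frames so the frame alternation is visible on screen.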
if (frameNum_ & 1)
clearVal[0].color.float32[2] = 1.0f;
clearVal[1].depthStencil.depth = depthVal; clearVal[1].depthStencil.depth = depthVal;
clearVal[1].depthStencil.stencil = stencilVal; clearVal[1].depthStencil.stencil = stencilVal;
vulkan_->BeginSurfaceRenderPass(clearVal);
// Make sure we don't stomp over the old command buffer. cmd_ = vulkan_->BeginSurfaceRenderPass(clearVal);
vkWaitForFences(device_, 1, &cmdFence_, true, 0);
push_ = pushBuffer_[frameNum_ & 1];
// OK, we now know that nothing is reading from this frame's data pushbuffer,
// and that the command buffer can be safely reset and reused. So let's do that.
push_->Begin(device_); push_->Begin(device_);
scissorDirty_ = true;
viewportDirty_ = true;
} }
void Thin3DVKContext::BeginInitCommands() { void Thin3DVKContext::BeginInitCommands() {
assert(!hasInitCommands_); assert(!hasInitCommands_);
// Before we can begin, we must be sure that the command buffer is no longer in use, as we only have a single one for init
// tasks (for now).
VkCommandBufferBeginInfo begin; VkCommandBufferBeginInfo begin;
begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin.pNext = nullptr; begin.pNext = nullptr;
@ -848,49 +824,34 @@ void Thin3DVKContext::BeginInitCommands() {
hasInitCommands_ = true; hasInitCommands_ = true;
} }
void Thin3DVKContext::EndInitCommands() {
VkResult res = vkEndCommandBuffer(initCmd_);
assert(VK_SUCCESS == res);
}
void Thin3DVKContext::End() { void Thin3DVKContext::End() {
// Stop collecting data in the frame data buffer. // Stop collecting data in the frame data buffer.
push_->End(device_); push_->End(device_);
vkCmdEndRenderPass(cmd_); // IF something needs to be uploaded etc, sneak it in before we actually run the main command buffer.
VkResult endRes = vkEndCommandBuffer(cmd_);
if (hasInitCommands_) { if (hasInitCommands_) {
assert(!pendingInitFence_); VkResult res = vkEndCommandBuffer(initCmd_);
EndInitCommands(); assert(VK_SUCCESS == res);
// Run the texture uploads etc _before_ we execute the ordinary command buffer // Run the texture uploads etc _before_ we execute the ordinary command buffer
pendingInitFence_ = true;
VkSubmitInfo submit = {}; VkSubmitInfo submit = {};
submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit.pCommandBuffers = &initCmd_; submit.pCommandBuffers = &initCmd_;
submit.commandBufferCount = 1; submit.commandBufferCount = 1;
VkResult res = vkQueueSubmit(queue_, 1, &submit, initFence_); res = vkQueueSubmit(queue_, 1, &submit, initFence_);
assert(VK_SUCCESS == res); assert(VK_SUCCESS == res);
// Before we can begin, we must be sure that the command buffer is no longer in use, as we only have a single one for init
// tasks (for now).
vulkan_->WaitAndResetFence(initFence_);
hasInitCommands_ = false; hasInitCommands_ = false;
// Init cmd buffer is again available for writing.
} }
if (VK_SUCCESS != endRes) { vulkan_->EndSurfaceRenderPass();
ELOG("vkEndCommandBuffer failed");
vkResetCommandBuffer(cmd_, 0);
} else {
VkSubmitInfo submit = {};
submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit.pCommandBuffers = &cmd_;
submit.commandBufferCount = 1;
VkResult res = vkQueueSubmit(queue_, 1, &submit, cmdFence_);
assert(VK_SUCCESS == res);
}
frameNum_++; frameNum_++;
push_ = pushBuffer_[frameNum_ & 1]; cmd_ = nullptr; // will be set on the next begin
cmd_ = cmdBuffer_[frameNum_ & 1]; push_ = nullptr;
cmdFence_ = cmdFences_[frameNum_ & 1];
DirtyDynamicState(); DirtyDynamicState();
} }
@ -917,14 +878,13 @@ VkDescriptorSet Thin3DVKContext::GetOrCreateDescriptorSet() {
VkResult res = vkAllocateDescriptorSets(device_, &alloc, &descSet); VkResult res = vkAllocateDescriptorSets(device_, &alloc, &descSet);
assert(VK_SUCCESS == res); assert(VK_SUCCESS == res);
// bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; // bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
// bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; // bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
// bindings[2].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
VkDescriptorBufferInfo bufferDesc; VkDescriptorBufferInfo bufferDesc;
bufferDesc.buffer = push_->GetVkBuffer(); bufferDesc.buffer = push_->GetVkBuffer();
bufferDesc.offset = 0; bufferDesc.offset = 0;
bufferDesc.range = 16 * 4; bufferDesc.range = curShaderSet_->GetUBOSize();
VkDescriptorImageInfo imageDesc; VkDescriptorImageInfo imageDesc;
imageDesc.imageView = boundTextures_[0]->GetImageView(); imageDesc.imageView = boundTextures_[0]->GetImageView();
@ -937,7 +897,7 @@ VkDescriptorSet Thin3DVKContext::GetOrCreateDescriptorSet() {
writes[0].pNext = nullptr; writes[0].pNext = nullptr;
writes[0].dstSet = descSet; writes[0].dstSet = descSet;
writes[0].dstArrayElement = 0; writes[0].dstArrayElement = 0;
writes[0].dstBinding = 1; writes[0].dstBinding = 0;
writes[0].pBufferInfo = &bufferDesc; writes[0].pBufferInfo = &bufferDesc;
writes[0].descriptorCount = 1; writes[0].descriptorCount = 1;
writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
@ -946,7 +906,7 @@ VkDescriptorSet Thin3DVKContext::GetOrCreateDescriptorSet() {
writes[1].pNext = nullptr; writes[1].pNext = nullptr;
writes[1].dstSet = descSet; writes[1].dstSet = descSet;
writes[1].dstArrayElement = 0; writes[1].dstArrayElement = 0;
writes[1].dstBinding = 2; writes[1].dstBinding = 1;
writes[1].pImageInfo = &imageDesc; writes[1].pImageInfo = &imageDesc;
writes[1].descriptorCount = 1; writes[1].descriptorCount = 1;
writes[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; writes[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
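A set updated like this gets bound with one dynamic offset for binding 0, the byte position at which this draw's uniform data was written into the push buffer. The bind itself is not shown in this diff, but presumably looks something like the following (uboOffset is a placeholder name):

uint32_t uboOffset = 0;  // placeholder: offset of this draw's uniforms inside push_
vkCmdBindDescriptorSets(cmd_, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout_,
                        0, 1, &descSet, 1, &uboOffset);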
@ -1062,7 +1022,7 @@ VkPipeline Thin3DVKContext::GetOrCreatePipeline() {
info.pViewportState = &vs; // Must set viewport and scissor counts even if we set the actual state dynamically. info.pViewportState = &vs; // Must set viewport and scissor counts even if we set the actual state dynamically.
info.layout = pipelineLayout_; info.layout = pipelineLayout_;
info.subpass = 0; info.subpass = 0;
info.renderPass = renderPass_; info.renderPass = vulkan_->GetSurfaceRenderPass();
// OK, need to create a new pipeline. // OK, need to create a new pipeline.
VkPipeline pipeline; VkPipeline pipeline;
@ -1136,8 +1096,8 @@ void Thin3DVKTexture::SetImageData(int x, int y, int z, int width, int height, i
// So we need to do a staging copy. We upload the data to the staging buffer immediately, then we actually do the final copy once it's used the first time // So we need to do a staging copy. We upload the data to the staging buffer immediately, then we actually do the final copy once it's used the first time
// as we need a command buffer and the architecture of Thin3D doesn't really work the way we want.. // as we need a command buffer and the architecture of Thin3D doesn't really work the way we want..
if (!image_.IsValid()) { if (!image_.IsValid()) {
staging_.Create2D(vulkan_, vulkanFormat, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, width, height); staging_.Create2D(vulkan_, vulkanFormat, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, width, height);
image_.Create2D(vulkan_, vulkanFormat, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, (VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT), width, height); image_.Create2D(vulkan_, vulkanFormat, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_IMAGE_TILING_OPTIMAL, (VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT), width, height);
} }
VkImageViewCreateInfo iv; VkImageViewCreateInfo iv;
@ -1162,6 +1122,8 @@ void Thin3DVKTexture::SetImageData(int x, int y, int z, int width, int height, i
// TODO: Support setting only parts of the image efficiently. // TODO: Support setting only parts of the image efficiently.
staging_.SetImageData2D(vulkan_->GetDevice(), data, width, height, stride); staging_.SetImageData2D(vulkan_->GetDevice(), data, width, height, stride);
state_ = TextureState::STAGED; state_ = TextureState::STAGED;
width_ = width;
height_ = height;
} }
void Thin3DVKTexture::Finalize(int zim_flags) { void Thin3DVKTexture::Finalize(int zim_flags) {
@ -1173,20 +1135,23 @@ bool Thin3DVKTexture::NeedsUpload() {
} }
void Thin3DVKTexture::Upload(VkCommandBuffer cmd) { void Thin3DVKTexture::Upload(VkCommandBuffer cmd) {
if (state_ == TextureState::STAGED) { if (state_ != TextureState::STAGED) {
// Before we can texture, we need to Copy and ChangeLayout. return;
VkImageCopy copy_region;
copy_region.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
copy_region.srcOffset = { 0, 0, 0 };
copy_region.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
copy_region.dstOffset = { 0, 0, 0 };
copy_region.extent = { (uint32_t)width_, (uint32_t)height_, 1 };
vkCmdCopyImage(cmd, staging_.GetImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image_.GetImage(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
image_.ChangeLayout(cmd, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
// From this point on, the image can be used for texturing.
// Even before this function call (but after SetImageData), the image object can be referenced in a descriptor set.
state_ = TextureState::INITIALIZED;
} }
// Before we can texture, we need to Copy and ChangeLayout.
VkImageCopy copy_region;
copy_region.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
copy_region.srcOffset = { 0, 0, 0 };
copy_region.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
copy_region.dstOffset = { 0, 0, 0 };
copy_region.extent = { (uint32_t)width_, (uint32_t)height_, 1 };
vkCmdCopyImage(cmd, staging_.GetImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image_.GetImage(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
image_.ChangeLayout(cmd, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
// From this point on, the image can be used for texturing.
// Even before this function call (but after SetImageData), the image object can be referenced in a descriptor set. Better make sure that the image is uploaded
// before it's actually used though...
state_ = TextureState::INITIALIZED;
} }
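Worth noting: vkCmdCopyImage above passes TRANSFER_SRC_OPTIMAL / TRANSFER_DST_OPTIMAL layouts, yet Create2D now leaves images in VK_IMAGE_LAYOUT_UNDEFINED. Unless that transition happens somewhere outside this diff, the destination at least would normally be moved into place first, e.g.:

// Assumed pre-copy transition for the destination; its contents can be discarded, so coming
// from UNDEFINED is fine. The CPU-written staging image needs more care, since a transition
// out of UNDEFINED is allowed to discard its data.
image_.ChangeLayout(cmd, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);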
static bool isPowerOf2(int n) { static bool isPowerOf2(int n) {
@ -1317,6 +1282,7 @@ void Thin3DVKContext::SetRenderState(T3DRenderState rs, uint32_t value) {
} }
void Thin3DVKContext::Draw(T3DPrimitive prim, Thin3DShaderSet *shaderSet, Thin3DVertexFormat *format, Thin3DBuffer *vdata, int vertexCount, int offset) { void Thin3DVKContext::Draw(T3DPrimitive prim, Thin3DShaderSet *shaderSet, Thin3DVertexFormat *format, Thin3DBuffer *vdata, int vertexCount, int offset) {
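// NOTE: the unconditional return below makes Draw() a no-op for now (DrawIndexed() below does the same), presumably while the backend is being brought up.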
return;
ApplyDynamicState(); ApplyDynamicState();
curPrim_ = primToVK[prim]; curPrim_ = primToVK[prim];
@ -1338,6 +1304,7 @@ void Thin3DVKContext::Draw(T3DPrimitive prim, Thin3DShaderSet *shaderSet, Thin3D
} }
void Thin3DVKContext::DrawIndexed(T3DPrimitive prim, Thin3DShaderSet *shaderSet, Thin3DVertexFormat *format, Thin3DBuffer *vdata, Thin3DBuffer *idata, int vertexCount, int offset) { void Thin3DVKContext::DrawIndexed(T3DPrimitive prim, Thin3DShaderSet *shaderSet, Thin3DVertexFormat *format, Thin3DBuffer *vdata, Thin3DBuffer *idata, int vertexCount, int offset) {
return;
ApplyDynamicState(); ApplyDynamicState();
curPrim_ = primToVK[prim]; curPrim_ = primToVK[prim];

View file

@ -40,12 +40,12 @@
#include "thin3d/vulkan_utils.h" #include "thin3d/vulkan_utils.h"
void VulkanImage::Create2D(VulkanContext *vulkan, VkFormat format, VkFlags required_props, VkImageUsageFlags usage, int width, int height) { void VulkanImage::Create2D(VulkanContext *vulkan, VkFormat format, VkFlags required_props, VkImageTiling tiling, VkImageUsageFlags usage, int width, int height) {
VkDevice device = vulkan->GetDevice(); VkDevice device = vulkan->GetDevice();
width_ = width; width_ = width;
height_ = height; height_ = height;
VkImageCreateInfo i; VkImageCreateInfo i = {};
i.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; i.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
i.pNext = NULL; i.pNext = NULL;
i.imageType = VK_IMAGE_TYPE_2D; i.imageType = VK_IMAGE_TYPE_2D;
@ -54,13 +54,13 @@ void VulkanImage::Create2D(VulkanContext *vulkan, VkFormat format, VkFlags requi
i.mipLevels = 1; i.mipLevels = 1;
i.arrayLayers = 1; i.arrayLayers = 1;
i.samples = VK_SAMPLE_COUNT_1_BIT; i.samples = VK_SAMPLE_COUNT_1_BIT;
i.tiling = VK_IMAGE_TILING_LINEAR; i.tiling = tiling;
i.usage = usage; i.usage = usage;
i.flags = 0; i.flags = 0;
i.sharingMode = VK_SHARING_MODE_EXCLUSIVE; i.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
i.queueFamilyIndexCount = 0; i.queueFamilyIndexCount = 0;
i.pQueueFamilyIndices = nullptr; i.pQueueFamilyIndices = nullptr;
i.initialLayout = VK_IMAGE_LAYOUT_GENERAL; i.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkMemoryRequirements mem_reqs; VkMemoryRequirements mem_reqs;
@ -81,6 +81,7 @@ void VulkanImage::Create2D(VulkanContext *vulkan, VkFormat format, VkFlags requi
assert(!err); assert(!err);
err = vkBindImageMemory(device, image_, memory_, 0); // at offset 0. err = vkBindImageMemory(device, image_, memory_, 0); // at offset 0.
assert(!err);
} }
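The allocation just above has to pick a memoryTypeIndex from a memory type that actually offers required_props. The helper PPSSPP uses for that is outside this diff; the standard pattern is:

bool FindMemoryTypeSketch(const VkPhysicalDeviceMemoryProperties &memProps, uint32_t typeBits,
                          VkFlags requiredProps, uint32_t *typeIndex) {
	for (uint32_t i = 0; i < memProps.memoryTypeCount; i++) {
		// The image may live in type i, and type i has all the requested properties.
		if ((typeBits & (1u << i)) &&
		    (memProps.memoryTypes[i].propertyFlags & requiredProps) == requiredProps) {
			*typeIndex = i;
			return true;
		}
	}
	return false;  // no suitable memory type
}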
void VulkanImage::SetImageData2D(VkDevice device, const uint8_t *data, int width, int height, int pitch) { void VulkanImage::SetImageData2D(VkDevice device, const uint8_t *data, int width, int height, int pitch) {
@ -121,14 +122,18 @@ void VulkanImage::ChangeLayout(VkCommandBuffer cmd, VkImageAspectFlags aspectMas
image_memory_barrier.subresourceRange.baseMipLevel = 0; image_memory_barrier.subresourceRange.baseMipLevel = 0;
image_memory_barrier.subresourceRange.levelCount = 1; image_memory_barrier.subresourceRange.levelCount = 1;
if (old_image_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
}
if (new_image_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) { if (new_image_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
/* Make sure anything that was copying from this image has completed */ // Make sure anything that was copying from this image has completed
image_memory_barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT; image_memory_barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT;
} }
if (new_image_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { if (new_image_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
/* Make sure any Copy or CPU writes to image are flushed */ // Make sure any Copy or CPU writes to image are flushed
image_memory_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT; image_memory_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT;
} }
VkPipelineStageFlags src_stages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; VkPipelineStageFlags src_stages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
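The barrier assembled here is presumably recorded at the end of ChangeLayout(); the tail of the function is outside this hunk, but it would be roughly:

VkPipelineStageFlags dst_stages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;  // illustration only; real code may pick tighter stages
vkCmdPipelineBarrier(cmd, src_stages, dst_stages, 0,
                     0, nullptr,   // no global memory barriers
                     0, nullptr,   // no buffer barriers
                     1, &image_memory_barrier);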

View file

@ -23,7 +23,7 @@
#include "ext/vulkan/vulkan.h" #include "ext/vulkan/vulkan.h"
#include "VulkanContext.h" #include "VulkanContext.h"
class VulkanContext;
// Utility class to handle images without going insane. // Utility class to handle images without going insane.
// Allocates its own memory. // Allocates its own memory.
class VulkanImage { class VulkanImage {
@ -32,7 +32,7 @@ public:
bool IsValid() const { return image_ != nullptr; } bool IsValid() const { return image_ != nullptr; }
// This can be done completely unsynchronized. // This can be done completely unsynchronized.
void Create2D(VulkanContext *vulkan, VkFormat format, VkFlags required_props, VkImageUsageFlags usage, int width, int height); void Create2D(VulkanContext *vulkan, VkFormat format, VkFlags required_props, VkImageTiling tiling, VkImageUsageFlags usage, int width, int height);
// This can only be used if you pass in VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT in required_props in Create2D. // This can only be used if you pass in VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT in required_props in Create2D.
void SetImageData2D(VkDevice device, const uint8_t *data, int width, int height, int pitch); void SetImageData2D(VkDevice device, const uint8_t *data, int width, int height, int pitch);
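Typical staging use of this pair, mirroring Thin3DVKTexture::SetImageData earlier in this commit (vulkan, pixels, width and height are placeholders):

VulkanImage staging;
staging.Create2D(vulkan, VK_FORMAT_R8G8B8A8_UNORM,
                 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,  // host-visible, so SetImageData2D can map it
                 VK_IMAGE_TILING_LINEAR,               // linear, so CPU-side row pitch math holds
                 VK_IMAGE_USAGE_TRANSFER_SRC_BIT, width, height);
staging.SetImageData2D(vulkan->GetDevice(), pixels, width, height, width * 4);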
@ -54,6 +54,4 @@ class Thin3DPipelineCache {
}; };
bool GLSLtoSPV(const VkShaderStageFlagBits shader_type, const char *pshader, std::vector<uint32_t> &spirv);
bool CreateShaderModule(VkDevice device, const std::vector<uint32_t> &spirv, VkShaderModule *shaderModule); bool CreateShaderModule(VkDevice device, const std::vector<uint32_t> &spirv, VkShaderModule *shaderModule);

View file

@ -222,4 +222,4 @@
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets"> <ImportGroup Label="ExtensionTargets">
</ImportGroup> </ImportGroup>
</Project> </Project>