Rename some GPU classes and enums to fix some consistency issues between the backends.

This commit is contained in:
Henrik Rydgard 2016-04-10 10:21:48 +02:00
parent 0fdb711c4f
commit e611915a52
22 changed files with 666 additions and 666 deletions

View file

@@ -27,12 +27,12 @@ enum CPUCore {
};
enum GPUCore {
GPU_NULL,
GPU_GLES,
GPU_SOFTWARE,
GPU_DIRECTX9,
GPU_DIRECTX11,
GPU_VULKAN,
GPUCORE_NULL,
GPUCORE_GLES,
GPUCORE_SOFTWARE,
GPUCORE_DIRECTX9,
GPUCORE_DIRECTX11,
GPUCORE_VULKAN,
};
class FileLoader;

View file

@@ -231,10 +231,10 @@ bool TakeGameScreenshot(const char *filename, ScreenshotFormat fmt, ScreenshotTy
h = PSP_CoreParameter().renderHeight;
} else {
if (GetGPUBackend() == GPUBackend::OPENGL) {
success = GLES_GPU::GetDisplayFramebuffer(buf);
success = GPU_GLES::GetDisplayFramebuffer(buf);
#ifdef _WIN32
} else if (GetGPUBackend() == GPUBackend::DIRECT3D9) {
success = DX9::DIRECTX9_GPU::GetDisplayFramebuffer(buf);
success = DX9::GPU_DX9::GetDisplayFramebuffer(buf);
#endif
}
}

View file

@@ -173,7 +173,7 @@ void __PPGeSetupListArgs()
void __PPGeInit()
{
// PPGe isn't really important for headless, and LoadZIM takes a long time.
if (PSP_CoreParameter().gpuCore == GPU_NULL || host->ShouldSkipUI()) {
if (PSP_CoreParameter().gpuCore == GPUCORE_NULL || host->ShouldSkipUI()) {
// Let's just not bother.
dlPtr = 0;
NOTICE_LOG(SCEGE, "Not initializing PPGe - GPU is NullGpu");

View file

@@ -37,7 +37,7 @@
namespace DX9 {
class TextureCacheDX9;
class TransformDrawEngineDX9;
class DrawEngineDX9;
class ShaderManagerDX9;
class FramebufferManagerDX9 : public FramebufferManagerCommon {
@@ -51,7 +51,7 @@ public:
void SetShaderManager(ShaderManagerDX9 *sm) {
shaderManager_ = sm;
}
void SetTransformDrawEngine(TransformDrawEngineDX9 *td) {
void SetTransformDrawEngine(DrawEngineDX9 *td) {
transformDraw_ = td;
}
@@ -132,7 +132,7 @@ private:
TextureCacheDX9 *textureCache_;
ShaderManagerDX9 *shaderManager_;
TransformDrawEngineDX9 *transformDraw_;
DrawEngineDX9 *transformDraw_;
// Used by post-processing shader
std::vector<FBO *> extraFBOs_;

File diff suppressed because it is too large. Load diff

View file

@@ -33,10 +33,10 @@ namespace DX9 {
class ShaderManagerDX9;
class LinkedShaderDX9;
class DIRECTX9_GPU : public GPUCommon {
class GPU_DX9 : public GPUCommon {
public:
DIRECTX9_GPU(GraphicsContext *gfxCtx);
~DIRECTX9_GPU();
GPU_DX9(GraphicsContext *gfxCtx);
~GPU_DX9();
void CheckGPUFeatures();
void InitClear() override;
void PreExecuteOp(u32 op, u32 diff) override;
@@ -83,10 +83,10 @@ public:
static bool GetDisplayFramebuffer(GPUDebugBuffer &buffer);
bool GetCurrentSimpleVertices(int count, std::vector<GPUDebugVertex> &vertices, std::vector<u16> &indices);
typedef void (DIRECTX9_GPU::*CmdFunc)(u32 op, u32 diff);
typedef void (GPU_DX9::*CmdFunc)(u32 op, u32 diff);
struct CommandInfo {
u8 flags;
DIRECTX9_GPU::CmdFunc func;
GPU_DX9::CmdFunc func;
};
void Execute_Generic(u32 op, u32 diff);
@@ -160,7 +160,7 @@ private:
void UpdateCmdInfo();
void Flush() {
transformDraw_.Flush();
drawEngine_.Flush();
}
void DoBlockTransfer(u32 skipDrawReason);
void ApplyDrawState(int prim);
@@ -177,7 +177,7 @@ private:
FramebufferManagerDX9 framebufferManager_;
TextureCacheDX9 textureCache_;
DepalShaderCacheDX9 depalShaderCache_;
TransformDrawEngineDX9 transformDraw_;
DrawEngineDX9 drawEngine_;
ShaderManagerDX9 *shaderManager_;
static CommandInfo cmdInfo_[256];
@@ -193,4 +193,4 @@ private:
} // namespace DX9
typedef DX9::DIRECTX9_GPU DIRECTX9_GPU;
typedef DX9::GPU_DX9 DIRECTX9_GPU;

View file

@@ -85,7 +85,7 @@ static const D3DSTENCILOP stencilOps[] = {
D3DSTENCILOP_KEEP, // reserved
};
bool TransformDrawEngineDX9::ApplyShaderBlending() {
bool DrawEngineDX9::ApplyShaderBlending() {
if (gstate_c.featureFlags & GPU_SUPPORTS_ANY_FRAMEBUFFER_FETCH) {
return true;
}
@@ -113,14 +113,14 @@ bool TransformDrawEngineDX9::ApplyShaderBlending() {
return true;
}
inline void TransformDrawEngineDX9::ResetShaderBlending() {
inline void DrawEngineDX9::ResetShaderBlending() {
if (fboTexBound_) {
pD3Ddevice->SetTexture(1, nullptr);
fboTexBound_ = false;
}
}
void TransformDrawEngineDX9::ApplyDrawState(int prim) {
void DrawEngineDX9::ApplyDrawState(int prim) {
// TODO: All this setup is soon so expensive that we'll need dirty flags, or simply do it in the command writes where we detect dirty by xoring. Silly to do all this work on every drawcall.
if (gstate_c.textureChanged != TEXCHANGE_UNCHANGED && !gstate.isModeClear() && gstate.isTextureMapEnabled()) {
@@ -302,7 +302,7 @@ void TransformDrawEngineDX9::ApplyDrawState(int prim) {
}
}
void TransformDrawEngineDX9::ApplyDrawStateLate() {
void DrawEngineDX9::ApplyDrawStateLate() {
// At this point, we know if the vertices are full alpha or not.
// TODO: Set the nearest/linear here (since we correctly know if alpha/color tests are needed)?
if (!gstate.isModeClear()) {

View file

@@ -78,7 +78,7 @@ enum {
enum { VAI_KILL_AGE = 120, VAI_UNRELIABLE_KILL_AGE = 240, VAI_UNRELIABLE_KILL_MAX = 4 };
TransformDrawEngineDX9::TransformDrawEngineDX9()
DrawEngineDX9::DrawEngineDX9()
: decodedVerts_(0),
prevPrim_(GE_PRIM_INVALID),
lastVType_(-1),
@@ -116,7 +116,7 @@ TransformDrawEngineDX9::TransformDrawEngineDX9()
InitDeviceObjects();
}
TransformDrawEngineDX9::~TransformDrawEngineDX9() {
DrawEngineDX9::~DrawEngineDX9() {
DestroyDeviceObjects();
FreeMemoryPages(decoded, DECODED_VERTEX_BUFFER_SIZE);
FreeMemoryPages(decIndex, DECODED_INDEX_BUFFER_SIZE);
@@ -132,11 +132,11 @@ TransformDrawEngineDX9::~TransformDrawEngineDX9() {
delete [] uvScale;
}
void TransformDrawEngineDX9::InitDeviceObjects() {
void DrawEngineDX9::InitDeviceObjects() {
}
void TransformDrawEngineDX9::DestroyDeviceObjects() {
void DrawEngineDX9::DestroyDeviceObjects() {
ClearTrackedVertexArrays();
}
@@ -176,7 +176,7 @@ static void VertexAttribSetup(D3DVERTEXELEMENT9 * VertexElement, u8 fmt, u8 offs
VertexElement->UsageIndex = usage_index;
}
IDirect3DVertexDeclaration9 *TransformDrawEngineDX9::SetupDecFmtForDraw(VSShader *vshader, const DecVtxFormat &decFmt, u32 pspFmt) {
IDirect3DVertexDeclaration9 *DrawEngineDX9::SetupDecFmtForDraw(VSShader *vshader, const DecVtxFormat &decFmt, u32 pspFmt) {
auto vertexDeclCached = vertexDeclMap_.find(pspFmt);
if (vertexDeclCached == vertexDeclMap_.end()) {
@@ -244,7 +244,7 @@ IDirect3DVertexDeclaration9 *TransformDrawEngineDX9::SetupDecFmtForDraw(VSShader
}
}
VertexDecoder *TransformDrawEngineDX9::GetVertexDecoder(u32 vtype) {
VertexDecoder *DrawEngineDX9::GetVertexDecoder(u32 vtype) {
auto iter = decoderMap_.find(vtype);
if (iter != decoderMap_.end())
return iter->second;
@@ -254,11 +254,11 @@ VertexDecoder *TransformDrawEngineDX9::GetVertexDecoder(u32 vtype) {
return dec;
}
void TransformDrawEngineDX9::SetupVertexDecoder(u32 vertType) {
void DrawEngineDX9::SetupVertexDecoder(u32 vertType) {
SetupVertexDecoderInternal(vertType);
}
inline void TransformDrawEngineDX9::SetupVertexDecoderInternal(u32 vertType) {
inline void DrawEngineDX9::SetupVertexDecoderInternal(u32 vertType) {
// As the decoder depends on the UVGenMode when we use UV prescale, we simply mash it
// into the top of the verttype where there are unused bits.
const u32 vertTypeID = (vertType & 0xFFFFFF) | (gstate.getUVGenMode() << 24);
@@ -270,7 +270,7 @@ inline void TransformDrawEngineDX9::SetupVertexDecoderInternal(u32 vertType) {
}
}
void TransformDrawEngineDX9::SubmitPrim(void *verts, void *inds, GEPrimitiveType prim, int vertexCount, u32 vertType, int *bytesRead) {
void DrawEngineDX9::SubmitPrim(void *verts, void *inds, GEPrimitiveType prim, int vertexCount, u32 vertType, int *bytesRead) {
if (!indexGen.PrimCompatible(prevPrim_, prim) || numDrawCalls >= MAX_DEFERRED_DRAW_CALLS || vertexCountInDrawCalls + vertexCount > VERTEX_BUFFER_MAX)
Flush();
@@ -336,7 +336,7 @@ void TransformDrawEngineDX9::SubmitPrim(void *verts, void *inds, GEPrimitiveType
}
}
void TransformDrawEngineDX9::DecodeVerts() {
void DrawEngineDX9::DecodeVerts() {
if (uvScale) {
const UVScale origUV = gstate_c.uv;
for (; decodeCounter_ < numDrawCalls; decodeCounter_++) {
@@ -357,7 +357,7 @@ void TransformDrawEngineDX9::DecodeVerts() {
}
}
void TransformDrawEngineDX9::DecodeVertsStep() {
void DrawEngineDX9::DecodeVertsStep() {
const int i = decodeCounter_;
const DeferredDrawCall &dc = drawCalls[i];
@@ -454,7 +454,7 @@ inline u32 ComputeMiniHashRange(const void *ptr, size_t sz) {
}
}
u32 TransformDrawEngineDX9::ComputeMiniHash() {
u32 DrawEngineDX9::ComputeMiniHash() {
u32 fullhash = 0;
const int vertexSize = dec_->GetDecVtxFmt().stride;
const int indexSize = (dec_->VertexType() & GE_VTYPE_IDX_MASK) == GE_VTYPE_IDX_16BIT ? 2 : 1;
@@ -481,7 +481,7 @@ u32 TransformDrawEngineDX9::ComputeMiniHash() {
return fullhash;
}
void TransformDrawEngineDX9::MarkUnreliable(VertexArrayInfoDX9 *vai) {
void DrawEngineDX9::MarkUnreliable(VertexArrayInfoDX9 *vai) {
vai->status = VertexArrayInfoDX9::VAI_UNRELIABLE;
if (vai->vbo) {
vai->vbo->Release();
@@ -493,7 +493,7 @@ void TransformDrawEngineDX9::MarkUnreliable(VertexArrayInfoDX9 *vai) {
}
}
ReliableHashType TransformDrawEngineDX9::ComputeHash() {
ReliableHashType DrawEngineDX9::ComputeHash() {
ReliableHashType fullhash = 0;
const int vertexSize = dec_->GetDecVtxFmt().stride;
const int indexSize = (dec_->VertexType() & GE_VTYPE_IDX_MASK) == GE_VTYPE_IDX_16BIT ? 2 : 1;
@@ -532,14 +532,14 @@ ReliableHashType TransformDrawEngineDX9::ComputeHash() {
return fullhash;
}
void TransformDrawEngineDX9::ClearTrackedVertexArrays() {
void DrawEngineDX9::ClearTrackedVertexArrays() {
for (auto vai = vai_.begin(); vai != vai_.end(); vai++) {
delete vai->second;
}
vai_.clear();
}
void TransformDrawEngineDX9::DecimateTrackedVertexArrays() {
void DrawEngineDX9::DecimateTrackedVertexArrays() {
if (--decimationCounter_ <= 0) {
decimationCounter_ = VERTEXCACHE_DECIMATION_INTERVAL;
} else {
@@ -587,7 +587,7 @@ VertexArrayInfoDX9::~VertexArrayInfoDX9() {
}
// The inline wrapper in the header checks for numDrawCalls == 0
void TransformDrawEngineDX9::DoFlush() {
void DrawEngineDX9::DoFlush() {
gpuStats.numFlushes++;
gpuStats.numTrackedVertexArrays = (int)vai_.size();
@@ -905,7 +905,7 @@ rotateVBO:
host->GPUNotifyDraw();
}
void TransformDrawEngineDX9::Resized() {
void DrawEngineDX9::Resized() {
decJitCache_->Clear();
lastVType_ = -1;
dec_ = NULL;
@@ -922,7 +922,7 @@ void TransformDrawEngineDX9::Resized() {
}
}
bool TransformDrawEngineDX9::IsCodePtrVertexDecoder(const u8 *ptr) const {
bool DrawEngineDX9::IsCodePtrVertexDecoder(const u8 *ptr) const {
return decJitCache_->IsInSpace(ptr);
}

View file

@@ -107,10 +107,10 @@ public:
};
// Handles transform, lighting and drawing.
class TransformDrawEngineDX9 : public DrawEngineCommon {
class DrawEngineDX9 : public DrawEngineCommon {
public:
TransformDrawEngineDX9();
virtual ~TransformDrawEngineDX9();
DrawEngineDX9();
virtual ~DrawEngineDX9();
void SubmitPrim(void *verts, void *inds, GEPrimitiveType prim, int vertexCount, u32 vertType, int *bytesRead);

View file

@@ -35,7 +35,7 @@
struct GLSLProgram;
class TextureCache;
class TransformDrawEngine;
class DrawEngineGLES;
class ShaderManager;
// Simple struct for asynchronous PBO readbacks
@@ -71,7 +71,7 @@ public:
void SetShaderManager(ShaderManager *sm) {
shaderManager_ = sm;
}
void SetTransformDrawEngine(TransformDrawEngine *td) {
void SetTransformDrawEngine(DrawEngineGLES *td) {
transformDraw_ = td;
}
@@ -166,7 +166,7 @@ private:
TextureCache *textureCache_;
ShaderManager *shaderManager_;
TransformDrawEngine *transformDraw_;
DrawEngineGLES *transformDraw_;
// Used by post-processing shader
std::vector<FBO *> extraFBOs_;

File diff suppressed because it is too large. Load diff

View file

@@ -32,10 +32,10 @@ class ShaderManager;
class LinkedShader;
class GraphicsContext;
class GLES_GPU : public GPUCommon {
class GPU_GLES : public GPUCommon {
public:
GLES_GPU(GraphicsContext *gfxCtx);
~GLES_GPU();
GPU_GLES(GraphicsContext *gfxCtx);
~GPU_GLES();
// This gets called on startup and when we get back from settings.
void CheckGPUFeatures();
@@ -90,10 +90,10 @@ public:
bool DescribeCodePtr(const u8 *ptr, std::string &name) override;
typedef void (GLES_GPU::*CmdFunc)(u32 op, u32 diff);
typedef void (GPU_GLES::*CmdFunc)(u32 op, u32 diff);
struct CommandInfo {
u8 flags;
GLES_GPU::CmdFunc func;
GPU_GLES::CmdFunc func;
};
void Execute_Vaddr(u32 op, u32 diff);
@@ -165,7 +165,7 @@ protected:
private:
void Flush() {
transformDraw_.Flush();
drawEngine_.Flush();
}
void DoBlockTransfer(u32 skipDrawReason);
void CheckFlushOp(int cmd, u32 diff);
@@ -186,7 +186,7 @@ private:
FramebufferManager framebufferManager_;
TextureCache textureCache_;
DepalShaderCache depalShaderCache_;
TransformDrawEngine transformDraw_;
DrawEngineGLES drawEngine_;
FragmentTestCache fragmentTestCache_;
ShaderManager *shaderManager_;

View file

@@ -114,7 +114,7 @@ static const GLushort logicOps[] = {
};
#endif
bool TransformDrawEngine::ApplyShaderBlending() {
bool DrawEngineGLES::ApplyShaderBlending() {
if (gstate_c.featureFlags & GPU_SUPPORTS_ANY_FRAMEBUFFER_FETCH) {
return true;
}
@@ -142,7 +142,7 @@ bool TransformDrawEngine::ApplyShaderBlending() {
return true;
}
inline void TransformDrawEngine::ResetShaderBlending() {
inline void DrawEngineGLES::ResetShaderBlending() {
if (fboTexBound_) {
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, 0);
@@ -151,7 +151,7 @@ inline void TransformDrawEngine::ResetShaderBlending() {
}
}
void TransformDrawEngine::ApplyDrawState(int prim) {
void DrawEngineGLES::ApplyDrawState(int prim) {
// TODO: All this setup is so expensive that we'll need dirty flags, or simply do it in the command writes where we detect dirty by xoring. Silly to do all this work on every drawcall.
if (gstate_c.textureChanged != TEXCHANGE_UNCHANGED && !gstate.isModeClear() && gstate.isTextureMapEnabled()) {
@@ -369,7 +369,7 @@ void TransformDrawEngine::ApplyDrawState(int prim) {
}
}
void TransformDrawEngine::ApplyDrawStateLate() {
void DrawEngineGLES::ApplyDrawStateLate() {
// At this point, we know if the vertices are full alpha or not.
// TODO: Set the nearest/linear here (since we correctly know if alpha/color tests are needed)?
if (!gstate.isModeClear()) {

View file

@@ -862,7 +862,7 @@ public:
}
}
void Use(TransformDrawEngine *transformDraw) {
void Use(DrawEngineGLES *transformDraw) {
glUseProgram(shader_->program);
// Restore will rebind all of the state below.

View file

@@ -32,7 +32,7 @@ struct VirtualFramebuffer;
class FramebufferManager;
class DepalShaderCache;
class ShaderManager;
class TransformDrawEngine;
class DrawEngineGLES;
inline bool UseBGRA8888() {
// TODO: Other platforms? May depend on vendor which is faster?
@@ -65,7 +65,7 @@ public:
void SetShaderManager(ShaderManager *sm) {
shaderManager_ = sm;
}
void SetTransformDrawEngine(TransformDrawEngine *td) {
void SetTransformDrawEngine(DrawEngineGLES *td) {
transformDraw_ = td;
}
@@ -132,7 +132,7 @@ private:
FramebufferManager *framebufferManager_;
DepalShaderCache *depalShaderCache_;
ShaderManager *shaderManager_;
TransformDrawEngine *transformDraw_;
DrawEngineGLES *transformDraw_;
};
GLenum getClutDestFormat(GEPaletteFormat format);

View file

@@ -117,7 +117,7 @@ enum {
enum { VAI_KILL_AGE = 120, VAI_UNRELIABLE_KILL_AGE = 240, VAI_UNRELIABLE_KILL_MAX = 4 };
TransformDrawEngine::TransformDrawEngine()
DrawEngineGLES::DrawEngineGLES()
: decodedVerts_(0),
prevPrim_(GE_PRIM_INVALID),
lastVType_(-1),
@@ -153,7 +153,7 @@ TransformDrawEngine::TransformDrawEngine()
register_gl_resource_holder(this);
}
TransformDrawEngine::~TransformDrawEngine() {
DrawEngineGLES::~DrawEngineGLES() {
DestroyDeviceObjects();
FreeMemoryPages(decoded, DECODED_VERTEX_BUFFER_SIZE);
FreeMemoryPages(decIndex, DECODED_INDEX_BUFFER_SIZE);
@@ -165,7 +165,7 @@ TransformDrawEngine::~TransformDrawEngine() {
delete [] uvScale;
}
void TransformDrawEngine::RestoreVAO() {
void DrawEngineGLES::RestoreVAO() {
if (sharedVao_ != 0) {
glBindVertexArray(sharedVao_);
} else if (gstate_c.Supports(GPU_SUPPORTS_VAO)) {
@@ -176,7 +176,7 @@ void TransformDrawEngine::RestoreVAO() {
}
}
void TransformDrawEngine::InitDeviceObjects() {
void DrawEngineGLES::InitDeviceObjects() {
if (bufferNameCache_.empty()) {
bufferNameCache_.resize(VERTEXCACHE_NAME_CACHE_SIZE);
glGenBuffers(VERTEXCACHE_NAME_CACHE_SIZE, &bufferNameCache_[0]);
@@ -192,7 +192,7 @@ void TransformDrawEngine::InitDeviceObjects() {
}
}
void TransformDrawEngine::DestroyDeviceObjects() {
void DrawEngineGLES::DestroyDeviceObjects() {
ClearTrackedVertexArrays();
if (!bufferNameCache_.empty()) {
glstate.arrayBuffer.unbind();
@@ -209,7 +209,7 @@ void TransformDrawEngine::DestroyDeviceObjects() {
}
}
void TransformDrawEngine::GLRestore() {
void DrawEngineGLES::GLRestore() {
ILOG("TransformDrawEngine::GLRestore()");
// The objects have already been deleted.
bufferNameCache_.clear();
@@ -264,11 +264,11 @@ static void SetupDecFmtForDraw(LinkedShader *program, const DecVtxFormat &decFmt
VertexAttribSetup(ATTR_POSITION, decFmt.posfmt, decFmt.stride, vertexData + decFmt.posoff);
}
void TransformDrawEngine::SetupVertexDecoder(u32 vertType) {
void DrawEngineGLES::SetupVertexDecoder(u32 vertType) {
SetupVertexDecoderInternal(vertType);
}
inline void TransformDrawEngine::SetupVertexDecoderInternal(u32 vertType) {
inline void DrawEngineGLES::SetupVertexDecoderInternal(u32 vertType) {
// As the decoder depends on the UVGenMode when we use UV prescale, we simply mash it
// into the top of the verttype where there are unused bits.
const u32 vertTypeID = (vertType & 0xFFFFFF) | (gstate.getUVGenMode() << 24);
@@ -280,7 +280,7 @@ inline void TransformDrawEngine::SetupVertexDecoderInternal(u32 vertType) {
}
}
void TransformDrawEngine::SubmitPrim(void *verts, void *inds, GEPrimitiveType prim, int vertexCount, u32 vertType, int *bytesRead) {
void DrawEngineGLES::SubmitPrim(void *verts, void *inds, GEPrimitiveType prim, int vertexCount, u32 vertType, int *bytesRead) {
if (!indexGen.PrimCompatible(prevPrim_, prim) || numDrawCalls >= MAX_DEFERRED_DRAW_CALLS || vertexCountInDrawCalls + vertexCount > VERTEX_BUFFER_MAX)
Flush();
@@ -346,7 +346,7 @@ void TransformDrawEngine::SubmitPrim(void *verts, void *inds, GEPrimitiveType pr
}
}
void TransformDrawEngine::DecodeVerts() {
void DrawEngineGLES::DecodeVerts() {
if (uvScale) {
const UVScale origUV = gstate_c.uv;
for (; decodeCounter_ < numDrawCalls; decodeCounter_++) {
@@ -367,7 +367,7 @@ void TransformDrawEngine::DecodeVerts() {
}
}
void TransformDrawEngine::DecodeVertsStep() {
void DrawEngineGLES::DecodeVertsStep() {
PROFILE_THIS_SCOPE("vertdec");
const int i = decodeCounter_;
@@ -465,7 +465,7 @@ inline u32 ComputeMiniHashRange(const void *ptr, size_t sz) {
}
}
u32 TransformDrawEngine::ComputeMiniHash() {
u32 DrawEngineGLES::ComputeMiniHash() {
u32 fullhash = 0;
const int vertexSize = dec_->GetDecVtxFmt().stride;
const int indexSize = (dec_->VertexType() & GE_VTYPE_IDX_MASK) == GE_VTYPE_IDX_16BIT ? 2 : 1;
@@ -492,7 +492,7 @@ u32 TransformDrawEngine::ComputeMiniHash() {
return fullhash;
}
void TransformDrawEngine::MarkUnreliable(VertexArrayInfo *vai) {
void DrawEngineGLES::MarkUnreliable(VertexArrayInfo *vai) {
vai->status = VertexArrayInfo::VAI_UNRELIABLE;
if (vai->vbo) {
FreeBuffer(vai->vbo);
@@ -504,7 +504,7 @@ void TransformDrawEngine::MarkUnreliable(VertexArrayInfo *vai) {
}
}
ReliableHashType TransformDrawEngine::ComputeHash() {
ReliableHashType DrawEngineGLES::ComputeHash() {
ReliableHashType fullhash = 0;
const int vertexSize = dec_->GetDecVtxFmt().stride;
const int indexSize = (dec_->VertexType() & GE_VTYPE_IDX_MASK) == GE_VTYPE_IDX_16BIT ? 2 : 1;
@@ -543,7 +543,7 @@ ReliableHashType TransformDrawEngine::ComputeHash() {
return fullhash;
}
void TransformDrawEngine::ClearTrackedVertexArrays() {
void DrawEngineGLES::ClearTrackedVertexArrays() {
for (auto vai = vai_.begin(); vai != vai_.end(); vai++) {
FreeVertexArray(vai->second);
delete vai->second;
@@ -551,7 +551,7 @@ void TransformDrawEngine::ClearTrackedVertexArrays() {
vai_.clear();
}
void TransformDrawEngine::DecimateTrackedVertexArrays() {
void DrawEngineGLES::DecimateTrackedVertexArrays() {
if (--decimationCounter_ <= 0) {
decimationCounter_ = VERTEXCACHE_DECIMATION_INTERVAL;
} else {
@@ -579,7 +579,7 @@ void TransformDrawEngine::DecimateTrackedVertexArrays() {
}
}
GLuint TransformDrawEngine::AllocateBuffer(size_t sz) {
GLuint DrawEngineGLES::AllocateBuffer(size_t sz) {
GLuint unused = 0;
auto freeMatch = freeSizedBuffers_.find(sz);
@@ -630,7 +630,7 @@ GLuint TransformDrawEngine::AllocateBuffer(size_t sz) {
return unused;
}
void TransformDrawEngine::FreeBuffer(GLuint buf) {
void DrawEngineGLES::FreeBuffer(GLuint buf) {
// We can reuse buffers by setting new data on them, so let's actually keep it.
auto it = bufferNameInfo_.find(buf);
if (it != bufferNameInfo_.end()) {
@@ -645,7 +645,7 @@ void TransformDrawEngine::FreeBuffer(GLuint buf) {
}
}
void TransformDrawEngine::FreeVertexArray(VertexArrayInfo *vai) {
void DrawEngineGLES::FreeVertexArray(VertexArrayInfo *vai) {
if (vai->vbo) {
FreeBuffer(vai->vbo);
vai->vbo = 0;
@@ -656,7 +656,7 @@ void TransformDrawEngine::FreeVertexArray(VertexArrayInfo *vai) {
}
}
void TransformDrawEngine::DoFlush() {
void DrawEngineGLES::DoFlush() {
PROFILE_THIS_SCOPE("flush");
gpuStats.numFlushes++;
gpuStats.numTrackedVertexArrays = (int)vai_.size();
@@ -1005,7 +1005,7 @@ rotateVBO:
#endif
}
void TransformDrawEngine::Resized() {
void DrawEngineGLES::Resized() {
decJitCache_->Clear();
lastVType_ = -1;
dec_ = NULL;
@@ -1022,7 +1022,7 @@ void TransformDrawEngine::Resized() {
}
}
GLuint TransformDrawEngine::BindBuffer(const void *p, size_t sz) {
GLuint DrawEngineGLES::BindBuffer(const void *p, size_t sz) {
// Get a new buffer each time we need one.
GLuint buf = AllocateBuffer(sz);
glstate.arrayBuffer.bind(buf);
@@ -1034,7 +1034,7 @@ GLuint TransformDrawEngine::BindBuffer(const void *p, size_t sz) {
return buf;
}
GLuint TransformDrawEngine::BindBuffer(const void *p1, size_t sz1, const void *p2, size_t sz2) {
GLuint DrawEngineGLES::BindBuffer(const void *p1, size_t sz1, const void *p2, size_t sz2) {
GLuint buf = AllocateBuffer(sz1 + sz2);
glstate.arrayBuffer.bind(buf);
@@ -1046,7 +1046,7 @@ GLuint TransformDrawEngine::BindBuffer(const void *p1, size_t sz1, const void *p
return buf;
}
GLuint TransformDrawEngine::BindElementBuffer(const void *p, size_t sz) {
GLuint DrawEngineGLES::BindElementBuffer(const void *p, size_t sz) {
GLuint buf = AllocateBuffer(sz);
glstate.elementArrayBuffer.bind(buf);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sz, p, GL_STREAM_DRAW);
@@ -1055,7 +1055,7 @@ GLuint TransformDrawEngine::BindElementBuffer(const void *p, size_t sz) {
return buf;
}
void TransformDrawEngine::DecimateBuffers() {
void DrawEngineGLES::DecimateBuffers() {
for (GLuint buf : buffersThisFrame_) {
FreeBuffer(buf);
}
@@ -1119,6 +1119,6 @@ void TransformDrawEngine::DecimateBuffers() {
}
}
bool TransformDrawEngine::IsCodePtrVertexDecoder(const u8 *ptr) const {
bool DrawEngineGLES::IsCodePtrVertexDecoder(const u8 *ptr) const {
return decJitCache_->IsInSpace(ptr);
}

View file

@@ -107,10 +107,10 @@ public:
};
// Handles transform, lighting and drawing.
class TransformDrawEngine : public DrawEngineCommon, public GfxResourceHolder {
class DrawEngineGLES : public DrawEngineCommon, public GfxResourceHolder {
public:
TransformDrawEngine();
virtual ~TransformDrawEngine();
DrawEngineGLES();
virtual ~DrawEngineGLES();
void SubmitPrim(void *verts, void *inds, GEPrimitiveType prim, int vertexCount, u32 vertType, int *bytesRead);

View file

@@ -48,24 +48,24 @@ static void SetGPU(T *obj) {
bool GPU_Init(GraphicsContext *ctx, Thin3DContext *thin3d) {
switch (PSP_CoreParameter().gpuCore) {
case GPU_NULL:
case GPUCORE_NULL:
SetGPU(new NullGPU());
break;
case GPU_GLES:
SetGPU(new GLES_GPU(ctx));
case GPUCORE_GLES:
SetGPU(new GPU_GLES(ctx));
break;
case GPU_SOFTWARE:
case GPUCORE_SOFTWARE:
SetGPU(new SoftGPU(ctx, thin3d));
break;
case GPU_DIRECTX9:
case GPUCORE_DIRECTX9:
#if defined(_WIN32)
SetGPU(new DIRECTX9_GPU(ctx));
#endif
break;
case GPU_DIRECTX11:
case GPUCORE_DIRECTX11:
return false;
#ifndef NO_VULKAN
case GPU_VULKAN:
case GPUCORE_VULKAN:
SetGPU(new GPU_Vulkan(ctx));
break;
#endif

View file

@@ -102,19 +102,19 @@ void EmuScreen::bootGame(const std::string &filename) {
CoreParameter coreParam;
coreParam.cpuCore = g_Config.bJit ? CPU_JIT : CPU_INTERPRETER;
coreParam.gpuCore = GPU_GLES;
coreParam.gpuCore = GPUCORE_GLES;
switch (GetGPUBackend()) {
case GPUBackend::OPENGL:
coreParam.gpuCore = GPU_GLES;
coreParam.gpuCore = GPUCORE_GLES;
break;
case GPUBackend::DIRECT3D9:
coreParam.gpuCore = GPU_DIRECTX9;
coreParam.gpuCore = GPUCORE_DIRECTX9;
break;
case GPUBackend::DIRECT3D11:
coreParam.gpuCore = GPU_DIRECTX11;
coreParam.gpuCore = GPUCORE_DIRECTX11;
break;
case GPUBackend::VULKAN:
coreParam.gpuCore = GPU_VULKAN;
coreParam.gpuCore = GPUCORE_VULKAN;
if (g_Config.iRenderingMode != FB_NON_BUFFERED_MODE) {
#ifdef _WIN32
if (IDYES == MessageBox(MainWindow::GetHWND(), L"The Vulkan backend is not yet compatible with buffered rendering. Switch to non-buffered (WARNING: This will cause glitches with the other backends unless you switch back)", L"Vulkan Experimental Support", MB_ICONINFORMATION | MB_YESNO)) {
@@ -128,7 +128,7 @@ void EmuScreen::bootGame(const std::string &filename) {
break;
}
if (g_Config.bSoftwareRendering) {
coreParam.gpuCore = GPU_SOFTWARE;
coreParam.gpuCore = GPUCORE_SOFTWARE;
}
// Preserve the existing graphics context.
coreParam.graphicsContext = PSP_CoreParameter().graphicsContext;

View file

@@ -70,7 +70,7 @@ void RunTests()
CoreParameter coreParam;
coreParam.cpuCore = g_Config.bJit ? CPU_JIT : CPU_INTERPRETER;
coreParam.gpuCore = g_Config.bSoftwareRendering ? GPU_SOFTWARE : GPU_GLES;
coreParam.gpuCore = g_Config.bSoftwareRendering ? GPUCORE_SOFTWARE : GPUCORE_GLES;
coreParam.enableSound = g_Config.bEnableSound;
coreParam.graphicsContext = PSP_CoreParameter().graphicsContext;
coreParam.mountIso = "";

View file

@@ -116,7 +116,7 @@ int printUsage(const char *progname, const char *reason)
static HeadlessHost *getHost(GPUCore gpuCore) {
switch (gpuCore) {
case GPU_NULL:
case GPUCORE_NULL:
return new HeadlessHost();
#ifdef HEADLESSHOST_CLASS
default:
@@ -211,7 +211,7 @@ int main(int argc, const char* argv[])
bool autoCompare = false;
bool verbose = false;
const char *stateToLoad = 0;
GPUCore gpuCore = GPU_NULL;
GPUCore gpuCore = GPUCORE_NULL;
std::vector<std::string> testFilenames;
const char *mountIso = 0;
@@ -247,21 +247,21 @@ int main(int argc, const char* argv[])
{
const char *gpuName = argv[i] + strlen("--graphics=");
if (!strcasecmp(gpuName, "gles"))
gpuCore = GPU_GLES;
gpuCore = GPUCORE_GLES;
else if (!strcasecmp(gpuName, "software"))
gpuCore = GPU_SOFTWARE;
gpuCore = GPUCORE_SOFTWARE;
else if (!strcasecmp(gpuName, "directx9"))
gpuCore = GPU_DIRECTX9;
gpuCore = GPUCORE_DIRECTX9;
else if (!strcasecmp(gpuName, "vulkan"))
gpuCore = GPU_VULKAN;
gpuCore = GPUCORE_VULKAN;
else if (!strcasecmp(gpuName, "null"))
gpuCore = GPU_NULL;
gpuCore = GPUCORE_NULL;
else
return printUsage(argv[0], "Unknown gpu backend specified after --graphics=");
}
// Default to GLES if no value selected.
else if (!strcmp(argv[i], "--graphics"))
gpuCore = GPU_GLES;
gpuCore = GPUCORE_GLES;
else if (!strncmp(argv[i], "--screenshot=", strlen("--screenshot=")) && strlen(argv[i]) > strlen("--screenshot="))
screenshotFilename = argv[i] + strlen("--screenshot=");
else if (!strncmp(argv[i], "--timeout=", strlen("--timeout=")) && strlen(argv[i]) > strlen("--timeout="))
@@ -312,7 +312,7 @@ int main(int argc, const char* argv[])
CoreParameter coreParameter;
coreParameter.cpuCore = useJit ? CPU_JIT : CPU_INTERPRETER;
coreParameter.gpuCore = glWorking ? gpuCore : GPU_NULL;
coreParameter.gpuCore = glWorking ? gpuCore : GPUCORE_NULL;
coreParameter.graphicsContext = graphicsContext;
coreParameter.enableSound = false;
coreParameter.mountIso = mountIso ? mountIso : "";

View file

@@ -153,20 +153,20 @@ bool WindowsHeadlessHost::InitGraphics(std::string *error_message, GraphicsConte
WindowsGraphicsContext *graphicsContext = nullptr;
switch (gpuCore_) {
case GPU_NULL:
case GPU_GLES:
case GPU_SOFTWARE:
case GPUCORE_NULL:
case GPUCORE_GLES:
case GPUCORE_SOFTWARE:
graphicsContext = new WindowsGLContext();
break;
case GPU_DIRECTX9:
case GPUCORE_DIRECTX9:
graphicsContext = new D3D9Context();
break;
case GPU_DIRECTX11:
case GPUCORE_DIRECTX11:
return false;
case GPU_VULKAN:
case GPUCORE_VULKAN:
graphicsContext = new WindowsVulkanContext();
break;
}
@@ -181,7 +181,7 @@ bool WindowsHeadlessHost::InitGraphics(std::string *error_message, GraphicsConte
return false;
}
if (gpuCore_ == GPU_GLES) {
if (gpuCore_ == GPUCORE_GLES) {
// TODO: Do we need to do this here?
CheckGLExtensions();
}
@@ -200,7 +200,7 @@ void WindowsHeadlessHost::ShutdownGraphics() {
}
void WindowsHeadlessHost::SwapBuffers() {
if (gpuCore_ == GPU_DIRECTX9) {
if (gpuCore_ == GPUCORE_DIRECTX9) {
MSG msg;
PeekMessage(&msg, NULL, 0, 0, PM_REMOVE);
TranslateMessage(&msg);