// Core/HLE/sceKernelMutex.cpp
// Copyright (c) 2012- PPSSPP Project.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.
// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
// UNFINISHED
#include "HLE.h"
#include "../MIPS/MIPS.h"
#include "../../Core/CoreTiming.h"
#include "sceKernel.h"
#include "sceKernelMutex.h"
#include "sceKernelThread.h"
#define PSP_MUTEX_ATTR_FIFO 0
#define PSP_MUTEX_ATTR_PRIORITY 0x100
#define PSP_MUTEX_ATTR_ALLOW_RECURSIVE 0x200
// Not sure about the names of these
#define PSP_MUTEX_ERROR_NO_SUCH_MUTEX 0x800201C3
#define PSP_MUTEX_ERROR_TRYLOCK_FAILED 0x800201C4
#define PSP_MUTEX_ERROR_NOT_LOCKED 0x800201C5
#define PSP_MUTEX_ERROR_LOCK_OVERFLOW 0x800201C6
#define PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW 0x800201C7
#define PSP_MUTEX_ERROR_ALREADY_LOCKED 0x800201C8
#define PSP_LWMUTEX_ERROR_NOT_FOUND 0x800201CA
// Guesswork - not exposed anyway
struct NativeMutex
{
	SceSize size;		// Struct size field, filled in at creation.
	char name[32];		// NUL-terminated name, truncated to 31 chars at creation.
	SceUInt attr;		// PSP_MUTEX_ATTR_* flags.
	int lockLevel;		// Current lock count; 0 means unowned.
	int lockThread; // The thread holding the lock
};
// Kernel object wrapping NativeMutex plus the queue of threads blocked on it.
struct Mutex : public KernelObject
{
	const char *GetName() {return nm.name;}
	const char *GetTypeName() {return "Mutex";}
	static u32 GetMissingErrorCode() { return PSP_MUTEX_ERROR_NO_SUCH_MUTEX; }
	int GetIDType() const { return SCE_KERNEL_TMID_Mutex; }

	NativeMutex nm;
	// Thread IDs blocked waiting on this mutex, in arrival (push_back) order.
	// PSP_MUTEX_ATTR_PRIORITY ordering is not implemented (see unlock TODO).
	std::vector<SceUID> waitingThreads;
};
// Guesswork - not exposed anyway
struct NativeLwMutex
{
	SceSize size;		// Struct size field, filled in at creation.
	char name[32];		// NUL-terminated name, truncated to 31 chars at creation.
	SceUInt attr;		// PSP_MUTEX_ATTR_* flags.
	SceUInt workareaPtr;	// Guest address of the NativeLwMutexWorkarea.
};
// Guest-visible lwmutex state, read from / written back to game memory
// at the workarea address the game supplies.
struct NativeLwMutexWorkarea
{
	int lockLevel;		// Current lock count; 0 means unowned.
	SceUID lockThread;	// Owning thread ID.
	int attr;		// PSP_MUTEX_ATTR_* flags.
	int numWaitThreads;
	SceUID uid;		// UID of the backing kernel LwMutex object.
	int pad[3];

	// Zero the whole workarea (used on creation).
	void init()
	{
		memset(this, 0, sizeof(NativeLwMutexWorkarea));
	}

	// Mark the workarea as deleted/unowned with -1 sentinels; other
	// fields (attr, numWaitThreads, pad) are left untouched.
	void clear()
	{
		lockLevel = 0;
		lockThread = -1;
		uid = -1;
	}
};
// Kernel object for a lightweight mutex. Most mutable state lives in the
// guest-memory workarea (NativeLwMutexWorkarea), not in nm.
struct LwMutex : public KernelObject
{
	const char *GetName() {return nm.name;}
	const char *GetTypeName() {return "LwMutex";}
	static u32 GetMissingErrorCode() { return PSP_LWMUTEX_ERROR_NOT_FOUND; }
	int GetIDType() const { return SCE_KERNEL_TMID_LwMutex; }

	NativeLwMutex nm;
	// Thread IDs blocked waiting on this lwmutex, in arrival order.
	std::vector<SceUID> waitingThreads;
};
// Lazy-init state for this module's CoreTiming timeout events.
// A timer value of 0 means "not registered"; callers must check for that.
bool mutexInitComplete = false;
int mutexWaitTimer = 0;
int lwMutexWaitTimer = 0;

// Registers the mutex timeout event with CoreTiming. Called lazily from
// the create functions the first time a mutex is created.
void __KernelMutexInit()
{
	mutexWaitTimer = CoreTiming::RegisterEvent("MutexTimeout", &__KernelMutexTimeout);

	// TODO: Write / enable.
	//lwMutexWaitTimer = CoreTiming::RegisterEvent("LwMutexTimeout", &__KernelLwMutexTimeout);

	mutexInitComplete = true;
}
void sceKernelCreateMutex(const char *name, u32 attr, int initialCount, u32 optionsPtr)
2012-11-01 16:19:01 +01:00
{
if (!mutexInitComplete)
__KernelMutexInit();
u32 error = 0;
2012-11-17 23:08:19 -08:00
if (!name)
error = SCE_KERNEL_ERROR_ERROR;
2012-11-17 23:08:19 -08:00
else if (initialCount < 0)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
2012-11-17 23:08:19 -08:00
else if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
if (error)
{
RETURN(error);
return;
}
DEBUG_LOG(HLE,"sceKernelCreateMutex(%s, %08x, %d, %08x)", name, attr, initialCount, optionsPtr);
2012-11-01 16:19:01 +01:00
Mutex *mutex = new Mutex();
SceUID id = kernelObjects.Create(mutex);
mutex->nm.size = sizeof(mutex);
strncpy(mutex->nm.name, name, 31);
mutex->nm.name[31] = 0;
2012-11-01 16:19:01 +01:00
mutex->nm.attr = attr;
mutex->nm.lockLevel = initialCount;
2012-11-18 16:18:06 -08:00
if (mutex->nm.lockLevel == 0)
mutex->nm.lockThread = -1;
else
mutex->nm.lockThread = __KernelGetCurThread();
if (optionsPtr != 0)
WARN_LOG(HLE,"sceKernelCreateMutex(%s) unsupported options parameter.", name);
RETURN(id);
2012-11-01 16:19:01 +01:00
__KernelReSchedule("mutex created");
2012-11-01 16:19:01 +01:00
}
// Deletes a mutex. All threads blocked on it are woken with
// SCE_KERNEL_ERROR_WAIT_DELETE, and any pending timeout is written back
// as remaining microseconds.
void sceKernelDeleteMutex(SceUID id)
{
	DEBUG_LOG(HLE,"sceKernelDeleteMutex(%i)", id);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
	if (mutex)
	{
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && mutexWaitTimer != 0)
			{
				// Remove any event for this thread and report the time left.
				int cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
				Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		}
		// Fix: was waitingThreads.empty(), which only *tests* for emptiness
		// and discards the result; clear() actually empties the list.
		mutex->waitingThreads.clear();

		RETURN(kernelObjects.Destroy<Mutex>(id));
		__KernelReSchedule("mutex deleted");
	}
	else
		RETURN(error);
}
2012-11-17 23:08:19 -08:00
bool __KernelLockMutex(Mutex *mutex, int count, u32 &error)
2012-11-01 16:19:01 +01:00
{
2012-11-17 23:08:19 -08:00
if (!error)
{
2012-11-17 23:08:19 -08:00
if (count <= 0)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
else if (count > 1 && !(mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE))
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
// Two positive ints will always sum to negative.
else if (count + mutex->nm.lockLevel < 0)
error = PSP_MUTEX_ERROR_LOCK_OVERFLOW;
}
2012-11-17 23:08:19 -08:00
if (error)
return false;
2012-11-01 16:19:01 +01:00
if (mutex->nm.lockLevel == 0)
{
mutex->nm.lockLevel += count;
mutex->nm.lockThread = __KernelGetCurThread();
2012-11-17 23:08:19 -08:00
// Nobody had it locked - no need to block
return true;
2012-11-01 16:19:01 +01:00
}
2012-11-17 23:08:19 -08:00
if (mutex->nm.lockThread == __KernelGetCurThread())
2012-11-01 16:19:01 +01:00
{
// Recursive mutex, let's just increase the lock count and keep going
if ((mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE))
2012-11-17 23:08:19 -08:00
{
mutex->nm.lockLevel += count;
2012-11-17 23:08:19 -08:00
return true;
}
else
2012-11-17 23:08:19 -08:00
{
error = PSP_MUTEX_ERROR_ALREADY_LOCKED;
return false;
}
}
return false;
}
// CoreTiming callback fired when a mutex wait times out.
// userdata carries the ID of the waiting thread.
void __KernelMutexTimeout(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID) userdata;

	u32 error;
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
	{
		// No time remaining - report zero back through the timeout pointer.
		Memory::Write_U32(0, timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
2012-11-18 19:40:19 -08:00
void __KernelWaitMutex(Mutex *mutex, u32 timeoutPtr)
{
if (timeoutPtr == 0 || mutexWaitTimer == 0)
return;
// This should call __KernelMutexTimeout() later, unless we cancel it.
int micro = (int) Memory::Read_U32(timeoutPtr);
CoreTiming::ScheduleEvent(usToCycles(micro), mutexWaitTimer, __KernelGetCurThread());
}
2012-11-17 23:08:19 -08:00
// int sceKernelLockMutex(SceUID id, int count, int *timeout)
// void because it changes threads.
void sceKernelLockMutex(SceUID id, int count, u32 timeoutPtr)
{
DEBUG_LOG(HLE,"sceKernelLockMutex(%i, %i, %08x)", id, count, timeoutPtr);
u32 error;
Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
if (__KernelLockMutex(mutex, count, error))
{
RETURN(0);
__KernelReSchedule("mutex locked");
2012-11-01 16:19:01 +01:00
}
2012-11-17 23:08:19 -08:00
else if (error)
RETURN(error);
2012-11-01 16:19:01 +01:00
else
{
mutex->waitingThreads.push_back(__KernelGetCurThread());
2012-11-18 19:40:19 -08:00
__KernelWaitMutex(mutex, timeoutPtr);
__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, false);
2012-11-01 16:19:01 +01:00
}
}
// int sceKernelLockMutexCB(SceUID id, int count, int *timeout)
// void because it changes threads. Callback-enabled variant: the wait is
// callback-aware and pending callbacks are checked before blocking.
void sceKernelLockMutexCB(SceUID id, int count, u32 timeoutPtr)
{
	DEBUG_LOG(HLE,"sceKernelLockMutexCB(%i, %i, %08x)", id, count, timeoutPtr);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	if (__KernelLockMutex(mutex, count, error))
	{
		// Fix: this branch previously also called __KernelReSchedule()
		// here, so the success path rescheduled twice (once here and once
		// at the bottom). Rescheduling once is enough.
		RETURN(0);
	}
	else if (error)
		RETURN(error);
	else
	{
		mutex->waitingThreads.push_back(__KernelGetCurThread());
		__KernelWaitMutex(mutex, timeoutPtr);
		__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, true);
		__KernelCheckCallbacks();
	}

	__KernelReSchedule("mutex locked");
}
// int sceKernelTryLockMutex(SceUID id, int count)
// void because it changes threads.
void sceKernelTryLockMutex(SceUID id, int count)
{
	DEBUG_LOG(HLE,"sceKernelTryLockMutex(%i, %i)", id, count);

	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	const bool locked = __KernelLockMutex(mutex, count, error);
	if (locked)
	{
		RETURN(0);
		__KernelReSchedule("mutex trylocked");
		return;
	}

	// Never blocks: failure to acquire is reported immediately.
	RETURN(error ? error : PSP_MUTEX_ERROR_TRYLOCK_FAILED);
}
// int sceKernelUnlockMutex(SceUID id, int count)
// void because it changes threads.
void sceKernelUnlockMutex(SceUID id, int count)
{
	DEBUG_LOG(HLE,"sceKernelUnlockMutex(%i, %i)", id, count);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	// Validate only when the mutex lookup itself succeeded.
	if (!error)
	{
		if (count <= 0)
			error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
		// Unlocking more than one level at once requires a recursive mutex.
		else if ((mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1)
			error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
		else if (mutex->nm.lockLevel == 0)
			error = PSP_MUTEX_ERROR_NOT_LOCKED;
		else if (mutex->nm.lockLevel < count)
			error = PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW;
	}

	if (error)
	{
		RETURN(error);
		return;
	}

	mutex->nm.lockLevel -= count;
	// Success is reported now; any hand-off/reschedule below happens after.
	RETURN(0);

	if (mutex->nm.lockLevel == 0)
	{
		mutex->nm.lockThread = -1;

		// TODO: PSP_MUTEX_ATTR_PRIORITY
		bool wokeThreads = false;
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			// Hand the mutex directly to the first waiter, using the lock
			// count it originally asked for (stored as its wait value).
			int wVal = (int)__KernelGetWaitValue(threadID, error);
			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

			mutex->nm.lockThread = threadID;
			mutex->nm.lockLevel = wVal;

			if (timeoutPtr != 0 && mutexWaitTimer != 0)
			{
				// Remove any event for this thread.
				int cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
				Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, 0);
			wokeThreads = true;
			// Only one waiter is woken per unlock; erase invalidates iter,
			// so we must break out of the loop immediately.
			mutex->waitingThreads.erase(iter);
			break;
		}

		__KernelReSchedule("mutex unlocked");
	}
}
2012-11-06 20:56:19 +01:00
2012-11-19 01:01:19 -08:00
void sceKernelCreateLwMutex(u32 workareaPtr, const char *name, u32 attr, int initialCount, u32 optionsPtr)
2012-11-06 20:56:19 +01:00
{
if (!mutexInitComplete)
__KernelMutexInit();
2012-11-19 01:01:19 -08:00
DEBUG_LOG(HLE,"sceKernelCreateLwMutex(%08x, %s, %08x, %d, %08x)", workareaPtr, name, attr, initialCount, optionsPtr);
u32 error = 0;
if (!name)
error = SCE_KERNEL_ERROR_ERROR;
else if (initialCount < 0)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
else if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
if (error)
{
RETURN(error);
return;
}
LwMutex *mutex = new LwMutex();
SceUID id = kernelObjects.Create(mutex);
mutex->nm.size = sizeof(mutex);
strncpy(mutex->nm.name, name, 31);
mutex->nm.name[31] = 0;
mutex->nm.attr = attr;
mutex->nm.workareaPtr = workareaPtr;
NativeLwMutexWorkarea workarea;
workarea.init();
workarea.lockLevel = initialCount;
if (initialCount == 0)
workarea.lockThread = 0;
else
workarea.lockThread = __KernelGetCurThread();
workarea.attr = attr;
workarea.uid = id;
Memory::WriteStruct(workareaPtr, &workarea);
if (optionsPtr != 0)
WARN_LOG(HLE,"sceKernelCreateLwMutex(%s) unsupported options parameter.", name);
2012-11-06 20:56:19 +01:00
RETURN(0);
2012-11-19 01:01:19 -08:00
__KernelReSchedule("lwmutex created");
2012-11-06 20:56:19 +01:00
}
2012-11-19 01:01:19 -08:00
void sceKernelDeleteLwMutex(u32 workareaPtr)
2012-11-06 20:56:19 +01:00
{
2012-11-19 01:01:19 -08:00
DEBUG_LOG(HLE,"sceKernelDeleteLwMutex(%08x)", workareaPtr);
if (!workareaPtr || !Memory::IsValidAddress(workareaPtr))
{
RETURN(SCE_KERNEL_ERROR_ILLEGAL_ADDR);
return;
}
NativeLwMutexWorkarea workarea;
Memory::ReadStruct(workareaPtr, &workarea);
u32 error;
LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
if (mutex)
{
std::vector<SceUID>::iterator iter, end;
for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
{
SceUID threadID = *iter;
u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
if (timeoutPtr != 0 && lwMutexWaitTimer != 0)
2012-11-19 01:01:19 -08:00
{
// Remove any event for this thread.
int cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
2012-11-19 01:01:19 -08:00
Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr);
}
__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
}
mutex->waitingThreads.empty();
RETURN(kernelObjects.Destroy<LwMutex>(workarea.uid));
workarea.clear();
Memory::WriteStruct(workareaPtr, &workarea);
__KernelReSchedule("mutex deleted");
}
else
RETURN(error);
2012-11-06 20:56:19 +01:00
}
// Stub: lwmutex trylock is not implemented; always reports success.
// NOTE(review): parameters are not declared/decoded yet - confirm the
// real signature (workarea pointer, count) before implementing.
void sceKernelTryLockLwMutex()
{
	ERROR_LOG(HLE,"UNIMPL sceKernelTryLockLwMutex()");
	RETURN(0);
}
// Stub: lwmutex lock is not implemented; always reports success.
// NOTE(review): parameters are not declared/decoded yet - confirm the
// real signature before implementing.
void sceKernelLockLwMutex()
{
	ERROR_LOG(HLE,"UNIMPL sceKernelLockLwMutex()");
	RETURN(0);
}
// Stub: callback-enabled lwmutex lock is not implemented; always reports
// success. No callback check is performed, unlike sceKernelLockMutexCB.
void sceKernelLockLwMutexCB()
{
	ERROR_LOG(HLE,"UNIMPL sceKernelLockLwMutexCB()");
	RETURN(0);
}
void sceKernelUnlockLwMutex()
{
ERROR_LOG(HLE,"UNIMPL void sceKernelUnlockLwMutex()");
2012-11-06 20:56:19 +01:00
RETURN(0);
}