2012-11-01 16:19:01 +01:00
// Copyright (c) 2012- PPSSPP Project.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
2012-11-04 23:01:49 +01:00
// the Free Software Foundation, version 2.0 or later versions.
2012-11-01 16:19:01 +01:00
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.
// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
2018-05-08 17:23:14 -07:00
#include <algorithm>
#include <cstdio>
#include <map>
#include <mutex>
#include <queue>
#include <set>

#include "base/logging.h"
#include "Common/LogManager.h"
#include "Common/CommonTypes.h"
#include "Core/HLE/HLE.h"
#include "Core/HLE/HLETables.h"
#include "Core/MIPS/MIPSAnalyst.h"
#include "Core/MIPS/MIPSCodeUtils.h"
#include "Core/MIPS/MIPS.h"
#include "Core/MIPS/MIPSDebugInterface.h"
#include "Core/CoreTiming.h"
#include "Core/MemMapHelpers.h"
#include "Core/MIPS/JitCommon/JitCommon.h"
#include "Core/Reporting.h"
#include "Common/ChunkFile.h"

#include "Core/HLE/sceAudio.h"
#include "Core/HLE/sceKernel.h"
#include "Core/HLE/sceKernelMemory.h"
#include "Core/HLE/sceKernelThread.h"
#include "Core/HLE/sceKernelModule.h"
#include "Core/HLE/sceKernelInterrupt.h"
#include "Core/HLE/KernelThreadDebugInterface.h"
#include "Core/HLE/KernelWaitHelpers.h"
#include "Core/HLE/ThreadQueueList.h"
2012-11-01 16:19:01 +01:00
2013-07-06 21:40:41 +02:00
// One row of the wait-type -> display-name table below.
// (Plain struct instead of the C-style "typedef struct" idiom.)
struct WaitTypeNames {
	WaitType type;
	const char *name;
};
// Display names for each wait type, used by getWaitTypeName() for logging
// and the debugger.  Order does not matter; lookups scan linearly.
const WaitTypeNames waitTypeNames[] = {
	{ WAITTYPE_NONE, "None" },
	{ WAITTYPE_SLEEP, "Sleep" },
	{ WAITTYPE_DELAY, "Delay" },
	{ WAITTYPE_SEMA, "Semaphore" },
	{ WAITTYPE_EVENTFLAG, "Event flag", },
	{ WAITTYPE_MBX, "MBX" },
	{ WAITTYPE_VPL, "VPL" },
	{ WAITTYPE_FPL, "FPL" },
	{ WAITTYPE_MSGPIPE, "Message pipe" },
	{ WAITTYPE_THREADEND, "Thread end" },
	{ WAITTYPE_AUDIOCHANNEL, "Audio channel" },
	{ WAITTYPE_UMD, "UMD" },
	{ WAITTYPE_VBLANK, "VBlank" },
	{ WAITTYPE_MUTEX, "Mutex" },
	{ WAITTYPE_LWMUTEX, "LwMutex" },
	{ WAITTYPE_CTRL, "Control" },
	{ WAITTYPE_IO, "IO" },
	{ WAITTYPE_GEDRAWSYNC, "GeDrawSync" },
	{ WAITTYPE_GELISTSYNC, "GeListSync" },
	{ WAITTYPE_MODULE, "Module" },
	{ WAITTYPE_HLEDELAY, "HleDelay" },
	{ WAITTYPE_TLSPL, "TLS" },
	{ WAITTYPE_VMEM, "Volatile Mem" },
	{ WAITTYPE_ASYNCIO, "AsyncIO" },
};
2013-07-06 23:06:16 -07:00
// Returns the human-readable name for a wait type (see waitTypeNames above),
// or "Unknown" for any value missing from the table.  Linear scan; the table
// is tiny and this is only used for logging/debugging.
const char *getWaitTypeName(WaitType type)
{
	for (const auto &entry : waitTypeNames)
	{
		if (entry.type == type)
			return entry.name;
	}
	return "Unknown";
}
2012-11-01 16:19:01 +01:00
2014-01-05 20:20:56 -08:00
// Bit flags describing which thread lifecycle events a registered thread
// event handler wants (combined as a mask).
enum ThreadEventType {
	THREADEVENT_CREATE = 1,
	THREADEVENT_START  = 2,
	THREADEVENT_EXIT   = 4,
	THREADEVENT_DELETE = 8,
	// All events this implementation can dispatch.
	THREADEVENT_SUPPORTED = THREADEVENT_CREATE | THREADEVENT_START | THREADEVENT_EXIT | THREADEVENT_DELETE,
};

// Forward declaration; fires registered thread event handlers for the event.
bool __KernelThreadTriggerEvent(bool isKernel, SceUID threadID, ThreadEventType type);
2014-01-05 20:20:56 -08:00
// PSP thread attribute bits, stored in NativeThread::attr.
enum {
	PSP_THREAD_ATTR_KERNEL       = 0x00001000,
	PSP_THREAD_ATTR_VFPU         = 0x00004000,
	PSP_THREAD_ATTR_SCRATCH_SRAM = 0x00008000, // Save/restore scratch as part of context???
	PSP_THREAD_ATTR_NO_FILLSTACK = 0x00100000, // No filling of 0xff.
	PSP_THREAD_ATTR_CLEAR_STACK  = 0x00200000, // Clear thread stack when deleted.
	PSP_THREAD_ATTR_LOW_STACK    = 0x00400000, // Allocate stack from bottom not top.
	PSP_THREAD_ATTR_USER         = 0x80000000,
	PSP_THREAD_ATTR_USBWLAN      = 0xa0000000,
	PSP_THREAD_ATTR_VSH          = 0xc0000000,

	// TODO: Support more, not even sure what all of these mean.
	PSP_THREAD_ATTR_USER_MASK    = 0xf8f060ff,
	PSP_THREAD_ATTR_USER_ERASE   = 0x78800000,
	// Attributes the emulated kernel actually honors.
	PSP_THREAD_ATTR_SUPPORTED    = (PSP_THREAD_ATTR_KERNEL | PSP_THREAD_ATTR_VFPU | PSP_THREAD_ATTR_NO_FILLSTACK | PSP_THREAD_ATTR_CLEAR_STACK | PSP_THREAD_ATTR_LOW_STACK | PSP_THREAD_ATTR_USER)
};
2012-11-07 15:44:48 +01:00
// Per-callback record.  Serialized raw into savestates (see PSPCallback::DoState),
// so field order/size must not change; fields use little-endian wrapper types.
struct NativeCallback
{
	SceUInt_le size;
	char name[32];
	SceUID_le threadId;     // Owning thread (printed by GetQuickInfo).
	u32_le entrypoint;      // Guest address invoked when the callback fires.
	u32_le commonArgument;

	s32_le notifyCount;
	s32_le notifyArg;
};
2020-03-15 08:33:40 -07:00
class PSPCallback : public KernelObject {
2012-11-07 15:44:48 +01:00
public :
2014-12-08 15:14:35 -05:00
const char * GetName ( ) override { return nc . name ; }
const char * GetTypeName ( ) override { return " CallBack " ; }
2012-11-07 15:44:48 +01:00
2020-03-15 08:33:40 -07:00
void GetQuickInfo ( char * ptr , int size ) override {
2012-11-07 15:44:48 +01:00
sprintf ( ptr , " thread=%i, argument= %08x " ,
//hackAddress,
nc . threadId ,
nc . commonArgument ) ;
}
2020-03-15 08:33:40 -07:00
~ PSPCallback ( ) {
2012-11-07 15:44:48 +01:00
}
static u32 GetMissingErrorCode ( ) { return SCE_KERNEL_ERROR_UNKNOWN_CBID ; }
2013-06-18 23:54:29 -07:00
static int GetStaticIDType ( ) { return SCE_KERNEL_TMID_Callback ; }
2014-12-08 15:14:35 -05:00
int GetIDType ( ) const override { return SCE_KERNEL_TMID_Callback ; }
2012-11-07 15:44:48 +01:00
2014-12-08 15:14:35 -05:00
void DoState ( PointerWrap & p ) override
2012-12-26 22:45:19 -08:00
{
2013-09-14 20:23:03 -07:00
auto s = p . Section ( " Callback " , 1 ) ;
if ( ! s )
return ;
2012-12-26 22:45:19 -08:00
p . Do ( nc ) ;
2016-05-28 16:59:19 -07:00
// Saved values were moved to mips call, ignoring here.
u32 legacySaved = 0 ;
p . Do ( legacySaved ) ;
p . Do ( legacySaved ) ;
p . Do ( legacySaved ) ;
p . Do ( legacySaved ) ;
p . Do ( legacySaved ) ;
2012-12-26 22:45:19 -08:00
}
2012-11-07 15:44:48 +01:00
NativeCallback nc ;
} ;
2013-07-25 22:25:50 -07:00
// WaitType as stored inside guest-endian structs: a plain alias on
// little-endian hosts, otherwise a byte-swapping wrapper.
#if COMMON_LITTLE_ENDIAN
typedef WaitType WaitType_le;
#else
typedef swap_struct_t<WaitType, swap_32_t<WaitType> > WaitType_le;
#endif
2013-09-02 13:40:41 -07:00
// Real PSP struct, don't change the fields.
// Snapshot of a thread's run/scheduling counters; presumably what
// sceKernelReferThreadRunStatus fills in — verify against callers.
struct SceKernelThreadRunStatus
{
	SceSize_le size;
	u32_le status;
	s32_le currentPriority;
	WaitType_le waitType;
	SceUID_le waitID;
	s32_le wakeupCount;
	SceKernelSysClock runForClocks;
	s32_le numInterruptPreempts;
	s32_le numThreadPreempts;
	s32_le numReleases;
};
// Real PSP struct, don't change the fields.
struct NativeThread
{
	u32_le nativeSize;
	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];

	// Threading stuff
	u32_le attr;            // PSP_THREAD_ATTR_* bits.
	u32_le status;          // THREADSTATUS_* bits.
	u32_le entrypoint;
	u32_le initialStack;    // Base guest address of the thread stack.
	u32_le stackSize;
	u32_le gpreg;
	s32_le initialPriority;
	s32_le currentPriority;
	WaitType_le waitType;
	SceUID_le waitID;
	s32_le wakeupCount;
	s32_le exitStatus;
	SceKernelSysClock runForClocks;
	s32_le numInterruptPreempts;
	s32_le numThreadPreempts;
	s32_le numReleases;
};
2012-11-07 15:44:48 +01:00
// Host-side extra state for an in-progress wait (not part of the PSP ABI).
struct ThreadWaitInfo {
	u32 waitValue;    // Interpretation depends on the wait type.
	u32 timeoutPtr;   // Guest address holding the wait's timeout, if any.
};
2012-12-23 21:49:44 -08:00
// Owns outstanding MIPS calls and provides a way to get them by ID.
class MipsCallManager {
public :
MipsCallManager ( ) : idGen_ ( 0 ) { }
2013-03-02 14:58:58 -08:00
u32 add ( MipsCall * call ) {
u32 id = genId ( ) ;
2012-12-23 21:49:44 -08:00
calls_ . insert ( std : : pair < int , MipsCall * > ( id , call ) ) ;
return id ;
}
2013-03-02 14:58:58 -08:00
MipsCall * get ( u32 id ) {
2013-03-31 12:09:59 -07:00
auto iter = calls_ . find ( id ) ;
if ( iter = = calls_ . end ( ) )
return NULL ;
return iter - > second ;
2012-12-23 21:49:44 -08:00
}
2013-03-02 14:58:58 -08:00
MipsCall * pop ( u32 id ) {
2012-12-23 21:49:44 -08:00
MipsCall * temp = calls_ [ id ] ;
calls_ . erase ( id ) ;
return temp ;
}
void clear ( ) {
2013-03-02 14:58:58 -08:00
for ( auto it = calls_ . begin ( ) , end = calls_ . end ( ) ; it ! = end ; + + it ) {
2012-12-27 19:45:00 -08:00
delete it - > second ;
}
2012-12-23 21:49:44 -08:00
calls_ . clear ( ) ;
2015-03-28 13:01:49 -07:00
types_ . clear ( ) ;
2012-12-23 21:49:44 -08:00
idGen_ = 0 ;
}
2012-12-27 19:45:00 -08:00
int registerActionType ( ActionCreator creator ) {
2012-12-27 19:30:36 -08:00
types_ . push_back ( creator ) ;
2013-01-19 13:48:20 -08:00
return ( int ) types_ . size ( ) - 1 ;
2012-12-27 19:30:36 -08:00
}
2012-12-27 19:45:00 -08:00
void restoreActionType ( int actionType , ActionCreator creator ) {
2012-12-27 19:30:36 -08:00
if ( actionType > = ( int ) types_ . size ( ) )
types_ . resize ( actionType + 1 , NULL ) ;
types_ [ actionType ] = creator ;
}
2020-03-15 08:33:40 -07:00
PSPAction * createActionByType ( int actionType ) {
2012-12-27 19:30:36 -08:00
if ( actionType < ( int ) types_ . size ( ) & & types_ [ actionType ] ! = NULL ) {
2020-03-15 08:33:40 -07:00
PSPAction * a = types_ [ actionType ] ( ) ;
2012-12-27 19:30:36 -08:00
a - > actionTypeID = actionType ;
return a ;
}
return NULL ;
}
void DoState ( PointerWrap & p ) {
2013-09-14 20:23:03 -07:00
auto s = p . Section ( " MipsCallManager " , 1 ) ;
if ( ! s )
return ;
2013-02-04 01:31:02 -08:00
p . Do ( calls_ ) ;
2012-12-27 19:45:00 -08:00
p . Do ( idGen_ ) ;
2012-12-27 19:30:36 -08:00
}
2012-12-23 21:49:44 -08:00
private :
2013-03-02 14:58:58 -08:00
u32 genId ( ) { return + + idGen_ ; }
std : : map < u32 , MipsCall * > calls_ ;
2012-12-27 19:30:36 -08:00
std : : vector < ActionCreator > types_ ;
2013-03-02 14:58:58 -08:00
u32 idGen_ ;
2012-12-23 21:49:44 -08:00
} ;
2020-03-15 08:33:40 -07:00
// Action that restores a thread's saved wait state after a MIPS call
// finishes executing on it.
class ActionAfterMipsCall : public PSPAction
{
	// Private: instances are created through Create() / the action registry.
	ActionAfterMipsCall()
	{
		chainedAction = NULL;
	}
public:
	void run(MipsCall &call) override;

	static PSPAction *Create() {
		return new ActionAfterMipsCall();
	}

	void DoState(PointerWrap &p) override
	{
		auto s = p.Section("ActionAfterMipsCall", 1);
		if (!s)
			return;

		p.Do(threadID);
		p.Do(status);
		p.Do(waitType);
		p.Do(waitID);
		p.Do(waitInfo);
		p.Do(isProcessingCallbacks);
		p.Do(currentCallbackId);

		// A chained action is serialized as its registered type id, then its
		// own state; type id 0 means "no chained action".
		int chainedActionType = 0;
		if (chainedAction != NULL)
			chainedActionType = chainedAction->actionTypeID;
		p.Do(chainedActionType);
		if (chainedActionType != 0)
		{
			if (p.mode == p.MODE_READ)
				chainedAction = __KernelCreateAction(chainedActionType);
			chainedAction->DoState(p);
		}
	}

	SceUID threadID;

	// Saved thread state
	int status;
	WaitType waitType;
	int waitID;
	ThreadWaitInfo waitInfo;
	bool isProcessingCallbacks;
	SceUID currentCallbackId;

	// Another action to run after this one, or NULL.
	PSPAction *chainedAction;
};
2012-12-09 16:56:16 -08:00
2020-03-15 08:33:40 -07:00
class ActionAfterCallback : public PSPAction
2012-12-27 19:30:36 -08:00
{
public :
ActionAfterCallback ( ) { }
2014-12-08 15:14:35 -05:00
void run ( MipsCall & call ) override ;
2012-12-27 19:30:36 -08:00
2020-03-15 08:33:40 -07:00
static PSPAction * Create ( ) {
2012-12-27 19:30:36 -08:00
return new ActionAfterCallback ;
}
void setCallback ( SceUID cbId_ )
{
cbId = cbId_ ;
}
2014-12-08 15:14:35 -05:00
void DoState ( PointerWrap & p ) override
2012-12-27 19:30:36 -08:00
{
2013-09-14 20:23:03 -07:00
auto s = p . Section ( " ActionAfterCallback " , 1 ) ;
if ( ! s )
return ;
2012-12-27 19:30:36 -08:00
p . Do ( cbId ) ;
}
SceUID cbId ;
} ;
2012-12-09 16:56:16 -08:00
2020-03-15 08:33:40 -07:00
// Kernel object representing one emulated PSP thread: its guest-visible
// NativeThread record, MIPS context, stack bookkeeping, callbacks, and
// pending MIPS calls.
class PSPThread : public KernelObject {
public:
	const char *GetName() override { return nt.name; }
	const char *GetTypeName() override { return "Thread"; }
	void GetQuickInfo(char *ptr, int size) override
	{
		sprintf(ptr, "pc= %08x sp= %08x %s %s %s %s %s %s (wt=%i wid=%i wv= %08x )",
			context.pc, context.r[MIPS_REG_SP],
			(nt.status & THREADSTATUS_RUNNING) ? "RUN" : "",
			(nt.status & THREADSTATUS_READY) ? "READY" : "",
			(nt.status & THREADSTATUS_WAIT) ? "WAIT" : "",
			(nt.status & THREADSTATUS_SUSPEND) ? "SUSPEND" : "",
			(nt.status & THREADSTATUS_DORMANT) ? "DORMANT" : "",
			(nt.status & THREADSTATUS_DEAD) ? "DEAD" : "",
			(int)nt.waitType,
			nt.waitID,
			waitInfo.waitValue);
	}

	static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_THID; }
	static int GetStaticIDType() { return SCE_KERNEL_TMID_Thread; }
	int GetIDType() const override { return SCE_KERNEL_TMID_Thread; }

	// Allocates the thread's stack from kernel or user RAM depending on attr.
	// On success sets nt.initialStack/nt.stackSize and returns true; on
	// failure leaves them zeroed and returns false.
	bool AllocateStack(u32 &stackSize) {
		// NOTE(review): 0x200 is 512 bytes, but the message says 256 — one of
		// the two looks wrong; confirm against real PSP behavior.
		_assert_msg_(SCEKERNEL, stackSize >= 0x200, "thread stack should be 256 bytes or larger");

		// Release any previously-held stack first.
		FreeStack();

		bool fromTop = (nt.attr & PSP_THREAD_ATTR_LOW_STACK) == 0;
		if (nt.attr & PSP_THREAD_ATTR_KERNEL)
		{
			// Allocate stacks for kernel threads (idle) in kernel RAM
			currentStack.start = kernelMemory.Alloc(stackSize, fromTop, (std::string("stack/") + nt.name).c_str());
		}
		else
		{
			currentStack.start = userMemory.Alloc(stackSize, fromTop, (std::string("stack/") + nt.name).c_str());
		}
		if (currentStack.start == (u32)-1)
		{
			currentStack.start = 0;
			nt.initialStack = 0;
			ERROR_LOG(SCEKERNEL, "Failed to allocate stack for thread");
			return false;
		}

		nt.initialStack = currentStack.start;
		nt.stackSize = stackSize;
		return true;
	}

	// Initializes stack contents and the k0 scratch area, and points SP/K0
	// into the freshly allocated stack.
	bool FillStack() {
		// Fill the stack.
		if ((nt.attr & PSP_THREAD_ATTR_NO_FILLSTACK) == 0) {
			Memory::Memset(currentStack.start, 0xFF, nt.stackSize);
		}
		context.r[MIPS_REG_SP] = currentStack.start + nt.stackSize;
		currentStack.end = context.r[MIPS_REG_SP];
		// The k0 section is 256 bytes at the top of the stack.
		context.r[MIPS_REG_SP] -= 256;
		context.r[MIPS_REG_K0] = context.r[MIPS_REG_SP];
		u32 k0 = context.r[MIPS_REG_K0];
		Memory::Memset(k0, 0, 0x100);
		// k0+0xc0: thread UID, k0+0xc8: stack base — offsets match what the
		// real kernel keeps there (see PSP k0 layout docs/tests).
		Memory::Write_U32(GetUID(), k0 + 0xc0);
		Memory::Write_U32(nt.initialStack, k0 + 0xc8);
		Memory::Write_U32(0xffffffff, k0 + 0xf8);
		Memory::Write_U32(0xffffffff, k0 + 0xfc);
		// After k0 comes the arguments, which is done by sceKernelStartThread().
		Memory::Write_U32(GetUID(), nt.initialStack);
		return true;
	}

	// Frees the current stack allocation, optionally zeroing it first when the
	// thread was created with PSP_THREAD_ATTR_CLEAR_STACK.
	void FreeStack() {
		if (currentStack.start != 0) {
			DEBUG_LOG(SCEKERNEL, "Freeing thread stack %s", nt.name);

			if ((nt.attr & PSP_THREAD_ATTR_CLEAR_STACK) != 0 && nt.initialStack != 0) {
				Memory::Memset(nt.initialStack, 0, nt.stackSize);
			}

			if (nt.attr & PSP_THREAD_ATTR_KERNEL) {
				kernelMemory.Free(currentStack.start);
			} else {
				userMemory.Free(currentStack.start);
			}
			currentStack.start = 0;
		}
	}

	// Switches to a temporary larger stack (sceKernelExtendThreadStack),
	// saving the current one so PopExtendedStack() can restore it.
	bool PushExtendedStack(u32 size)
	{
		u32 stack = userMemory.Alloc(size, true, (std::string("extended/") + nt.name).c_str());
		if (stack == (u32)-1)
			return false;

		pushedStacks.push_back(currentStack);
		currentStack.start = stack;
		currentStack.end = stack + size;
		nt.initialStack = currentStack.start;
		nt.stackSize = currentStack.end - currentStack.start;

		// We still drop the threadID at the bottom and fill it, but there's no k0.
		Memory::Memset(currentStack.start, 0xFF, nt.stackSize);
		Memory::Write_U32(GetUID(), nt.initialStack);
		return true;
	}

	// Frees the extended stack and restores the previously pushed one.
	// Returns false if no extended stack is active.
	bool PopExtendedStack()
	{
		if (pushedStacks.size() == 0)
			return false;

		userMemory.Free(currentStack.start);
		currentStack = pushedStacks.back();
		pushedStacks.pop_back();
		nt.initialStack = currentStack.start;
		nt.stackSize = currentStack.end - currentStack.start;
		return true;
	}

	PSPThread() : debug(currentMIPS, context) {
		currentStack.start = 0;
	}

	// Can't use a destructor since savestates will call that too.
	void Cleanup()
	{
		// Callbacks are automatically deleted when their owning thread is deleted.
		for (auto it = callbacks.begin(), end = callbacks.end(); it != end; ++it)
			kernelObjects.Destroy<PSPCallback>(*it);

		if (pushedStacks.size() != 0)
		{
			WARN_LOG_REPORT(SCEKERNEL, "Thread ended within an extended stack");
			for (size_t i = 0; i < pushedStacks.size(); ++i)
				userMemory.Free(pushedStacks[i].start);
		}
		FreeStack();
	}

	void setReturnValue(u32 retval);
	void setReturnValue(u64 retval);
	void resumeFromWait();
	bool isWaitingFor(WaitType type, int id) const;
	int getWaitID(WaitType type) const;
	ThreadWaitInfo getWaitInfo() const;

	// Utils
	inline bool isRunning() const { return (nt.status & THREADSTATUS_RUNNING) != 0; }
	inline bool isStopped() const { return (nt.status & THREADSTATUS_DORMANT) != 0; }
	inline bool isReady() const { return (nt.status & THREADSTATUS_READY) != 0; }
	inline bool isWaiting() const { return (nt.status & THREADSTATUS_WAIT) != 0; }
	inline bool isSuspended() const { return (nt.status & THREADSTATUS_SUSPEND) != 0; }

	void DoState(PointerWrap &p) override
	{
		auto s = p.Section("Thread", 1, 5);
		if (!s)
			return;

		p.Do(nt);
		p.Do(waitInfo);
		p.Do(moduleId);
		p.Do(isProcessingCallbacks);
		p.Do(currentMipscallId);
		p.Do(currentCallbackId);

		// TODO: If we want to "version" a DoState method here, we can just use minVer = 0.
		p.Do(context);

		if (s <= 3)
		{
			// We must have been loading an old state if we're here.
			// Reorder VFPU data to new order.
			float temp[128];
			memcpy(temp, context.v, 128 * sizeof(float));
			for (int i = 0; i < 128; i++) {
				context.v[voffset[i]] = temp[i];
			}
		}

		if (s <= 2)
		{
			// Migration for very old states: shift the "other" registers.
			context.other[4] = context.other[5];
			context.other[3] = context.other[4];
		}

		if (s <= 4)
			// Old states stored hi/lo swapped.
			std::swap(context.hi, context.lo);

		p.Do(callbacks);
		p.Do(pendingMipsCalls);
		p.Do(pushedStacks);
		p.Do(currentStack);

		if (s >= 2)
		{
			p.Do(waitingThreads);
			p.Do(pausedWaits);
		}
	}

	// Guest-visible thread record (raw-serialized; field layout is fixed).
	NativeThread nt;
	ThreadWaitInfo waitInfo;
	SceUID moduleId;
	bool isProcessingCallbacks;
	u32 currentMipscallId;
	SceUID currentCallbackId;

	PSPThreadContext context;
	KernelThreadDebugInterface debug;

	// Callback UIDs owned by this thread; destroyed in Cleanup().
	std::vector<SceUID> callbacks;

	std::list<u32> pendingMipsCalls;

	struct StackInfo {
		u32 start;
		u32 end;
	};
	// This is a stack of... stacks, since sceKernelExtendThreadStack() can recurse.
	// These are stacks that aren't "active" right now, but will pop off once the func returns.
	std::vector<StackInfo> pushedStacks;
	StackInfo currentStack;

	// For thread end.
	std::vector<SceUID> waitingThreads;
	// Key is the callback id it was for, or if no callback, the thread id.
	std::map<SceUID, u64> pausedWaits;
};
2013-03-27 00:51:46 -07:00
// Optional per-wait-type hooks invoked when a callback interrupts (begin)
// or resumes (end) a wait of that type.
struct WaitTypeFuncs
{
	WaitBeginCallbackFunc beginFunc;
	WaitEndCallbackFunc endFunc;
};
2012-11-06 15:46:21 +01:00
2016-05-28 16:59:19 -07:00
// Forward declarations for functions defined later in this file.
bool __KernelExecuteMipsCallOnCurrentThread(u32 callId, bool reschedAfter);

PSPThread *__KernelCreateThread(SceUID &id, SceUID moduleID, const char *name, u32 entryPoint, u32 priority, int stacksize, u32 attr);
void __KernelResetThread(PSPThread *t, int lowestPriority);
void __KernelCancelWakeup(SceUID threadID);
void __KernelCancelThreadEndTimeout(SceUID threadID);
bool __KernelCheckThreadCallbacks(PSPThread *thread, bool force);
2012-11-01 16:19:01 +01:00
//////////////////////////////////////////////////////////////////////////
//STATE BEGIN
//////////////////////////////////////////////////////////////////////////
// Nesting depth of callback execution.
int g_inCbCount = 0;
// Normally, the same as currentThread.  In an interrupt, remembers the callback's thread id.
SceUID currentCallbackThreadID = 0;
int readyCallbacksCount = 0;
// UID of the running thread, plus a cached pointer to its object.
SceUID currentThread;
PSPThread *currentThreadPtr;
// Guest "hack" addresses the kernel traps on to regain control.
u32 idleThreadHackAddr;
u32 threadReturnHackAddr;
u32 hleReturnHackAddr;
u32 cbReturnHackAddr;
u32 intReturnHackAddr;
u32 extendReturnHackAddr;
u32 moduleReturnHackAddr;
std::vector<ThreadCallback> threadEndListeners;

typedef std::vector<SceUID> ThreadEventHandlerList;
static std::map<SceUID, ThreadEventHandlerList> threadEventHandlers;
static std::vector<SceUID> pendingDeleteThreads;

// Lists all thread ids that aren't deleted/etc.
std::vector<SceUID> threadqueue;
// Only for debugger, so not needed to read, just write.
std::mutex threadqueueLock;

// Lists only ready thread ids.
ThreadQueueList threadReadyQueue;

SceUID threadIdleID[2];
// CoreTiming event handles.
int eventScheduledWakeup;
int eventThreadEndTimeout;

bool dispatchEnabled = true;

MipsCallManager mipsCalls;
// Registered action type ids (see __KernelRegisterActionType).
int actionAfterCallback;
int actionAfterMipsCall;

// When inside a callback, delays are "paused", and rechecked after the callback returns.
std::map<SceUID, u64> pausedDelays;

// Doesn't need state saving.
WaitTypeFuncs waitTypeFuncs[NUM_WAITTYPES];

// Doesn't really need state saving, just for logging purposes.
static u64 lastSwitchCycles = 0;

//////////////////////////////////////////////////////////////////////////
//STATE END
//////////////////////////////////////////////////////////////////////////
2012-12-27 19:30:36 -08:00
int __KernelRegisterActionType ( ActionCreator creator )
{
2012-12-27 19:45:00 -08:00
return mipsCalls . registerActionType ( creator ) ;
2012-12-27 19:30:36 -08:00
}
void __KernelRestoreActionType ( int actionType , ActionCreator creator )
{
2012-12-27 19:45:00 -08:00
mipsCalls . restoreActionType ( actionType , creator ) ;
2012-12-27 19:30:36 -08:00
}
2020-03-15 08:33:40 -07:00
PSPAction * __KernelCreateAction ( int actionType )
2012-12-27 19:30:36 -08:00
{
2012-12-27 19:45:00 -08:00
return mipsCalls . createActionByType ( actionType ) ;
2012-12-27 19:30:36 -08:00
}
// Serializes one pending MIPS call.  Field order must stay stable; legacy
// fields are read/written as zeroed placeholders to keep old states loadable.
void MipsCall::DoState(PointerWrap &p)
{
	auto s = p.Section("MipsCall", 1);
	if (!s)
		return;

	p.Do(entryPoint);
	p.Do(cbId);
	p.DoArray(args, ARRAY_SIZE(args));
	p.Do(numArgs);
	// No longer used.
	u32 legacySavedIdRegister = 0;
	p.Do(legacySavedIdRegister);
	u32 legacySavedRa = 0;
	p.Do(legacySavedRa);
	p.Do(savedPc);
	p.Do(savedV0);
	p.Do(savedV1);
	p.Do(tag);
	p.Do(savedId);
	p.Do(reschedAfter);

	// The follow-up action is stored as its registered type id, then its own
	// state; type id 0 means "no action".
	int actionTypeID = 0;
	if (doAfter != NULL)
		actionTypeID = doAfter->actionTypeID;
	p.Do(actionTypeID);
	if (actionTypeID != 0)
	{
		if (p.mode == p.MODE_READ)
			doAfter = __KernelCreateAction(actionTypeID);
		doAfter->DoState(p);
	}
}
2012-11-10 10:15:11 +01:00
2013-01-06 15:53:44 -08:00
void MipsCall : : setReturnValue ( u32 value )
{
savedV0 = value ;
}
2013-03-10 10:59:59 -07:00
void MipsCall : : setReturnValue ( u64 value )
{
savedV0 = value & 0xFFFFFFFF ;
savedV1 = ( value > > 32 ) & 0xFFFFFFFF ;
}
2020-03-15 08:33:40 -07:00
inline PSPThread * __GetCurrentThread ( ) {
2013-11-02 20:16:47 -07:00
return currentThreadPtr ;
}
2020-03-15 08:33:40 -07:00
inline void __SetCurrentThread ( PSPThread * thread , SceUID threadID , const char * name ) {
2013-11-02 20:16:47 -07:00
currentThread = threadID ;
currentThreadPtr = thread ;
hleCurrentThreadName = name ;
2012-11-06 15:46:21 +01:00
}
2020-03-21 16:09:23 -07:00
// Guest address callbacks return to; the kernel traps execution there.
u32 __KernelCallbackReturnAddress() {
	return cbReturnHackAddr;
}

// Guest address interrupt handlers return to; likewise trapped.
u32 __KernelInterruptReturnAddress() {
	return intReturnHackAddr;
}
2014-12-08 04:40:08 -05:00
// Wait-begin hook for WAITTYPE_DELAY: pauses the thread's delay while it
// runs a callback, stashing the deadline in pausedDelays.
// (Removed an unused pauseKey local; WaitBeginCallback derives the key itself.)
static void __KernelDelayBeginCallback(SceUID threadID, SceUID prevCallbackId) {
	u32 error;
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_DELAY, error);
	if (waitID == threadID) {
		// Most waits need to keep track of waiting threads, delays don't.  Use a fake list.
		std::vector<SceUID> dummy;
		HLEKernel::WaitBeginCallback(threadID, prevCallbackId, eventScheduledWakeup, dummy, pausedDelays, true);
		DEBUG_LOG(SCEKERNEL, "sceKernelDelayThreadCB: Suspending delay for callback");
	}
	else
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelDelayThreadCB: beginning callback with bad wait?");
}
2014-12-08 04:40:08 -05:00
// Wait-end hook for WAITTYPE_DELAY: after a callback returns, resumes the
// delay (or wakes the thread if the deadline already passed.)
static void __KernelDelayEndCallback(SceUID threadID, SceUID prevCallbackId) {
	// Paused delays are keyed by callback id, or thread id when no callback.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;
	auto pausedIter = pausedDelays.find(pauseKey);
	if (pausedIter == pausedDelays.end())
	{
		// This probably should not happen.
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelDelayThreadCB: cannot find delay deadline");
		__KernelResumeThreadFromWait(threadID, 0);
		return;
	}

	// Single lookup: read the deadline and erase via the iterator.
	u64 delayDeadline = pausedIter->second;
	pausedDelays.erase(pausedIter);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?
	s64 cyclesLeft = delayDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0)
		__KernelResumeThreadFromWait(threadID, 0);
	else
	{
		CoreTiming::ScheduleEvent(cyclesLeft, eventScheduledWakeup, __KernelGetCurThread());
		DEBUG_LOG(SCEKERNEL, "sceKernelDelayThreadCB: Resuming delay after callback");
	}
}
2014-12-08 04:40:08 -05:00
// Wait-begin hook for WAITTYPE_SLEEP: nothing to save here — just log.
static void __KernelSleepBeginCallback(SceUID threadID, SceUID prevCallbackId) {
	DEBUG_LOG(SCEKERNEL, "sceKernelSleepThreadCB: Suspending sleep for callback");
}
2014-12-08 04:40:08 -05:00
// Wait-end hook for WAITTYPE_SLEEP: after a callback, consume one pending
// wakeup (if any) and resume the thread, otherwise keep sleeping.
static void __KernelSleepEndCallback(SceUID threadID, SceUID prevCallbackId) {
	u32 error;
	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
	if (!thread) {
		// This probably should not happen.
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSleepThreadCB: thread deleted?");
		return;
	}

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?
	if (thread->nt.wakeupCount > 0) {
		thread->nt.wakeupCount--;
		DEBUG_LOG(SCEKERNEL, "sceKernelSleepThreadCB: resume from callback, wakeupCount decremented to %i", thread->nt.wakeupCount);
		__KernelResumeThreadFromWait(threadID, 0);
	} else {
		DEBUG_LOG(SCEKERNEL, "sceKernelSleepThreadCB: Resuming sleep after callback");
	}
}
2014-12-08 04:40:08 -05:00
// Begin-callback handler for WAITTYPE_THREADEND: pauses the wait (including
// its timeout, tracked via eventThreadEndTimeout) so the callback can run.
static void __KernelThreadEndBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	auto result = HLEKernel::WaitBeginCallback<PSPThread, WAITTYPE_THREADEND, SceUID>(threadID, prevCallbackId, eventThreadEndTimeout);
	if (result == HLEKernel::WAIT_CB_SUCCESS)
		DEBUG_LOG(SCEKERNEL, "sceKernelWaitThreadEndCB: Suspending wait for callback");
	else if (result == HLEKernel::WAIT_CB_BAD_WAIT_DATA)
		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelWaitThreadEndCB: wait not found to pause for callback");
	else
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelWaitThreadEndCB: beginning callback with bad wait id?");
}
2020-03-15 08:33:40 -07:00
// Decides whether waitingThreadID can be released from its THREADEND wait on
// thread t. Returns true when the wait is over (either t is now dormant and
// the waiter was resumed, or the wait is no longer valid); false to keep
// waiting.
static bool __KernelCheckResumeThreadEnd(PSPThread *t, SceUID waitingThreadID, u32 &error, int result, bool &wokeThreads) {
	if (!HLEKernel::VerifyWait(waitingThreadID, WAITTYPE_THREADEND, t->GetUID()))
		return true;

	if (t->nt.status == THREADSTATUS_DORMANT) {
		// The watched thread ended: cancel the timeout event, report the
		// remaining time through the timeout pointer, and hand the waiter
		// the exit status as its return value.
		u32 timeoutPtr = __KernelGetWaitTimeoutPtr(waitingThreadID, error);
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(eventThreadEndTimeout, waitingThreadID);
		if (timeoutPtr != 0)
			Memory::Write_U32((u32)cyclesToUs(cyclesLeft), timeoutPtr);
		s32 exitStatus = t->nt.exitStatus;
		__KernelResumeThreadFromWait(waitingThreadID, exitStatus);
		return true;
	}

	return false;
}
2014-12-08 04:40:08 -05:00
// End-callback handler for WAITTYPE_THREADEND: re-evaluates the wait after a
// callback ran, resuming the waiter if the watched thread ended meanwhile.
static void __KernelThreadEndEndCallback(SceUID threadID, SceUID prevCallbackId)
{
	auto result = HLEKernel::WaitEndCallback<PSPThread, WAITTYPE_THREADEND, SceUID>(threadID, prevCallbackId, eventThreadEndTimeout, __KernelCheckResumeThreadEnd);
	if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
		DEBUG_LOG(SCEKERNEL, "sceKernelWaitThreadEndCB: Resuming wait from callback");
}
2013-05-26 19:02:40 -07:00
// Points a thread's $ra at one of the fake syscall return stubs, identified
// by nid. Returns 0 on success, (u32)-1 for an unknown nid, or a kernel
// error code when the thread doesn't exist.
u32 __KernelSetThreadRA(SceUID threadID, u32 nid)
{
	u32 stubAddr;
	if (nid == NID_MODULERETURN) {
		stubAddr = moduleReturnHackAddr;
	} else {
		ERROR_LOG_REPORT(SCEKERNEL, "__KernelSetThreadRA(): invalid RA address");
		return -1;
	}

	if (threadID == currentThread) {
		// Active thread: write the live register directly.
		currentMIPS->r[MIPS_REG_RA] = stubAddr;
		return 0;
	}

	// Inactive thread: patch its saved context instead.
	u32 error;
	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
	if (!thread)
		return error;
	thread->context.r[MIPS_REG_RA] = stubAddr;
	return 0;
}
2012-11-01 16:19:01 +01:00
// Forward declarations of the CoreTiming event handlers registered in
// __KernelThreadingInit() (definitions appear later in this file).
void hleScheduledWakeup(u64 userdata, int cyclesLate);
void hleThreadEndTimeout(u64 userdata, int cyclesLate);
2012-11-01 16:19:01 +01:00
2014-12-08 04:40:08 -05:00
// Emits an 8-byte fake syscall stub for nid at `pos`, records the stub's
// address in *ptr, and advances pos past the stub. Also lets the analyst
// precompile the stub so the JIT knows about it.
static void __KernelWriteFakeSysCall(u32 nid, u32 *ptr, u32 &pos)
{
	const u32 stubAddr = pos;
	pos += 8;

	*ptr = stubAddr;
	WriteSyscall("FakeSysCalls", nid, stubAddr);
	MIPSAnalyst::PrecompileFunction(stubAddr, 8);
}
2020-03-21 16:09:23 -07:00
// Returns the address of the HLE mips-call return stub, lazily creating it
// if missing (which happens after loading a save state that predates it).
u32 HLEMipsCallReturnAddress() {
	if (hleReturnHackAddr == 0) {
		// From an old save state, likely... try to recover.
		u32 blockSize = 2 * sizeof(u32);
		// NOTE(review): assumes this Alloc succeeds - a failure would write
		// the stub at a bogus address. Presumably never happens in practice;
		// confirm kernelMemory still has room at this point.
		u32 pos = kernelMemory.Alloc(blockSize, false, "hlerethack");
		__KernelWriteFakeSysCall(NID_HLECALLRETURN, &hleReturnHackAddr, pos);
	}
	return hleReturnHackAddr;
}
2012-11-01 16:19:01 +01:00
// One-time setup of the HLE threading system: installs the fake "return
// address" syscall stubs in kernel memory, registers timing events and
// per-wait-type callback hooks, and creates (but does not start) the two
// idle threads.
void __KernelThreadingInit()
{
	// Maps a fake-syscall nid to the global that receives its stub address.
	struct ThreadHack
	{
		u32 nid;
		u32 *addr;
	};

	// Yeah, this is straight out of JPCSP, I should be ashamed.
	// Code for the idle threads: loop forever re-entering the idle syscall.
	const static u32_le idleThreadCode[] = {
		MIPS_MAKE_LUI(MIPS_REG_RA, 0x0800),
		MIPS_MAKE_JR_RA(),
		MIPS_MAKE_SYSCALL("FakeSysCalls", "_sceKernelIdle"),
		MIPS_MAKE_BREAK(0),
	};

	// If you add another func here, don't forget __KernelThreadingDoState() below.
	static ThreadHack threadHacks[] = {
		{NID_THREADRETURN, &threadReturnHackAddr},
		{NID_CALLBACKRETURN, &cbReturnHackAddr},
		{NID_INTERRUPTRETURN, &intReturnHackAddr},
		{NID_EXTENDRETURN, &extendReturnHackAddr},
		{NID_MODULERETURN, &moduleReturnHackAddr},
		{NID_HLECALLRETURN, &hleReturnHackAddr},
	};
	u32 blockSize = sizeof(idleThreadCode) + ARRAY_SIZE(threadHacks) * 2 * 4;  // The thread code above plus 8 bytes per "hack"

	dispatchEnabled = true;
	memset(waitTypeFuncs, 0, sizeof(waitTypeFuncs));

	// Reset all scheduler/callback bookkeeping.
	__SetCurrentThread(NULL, 0, NULL);
	g_inCbCount = 0;
	currentCallbackThreadID = 0;
	readyCallbacksCount = 0;
	lastSwitchCycles = 0;
	idleThreadHackAddr = kernelMemory.Alloc(blockSize, false, "threadrethack");

	Memory::Memcpy(idleThreadHackAddr, idleThreadCode, sizeof(idleThreadCode));

	// The return stubs live directly after the idle thread code in the block.
	u32 pos = idleThreadHackAddr + sizeof(idleThreadCode);
	for (size_t i = 0; i < ARRAY_SIZE(threadHacks); ++i) {
		__KernelWriteFakeSysCall(threadHacks[i].nid, threadHacks[i].addr, pos);
	}

	eventScheduledWakeup = CoreTiming::RegisterEvent("ScheduledWakeup", &hleScheduledWakeup);
	eventThreadEndTimeout = CoreTiming::RegisterEvent("ThreadEndTimeout", &hleThreadEndTimeout);
	actionAfterMipsCall = __KernelRegisterActionType(ActionAfterMipsCall::Create);
	actionAfterCallback = __KernelRegisterActionType(ActionAfterCallback::Create);

	// Create the two idle threads, as well. With the absolute minimal possible priority.
	// 4096 stack size - don't know what the right value is. Hm, if callbacks are ever to run on these threads...
	__KernelResetThread(__KernelCreateThread(threadIdleID[0], 0, "idle0", idleThreadHackAddr, 0x7f, 4096, PSP_THREAD_ATTR_KERNEL), 0);
	__KernelResetThread(__KernelCreateThread(threadIdleID[1], 0, "idle1", idleThreadHackAddr, 0x7f, 4096, PSP_THREAD_ATTR_KERNEL), 0);
	// These idle threads are later started in LoadExec, which calls __KernelStartIdleThreads below.

	__KernelListenThreadEnd(__KernelCancelWakeup);
	__KernelListenThreadEnd(__KernelCancelThreadEndTimeout);

	// Per-wait-type hooks that pause/resume waits around callback execution.
	__KernelRegisterWaitTypeFuncs(WAITTYPE_DELAY, __KernelDelayBeginCallback, __KernelDelayEndCallback);
	__KernelRegisterWaitTypeFuncs(WAITTYPE_SLEEP, __KernelSleepBeginCallback, __KernelSleepEndCallback);
	__KernelRegisterWaitTypeFuncs(WAITTYPE_THREADEND, __KernelThreadEndBeginCallback, __KernelThreadEndEndCallback);
}
2012-12-27 19:30:36 -08:00
// Save-state (de)serialization for the threading system. The order and set
// of fields must stay in exact sync between save and load; new fields are
// gated on the section version (currently up to 4).
void __KernelThreadingDoState(PointerWrap &p)
{
	auto s = p.Section("sceKernelThread", 1, 4);
	if (!s)
		return;

	p.Do(g_inCbCount);
	p.Do(currentCallbackThreadID);
	p.Do(readyCallbacksCount);
	p.Do(idleThreadHackAddr);
	p.Do(threadReturnHackAddr);
	p.Do(cbReturnHackAddr);
	p.Do(intReturnHackAddr);
	p.Do(extendReturnHackAddr);
	p.Do(moduleReturnHackAddr);

	// hleReturnHackAddr was added in section version 4; older states recover
	// lazily via HLEMipsCallReturnAddress().
	if (s >= 4) {
		p.Do(hleReturnHackAddr);
	} else {
		hleReturnHackAddr = 0;
	}

	p.Do(currentThread);
	SceUID dv = 0;
	p.Do(threadqueue, dv);
	p.DoArray(threadIdleID, ARRAY_SIZE(threadIdleID));
	p.Do(dispatchEnabled);
	p.Do(threadReadyQueue);

	// Timing events must be re-bound to their handlers after load.
	p.Do(eventScheduledWakeup);
	CoreTiming::RestoreRegisterEvent(eventScheduledWakeup, "ScheduledWakeup", &hleScheduledWakeup);
	p.Do(eventThreadEndTimeout);
	CoreTiming::RestoreRegisterEvent(eventThreadEndTimeout, "ThreadEndTimeout", &hleThreadEndTimeout);
	p.Do(actionAfterMipsCall);
	__KernelRestoreActionType(actionAfterMipsCall, ActionAfterMipsCall::Create);
	p.Do(actionAfterCallback);
	__KernelRestoreActionType(actionAfterCallback, ActionAfterCallback::Create);

	p.Do(pausedDelays);

	// Re-establish the current-thread pointer from the loaded id.
	__SetCurrentThread(kernelObjects.GetFast<PSPThread>(currentThread), currentThread, __KernelGetThreadName(currentThread));
	lastSwitchCycles = CoreTiming::GetTicks();

	if (s >= 2)
		p.Do(threadEventHandlers);
	if (s >= 3)
		p.Do(pendingDeleteThreads);
}
// Second-phase save-state pass for pending mips calls.
void __KernelThreadingDoStateLate(PointerWrap &p)
{
	// We do this late to give modules time to register actions.
	mipsCalls.DoState(p);
	p.DoMarker("sceKernelThread Late");
}
2012-12-26 22:45:19 -08:00
// Factory used by the kernel object system to instantiate a blank thread
// object (e.g. when deserializing save states).
KernelObject *__KernelThreadObject()
{
	return new PSPThread;
}
// Factory used by the kernel object system to instantiate a blank callback
// object.
KernelObject *__KernelCallbackObject()
{
	return new PSPCallback;
}
2012-11-20 00:18:11 -08:00
// Registers a callback to be invoked whenever any thread ends.
void __KernelListenThreadEnd(ThreadCallback callback)
{
	threadEndListeners.push_back(callback);
}
2014-12-08 04:40:08 -05:00
// Notifies every registered thread-end listener that threadID has ended.
static void __KernelFireThreadEnd(SceUID threadID)
{
	for (ThreadCallback listener : threadEndListeners)
	{
		listener(threadID);
	}
}
2013-02-03 12:26:09 -08:00
// TODO: Use __KernelChangeThreadState instead? It has other affects...
2020-03-15 08:33:40 -07:00
static void __KernelChangeReadyState ( PSPThread * thread , SceUID threadID , bool ready ) {
2013-07-04 01:58:16 -07:00
// Passing the id as a parameter is just an optimization, if it's wrong it will cause havoc.
2013-09-07 22:02:55 +02:00
_dbg_assert_msg_ ( SCEKERNEL , thread - > GetUID ( ) = = threadID , " Incorrect threadID " ) ;
2013-02-03 12:26:09 -08:00
int prio = thread - > nt . currentPriority ;
if ( thread - > isReady ( ) )
{
if ( ! ready )
2013-03-23 23:54:46 -07:00
threadReadyQueue . remove ( prio , threadID ) ;
2013-02-03 12:26:09 -08:00
}
else if ( ready )
2013-02-03 13:27:23 -08:00
{
2013-02-09 19:02:38 -08:00
if ( thread - > isRunning ( ) )
2013-03-23 23:54:46 -07:00
threadReadyQueue . push_front ( prio , threadID ) ;
2013-02-03 13:27:23 -08:00
else
2013-03-23 23:54:46 -07:00
threadReadyQueue . push_back ( prio , threadID ) ;
2013-02-09 15:16:37 -08:00
thread - > nt . status = THREADSTATUS_READY ;
2013-02-03 13:27:23 -08:00
}
2013-02-03 12:26:09 -08:00
}
2014-12-08 04:40:08 -05:00
// Convenience overload: resolves the thread by id, then delegates.
static void __KernelChangeReadyState(SceUID threadID, bool ready)
{
	u32 error;
	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
	if (!thread) {
		WARN_LOG(SCEKERNEL, "Trying to change the ready state of an unknown thread?");
		return;
	}
	__KernelChangeReadyState(thread, threadID, ready);
}
2013-04-10 21:16:31 -07:00
// Marks both idle threads ready and assigns them the module's GP register.
// Called from LoadExec once the game module is known.
void __KernelStartIdleThreads(SceUID moduleId)
{
	for (int i = 0; i < 2; i++)
	{
		u32 error;
		PSPThread *t = kernelObjects.Get<PSPThread>(threadIdleID[i], error);
		if (!t) {
			// Fix: previously dereferenced t unconditionally. Should be
			// impossible (created in __KernelThreadingInit), but don't crash.
			ERROR_LOG_REPORT(SCEKERNEL, "__KernelStartIdleThreads: idle thread %d is missing", i);
			continue;
		}
		t->nt.gpreg = __KernelGetModuleGP(moduleId);
		t->context.r[MIPS_REG_GP] = t->nt.gpreg;
		//t->context.pc += 4;	// ADJUSTPC
		threadReadyQueue.prepare(t->nt.currentPriority);
		__KernelChangeReadyState(t, threadIdleID[i], true);
	}
}
2012-12-18 00:58:46 -08:00
// Switches execution off the current thread onto an idle thread, putting the
// current thread (if running) back on the ready queue. Returns true if a
// switch happened; false if we were already on an idle thread or idle0 is
// unavailable.
bool __KernelSwitchOffThread(const char *reason)
{
	if (!reason)
		reason = "switch off thread";

	SceUID threadID = currentThread;

	if (threadID != threadIdleID[0] && threadID != threadIdleID[1])
	{
		PSPThread *current = __GetCurrentThread();
		if (current && current->isRunning())
			// Keep the thread schedulable - it's not waiting, just preempted.
			__KernelChangeReadyState(current, threadID, true);

		// Idle 0 chosen entirely arbitrarily.
		PSPThread *t = kernelObjects.GetFast<PSPThread>(threadIdleID[0]);
		if (t)
		{
			hleSkipDeadbeef();
			__KernelSwitchContext(t, reason);
			return true;
		}
		else
			ERROR_LOG(SCEKERNEL, "Unable to switch to idle thread.");
	}

	return false;
}
2013-04-06 17:03:39 -07:00
// Switches from an idle thread to the given thread, if it is ready or
// running. Must only be called while on an idle thread. Returns true if the
// switch happened; otherwise reschedules and returns false.
bool __KernelSwitchToThread(SceUID threadID, const char *reason)
{
	if (!reason)
		reason = "switch to thread";

	if (currentThread != threadIdleID[0] && currentThread != threadIdleID[1])
	{
		ERROR_LOG_REPORT(SCEKERNEL, "__KernelSwitchToThread used when already on a thread.");
		return false;
	}

	if (currentThread == threadID)
		return false;

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (!t)
	{
		ERROR_LOG_REPORT(SCEKERNEL, "__KernelSwitchToThread: %x doesn't exist", threadID);
		hleReSchedule("switch to deleted thread");
	}
	else if (t->isReady() || t->isRunning())
	{
		PSPThread *current = __GetCurrentThread();
		if (current && current->isRunning())
			__KernelChangeReadyState(current, currentThread, true);

		__KernelSwitchContext(t, reason);
		return true;
	}
	else
	{
		// Target is waiting/suspended - let the scheduler pick instead.
		hleReSchedule("switch to waiting thread");
	}

	return false;
}
2012-11-17 14:20:04 +01:00
// Body of the idle threads: fast-forwards time to the next scheduled event,
// then lets the scheduler pick a thread.
void __KernelIdle()
{
	// Don't skip 0xDEADBEEF here, this is called directly bypassing CallSyscall().
	// That means the hle flag would stick around until the next call.

	CoreTiming::Idle();
	// We Advance within __KernelReSchedule(), so anything that has now happened after idle
	// will be triggered properly upon reschedule.
	__KernelReSchedule("idle");
}
2018-05-08 17:23:14 -07:00
// Tears down all threading state at emulator shutdown.
// NOTE(review): only threadReturnHackAddr is freed even though
// idleThreadHackAddr is the base of the allocated block, and only some stub
// address globals are zeroed here - presumably fine because kernel memory is
// reset wholesale on shutdown, but worth confirming.
void __KernelThreadingShutdown() {
	std::lock_guard<std::mutex> guard(threadqueueLock);

	kernelMemory.Free(threadReturnHackAddr);
	threadqueue.clear();
	threadReadyQueue.clear();
	threadEndListeners.clear();
	mipsCalls.clear();
	threadReturnHackAddr = 0;
	cbReturnHackAddr = 0;
	hleReturnHackAddr = 0;
	__SetCurrentThread(NULL, 0, NULL);
	intReturnHackAddr = 0;
	pausedDelays.clear();
	threadEventHandlers.clear();
	pendingDeleteThreads.clear();
}
2012-12-09 16:56:16 -08:00
// Returns the thread's name, or "ERROR" for an invalid id (never null).
const char *__KernelGetThreadName(SceUID threadID)
{
	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	return t ? t->nt.name : "ERROR";
}
2019-07-28 14:55:21 -07:00
// Returns true if the thread exists and is dead or dormant (not runnable).
// Unknown thread ids report false.
bool KernelIsThreadDormant(SceUID threadID) {
	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (!t)
		// Fix: was `return 0;` from a bool-returning function.
		return false;
	return (t->nt.status & (THREADSTATUS_DEAD | THREADSTATUS_DORMANT)) != 0;
}
2020-03-15 08:33:40 -07:00
u32 __KernelGetWaitValue ( SceUID threadID , u32 & error ) {
PSPThread * t = kernelObjects . Get < PSPThread > ( threadID , error ) ;
if ( t ) {
2012-12-09 16:56:16 -08:00
return t - > getWaitInfo ( ) . waitValue ;
2020-03-15 08:33:40 -07:00
} else {
2013-09-07 22:02:55 +02:00
ERROR_LOG ( SCEKERNEL , " __KernelGetWaitValue ERROR: thread %i " , threadID ) ;
2012-11-01 16:19:01 +01:00
return 0 ;
}
}
2020-03-15 08:33:40 -07:00
u32 __KernelGetWaitTimeoutPtr ( SceUID threadID , u32 & error ) {
PSPThread * t = kernelObjects . Get < PSPThread > ( threadID , error ) ;
if ( t ) {
2012-12-09 16:56:16 -08:00
return t - > getWaitInfo ( ) . timeoutPtr ;
2020-03-15 08:33:40 -07:00
} else {
2013-09-07 22:02:55 +02:00
ERROR_LOG ( SCEKERNEL , " __KernelGetWaitTimeoutPtr ERROR: thread %i " , threadID ) ;
2012-11-18 19:57:08 -08:00
return 0 ;
}
}
2020-03-15 08:33:40 -07:00
SceUID __KernelGetWaitID ( SceUID threadID , WaitType type , u32 & error ) {
PSPThread * t = kernelObjects . Get < PSPThread > ( threadID , error ) ;
if ( t ) {
2012-12-09 16:56:16 -08:00
return t - > getWaitID ( type ) ;
2020-03-15 08:33:40 -07:00
} else {
2013-09-07 22:02:55 +02:00
ERROR_LOG ( SCEKERNEL , " __KernelGetWaitID ERROR: thread %i " , threadID ) ;
2013-09-09 21:39:56 -07:00
return - 1 ;
2012-11-18 19:13:39 -08:00
}
}
2020-03-15 08:33:40 -07:00
SceUID __KernelGetCurrentCallbackID ( SceUID threadID , u32 & error ) {
PSPThread * t = kernelObjects . Get < PSPThread > ( threadID , error ) ;
if ( t ) {
2013-03-27 00:51:46 -07:00
return t - > currentCallbackId ;
2020-03-15 08:33:40 -07:00
} else {
2013-09-07 22:02:55 +02:00
ERROR_LOG ( SCEKERNEL , " __KernelGetCurrentCallbackID ERROR: thread %i " , threadID ) ;
2013-03-27 00:51:46 -07:00
return 0 ;
}
}
2013-01-05 23:06:28 +01:00
// HLE: copies a thread's SceKernelThreadInfo to guest memory. threadID 0
// means the current thread. The caller-provided size field (first u32 at
// statusPtr) controls how much is copied; newer SDKs use a larger struct and
// reject oversized requests.
u32 sceKernelReferThreadStatus(u32 threadID, u32 statusPtr)
{
	static const u32 THREADINFO_SIZE = 104;
	static const u32 THREADINFO_SIZE_AFTER_260 = 108;

	if (threadID == 0)
		threadID = __KernelGetCurThread();

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (!t) {
		hleEatCycles(700);
		hleReSchedule("refer thread status");
		return hleLogError(SCEKERNEL, error, "bad thread");
	}

	// The guest tells us how big it thinks the struct is.
	u32 wantedSize = Memory::Read_U32(statusPtr);

	if (sceKernelGetCompiledSdkVersion() > 0x02060010) {
		// Post-2.60 SDK: size is validated, larger struct is reported.
		if (wantedSize > THREADINFO_SIZE_AFTER_260) {
			hleEatCycles(1200);
			hleReSchedule("refer thread status");
			return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_SIZE, "bad size %d", wantedSize);
		}

		t->nt.nativeSize = THREADINFO_SIZE_AFTER_260;
		if (wantedSize != 0)
			Memory::Memcpy(statusPtr, &t->nt, std::min(wantedSize, (u32)sizeof(t->nt)));
		// TODO: What is this value? Basic tests show 0...
		if (wantedSize > sizeof(t->nt))
			Memory::Memset(statusPtr + sizeof(t->nt), 0, wantedSize - sizeof(t->nt));
	} else {
		// Pre-2.60 SDK: no size validation, copy at most the old struct size.
		t->nt.nativeSize = THREADINFO_SIZE;
		u32 sz = std::min(THREADINFO_SIZE, wantedSize);
		if (sz != 0)
			Memory::Memcpy(statusPtr, &t->nt, sz);
	}

	hleEatCycles(1400);
	hleReSchedule("refer thread status");
	return hleLogSuccessVerboseI(SCEKERNEL, 0);
}
// Thanks JPCSP
// HLE: fills a SceKernelThreadRunStatus (a subset of the full thread info)
// for the given thread; threadID 0 means the current thread.
u32 sceKernelReferThreadRunStatus(u32 threadID, u32 statusPtr)
{
	if (threadID == 0)
		threadID = __KernelGetCurThread();

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(SCEKERNEL, "sceKernelReferThreadRunStatus Error %08x", error);
		return error;
	}

	DEBUG_LOG(SCEKERNEL, "sceKernelReferThreadRunStatus(%i, %08x)", threadID, statusPtr);
	if (!Memory::IsValidAddress(statusPtr))
		// NOTE(review): -1 is probably not the PSP's actual error code here.
		return -1;

	auto runStatus = PSPPointer<SceKernelThreadRunStatus>::Create(statusPtr);

	// TODO: Check size?
	runStatus->size = sizeof(SceKernelThreadRunStatus);
	runStatus->status = t->nt.status;
	runStatus->currentPriority = t->nt.currentPriority;
	runStatus->waitType = t->nt.waitType;
	runStatus->waitID = t->nt.waitID;
	runStatus->wakeupCount = t->nt.wakeupCount;
	runStatus->runForClocks = t->nt.runForClocks;
	runStatus->numInterruptPreempts = t->nt.numInterruptPreempts;
	runStatus->numThreadPreempts = t->nt.numThreadPreempts;
	runStatus->numReleases = t->nt.numReleases;
	return 0;
}
2013-05-26 10:17:59 -07:00
// HLE: returns a dormant thread's exit status, NOT_DORMANT if the thread is
// still active, or UNKNOWN_THID for an invalid id.
int sceKernelGetThreadExitStatus(SceUID threadID)
{
	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(SCEKERNEL, "sceKernelGetThreadExitStatus Error %08x", error);
		return SCE_KERNEL_ERROR_UNKNOWN_THID;
	}

	// TODO: can be dormant before starting, too, need to avoid that
	if (t->nt.status != THREADSTATUS_DORMANT)
	{
		DEBUG_LOG(SCEKERNEL, "sceKernelGetThreadExitStatus(%i): not dormant", threadID);
		return SCE_KERNEL_ERROR_NOT_DORMANT;
	}

	DEBUG_LOG(SCEKERNEL, "sceKernelGetThreadExitStatus(%i)", threadID);
	return t->nt.exitStatus;
}
2012-12-17 22:20:32 +01:00
// HLE: returns the threadman object type for a uid; non-threadman objects
// (partition blocks, modules, ...) and unknown uids are rejected.
u32 sceKernelGetThreadmanIdType(u32 uid) {
	int type;
	if (!kernelObjects.GetIDType(uid, &type)) {
		ERROR_LOG(SCEKERNEL, "sceKernelGetThreadmanIdType(%i) - FAILED", uid);
		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
	}

	if (type >= 0x1000) {
		// This means a partition memory block or module, etc.
		ERROR_LOG(SCEKERNEL, "sceKernelGetThreadmanIdType(%i): invalid object type %i", uid, type);
		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
	}

	DEBUG_LOG(SCEKERNEL, "%i=sceKernelGetThreadmanIdType(%i)", type, uid);
	return type;
}
2020-03-15 08:33:40 -07:00
// Predicate for sceKernelGetThreadmanIdList: thread is in a sleep wait.
static bool __ThreadmanIdListIsSleeping(const PSPThread *t) {
	return t->isWaitingFor(WAITTYPE_SLEEP, 0);
}
2020-03-15 08:33:40 -07:00
// Predicate for sceKernelGetThreadmanIdList: thread is in a delay wait
// (delay waits use the thread's own uid as the wait id).
static bool __ThreadmanIdListIsDelayed(const PSPThread *t) {
	return t->isWaitingFor(WAITTYPE_DELAY, t->GetUID());
}
2020-03-15 08:33:40 -07:00
// Predicate for sceKernelGetThreadmanIdList: thread is suspended.
static bool __ThreadmanIdListIsSuspended(const PSPThread *t) {
	return t->isSuspended();
}
2020-03-15 08:33:40 -07:00
// Predicate for sceKernelGetThreadmanIdList: thread is stopped/dormant.
static bool __ThreadmanIdListIsDormant(const PSPThread *t) {
	return t->isStopped();
}
2014-08-02 12:01:30 -07:00
// HLE: fills readBufPtr with up to readBufSize UIDs of the requested object
// type. Thread pseudo-types (sleeping/delayed/suspended/dormant) are filtered
// from the live thread queue. Writes the total matching count to idCountPtr
// and returns how many ids were actually written (capped at readBufSize).
u32 sceKernelGetThreadmanIdList(u32 type, u32 readBufPtr, u32 readBufSize, u32 idCountPtr) {
	if (readBufSize >= 0x8000000) {
		// Not exact, it's probably if the sum ends up negative or something.
		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelGetThreadmanIdList(%i, %08x, %i, %08x): invalid size", type, readBufPtr, readBufSize, idCountPtr);
		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
	}
	if (!Memory::IsValidAddress(readBufPtr) && readBufSize > 0) {
		// Crashes on a PSP.
		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelGetThreadmanIdList(%i, %08x, %i, %08x): invalid pointer", type, readBufPtr, readBufSize, idCountPtr);
		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
	}

	u32 total = 0;
	auto uids = PSPPointer<SceUID>::Create(readBufPtr);
	u32 error;
	if (type > 0 && type <= SCE_KERNEL_TMID_Tlspl) {
		DEBUG_LOG(SCEKERNEL, "sceKernelGetThreadmanIdList(%i, %08x, %i, %08x)", type, readBufPtr, readBufSize, idCountPtr);
		total = kernelObjects.ListIDType(type, uids, readBufSize);
	} else if (type >= SCE_KERNEL_TMID_SleepThread && type <= SCE_KERNEL_TMID_DormantThread) {
		bool (*checkFunc)(const PSPThread *t) = nullptr;
		switch (type) {
		case SCE_KERNEL_TMID_SleepThread:
			checkFunc = &__ThreadmanIdListIsSleeping;
			break;

		case SCE_KERNEL_TMID_DelayThread:
			checkFunc = &__ThreadmanIdListIsDelayed;
			break;

		case SCE_KERNEL_TMID_SuspendThread:
			checkFunc = &__ThreadmanIdListIsSuspended;
			break;

		case SCE_KERNEL_TMID_DormantThread:
			checkFunc = &__ThreadmanIdListIsDormant;
			break;

		default:
			_dbg_assert_msg_(SCEKERNEL, false, "Unexpected type %d", type);
		}

		for (size_t i = 0; i < threadqueue.size(); i++) {
			const PSPThread *t = kernelObjects.Get<PSPThread>(threadqueue[i], error);
			// Fix: t can be null for a stale UID in threadqueue, and checkFunc
			// would be unset in release builds if the assert above fired -
			// previously checkFunc(t) dereferenced both unconditionally.
			if (t && checkFunc && checkFunc(t)) {
				if (total < readBufSize) {
					*uids++ = threadqueue[i];
				}
				++total;
			}
		}
	} else {
		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelGetThreadmanIdList(%i, %08x, %i, %08x): invalid type", type, readBufPtr, readBufSize, idCountPtr);
		return SCE_KERNEL_ERROR_ILLEGAL_TYPE;
	}

	if (Memory::IsValidAddress(idCountPtr)) {
		Memory::Write_U32(total, idCountPtr);
	}
	return total > readBufSize ? readBufSize : total;
}
// Saves the current CPU context
// Copies the live MIPS state into ctx. VFPU registers are only saved when
// the thread was created with the VFPU attribute (vfpuEnabled).
void __KernelSaveContext(PSPThreadContext *ctx, bool vfpuEnabled) {
	// r and f are immediately next to each other and must be.
	memcpy((void *)ctx->r, (void *)currentMIPS->r, sizeof(ctx->r) + sizeof(ctx->f));

	if (vfpuEnabled) {
		memcpy(ctx->v, currentMIPS->v, sizeof(ctx->v));
		memcpy(ctx->vfpuCtrl, currentMIPS->vfpuCtrl, sizeof(ctx->vfpuCtrl));
	}

	memcpy(ctx->other, currentMIPS->other, sizeof(ctx->other));
}
// Loads a CPU context
// Restores the saved MIPS state from ctx into the live CPU. VFPU registers
// are only restored for threads created with the VFPU attribute.
void __KernelLoadContext(PSPThreadContext *ctx, bool vfpuEnabled) {
	// r and f are immediately next to each other and must be.
	memcpy((void *)currentMIPS->r, (void *)ctx->r, sizeof(ctx->r) + sizeof(ctx->f));

	if (vfpuEnabled) {
		memcpy(currentMIPS->v, ctx->v, sizeof(ctx->v));
		memcpy(currentMIPS->vfpuCtrl, ctx->vfpuCtrl, sizeof(ctx->vfpuCtrl));
	}

	memcpy(currentMIPS->other, ctx->other, sizeof(ctx->other));
	if (MIPSComp::jit) {
		// When thread switching, we must update the rounding mode if cached in the jit.
		MIPSComp::jit->UpdateFCR31();
	}

	// Reset the llBit, the other thread may have touched memory.
	currentMIPS->llBit = 0;
}
2013-03-10 10:59:59 -07:00
// Releases a thread from its wait with a 32-bit return value.
// Returns 0 on success or the lookup error for a bad thread id.
u32 __KernelResumeThreadFromWait(SceUID threadID, u32 retval)
{
	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(SCEKERNEL, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
		return error;
	}

	t->resumeFromWait();
	t->setReturnValue(retval);
	return 0;
}
// Releases a thread from its wait with a 64-bit return value (v0/v1 pair).
// Returns 0 on success or the lookup error for a bad thread id.
u32 __KernelResumeThreadFromWait(SceUID threadID, u64 retval)
{
	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(SCEKERNEL, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
		return error;
	}

	t->resumeFromWait();
	t->setReturnValue(retval);
	return 0;
}
2012-11-01 16:19:01 +01:00
// makes the current thread wait for an event
2013-01-26 10:44:04 -08:00
void __KernelWaitCurThread ( WaitType type , SceUID waitID , u32 waitValue , u32 timeoutPtr , bool processCallbacks , const char * reason )
2012-11-01 16:19:01 +01:00
{
2013-03-13 23:49:39 -07:00
if ( ! dispatchEnabled )
{
2013-09-07 22:02:55 +02:00
WARN_LOG_REPORT ( SCEKERNEL , " Ignoring wait, dispatching disabled... right thing to do? " ) ;
2013-03-13 23:49:39 -07:00
return ;
}
2020-03-15 08:33:40 -07:00
PSPThread * thread = __GetCurrentThread ( ) ;
2020-03-22 23:25:22 -07:00
assert ( thread ! = nullptr ) ;
2012-12-27 17:43:44 -08:00
thread - > nt . waitID = waitID ;
thread - > nt . waitType = type ;
2013-05-25 23:57:04 -07:00
__KernelChangeThreadState ( thread , ThreadStatus ( THREADSTATUS_WAIT | ( thread - > nt . status & THREADSTATUS_SUSPEND ) ) ) ;
2012-12-27 17:43:44 -08:00
thread - > nt . numReleases + + ;
thread - > waitInfo . waitValue = waitValue ;
thread - > waitInfo . timeoutPtr = timeoutPtr ;
2012-11-01 16:19:01 +01:00
2012-11-18 17:54:55 -08:00
// TODO: time waster
2013-01-26 10:44:04 -08:00
if ( ! reason )
reason = " started wait " ;
2012-12-23 11:16:32 +01:00
2013-01-26 10:44:04 -08:00
hleReSchedule ( processCallbacks , reason ) ;
2012-11-01 16:19:01 +01:00
}
2013-03-30 13:48:29 -07:00
// Like __KernelWaitCurThread(), but instead of rescheduling it forces callbacks
// to run on the now-waiting thread.
void __KernelWaitCallbacksCurThread(WaitType type, SceUID waitID, u32 waitValue, u32 timeoutPtr)
{
	if (!dispatchEnabled)
	{
		WARN_LOG_REPORT(SCEKERNEL, "Ignoring wait, dispatching disabled... right thing to do?");
		return;
	}

	PSPThread *thread = __GetCurrentThread();
	// Fix: the sibling __KernelWaitCurThread() asserts a current thread exists, but this
	// path dereferenced it unchecked.  Bail out with a log instead of crashing on null.
	if (!thread)
	{
		ERROR_LOG_REPORT(SCEKERNEL, "__KernelWaitCallbacksCurThread: no current thread");
		return;
	}

	thread->nt.waitID = waitID;
	thread->nt.waitType = type;
	__KernelChangeThreadState(thread, ThreadStatus(THREADSTATUS_WAIT | (thread->nt.status & THREADSTATUS_SUSPEND)));
	// TODO: Probably not...?
	thread->nt.numReleases++;
	thread->waitInfo.waitValue = waitValue;
	thread->waitInfo.timeoutPtr = timeoutPtr;

	__KernelForceCallbacks();
}
2012-11-01 16:19:01 +01:00
// CoreTiming callback for a scheduled thread wakeup (e.g. sceKernelDelayThread.)
// Only resumes the thread if it is still in the delay wait we scheduled for.
void hleScheduledWakeup(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID)userdata;
	u32 error;
	if (__KernelGetWaitID(threadID, WAITTYPE_DELAY, error) != threadID)
		return;

	__KernelResumeThreadFromWait(threadID, 0);
	__KernelReSchedule("thread delay finished");
}
2013-01-05 23:24:05 +01:00
// Schedules a delayed wakeup for the given thread, usFromNow microseconds ahead.
void __KernelScheduleWakeup(SceUID threadID, s64 usFromNow)
{
	CoreTiming::ScheduleEvent(usToCycles(usFromNow), eventScheduledWakeup, threadID);
}
2012-12-01 14:36:56 -08:00
// Cancels any pending scheduled wakeup for this thread.
void __KernelCancelWakeup(SceUID threadID)
{
	CoreTiming::UnscheduleEvent(eventScheduledWakeup, threadID);
}
2013-01-07 11:11:23 -08:00
// CoreTiming callback: a sceKernelWaitThreadEnd() timeout expired for this thread.
void hleThreadEndTimeout(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID)userdata;
	HLEKernel::WaitExecTimeout<PSPThread, WAITTYPE_THREADEND>(threadID);
}
2014-12-08 04:40:08 -05:00
// Schedules a timeout for a thread waiting on another thread's end.
// NOTE(review): waitForID is currently unused — the event carries only the waiter.
static void __KernelScheduleThreadEndTimeout(SceUID threadID, SceUID waitForID, s64 usFromNow)
{
	CoreTiming::ScheduleEvent(usToCycles(usFromNow), eventThreadEndTimeout, threadID);
}
// Cancels a pending thread-end wait timeout for this thread.
void __KernelCancelThreadEndTimeout(SceUID threadID)
{
	CoreTiming::UnscheduleEvent(eventThreadEndTimeout, threadID);
}
2018-05-08 17:23:14 -07:00
// Removes a thread from both scheduler structures: the per-priority ready queue
// (if it currently has a priority) and the master thread list.
static void __KernelRemoveFromThreadQueue(SceUID threadID) {
	std::lock_guard<std::mutex> guard(threadqueueLock);

	const int prio = __KernelGetThreadPrio(threadID);
	if (prio != 0)
		threadReadyQueue.remove(prio, threadID);

	// Erase-remove idiom on the flat thread list.
	threadqueue.erase(std::remove(threadqueue.begin(), threadqueue.end(), threadID), threadqueue.end());
}
2013-09-08 10:50:55 -07:00
// Moves a thread to DORMANT with the given exit status, fires its end handlers,
// and releases every thread blocked in sceKernelWaitThreadEnd() on it.  Waiters
// with a timeout get the remaining time written back before being resumed.
// `reason` is informational only here (it is not used in this body).
void __KernelStopThread(SceUID threadID, int exitStatus, const char *reason)
{
	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t)
	{
		__KernelChangeReadyState(t, threadID, false);
		t->nt.exitStatus = exitStatus;
		t->nt.status = THREADSTATUS_DORMANT;
		__KernelFireThreadEnd(threadID);
		for (size_t i = 0; i < t->waitingThreads.size(); ++i)
		{
			const SceUID waitingThread = t->waitingThreads[i];
			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(waitingThread, error);
			if (HLEKernel::VerifyWait(waitingThread, WAITTYPE_THREADEND, threadID))
			{
				// Unschedule the waiter's timeout and report how much time was left.
				s64 cyclesLeft = CoreTiming::UnscheduleEvent(eventThreadEndTimeout, waitingThread);
				if (timeoutPtr != 0)
					Memory::Write_U32((u32)cyclesToUs(cyclesLeft), timeoutPtr);
				// The waiter receives this thread's exit status as its wait result.
				HLEKernel::ResumeFromWait(waitingThread, WAITTYPE_THREADEND, threadID, exitStatus);
			}
		}
		t->waitingThreads.clear();

		// Stopped threads are never waiting.
		t->nt.waitType = WAITTYPE_NONE;
		t->nt.waitID = 0;
	} else {
		ERROR_LOG_REPORT(SCEKERNEL, "__KernelStopThread: thread %d does not exist", threadID);
	}
}
// Stops a thread, then removes every trace of it from the scheduler: the thread
// queue, the current-thread slot, and the current-callback slot.  Its registered
// callbacks' pending notifications are subtracted from readyCallbacksCount.
// If a THREADEVENT_DELETE handler is triggered, actual destruction is deferred
// via pendingDeleteThreads; otherwise the kernel object is destroyed immediately.
u32 __KernelDeleteThread(SceUID threadID, int exitStatus, const char *reason)
{
	__KernelStopThread(threadID, exitStatus, reason);
	__KernelRemoveFromThreadQueue(threadID);
	if (currentThread == threadID)
		__SetCurrentThread(NULL, 0, NULL);
	if (currentCallbackThreadID == threadID)
	{
		currentCallbackThreadID = 0;
		g_inCbCount = 0;
	}

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t)
	{
		// Any still-pending notifications on this thread's callbacks no longer count.
		for (auto it = t->callbacks.begin(), end = t->callbacks.end(); it != end; ++it)
		{
			PSPCallback *callback = kernelObjects.Get<PSPCallback>(*it, error);
			if (callback && callback->nc.notifyCount != 0)
				readyCallbacksCount--;
		}

		t->Cleanup();

		// Before triggering, set v0. It'll be restored if one is called.
		RETURN(error);
		t->nt.status = THREADSTATUS_DEAD;

		if (__KernelThreadTriggerEvent((t->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, threadID, THREADEVENT_DELETE)) {
			// Don't delete it yet. We'll delete later.
			pendingDeleteThreads.push_back(threadID);
			return 0;
		} else {
			return kernelObjects.Destroy<PSPThread>(threadID);
		}
	} else {
		RETURN(error);
		return error;
	}
}
2015-09-12 13:36:52 -07:00
static void __ReportThreadQueueEmpty ( ) {
// We failed to find a thread to schedule.
// This means something horrible happened to the idle threads.
u32 error ;
2020-03-15 08:33:40 -07:00
PSPThread * idleThread0 = kernelObjects . Get < PSPThread > ( threadIdleID [ 0 ] , error ) ;
PSPThread * idleThread1 = kernelObjects . Get < PSPThread > ( threadIdleID [ 1 ] , error ) ;
2015-09-12 13:36:52 -07:00
char idleDescription0 [ 256 ] ;
int idleStatus0 = - 1 ;
if ( idleThread0 ) {
idleThread0 - > GetQuickInfo ( idleDescription0 , sizeof ( idleDescription0 ) ) ;
idleStatus0 = idleThread0 - > nt . status ;
} else {
sprintf ( idleDescription0 , " DELETED " ) ;
}
char idleDescription1 [ 256 ] ;
int idleStatus1 = - 1 ;
if ( idleThread1 ) {
idleThread1 - > GetQuickInfo ( idleDescription1 , sizeof ( idleDescription1 ) ) ;
2016-03-20 14:17:51 -07:00
idleStatus1 = idleThread1 - > nt . status ;
2015-09-12 13:36:52 -07:00
} else {
sprintf ( idleDescription1 , " DELETED " ) ;
}
ERROR_LOG_REPORT_ONCE ( threadqueueempty , SCEKERNEL , " Failed to reschedule: out of threads on queue (%d, %d) " , idleStatus0 , idleStatus1 ) ;
WARN_LOG ( SCEKERNEL , " Failed to reschedule: idle0 -> %s " , idleDescription0 ) ;
WARN_LOG ( SCEKERNEL , " Failed to reschedule: idle1 -> %s " , idleDescription1 ) ;
}
2013-04-07 11:03:16 -07:00
// Returns NULL if the current thread is fine.
// Core scheduler pick: chooses the next thread to run from the ready queues.
// Note this MUTATES the ready queues (pop) and may re-ready the current thread.
static PSPThread *__KernelNextThread() {
	SceUID bestThread;

	// If the current thread is running, it's a valid candidate.
	PSPThread *cur = __GetCurrentThread();
	if (cur && cur->isRunning()) {
		// Only take a thread with strictly better (smaller) priority than cur.
		bestThread = threadReadyQueue.pop_first_better(cur->nt.currentPriority);
		if (bestThread != 0)
			// We're preempting cur, so put it back on the ready queue.
			__KernelChangeReadyState(cur, currentThread, true);
	} else {
		bestThread = threadReadyQueue.pop_first();

		if (bestThread == 0) {
			// Zoinks. No thread?
			__ReportThreadQueueEmpty();
			// Let's try to get back on track, if possible.
			bestThread = threadIdleID[1];
		}
	}

	// Assume threadReadyQueue has not become corrupt.
	if (bestThread != 0)
		return kernelObjects.GetFast<PSPThread>(bestThread);
	else
		return 0;
}
void __KernelReSchedule ( const char * reason )
{
2014-06-28 21:37:41 -07:00
// First, let's check if there are any pending callbacks to trigger.
2014-06-28 16:17:52 -07:00
// TODO: Could probably take this out of __KernelReSchedule() which is a bit hot.
__KernelCheckCallbacks ( ) ;
2012-12-08 19:13:33 -08:00
// Execute any pending events while we're doing scheduling.
2013-08-06 23:02:30 -07:00
CoreTiming : : Advance ( ) ;
2016-04-09 18:21:31 +02:00
if ( __IsInInterrupt ( ) | | ! __KernelIsDispatchEnabled ( ) ) {
2014-06-28 16:17:52 -07:00
// Threads don't get changed within interrupts or while dispatch is disabled.
2012-12-08 19:13:33 -08:00
reason = " In Interrupt Or Callback " ;
return ;
}
2012-11-07 15:44:48 +01:00
2020-03-15 08:33:40 -07:00
PSPThread * nextThread = __KernelNextThread ( ) ;
2016-04-09 18:21:31 +02:00
if ( nextThread ) {
2012-11-07 15:44:48 +01:00
__KernelSwitchContext ( nextThread , reason ) ;
2016-04-09 18:21:31 +02:00
}
2013-04-07 11:03:16 -07:00
// Otherwise, no need to switch.
2012-11-01 16:19:01 +01:00
}
2012-11-07 15:44:48 +01:00
void __KernelReSchedule ( bool doCallbacks , const char * reason )
{
2020-03-15 08:33:40 -07:00
PSPThread * thread = __GetCurrentThread ( ) ;
2016-04-09 18:21:31 +02:00
if ( doCallbacks & & thread ! = nullptr ) {
thread - > isProcessingCallbacks = doCallbacks ;
2012-11-07 15:44:48 +01:00
}
2016-04-09 18:21:31 +02:00
// Note - this calls the function above, not this one. Overloading...
2012-11-07 15:44:48 +01:00
__KernelReSchedule ( reason ) ;
2016-04-09 18:21:31 +02:00
if ( doCallbacks & & thread ! = nullptr & & thread - > GetUID ( ) = = currentThread ) {
2012-11-07 15:44:48 +01:00
if ( thread - > isRunning ( ) ) {
thread - > isProcessingCallbacks = false ;
}
}
}
2013-03-23 14:41:13 +01:00
int sceKernelCheckThreadStack ( )
2012-11-01 16:19:01 +01:00
{
2012-12-23 11:16:32 +01:00
u32 error ;
2020-03-15 08:33:40 -07:00
PSPThread * t = kernelObjects . Get < PSPThread > ( __KernelGetCurThread ( ) , error ) ;
2013-01-02 20:56:30 +01:00
if ( t ) {
2013-05-19 18:45:58 +02:00
u32 diff = labs ( ( long ) ( ( s64 ) currentMIPS - > r [ MIPS_REG_SP ] - ( s64 ) t - > currentStack . start ) ) ;
2013-09-10 01:21:20 -07:00
DEBUG_LOG ( SCEKERNEL , " %i=sceKernelCheckThreadStack() " , diff ) ;
2013-03-23 14:41:13 +01:00
return diff ;
2013-01-02 20:56:30 +01:00
} else {
2013-09-07 22:02:55 +02:00
ERROR_LOG_REPORT ( SCEKERNEL , " sceKernelCheckThreadStack() - not on thread " ) ;
2013-03-23 14:41:13 +01:00
return - 1 ;
2013-01-02 20:56:30 +01:00
}
2012-11-01 16:19:01 +01:00
}
2020-03-15 08:33:40 -07:00
void PSPThreadContext : : reset ( ) {
for ( int i = 0 ; i < 32 ; i + + ) {
2013-10-29 21:39:24 -07:00
r [ i ] = 0xDEADBEEF ;
fi [ i ] = 0x7f800001 ;
2012-12-23 11:16:32 +01:00
}
2013-10-29 21:39:24 -07:00
r [ 0 ] = 0 ;
2020-03-15 08:33:40 -07:00
for ( int i = 0 ; i < 128 ; i + + ) {
2015-01-03 10:40:21 -08:00
vi [ i ] = 0x7f800001 ;
2012-12-23 11:16:32 +01:00
}
2020-03-15 08:33:40 -07:00
for ( int i = 0 ; i < 15 ; i + + ) {
2012-12-23 11:16:32 +01:00
vfpuCtrl [ i ] = 0x00000000 ;
}
vfpuCtrl [ VFPU_CTRL_SPREFIX ] = 0xe4 ; // neutral
vfpuCtrl [ VFPU_CTRL_TPREFIX ] = 0xe4 ; // neutral
vfpuCtrl [ VFPU_CTRL_DPREFIX ] = 0x0 ; // neutral
vfpuCtrl [ VFPU_CTRL_CC ] = 0x3f ;
vfpuCtrl [ VFPU_CTRL_INF4 ] = 0 ;
2014-09-01 23:14:04 -07:00
vfpuCtrl [ VFPU_CTRL_REV ] = 0x7772ceab ;
2012-12-23 11:16:32 +01:00
vfpuCtrl [ VFPU_CTRL_RCX0 ] = 0x3f800001 ;
vfpuCtrl [ VFPU_CTRL_RCX1 ] = 0x3f800002 ;
vfpuCtrl [ VFPU_CTRL_RCX2 ] = 0x3f800004 ;
vfpuCtrl [ VFPU_CTRL_RCX3 ] = 0x3f800008 ;
vfpuCtrl [ VFPU_CTRL_RCX4 ] = 0x3f800000 ;
vfpuCtrl [ VFPU_CTRL_RCX5 ] = 0x3f800000 ;
vfpuCtrl [ VFPU_CTRL_RCX6 ] = 0x3f800000 ;
vfpuCtrl [ VFPU_CTRL_RCX7 ] = 0x3f800000 ;
fpcond = 0 ;
2013-11-14 23:44:49 -08:00
fcr31 = 0x00000e00 ;
2013-10-29 21:39:24 -07:00
hi = 0xDEADBEEF ;
lo = 0xDEADBEEF ;
2018-06-22 21:25:07 -07:00
// Just for a clean state.
other [ 5 ] = 0 ;
2012-11-01 16:19:01 +01:00
}
2020-03-15 08:33:40 -07:00
// Returns a thread to its just-created condition so it can be (re)started:
// fresh context at the entrypoint, cleared wait state, and a filled stack.
void __KernelResetThread(PSPThread *t, int lowestPriority) {
	t->context.reset();
	t->context.pc = t->nt.entrypoint;

	// If the thread would be better than lowestPriority, reset to its initial. Yes, kinda odd...
	if (t->nt.currentPriority < lowestPriority)
		t->nt.currentPriority = t->nt.initialPriority;

	t->nt.waitType = WAITTYPE_NONE;
	t->nt.waitID = 0;
	memset(&t->waitInfo, 0, sizeof(t->waitInfo));

	t->nt.exitStatus = SCE_KERNEL_ERROR_NOT_DORMANT;
	t->isProcessingCallbacks = false;
	t->currentCallbackId = 0;
	t->currentMipscallId = 0;
	t->pendingMipsCalls.clear();

	// This will be overwritten when starting the thread, but let's point it somewhere useful.
	t->context.r[MIPS_REG_RA] = threadReturnHackAddr;
	// TODO: Not sure if it's reset here, but this makes sense.
	t->context.r[MIPS_REG_GP] = t->nt.gpreg;
	t->FillStack();

	if (!t->waitingThreads.empty())
		ERROR_LOG_REPORT(SCEKERNEL, "Resetting thread with threads waiting on end?");
}
2020-03-15 08:33:40 -07:00
// Allocates a new PSPThread kernel object, registers it in the thread queues,
// and initializes its native struct (nt).  The nt struct is first filled with
// 0xCD so any field missed below is visibly uninitialized.  The stack is
// allocated here; AllocateStack may adjust the requested size.
// On return, `id` holds the new kernel object UID.
PSPThread *__KernelCreateThread(SceUID &id, SceUID moduleId, const char *name, u32 entryPoint, u32 priority, int stacksize, u32 attr) {
	std::lock_guard<std::mutex> guard(threadqueueLock);

	PSPThread *t = new PSPThread();
	id = kernelObjects.Create(t);
	threadqueue.push_back(id);
	threadReadyQueue.prepare(priority);

	memset(&t->nt, 0xCD, sizeof(t->nt));

	t->nt.entrypoint = entryPoint;
	t->nt.nativeSize = sizeof(t->nt);
	t->nt.attr = attr;
	// TODO: I have no idea what this value is but the PSP firmware seems to add it on create.
	t->nt.attr |= 0xFF;
	t->nt.initialPriority = t->nt.currentPriority = priority;
	t->nt.stackSize = stacksize;
	t->nt.status = THREADSTATUS_DORMANT;

	t->nt.numInterruptPreempts = 0;
	t->nt.numReleases = 0;
	t->nt.numThreadPreempts = 0;
	t->nt.runForClocks.lo = 0;
	t->nt.runForClocks.hi = 0;
	t->nt.wakeupCount = 0;
	t->nt.initialStack = 0;
	t->nt.waitID = 0;
	t->nt.exitStatus = SCE_KERNEL_ERROR_DORMANT;
	t->nt.waitType = WAITTYPE_NONE;

	if (moduleId)
		t->nt.gpreg = __KernelGetModuleGP(moduleId);
	else
		t->nt.gpreg = 0;  // sceKernelStartThread will take care of this.
	t->moduleId = moduleId;

	// Name is truncated to the kernel's max and always NUL-terminated.
	strncpy(t->nt.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	t->nt.name[KERNELOBJECT_MAX_NAME_LENGTH] = '\0';

	u32 stackSize = t->nt.stackSize;
	t->AllocateStack(stackSize);  // can change the stacksize!
	t->nt.stackSize = stackSize;
	return t;
}
2013-05-26 10:28:08 -07:00
// Creates and immediately switches to the "root" thread that runs the module's
// entry point.  Unlike a normal start, the a0/a1 argument frame is built
// directly in the live MIPS state after loading the new context.
// Returns the new thread's UID.
SceUID __KernelSetupRootThread(SceUID moduleID, int args, const char *argp, int prio, int stacksize, int attr)
{
	//grab mips regs
	SceUID id;
	PSPThread *thread = __KernelCreateThread(id, moduleID, "root", currentMIPS->pc, prio, stacksize, attr);
	if (thread->currentStack.start == 0)
		ERROR_LOG_REPORT(SCEKERNEL, "Unable to allocate stack for root thread.");
	__KernelResetThread(thread, 0);

	PSPThread *prevThread = __GetCurrentThread();
	if (prevThread && prevThread->isRunning())
		__KernelChangeReadyState(currentThread, true);
	__SetCurrentThread(thread, id, "root");
	thread->nt.status = THREADSTATUS_RUNNING; // do not schedule

	strcpy(thread->nt.name, "root");

	// From here on, currentMIPS IS the root thread's context.
	__KernelLoadContext(&thread->context, (attr & PSP_THREAD_ATTR_VFPU) != 0);
	currentMIPS->r[MIPS_REG_A0] = args;
	// Make 0x10-aligned room on the stack for the argument block.
	currentMIPS->r[MIPS_REG_SP] -= (args + 0xf) & ~0xf;
	u32 location = currentMIPS->r[MIPS_REG_SP];
	currentMIPS->r[MIPS_REG_A1] = location;
	if (argp)
		Memory::Memcpy(location, argp, args);
	// Let's assume same as starting a new thread, 64 bytes for safety/kernel.
	currentMIPS->r[MIPS_REG_SP] -= 64;

	return id;
}
2014-05-22 23:38:21 -07:00
// Thin wrapper over the object-creating __KernelCreateThread(): returns the new
// thread's UID, or SCE_KERNEL_ERROR_NO_MEMORY if the stack allocation failed.
SceUID __KernelCreateThreadInternal(const char *threadName, SceUID moduleID, u32 entry, u32 prio, int stacksize, u32 attr)
{
	SceUID id;
	PSPThread *created = __KernelCreateThread(id, moduleID, threadName, entry, prio, stacksize, attr);
	// A zero stack start means AllocateStack couldn't find room.
	if (created->currentStack.start == 0)
		return SCE_KERNEL_ERROR_NO_MEMORY;
	return id;
}
2019-03-23 12:05:06 -07:00
// Shared implementation behind sceKernelCreateThread: validates name, stack
// size, priority, entry point and attributes, normalizes attr (user vs kernel
// bit), then creates the thread and fires THREADEVENT_CREATE.
// allowKernel permits kernel-only attribute bits (caller is kernel mode).
int __KernelCreateThread(const char *threadName, SceUID moduleID, u32 entry, u32 prio, int stacksize, u32 attr, u32 optionAddr, bool allowKernel) {
	if (threadName == nullptr)
		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ERROR, "NULL thread name");

	if ((u32)stacksize < 0x200)
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_STACK_SIZE, "bogus thread stack size %08x", stacksize);
	if (prio < 0x08 || prio > 0x77) {
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateThread(name=%s): bogus priority %08x", threadName, prio);
		// TODO: Should return this error.
		// return SCE_KERNEL_ERROR_ILLEGAL_PRIORITY;
		// Clamp to the valid [0x08, 0x77] range instead of failing.
		prio = prio < 0x08 ? 0x08 : 0x77;
	}
	if (!Memory::IsValidAddress(entry)) {
		// The PSP firmware seems to allow NULL...?
		if (entry != 0)
			return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDR, "invalid thread entry %08x", entry);
	}
	if ((attr & ~PSP_THREAD_ATTR_USER_MASK) != 0 && !allowKernel)
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ATTR, "illegal thread attributes %08x", attr);
	if ((attr & ~PSP_THREAD_ATTR_SUPPORTED) != 0)
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateThread(name=%s): unsupported attributes %08x", threadName, attr);
	// TODO: Not sure what these values are, but they are removed from the attr silently.
	// Some are USB/VSH specific, probably removes when they are from the wrong module?
	attr &= ~PSP_THREAD_ATTR_USER_ERASE;

	// Normalize: every thread ends up explicitly either kernel or user.
	if ((attr & PSP_THREAD_ATTR_KERNEL) == 0) {
		if (allowKernel && (attr & PSP_THREAD_ATTR_USER) == 0) {
			attr |= PSP_THREAD_ATTR_KERNEL;
		} else {
			attr |= PSP_THREAD_ATTR_USER;
		}
	}

	SceUID id = __KernelCreateThreadInternal(threadName, moduleID, entry, prio, stacksize, attr);
	if ((u32)id == SCE_KERNEL_ERROR_NO_MEMORY)
		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_NO_MEMORY, "out of memory, %08x stack requested", stacksize);

	if (optionAddr != 0)
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateThread(name=%s): unsupported options parameter %08x", threadName, optionAddr);

	// Creating a thread resumes dispatch automatically. Probably can't create without it.
	dispatchEnabled = true;

	hleEatCycles(32000);
	// This won't schedule to the new thread, but it may to one woken from eating cycles.
	// Technically, this should not eat all at once, and reschedule in the middle, but that's hard.
	hleReSchedule("thread created");

	// Before triggering, set v0, since we restore on return.
	RETURN(id);
	__KernelThreadTriggerEvent((attr & PSP_THREAD_ATTR_KERNEL) != 0, id, THREADEVENT_CREATE);

	return hleLogSuccessInfoI(SCEKERNEL, id);
}
2015-03-28 14:46:09 -07:00
int sceKernelCreateThread ( const char * threadName , u32 entry , u32 prio , int stacksize , u32 attr , u32 optionAddr ) {
2020-03-15 08:33:40 -07:00
PSPThread * cur = __GetCurrentThread ( ) ;
2020-05-24 00:02:34 -07:00
SceUID module = __KernelGetCurThreadModuleId ( ) ;
bool allowKernel = KernelModuleIsKernelMode ( module ) | | hleIsKernelMode ( ) | | ( cur ? ( cur - > nt . attr & PSP_THREAD_ATTR_KERNEL ) ! = 0 : false ) ;
return __KernelCreateThread ( threadName , module , entry , prio , stacksize , attr , optionAddr , allowKernel ) ;
2013-01-01 17:04:06 -08:00
}
2015-03-28 14:46:09 -07:00
// Resets the target thread and builds its initial MIPS frame: a0/a1 argument
// registers, the argument block copied onto the stack, and a fake "return
// syscall" stub at the bottom of the 64-byte kernel area that ra points at.
// May reschedule immediately if the new thread outranks the caller.
int __KernelStartThread(SceUID threadToStartID, int argSize, u32 argBlockPtr, bool forceArgs) {
	u32 error;
	PSPThread *startThread = kernelObjects.Get<PSPThread>(threadToStartID, error);
	if (startThread == 0)
		return error;

	PSPThread *cur = __GetCurrentThread();
	__KernelResetThread(startThread, cur ? cur->nt.currentPriority : 0);

	u32 &sp = startThread->context.r[MIPS_REG_SP];
	// Force args means just use those as a0/a1 without any special treatment.
	// This is a hack to avoid allocating memory for helper threads which take args.
	if ((argBlockPtr && argSize > 0) || forceArgs) {
		// Make room for the arguments, always 0x10 aligned.
		if (!forceArgs)
			sp -= (argSize + 0xf) & ~0xf;
		startThread->context.r[MIPS_REG_A0] = argSize;
		startThread->context.r[MIPS_REG_A1] = sp;
	} else {
		startThread->context.r[MIPS_REG_A0] = 0;
		startThread->context.r[MIPS_REG_A1] = 0;
	}

	// Now copy argument to stack.
	if (!forceArgs && Memory::IsValidAddress(argBlockPtr))
		Memory::Memcpy(sp, argBlockPtr, argSize);

	// On the PSP, there's an extra 64 bytes of stack eaten after the args.
	// This could be stack overflow safety, or just stack eaten by the kernel entry func.
	sp -= 64;

	// At the bottom of those 64 bytes, the return syscall and ra is written.
	// Test Drive Unlimited actually depends on it being in the correct place.
	WriteSyscall("FakeSysCalls", NID_THREADRETURN, sp);
	Memory::Write_U32(MIPS_MAKE_B(-1), sp + 8);
	Memory::Write_U32(MIPS_MAKE_NOP(), sp + 12);

	// Point ra at our return stub, and start fp off matching sp.
	startThread->context.r[MIPS_REG_RA] = sp;
	startThread->context.r[MIPS_REG_FP] = sp;

	// Smaller is better for priority. Only switch if the new thread is better.
	if (cur && cur->nt.currentPriority > startThread->nt.currentPriority) {
		__KernelChangeReadyState(cur, currentThread, true);
		hleReSchedule("thread started");
	}

	// Starting a thread automatically resumes the dispatch thread if the new thread has worse priority.
	// Seems strange but also seems reproducible.
	if (cur && cur->nt.currentPriority <= startThread->nt.currentPriority) {
		dispatchEnabled = true;
	}

	__KernelChangeReadyState(startThread, threadToStartID, true);

	// Need to write out v0 before triggering event.
	// TODO: Technically the wrong place. This should trigger when the thread actually starts (e.g. if suspended.)
	RETURN(0);
	__KernelThreadTriggerEvent((startThread->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, threadToStartID, THREADEVENT_START);

	return 0;
}
2015-03-28 14:46:09 -07:00
// Argument and state validation in front of __KernelStartThread():
// rejects a zero thread id, negative/kernel-range argument blocks,
// unknown threads, and threads that are not dormant.
int __KernelStartThreadValidate(SceUID threadToStartID, int argSize, u32 argBlockPtr, bool forceArgs) {
	if (threadToStartID == 0)
		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_THID, "thread id is 0");
	if (argSize < 0 || (argBlockPtr & 0x80000000) != 0)
		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDR, "bad thread argument pointer/length %08x / %08x", argSize, argBlockPtr);

	u32 error = 0;
	PSPThread *target = kernelObjects.Get<PSPThread>(threadToStartID, error);
	if (target == nullptr)
		return hleLogError(SCEKERNEL, error, "thread does not exist");
	if (target->nt.status != THREADSTATUS_DORMANT)
		return hleLogWarning(SCEKERNEL, SCE_KERNEL_ERROR_NOT_DORMANT, "thread already running");

	hleEatCycles(3400);
	return __KernelStartThread(threadToStartID, argSize, argBlockPtr, forceArgs);
}
// int sceKernelStartThread(SceUID threadToStartID, SceSize argSize, void *argBlock)
int sceKernelStartThread(SceUID threadToStartID, int argSize, u32 argBlockPtr) {
	const int result = __KernelStartThreadValidate(threadToStartID, argSize, argBlockPtr);
	return hleLogSuccessInfoI(SCEKERNEL, result);
}
2013-09-10 01:21:20 -07:00
// Estimates free stack by counting untouched 0xFF fill bytes upward from the
// stack base (skipping the first 0x10 where the thread id is written.)
int sceKernelGetThreadStackFreeSize(SceUID threadID)
{
	DEBUG_LOG(SCEKERNEL, "sceKernelGetThreadStackFreeSize(%i)", threadID);

	// Thread id 0 means "the current thread".
	if (threadID == 0)
		threadID = __KernelGetCurThread();

	u32 error;
	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
	if (thread == nullptr) {
		ERROR_LOG(SCEKERNEL, "sceKernelGetThreadStackFreeSize: invalid thread id %i", threadID);
		return error;
	}

	// Scan the stack for 0xFF, starting after 0x10 (the thread id is written there.)
	// Obviously this doesn't work great if PSP_THREAD_ATTR_NO_FILLSTACK is used.
	int freeBytes = 0;
	for (u32 offset = 0x10; offset < thread->nt.stackSize; ++offset) {
		if (Memory::Read_U8(thread->currentStack.start + offset) != 0xFF)
			break;
		++freeBytes;
	}

	// Result is rounded down to a multiple of 4.
	return freeBytes & ~3;
}
2012-11-07 15:44:48 +01:00
void __KernelReturnFromThread ( )
2012-11-01 16:19:01 +01:00
{
2013-10-29 22:53:25 -07:00
hleSkipDeadbeef ( ) ;
2013-09-08 10:50:55 -07:00
int exitStatus = currentMIPS - > r [ MIPS_REG_V0 ] ;
2020-03-15 08:33:40 -07:00
PSPThread * thread = __GetCurrentThread ( ) ;
2013-09-07 22:02:55 +02:00
_dbg_assert_msg_ ( SCEKERNEL , thread ! = NULL , " Returned from a NULL thread. " ) ;
2012-12-27 17:43:44 -08:00
2019-07-28 15:43:19 -07:00
DEBUG_LOG ( SCEKERNEL , " __KernelReturnFromThread: %d " , exitStatus ) ;
2013-09-08 10:50:55 -07:00
__KernelStopThread ( currentThread , exitStatus , " thread returned " ) ;
2012-11-09 00:03:46 +01:00
2013-01-07 10:31:19 -08:00
hleReSchedule ( " thread returned " ) ;
2012-11-06 15:46:21 +01:00
2016-05-28 21:14:19 -07:00
// TODO: This should trigger ON the thread when it exits.
__KernelThreadTriggerEvent ( ( thread - > nt . attr & PSP_THREAD_ATTR_KERNEL ) ! = 0 , thread - > GetUID ( ) , THREADEVENT_EXIT ) ;
2012-11-06 15:46:21 +01:00
// The stack will be deallocated when the thread is deleted.
2012-11-01 16:19:01 +01:00
}
2020-03-15 08:33:40 -07:00
// HLE: stops the calling thread with the given exit status and reschedules.
// The thread stays around (dormant) until deleted; its stack is freed then.
void sceKernelExitThread(int exitStatus) {
	PSPThread *thread = __GetCurrentThread();
	_dbg_assert_msg_(SCEKERNEL, thread != NULL, "Exited from a NULL thread.");

	INFO_LOG(SCEKERNEL, "sceKernelExitThread(%d)", exitStatus);
	__KernelStopThread(currentThread, exitStatus, "thread exited");

	hleReSchedule("thread exited");

	// TODO: This should trigger ON the thread when it exits.
	__KernelThreadTriggerEvent((thread->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, thread->GetUID(), THREADEVENT_EXIT);

	// The stack will be deallocated when the thread is deleted.
}
2020-03-15 08:33:40 -07:00
// Internal variant of sceKernelExitThread; games are not supposed to call it
// directly, so we report when they do, but still behave like a normal exit.
void _sceKernelExitThread(int exitStatus) {
	PSPThread *thread = __GetCurrentThread();
	_dbg_assert_msg_(SCEKERNEL, thread != NULL, "_Exited from a NULL thread.");

	ERROR_LOG_REPORT(SCEKERNEL, "_sceKernelExitThread(%d): should not be called directly", exitStatus);
	__KernelStopThread(currentThread, exitStatus, "thread _exited");

	hleReSchedule("thread _exited");

	// TODO: This should trigger ON the thread when it exits.
	__KernelThreadTriggerEvent((thread->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, thread->GetUID(), THREADEVENT_EXIT);

	// The stack will be deallocated when the thread is deleted.
}
2020-03-15 08:33:40 -07:00
// HLE: stops AND deletes the calling thread in one step.
void sceKernelExitDeleteThread(int exitStatus) {
	PSPThread *thread = __GetCurrentThread();
	if (thread)
	{
		INFO_LOG(SCEKERNEL, "sceKernelExitDeleteThread(%d)", exitStatus);
		// Capture these before the delete below - the thread object is gone afterwards.
		uint32_t thread_attr = thread->nt.attr;
		uint32_t uid = thread->GetUID();
		__KernelDeleteThread(currentThread, exitStatus, "thread exited with delete");
		// Temporary hack since we don't reschedule within callbacks.
		g_inCbCount = 0;

		hleReSchedule("thread exited with delete");
		// TODO: This should trigger ON the thread when it exits.
		__KernelThreadTriggerEvent((thread_attr & PSP_THREAD_ATTR_KERNEL) != 0, uid, THREADEVENT_EXIT);
	}
	else
		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelExitDeleteThread(%d) ERROR - could not find myself!", exitStatus);
}
2012-11-01 16:19:01 +01:00
2012-11-06 19:22:14 +01:00
// Turns off the thread dispatcher. Returns the previous dispatch-enabled
// state so the caller can later restore it via sceKernelResumeDispatchThread.
// Fails with SCE_KERNEL_ERROR_CPUDI while interrupts are disabled.
u32 sceKernelSuspendDispatchThread()
{
	if (!__InterruptsEnabled()) {
		DEBUG_LOG(SCEKERNEL, "sceKernelSuspendDispatchThread(): interrupts disabled");
		return SCE_KERNEL_ERROR_CPUDI;
	}

	const u32 previousState = dispatchEnabled;
	dispatchEnabled = false;

	DEBUG_LOG(SCEKERNEL, "%i=sceKernelSuspendDispatchThread()", previousState);
	hleEatCycles(940);
	return previousState;
}
2013-03-24 19:16:20 -07:00
// Restores the dispatcher state previously returned by
// sceKernelSuspendDispatchThread, then reschedules.
// Fails with SCE_KERNEL_ERROR_CPUDI while interrupts are disabled.
u32 sceKernelResumeDispatchThread(u32 enabled)
{
	if (!__InterruptsEnabled()) {
		DEBUG_LOG(SCEKERNEL, "sceKernelResumeDispatchThread(%i): interrupts disabled", enabled);
		return SCE_KERNEL_ERROR_CPUDI;
	}

	const u32 previousState = dispatchEnabled;
	dispatchEnabled = (enabled != 0);

	DEBUG_LOG(SCEKERNEL, "sceKernelResumeDispatchThread(%i) - from %i", enabled, previousState);
	hleReSchedule("dispatch resumed");
	hleEatCycles(940);
	// Note: always returns 0, not the previous state.
	return 0;
}
2012-11-01 16:19:01 +01:00
2013-03-24 23:30:32 -07:00
// Dispatch can never be enabled when interrupts are disabled.
bool __KernelIsDispatchEnabled()
{
	if (!__InterruptsEnabled())
		return false;
	return dispatchEnabled;
}
2013-02-09 02:17:19 -08:00
// Rotates the ready queue of the given priority level so that other threads
// at that priority get a turn. Priority 0 means the caller's own priority.
int sceKernelRotateThreadReadyQueue(int priority)
{
	VERBOSE_LOG(SCEKERNEL, "sceKernelRotateThreadReadyQueue(%x)", priority);

	PSPThread *cur = __GetCurrentThread();

	// 0 is special, it means "my current priority."
	if (priority == 0)
		priority = cur->nt.currentPriority;

	// Valid priorities are 0x08 - 0x77.
	if (priority <= 0x07 || priority > 0x77)
		return SCE_KERNEL_ERROR_ILLEGAL_PRIORITY;

	if (!threadReadyQueue.empty(priority))
	{
		// In other words, yield to everyone else.
		if (cur->nt.currentPriority == priority)
		{
			// Move ourselves to the back of the queue and drop from RUNNING to READY.
			threadReadyQueue.push_back(priority, currentThread);
			cur->nt.status = (cur->nt.status & ~THREADSTATUS_RUNNING) | THREADSTATUS_READY;
		}
		// Yield the next thread of this priority to all other threads of same priority.
		else
			threadReadyQueue.rotate(priority);
	}

	hleReSchedule("rotatethreadreadyqueue");
	hleEatCycles(250);
	return 0;
}
2020-03-15 08:33:40 -07:00
// Deletes a dormant (stopped) thread. The running thread cannot delete
// itself this way - use sceKernelExitDeleteThread for that.
int sceKernelDeleteThread(int threadID) {
	if (threadID == 0 || threadID == currentThread) {
		ERROR_LOG(SCEKERNEL, "sceKernelDeleteThread(%i): cannot delete current thread", threadID);
		return SCE_KERNEL_ERROR_NOT_DORMANT;
	}

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t) {
		// Only dormant threads may be deleted.
		if (!t->isStopped()) {
			ERROR_LOG(SCEKERNEL, "sceKernelDeleteThread(%i): thread not dormant", threadID);
			return SCE_KERNEL_ERROR_NOT_DORMANT;
		}

		DEBUG_LOG(SCEKERNEL, "sceKernelDeleteThread(%i)", threadID);
		return __KernelDeleteThread(threadID, SCE_KERNEL_ERROR_THREAD_TERMINATED, "thread deleted");
	} else {
		ERROR_LOG(SCEKERNEL, "sceKernelDeleteThread(%i): thread doesn't exist", threadID);
		return error;
	}
}
2013-06-09 18:56:36 -07:00
// Terminates and deletes another thread in one step. The EXIT thread event
// is triggered only if the thread wasn't already stopped.
int sceKernelTerminateDeleteThread(int threadID)
{
	if (threadID == 0 || threadID == currentThread)
	{
		ERROR_LOG(SCEKERNEL, "sceKernelTerminateDeleteThread(%i): cannot terminate current thread", threadID);
		return SCE_KERNEL_ERROR_ILLEGAL_THID;
	}

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t)
	{
		bool wasStopped = t->isStopped();
		// Capture these before the delete below - the thread object is gone afterwards.
		uint32_t attr = t->nt.attr;
		uint32_t uid = t->GetUID();

		INFO_LOG(SCEKERNEL, "sceKernelTerminateDeleteThread(%i)", threadID);
		error = __KernelDeleteThread(threadID, SCE_KERNEL_ERROR_THREAD_TERMINATED, "thread terminated with delete");

		if (!wasStopped) {
			// Set v0 before calling the handler, or it'll get lost.
			RETURN(error);
			__KernelThreadTriggerEvent((attr & PSP_THREAD_ATTR_KERNEL) != 0, uid, THREADEVENT_EXIT);
		}

		return error;
	}
	else
	{
		ERROR_LOG(SCEKERNEL, "sceKernelTerminateDeleteThread(%i): thread doesn't exist", threadID);
		return error;
	}
}
2015-10-10 09:06:28 -07:00
// Forcibly stops another thread (leaving it dormant, not deleted).
// Disallowed from interrupts on newer SDKs, and on the current thread.
int sceKernelTerminateThread(SceUID threadID) {
	if (__IsInInterrupt() && sceKernelGetCompiledSdkVersion() >= 0x03080000) {
		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_CONTEXT, "in interrupt");
	}
	if (threadID == 0 || threadID == currentThread) {
		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_THID, "cannot terminate current thread");
	}

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t) {
		if (t->isStopped()) {
			return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_DORMANT, "already stopped");
		}

		// TODO: Should this reschedule? Seems like not.
		__KernelStopThread(threadID, SCE_KERNEL_ERROR_THREAD_TERMINATED, "thread terminated");

		// On terminate, we reset the thread priority. On exit, we don't always (see __KernelResetThread.)
		t->nt.currentPriority = t->nt.initialPriority;

		// Need to set v0 since it'll be restored.
		RETURN(0);
		__KernelThreadTriggerEvent((t->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, t->GetUID(), THREADEVENT_EXIT);

		return hleLogSuccessInfoI(SCEKERNEL, 0);
	} else {
		return hleLogError(SCEKERNEL, error, "thread doesn't exist");
	}
}
// Returns the UID of the currently scheduled thread.
SceUID __KernelGetCurThread()
{
	return currentThread;
}
2019-07-28 14:55:21 -07:00
int KernelCurThreadPriority ( ) {
2020-03-15 08:33:40 -07:00
PSPThread * t = __GetCurrentThread ( ) ;
2019-07-28 14:55:21 -07:00
if ( t )
return t - > nt . currentPriority ;
return 0 ;
}
2020-03-15 08:33:40 -07:00
// Module ID associated with the current thread, or 0 when there is none.
SceUID __KernelGetCurThreadModuleId() {
	PSPThread *cur = __GetCurrentThread();
	return cur ? cur->moduleId : 0;
}
2020-03-15 08:33:40 -07:00
// End address of the current thread's stack, or 0 when there is no thread.
u32 __KernelGetCurThreadStack() {
	PSPThread *cur = __GetCurrentThread();
	return cur ? cur->currentStack.end : 0;
}
2020-03-15 08:33:40 -07:00
// Start address of the current thread's stack, or 0 when there is no thread.
u32 __KernelGetCurThreadStackStart() {
	PSPThread *cur = __GetCurrentThread();
	return cur ? cur->currentStack.start : 0;
}
2013-04-09 23:16:23 -07:00
// HLE: returns the UID of the calling thread.
SceUID sceKernelGetThreadId()
{
	VERBOSE_LOG(SCEKERNEL, "%i = sceKernelGetThreadId()", currentThread);
	hleEatCycles(180);
	return currentThread;
}
2015-03-28 14:28:51 -07:00
int sceKernelGetThreadCurrentPriority ( ) {
2012-11-06 16:20:13 +01:00
u32 retVal = __GetCurrentThread ( ) - > nt . currentPriority ;
2015-03-28 14:28:51 -07:00
return hleLogSuccessI ( SCEKERNEL , retVal ) ;
2012-11-06 16:20:13 +01:00
}
2015-03-28 14:28:51 -07:00
// HLE: clears then sets attribute bits on the calling thread.
// Only the VFPU attribute may be changed this way.
int sceKernelChangeCurrentThreadAttr(u32 clearAttr, u32 setAttr) {
	// Seems like this is the only allowed attribute?
	if ((clearAttr & ~PSP_THREAD_ATTR_VFPU) != 0 || (setAttr & ~PSP_THREAD_ATTR_VFPU) != 0) {
		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ATTR, "invalid attr");
	}

	PSPThread *t = __GetCurrentThread();
	if (!t)
		return hleReportError(SCEKERNEL, -1, "no current thread");

	// Clear first, then set.
	t->nt.attr = (t->nt.attr & ~clearAttr) | setAttr;
	return hleLogSuccessI(SCEKERNEL, 0);
}
2019-10-20 10:26:37 -07:00
// Assumes validated parameters. Re-files the thread under its new priority:
// removed from the old ready-queue slot, re-inserted at the new one.
// Returns false if the thread ID doesn't resolve.
bool KernelChangeThreadPriority(SceUID threadID, int priority) {
	u32 error;
	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
	if (thread) {
		int old = thread->nt.currentPriority;
		threadReadyQueue.remove(old, threadID);

		thread->nt.currentPriority = priority;
		threadReadyQueue.prepare(thread->nt.currentPriority);
		// A running thread is demoted to ready so scheduling can reconsider it.
		if (thread->isRunning()) {
			thread->nt.status = (thread->nt.status & ~THREADSTATUS_RUNNING) | THREADSTATUS_READY;
		}
		if (thread->isReady()) {
			threadReadyQueue.push_back(thread->nt.currentPriority, threadID);
		}
		return true;
	} else {
		return false;
	}
}
2014-06-29 19:02:41 -07:00
// HLE: changes a thread's priority. threadID 0 targets the current thread;
// priority 0 means the *running* thread's current priority (not the target's).
int sceKernelChangeThreadPriority(SceUID threadID, int priority) {
	if (threadID == 0) {
		threadID = __KernelGetCurThread();
	}
	// 0 means the current (running) thread's priority, not target's.
	if (priority == 0) {
		PSPThread *cur = __GetCurrentThread();
		if (!cur) {
			ERROR_LOG_REPORT(SCEKERNEL, "sceKernelChangeThreadPriority(%i, %i): no current thread?", threadID, priority);
		} else {
			priority = cur->nt.currentPriority;
		}
	}

	u32 error;
	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
	if (thread) {
		if (thread->isStopped()) {
			return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_DORMANT, "thread is dormant");
		}
		// Valid priorities are 0x08 - 0x77.
		if (priority < 0x08 || priority > 0x77) {
			return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_PRIORITY, "bogus priority");
		}

		KernelChangeThreadPriority(threadID, priority);

		hleEatCycles(450);
		hleReSchedule("change thread priority");
		return hleLogSuccessI(SCEKERNEL, 0);
	} else {
		return hleLogError(SCEKERNEL, error, "thread not found");
	}
}
2014-12-08 04:40:08 -05:00
// Maps a requested delay in microseconds to the delay actually applied,
// mimicking firmware quirks: a minimum delay, top-bit wraparound, and
// scaling down of absurdly large values so cycle conversion can't overflow.
static s64 __KernelDelayThreadUs(u64 usec) {
	// Short requests still take a minimum amount of time.
	if (usec < 200)
		return 210;

	// Wrap around (behavior seen on firmware) and potentially wake up soon.
	if (usec > 0x8000000000000000ULL)
		usec -= 0x8000000000000000ULL;
	// This would probably overflow when converted to cycles.
	// Note: converting millenia to hundreds of years. Should be safe, basically perma-delay.
	if (usec > 0x0010000000000000ULL)
		usec >>= 12;

	// It never wakes up right away. It usually takes at least 15 extra us, but let's be nicer.
	return usec + 10;
}
2014-06-22 19:44:43 -07:00
// HLE: delays the current thread, processing callbacks while it waits.
int sceKernelDelayThreadCB(u32 usec) {
	hleEatCycles(2000);

	// Note: Sometimes (0) won't delay, potentially based on how much the thread is doing.
	// But a loop with just 0 often does delay, and games depend on this. So we err on that side.
	SceUID curThread = __KernelGetCurThread();
	s64 delayUs = __KernelDelayThreadUs(usec);
	__KernelScheduleWakeup(curThread, delayUs);
	__KernelWaitCurThread(WAITTYPE_DELAY, curThread, 0, 0, true, "thread delayed");
	return hleLogSuccessI(SCEKERNEL, 0, "delaying %lld usecs", delayUs);
}
2014-06-22 19:44:43 -07:00
// HLE: delays the current thread without processing callbacks.
int sceKernelDelayThread(u32 usec) {
	hleEatCycles(2000);

	// Note: Sometimes (0) won't delay, potentially based on how much the thread is doing.
	// But a loop with just 0 often does delay, and games depend on this. So we err on that side.
	SceUID curThread = __KernelGetCurThread();
	s64 delayUs = __KernelDelayThreadUs(usec);
	__KernelScheduleWakeup(curThread, delayUs);
	__KernelWaitCurThread(WAITTYPE_DELAY, curThread, 0, 0, false, "thread delayed");
	return hleLogSuccessI(SCEKERNEL, 0, "delaying %lld usecs", delayUs);
}
2016-05-30 20:06:14 -07:00
// HLE: delays using a 64-bit usec count read from a SceKernelSysClock in
// guest memory; callbacks are processed while waiting.
int sceKernelDelaySysClockThreadCB(u32 sysclockAddr) {
	auto sysclock = PSPPointer<SceKernelSysClock>::Create(sysclockAddr);
	if (!sysclock.IsValid()) {
		// Note: crashes on real firmware.
		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDRESS, "bad pointer");
	}

	// This is just a u64 of usecs. All bits are respected, but overflow can happen for very large values.
	u64 usec = sysclock->lo | ((u64)sysclock->hi << 32);

	SceUID curThread = __KernelGetCurThread();
	s64 delayUs = __KernelDelayThreadUs(usec);
	__KernelScheduleWakeup(curThread, delayUs);
	__KernelWaitCurThread(WAITTYPE_DELAY, curThread, 0, 0, true, "thread delayed");
	return hleLogSuccessI(SCEKERNEL, 0, "delaying %lld usecs", delayUs);
}
2016-05-30 20:06:14 -07:00
// HLE: delays using a 64-bit usec count read from a SceKernelSysClock in
// guest memory; no callbacks are processed.
int sceKernelDelaySysClockThread(u32 sysclockAddr) {
	auto sysclock = PSPPointer<SceKernelSysClock>::Create(sysclockAddr);
	if (!sysclock.IsValid()) {
		// Note: crashes on real firmware.
		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDRESS, "bad pointer");
	}

	// This is just a u64 of usecs. All bits are respected, but overflow can happen for very large values.
	u64 usec = sysclock->lo | ((u64)sysclock->hi << 32);

	SceUID curThread = __KernelGetCurThread();
	s64 delayUs = __KernelDelayThreadUs(usec);
	__KernelScheduleWakeup(curThread, delayUs);
	__KernelWaitCurThread(WAITTYPE_DELAY, curThread, 0, 0, false, "thread delayed");
	return hleLogSuccessI(SCEKERNEL, 0, "delaying %lld usecs", delayUs);
}
2020-03-15 08:33:40 -07:00
// Priority of the given thread, or 0 when the UID doesn't resolve.
u32 __KernelGetThreadPrio(SceUID id) {
	u32 error;
	PSPThread *thread = kernelObjects.Get<PSPThread>(id, error);
	return thread ? thread->nt.currentPriority : 0;
}
2013-01-17 00:45:13 -08:00
// Comparator for ordering thread UIDs by ascending priority value.
bool __KernelThreadSortPriority(SceUID thread1, SceUID thread2)
{
	const u32 prio1 = __KernelGetThreadPrio(thread1);
	const u32 prio2 = __KernelGetThreadPrio(thread2);
	return prio1 < prio2;
}
2012-11-01 16:19:01 +01:00
//////////////////////////////////////////////////////////////////////////
// WAIT/SLEEP ETC
//////////////////////////////////////////////////////////////////////////
2015-03-28 14:28:51 -07:00
// HLE: wakes a thread sleeping via sceKernelSleepThread. If the target is
// not currently sleeping, the wakeup is banked (wakeupCount++) and consumed
// by a later sleep. Cannot target the calling thread.
int sceKernelWakeupThread(SceUID uid) {
	if (uid == currentThread) {
		return hleLogWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_THID, "unable to wakeup current thread");
	}

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(uid, error);
	if (t) {
		if (!t->isWaitingFor(WAITTYPE_SLEEP, 0)) {
			// Not sleeping right now - bank the wakeup for the next sleep.
			t->nt.wakeupCount++;
			return hleLogSuccessI(SCEKERNEL, 0, "wakeupCount incremented to %i", t->nt.wakeupCount);
		} else {
			__KernelResumeThreadFromWait(uid, 0);
			hleReSchedule("thread woken up");
			return hleLogSuccessVerboseI(SCEKERNEL, 0, "woke thread at %i", t->nt.wakeupCount);
		}
	} else {
		return hleLogError(SCEKERNEL, error, "bad thread id");
	}
}
2015-03-28 14:28:51 -07:00
// Clears a thread's banked wakeup count and returns the old count.
// UID 0 targets the calling thread.
int sceKernelCancelWakeupThread(SceUID uid) {
	if (uid == 0)
		uid = __KernelGetCurThread();

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(uid, error);
	if (!t)
		return hleLogError(SCEKERNEL, error, "bad thread id");

	const int previousCount = t->nt.wakeupCount;
	t->nt.wakeupCount = 0;
	return hleLogSuccessI(SCEKERNEL, previousCount, "wakeupCount reset to 0");
}
2013-05-25 20:43:19 -07:00
static int __KernelSleepThread ( bool doCallbacks ) {
2020-03-15 08:33:40 -07:00
PSPThread * thread = __GetCurrentThread ( ) ;
2013-05-25 20:43:19 -07:00
if ( ! thread ) {
2015-03-28 14:28:51 -07:00
ERROR_LOG_REPORT ( SCEKERNEL , " sceKernelSleepThread*(): bad current thread " ) ;
2013-05-25 20:43:19 -07:00
return - 1 ;
2012-12-27 17:43:44 -08:00
}
if ( thread - > nt . wakeupCount > 0 ) {
thread - > nt . wakeupCount - - ;
2015-03-28 14:28:51 -07:00
return hleLogSuccessI ( SCEKERNEL , 0 , " wakeupCount decremented to %i " , thread - > nt . wakeupCount ) ;
2012-11-07 15:44:48 +01:00
} else {
2013-09-09 21:39:56 -07:00
__KernelWaitCurThread ( WAITTYPE_SLEEP , 0 , 0 , 0 , doCallbacks , " thread slept " ) ;
2015-03-28 14:28:51 -07:00
return hleLogSuccessVerboseI ( SCEKERNEL , 0 , " sleeping " ) ;
2012-11-01 16:19:01 +01:00
}
2013-05-25 20:43:19 -07:00
return 0 ;
2012-11-01 16:19:01 +01:00
}
2015-03-28 14:28:51 -07:00
// HLE: sleeps the current thread (no callbacks) until woken.
int sceKernelSleepThread() {
	return __KernelSleepThread(false);
}
2012-11-01 16:19:01 +01:00
// HLE: sleeps the current thread with callbacks enabled. Commonly used by
// homebrew as a way to poll/process callbacks.
int sceKernelSleepThreadCB() {
	return __KernelSleepThread(true);
}
2013-01-07 10:02:11 -08:00
// HLE: waits (no callbacks) for the target thread to become dormant, with an
// optional timeout read from guest memory at timeoutPtr. If the thread is
// already dormant, returns its exit status immediately.
int sceKernelWaitThreadEnd(SceUID threadID, u32 timeoutPtr)
{
	DEBUG_LOG(SCEKERNEL, "sceKernelWaitThreadEnd(%i, %08x)", threadID, timeoutPtr);
	if (threadID == 0 || threadID == currentThread)
		return SCE_KERNEL_ERROR_ILLEGAL_THID;
	if (!__KernelIsDispatchEnabled())
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	if (__IsInInterrupt())
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t)
	{
		if (t->nt.status != THREADSTATUS_DORMANT)
		{
			if (Memory::IsValidAddress(timeoutPtr))
				__KernelScheduleThreadEndTimeout(currentThread, threadID, Memory::Read_U32(timeoutPtr));
			// Avoid registering the same waiter twice.
			if (std::find(t->waitingThreads.begin(), t->waitingThreads.end(), currentThread) == t->waitingThreads.end())
				t->waitingThreads.push_back(currentThread);
			__KernelWaitCurThread(WAITTYPE_THREADEND, threadID, 0, timeoutPtr, false, "thread wait end");
		}

		return t->nt.exitStatus;
	}
	else
	{
		ERROR_LOG(SCEKERNEL, "sceKernelWaitThreadEnd - bad thread %i", threadID);
		return error;
	}
}
2013-01-07 10:02:11 -08:00
// HLE: like sceKernelWaitThreadEnd, but processes callbacks while waiting.
// If the thread is already dormant, callbacks get a chance to run before
// the exit status is returned.
int sceKernelWaitThreadEndCB(SceUID threadID, u32 timeoutPtr)
{
	DEBUG_LOG(SCEKERNEL, "sceKernelWaitThreadEndCB(%i, 0x%X)", threadID, timeoutPtr);
	if (threadID == 0 || threadID == currentThread)
		return SCE_KERNEL_ERROR_ILLEGAL_THID;
	if (!__KernelIsDispatchEnabled())
		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
	if (__IsInInterrupt())
		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t)
	{
		if (t->nt.status != THREADSTATUS_DORMANT)
		{
			if (Memory::IsValidAddress(timeoutPtr))
				__KernelScheduleThreadEndTimeout(currentThread, threadID, Memory::Read_U32(timeoutPtr));
			// Avoid registering the same waiter twice.
			if (std::find(t->waitingThreads.begin(), t->waitingThreads.end(), currentThread) == t->waitingThreads.end())
				t->waitingThreads.push_back(currentThread);
			__KernelWaitCurThread(WAITTYPE_THREADEND, threadID, 0, timeoutPtr, true, "thread wait end");
		}
		else
			hleCheckCurrentCallbacks();

		return t->nt.exitStatus;
	}
	else
	{
		ERROR_LOG(SCEKERNEL, "sceKernelWaitThreadEndCB - bad thread %i", threadID);
		return error;
	}
}
// HLE: forcibly releases another thread from whatever it's waiting on,
// making its wait return SCE_KERNEL_ERROR_RELEASE_WAIT. HLE-internal waits
// (HLEDELAY, MODULE) are deliberately refused.
int sceKernelReleaseWaitThread(SceUID threadID)
{
	DEBUG_LOG(SCEKERNEL, "sceKernelReleaseWaitThread(%i)", threadID);
	if (__KernelInCallback())
		WARN_LOG_REPORT(SCEKERNEL, "UNTESTED sceKernelReleaseWaitThread() might not do the right thing in a callback");

	if (threadID == 0 || threadID == currentThread)
		return SCE_KERNEL_ERROR_ILLEGAL_THID;

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t)
	{
		if (!t->isWaiting())
			return SCE_KERNEL_ERROR_NOT_WAIT;
		if (t->nt.waitType == WAITTYPE_HLEDELAY)
		{
			WARN_LOG_REPORT_ONCE(rwt_delay, SCEKERNEL, "sceKernelReleaseWaitThread(): Refusing to wake HLE-delayed thread, right thing to do?");
			return SCE_KERNEL_ERROR_NOT_WAIT;
		}
		if (t->nt.waitType == WAITTYPE_MODULE)
		{
			WARN_LOG_REPORT_ONCE(rwt_sm, SCEKERNEL, "sceKernelReleaseWaitThread(): Refusing to wake start_module thread, right thing to do?");
			return SCE_KERNEL_ERROR_NOT_WAIT;
		}

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_RELEASE_WAIT);
		hleReSchedule("thread released from wait");
		return 0;
	}
	else
	{
		ERROR_LOG(SCEKERNEL, "sceKernelReleaseWaitThread - bad thread %i", threadID);
		return error;
	}
}
2013-05-26 00:54:30 -07:00
// HLE: suspends another thread (removes READY, adds SUSPEND). The thread
// must be running/ready - not dormant and not already suspended.
int sceKernelSuspendThread(SceUID threadID)
{
	// TODO: What about interrupts/callbacks?
	if (threadID == 0 || threadID == currentThread)
	{
		ERROR_LOG(SCEKERNEL, "sceKernelSuspendThread(%d): cannot suspend current thread", threadID);
		return SCE_KERNEL_ERROR_ILLEGAL_THID;
	}

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t)
	{
		if (t->isStopped())
		{
			ERROR_LOG(SCEKERNEL, "sceKernelSuspendThread(%d): thread not running", threadID);
			return SCE_KERNEL_ERROR_DORMANT;
		}
		if (t->isSuspended())
		{
			ERROR_LOG(SCEKERNEL, "sceKernelSuspendThread(%d): thread already suspended", threadID);
			return SCE_KERNEL_ERROR_SUSPEND;
		}

		DEBUG_LOG(SCEKERNEL, "sceKernelSuspendThread(%d)", threadID);
		// Pull it out of the ready queue if it's waiting to run.
		if (t->isReady())
			__KernelChangeReadyState(t, threadID, false);
		t->nt.status = (t->nt.status & ~THREADSTATUS_READY) | THREADSTATUS_SUSPEND;
		return 0;
	}
	else
	{
		ERROR_LOG(SCEKERNEL, "sceKernelSuspendThread(%d): bad thread", threadID);
		return error;
	}
}
2013-05-26 00:54:30 -07:00
// HLE: resumes a previously suspended thread. Clears the SUSPEND bit and,
// if no other status bits remain (it wasn't also waiting/dormant), puts the
// thread back on the ready queue.
int sceKernelResumeThread(SceUID threadID)
{
	// TODO: What about interrupts/callbacks?
	if (threadID == 0 || threadID == currentThread)
	{
		// Fixed copy-paste in the log text: this is resume, not suspend.
		ERROR_LOG(SCEKERNEL, "sceKernelResumeThread(%d): cannot resume current thread", threadID);
		return SCE_KERNEL_ERROR_ILLEGAL_THID;
	}

	u32 error;
	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
	if (t)
	{
		if (!t->isSuspended())
		{
			ERROR_LOG(SCEKERNEL, "sceKernelResumeThread(%d): thread not suspended", threadID);
			return SCE_KERNEL_ERROR_NOT_SUSPEND;
		}

		DEBUG_LOG(SCEKERNEL, "sceKernelResumeThread(%d)", threadID);
		t->nt.status &= ~THREADSTATUS_SUSPEND;

		// If it was dormant, waiting, etc. before we don't flip its ready state.
		if (t->nt.status == 0)
			__KernelChangeReadyState(t, threadID, true);
		return 0;
	}
	else
	{
		ERROR_LOG(SCEKERNEL, "sceKernelResumeThread(%d): bad thread", threadID);
		return error;
	}
}
2012-11-06 15:46:21 +01:00
2012-11-07 15:44:48 +01:00
2012-11-06 15:46:21 +01:00
//////////////////////////////////////////////////////////////////////////
// CALLBACKS
//////////////////////////////////////////////////////////////////////////
2013-09-01 21:55:37 -07:00
// Creates a new callback object, bound to the creating thread.
// Returns the new callback's UID on success, or an error code.
SceUID sceKernelCreateCallback(const char *name, u32 entrypoint, u32 signalArg)
{
	if (!name)
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ERROR, "invalid name");
	if (entrypoint & 0xF0000000)
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDR, "invalid func");

	PSPCallback *cb = new PSPCallback();
	SceUID id = kernelObjects.Create(cb);

	// Copy the name, guaranteeing null termination.
	strncpy(cb->nc.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	cb->nc.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;

	cb->nc.size = sizeof(NativeCallback);
	cb->nc.entrypoint = entrypoint;
	cb->nc.threadId = __KernelGetCurThread();
	cb->nc.commonArgument = signalArg;
	cb->nc.notifyCount = 0;
	cb->nc.notifyArg = 0;

	// Register the callback on the current thread so it can process it later.
	PSPThread *thread = __GetCurrentThread();
	if (thread)
		thread->callbacks.push_back(id);

	return hleLogSuccessI(SCEKERNEL, id);
}
2013-04-14 23:45:46 -07:00
// Destroys a callback object and unregisters it from its owning thread.
int sceKernelDeleteCallback(SceUID cbId)
{
	u32 error;
	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
	if (!cb)
		return hleLogError(SCEKERNEL, error, "bad cbId");

	// Drop it from the owning thread's list, if that thread still exists.
	PSPThread *thread = kernelObjects.Get<PSPThread>(cb->nc.threadId, error);
	if (thread)
		thread->callbacks.erase(std::remove(thread->callbacks.begin(), thread->callbacks.end(), cbId), thread->callbacks.end());

	// A notified (pending) callback stops counting as ready once deleted.
	if (cb->nc.notifyCount != 0)
		readyCallbacksCount--;

	return hleLogSuccessI(SCEKERNEL, kernelObjects.Destroy<PSPCallback>(cbId));
}
2013-04-14 23:45:46 -07:00
// Generally very rarely used, but Numblast uses it like candy.
// Marks a callback as notified with the given argument.
int sceKernelNotifyCallback(SceUID cbId, int notifyArg)
{
	u32 error;
	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
	if (!cb)
		return hleLogError(SCEKERNEL, error, "bad cbId");

	__KernelNotifyCallback(cbId, notifyArg);
	return hleLogSuccessI(SCEKERNEL, 0);
}
2013-04-14 23:45:46 -07:00
// Cancels a callback's pending notification.
int sceKernelCancelCallback(SceUID cbId)
{
	u32 error;
	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
	if (!cb)
		return hleLogError(SCEKERNEL, error, "bad cbId");

	// NOTE(review): this clears notifyArg, not notifyCount, despite the old
	// "resets the notify count" comment — presumably matching firmware
	// behavior, but worth confirming against hardware tests.
	cb->nc.notifyArg = 0;
	return hleLogSuccessI(SCEKERNEL, 0);
}
2013-04-14 23:45:46 -07:00
// Returns the callback's pending notification count.
int sceKernelGetCallbackCount(SceUID cbId)
{
	u32 error;
	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
	if (!cb)
		return hleLogError(SCEKERNEL, error, "bad cbId");

	return hleLogSuccessVerboseI(SCEKERNEL, cb->nc.notifyCount);
}
2013-04-14 23:45:46 -07:00
// Copies the callback's native status struct into guest memory.
// The first u32 at statusAddr holds the struct size; zero means "don't write".
int sceKernelReferCallbackStatus(SceUID cbId, u32 statusAddr)
{
	u32 error;
	PSPCallback *c = kernelObjects.Get<PSPCallback>(cbId, error);
	if (!c)
		return hleLogError(SCEKERNEL, error, "bad cbId");

	if (Memory::IsValidAddress(statusAddr) && Memory::Read_U32(statusAddr) != 0) {
		Memory::WriteStruct(statusAddr, &c->nc);
		return hleLogSuccessI(SCEKERNEL, 0);
	}
	return hleLogDebug(SCEKERNEL, 0, "struct size was 0");
}
2013-04-19 22:22:35 +08:00
// Temporarily runs entryAddr(entryParameter) on a freshly allocated stack.
// The previous SP/RA/PC are spilled at the top of the new stack and restored
// by __KernelReturnFromExtendStack() via the return hack address.
u32 sceKernelExtendThreadStack(u32 size, u32 entryAddr, u32 entryParameter)
{
	if (size < 512)
		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_STACK_SIZE, "xxx", "stack size too small");

	PSPThread *thread = __GetCurrentThread();
	if (!thread)
		return hleReportError(SCEKERNEL, -1, "xxx", "not on a thread?");

	if (!thread->PushExtendedStack(size))
		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_NO_MEMORY, "xxx", "could not allocate new stack");

	// The stack has been changed now, so it's do or die time.
	// Push the old SP, RA, and PC onto the stack (so we can restore them later.)
	Memory::Write_U32(currentMIPS->pc, thread->currentStack.end - 12);
	Memory::Write_U32(currentMIPS->r[MIPS_REG_SP], thread->currentStack.end - 8);
	Memory::Write_U32(currentMIPS->r[MIPS_REG_RA], thread->currentStack.end - 4);

	// Redirect execution to the entry point; it returns through the hack address.
	currentMIPS->pc = entryAddr;
	currentMIPS->r[MIPS_REG_A0] = entryParameter;
	currentMIPS->r[MIPS_REG_RA] = extendReturnHackAddr;
	// Stack should stay aligned even though we saved only 3 regs.
	currentMIPS->r[MIPS_REG_SP] = thread->currentStack.end - 0x10;

	hleSkipDeadbeef();
	return hleLogSuccessI(SCEKERNEL, 0);
}
2013-05-18 20:16:01 -07:00
void __KernelReturnFromExtendStack ( )
{
2013-10-29 22:53:25 -07:00
hleSkipDeadbeef ( ) ;
2020-03-15 08:33:40 -07:00
PSPThread * thread = __GetCurrentThread ( ) ;
2013-05-18 20:16:01 -07:00
if ( ! thread )
{
2013-09-07 22:02:55 +02:00
ERROR_LOG_REPORT ( SCEKERNEL , " __KernelReturnFromExtendStack() - not on a thread? " ) ;
2013-05-18 20:16:01 -07:00
return ;
}
// Grab the saved regs at the top of the stack.
u32 restoreRA = Memory : : Read_U32 ( thread - > currentStack . end - 4 ) ;
u32 restoreSP = Memory : : Read_U32 ( thread - > currentStack . end - 8 ) ;
u32 restorePC = Memory : : Read_U32 ( thread - > currentStack . end - 12 ) ;
if ( ! thread - > PopExtendedStack ( ) )
{
2013-09-07 22:02:55 +02:00
ERROR_LOG_REPORT ( SCEKERNEL , " __KernelReturnFromExtendStack() - no stack to restore? " ) ;
2013-05-18 20:16:01 -07:00
return ;
}
2013-09-07 22:02:55 +02:00
DEBUG_LOG ( SCEKERNEL , " __KernelReturnFromExtendStack() " ) ;
2013-05-18 20:16:01 -07:00
currentMIPS - > r [ MIPS_REG_RA ] = restoreRA ;
currentMIPS - > r [ MIPS_REG_SP ] = restoreSP ;
currentMIPS - > pc = restorePC ;
// We retain whatever is in v0/v1, it gets passed on to the caller of sceKernelExtendThreadStack().
}
2013-01-06 10:54:33 -08:00
void ActionAfterMipsCall : : run ( MipsCall & call ) {
2012-12-27 19:30:36 -08:00
u32 error ;
2020-03-15 08:33:40 -07:00
PSPThread * thread = kernelObjects . Get < PSPThread > ( threadID , error ) ;
2012-12-27 19:30:36 -08:00
if ( thread ) {
2016-05-28 21:14:19 -07:00
// Resume waiting after a callback, but not from terminate/delete.
if ( ( thread - > nt . status & ( THREADSTATUS_DEAD | THREADSTATUS_DORMANT ) ) = = 0 ) {
__KernelChangeReadyState ( thread , threadID , ( status & THREADSTATUS_READY ) ! = 0 ) ;
thread - > nt . status = status ;
}
2012-12-27 19:30:36 -08:00
thread - > nt . waitType = waitType ;
thread - > nt . waitID = waitID ;
thread - > waitInfo = waitInfo ;
thread - > isProcessingCallbacks = isProcessingCallbacks ;
2013-03-27 00:51:46 -07:00
thread - > currentCallbackId = currentCallbackId ;
2012-12-27 19:30:36 -08:00
}
2012-11-07 15:44:48 +01:00
if ( chainedAction ) {
2013-01-06 10:54:33 -08:00
chainedAction - > run ( call ) ;
2012-11-07 15:44:48 +01:00
delete chainedAction ;
2012-11-06 15:46:21 +01:00
}
2012-11-07 15:44:48 +01:00
}
2012-11-06 15:46:21 +01:00
2020-03-15 08:33:40 -07:00
void PSPThread : : setReturnValue ( u32 retval ) {
2014-03-04 08:21:55 -08:00
if ( GetUID ( ) = = currentThread ) {
currentMIPS - > r [ MIPS_REG_V0 ] = retval ;
2012-11-11 22:38:19 +01:00
} else {
2014-03-04 08:21:55 -08:00
context . r [ MIPS_REG_V0 ] = retval ;
2012-11-11 22:38:19 +01:00
}
}
2020-03-15 08:33:40 -07:00
void PSPThread : : setReturnValue ( u64 retval ) {
2014-03-04 08:21:55 -08:00
if ( GetUID ( ) = = currentThread ) {
currentMIPS - > r [ MIPS_REG_V0 ] = retval & 0xFFFFFFFF ;
currentMIPS - > r [ MIPS_REG_V1 ] = ( retval > > 32 ) & 0xFFFFFFFF ;
2013-03-10 10:59:59 -07:00
} else {
2014-03-04 08:21:55 -08:00
context . r [ MIPS_REG_V0 ] = retval & 0xFFFFFFFF ;
context . r [ MIPS_REG_V1 ] = ( retval > > 32 ) & 0xFFFFFFFF ;
2013-03-10 10:59:59 -07:00
}
}
2020-03-15 08:33:40 -07:00
void PSPThread : : resumeFromWait ( ) {
2014-01-31 12:22:39 -08:00
nt . status & = ~ THREADSTATUS_WAIT ;
if ( ! ( nt . status & ( THREADSTATUS_WAITSUSPEND | THREADSTATUS_DORMANT | THREADSTATUS_DEAD ) ) )
__KernelChangeReadyState ( this , GetUID ( ) , true ) ;
2012-12-09 16:56:16 -08:00
2014-01-31 12:22:39 -08:00
// Non-waiting threads do not process callbacks.
isProcessingCallbacks = false ;
2012-12-09 16:56:16 -08:00
}
2020-03-15 08:33:40 -07:00
bool PSPThread : : isWaitingFor ( WaitType type , int id ) const {
2014-01-31 12:22:39 -08:00
if ( nt . status & THREADSTATUS_WAIT )
return nt . waitType = = type & & nt . waitID = = id ;
2012-12-09 16:56:16 -08:00
return false ;
}
2020-03-15 08:33:40 -07:00
int PSPThread : : getWaitID ( WaitType type ) const {
2014-01-31 12:22:39 -08:00
if ( nt . waitType = = type )
return nt . waitID ;
2012-12-09 16:56:16 -08:00
return 0 ;
}
2020-03-15 08:33:40 -07:00
// Returns a copy of this thread's current wait bookkeeping.
ThreadWaitInfo PSPThread::getWaitInfo() const {
	return waitInfo;
}
2020-03-15 08:33:40 -07:00
// Saves the current thread's CPU context (if there still is one) and switches
// execution to target. target may be NULL, leaving no thread current.
// Also charges an approximate cycle cost for the switch and kicks off any
// mipscalls pending on the incoming thread.
void __KernelSwitchContext(PSPThread *target, const char *reason) {
	u32 oldPC = 0;
	SceUID oldUID = 0;
	const char *oldName = hleCurrentThreadName != NULL ? hleCurrentThreadName : "(none)";

	PSPThread *cur = __GetCurrentThread();
	if (cur)  // It might just have been deleted.
	{
		__KernelSaveContext(&cur->context, (cur->nt.attr & PSP_THREAD_ATTR_VFPU) != 0);
		oldPC = currentMIPS->pc;
		oldUID = cur->GetUID();

		// Normally this is taken care of in __KernelNextThread().
		if (cur->isRunning())
			__KernelChangeReadyState(cur, oldUID, true);
	}

	if (target)
	{
		__SetCurrentThread(target, target->GetUID(), target->nt.name);
		__KernelChangeReadyState(target, currentThread, false);
		target->nt.status = (target->nt.status | THREADSTATUS_RUNNING) & ~THREADSTATUS_READY;

		__KernelLoadContext(&target->context, (target->nt.attr & PSP_THREAD_ATTR_VFPU) != 0);
	}
	else
		__SetCurrentThread(NULL, 0, NULL);

	const bool fromIdle = oldUID == threadIdleID[0] || oldUID == threadIdleID[1];
	const bool toIdle = currentThread == threadIdleID[0] || currentThread == threadIdleID[1];

#if DEBUG_LEVEL <= MAX_LOGLEVEL || DEBUG_LOG == NOTICE_LOG
	// Idle<->idle switches are too noisy to log.
	if (!(fromIdle && toIdle))
	{
		u64 nowCycles = CoreTiming::GetTicks();
		s64 consumedCycles = nowCycles - lastSwitchCycles;
		lastSwitchCycles = nowCycles;

		DEBUG_LOG(SCEKERNEL, "Context switch: %s -> %s (%i->%i, pc: %08x->%08x, %s) +%lldus",
			oldName, hleCurrentThreadName,
			oldUID, currentThread,
			oldPC, currentMIPS->pc,
			reason,
			cyclesToUs(consumedCycles));
	}
#endif

	// Switching threads eats some cycles. This is a low approximation.
	if (fromIdle && toIdle) {
		// Don't eat any cycles going between idle.
	} else if (fromIdle || toIdle) {
		currentMIPS->downcount -= 1200;
	} else {
		currentMIPS->downcount -= 2700;
	}

	if (target)
	{
		// No longer waiting.
		target->nt.waitType = WAITTYPE_NONE;
		target->nt.waitID = 0;

		__KernelExecutePendingMipsCalls(target, true);
	}
}
2020-03-15 08:33:40 -07:00
// Transitions a thread to newStatus, keeping the ready queue consistent.
void __KernelChangeThreadState(PSPThread *thread, ThreadStatus newStatus) {
	if (!thread || thread->nt.status == newStatus)
		return;

	// While dispatch is suspended, the current thread must stay RUNNING.
	if (!dispatchEnabled && thread == __GetCurrentThread() && newStatus != THREADSTATUS_RUNNING) {
		ERROR_LOG(SCEKERNEL, "Dispatching suspended, not changing thread state");
		return;
	}

	// TODO: JPSCP has many conditions here, like removing wait timeout actions etc.
	// if (thread->nt.status == THREADSTATUS_WAIT && newStatus != THREADSTATUS_WAITSUSPEND) {

	__KernelChangeReadyState(thread, thread->GetUID(), (newStatus & THREADSTATUS_READY) != 0);
	thread->nt.status = newStatus;

	if (newStatus == THREADSTATUS_WAIT) {
		if (thread->nt.waitType == WAITTYPE_NONE) {
			ERROR_LOG(SCEKERNEL, "Waittype none not allowed here");
		}

		// Schedule deletion of stopped threads here.  if (thread->isStopped())
	}
}
2020-03-16 08:16:50 +08:00
2020-03-15 08:33:40 -07:00
// A new callback/mipscall may start only when no callback is already running
// anywhere. The thread parameter is currently unused, kept for call-site symmetry.
static bool __CanExecuteCallbackNow(PSPThread *thread) {
	return currentCallbackThreadID == 0 && g_inCbCount == 0;
}
2020-03-15 08:33:40 -07:00
// Queues a mipscall (guest function call) on the given thread. If the target
// is the current thread (or NULL) and no callback is running, it executes
// immediately; otherwise it is left pending for the next context switch.
// cbId > 0 marks the call as a callback, which also interrupts any wait.
void __KernelCallAddress(PSPThread *thread, u32 entryPoint, PSPAction *afterAction, const u32 args[], int numargs, bool reschedAfter, SceUID cbId) {
	if (!thread || thread->isStopped()) {
		WARN_LOG_REPORT(SCEKERNEL, "Running mipscall on dormant thread");
	}
	_dbg_assert_msg_(SCEKERNEL, numargs <= 6, "MipsCalls can only take 6 args.");

	if (thread) {
		// Capture the thread's state so ActionAfterMipsCall can restore it.
		ActionAfterMipsCall *after = (ActionAfterMipsCall *) __KernelCreateAction(actionAfterMipsCall);
		after->chainedAction = afterAction;
		after->threadID = thread->GetUID();
		after->status = thread->nt.status;
		after->waitType = (WaitType)(u32)thread->nt.waitType;
		after->waitID = thread->nt.waitID;
		after->waitInfo = thread->waitInfo;
		after->isProcessingCallbacks = thread->isProcessingCallbacks;
		after->currentCallbackId = thread->currentCallbackId;

		afterAction = after;

		if (thread->nt.waitType != WAITTYPE_NONE) {
			// If it's a callback, tell the wait to stop.
			if (cbId > 0) {
				if (waitTypeFuncs[thread->nt.waitType].beginFunc != NULL) {
					waitTypeFuncs[thread->nt.waitType].beginFunc(after->threadID, thread->currentCallbackId);
				} else {
					ERROR_LOG_REPORT(HLE, "Missing begin/restore funcs for wait type %d", thread->nt.waitType);
				}
			}

			// Release thread from waiting
			thread->nt.waitType = WAITTYPE_NONE;
		}

		__KernelChangeThreadState(thread, THREADSTATUS_READY);
	}

	MipsCall *call = new MipsCall();
	call->entryPoint = entryPoint;
	for (int i = 0; i < numargs; i++) {
		call->args[i] = args[i];
	}
	call->numArgs = (int) numargs;
	call->doAfter = afterAction;
	call->tag = "callAddress";
	call->cbId = cbId;

	u32 callId = mipsCalls.add(call);

	// Try to run right away when targeting the current (or no) thread.
	bool called = false;
	if (!thread || thread == __GetCurrentThread()) {
		if (__CanExecuteCallbackNow(thread)) {
			thread = __GetCurrentThread();
			__KernelChangeThreadState(thread, THREADSTATUS_RUNNING);
			called = __KernelExecuteMipsCallOnCurrentThread(callId, reschedAfter);
		}
	}

	if (!called) {
		if (thread) {
			DEBUG_LOG(SCEKERNEL, "Making mipscall pending on thread");
			thread->pendingMipsCalls.push_back(callId);
		} else {
			// Fixed typo in log message ("mispcall" -> "mipscall").
			WARN_LOG(SCEKERNEL, "Ignoring mipscall on NULL/deleted thread");
		}
	}
}
2012-11-13 18:05:26 +01:00
2020-03-15 08:33:40 -07:00
// Convenience wrapper: issue a mipscall on the current thread, no callback id.
void __KernelDirectMipsCall(u32 entryPoint, PSPAction *afterAction, u32 args[], int numargs, bool reschedAfter) {
	__KernelCallAddress(__GetCurrentThread(), entryPoint, afterAction, args, numargs, reschedAfter, 0);
}
2016-05-28 16:59:19 -07:00
// Redirects the current thread's execution into a queued mipscall.
// Spills caller-visible registers to the guest stack, saves pc/v0/v1 in the
// MipsCall, and points pc at the call's entry. Returns false if there is no
// current thread or not enough guest stack to spill into.
bool __KernelExecuteMipsCallOnCurrentThread(u32 callId, bool reschedAfter)
{
	hleSkipDeadbeef();

	PSPThread *cur = __GetCurrentThread();
	if (cur == nullptr) {
		ERROR_LOG(SCEKERNEL, "__KernelExecuteMipsCallOnCurrentThread(): Bad current thread");
		return false;
	}

	if (g_inCbCount > 0) {
		WARN_LOG_REPORT(SCEKERNEL, "__KernelExecuteMipsCallOnCurrentThread(): Already in a callback!");
	}
	DEBUG_LOG(SCEKERNEL, "Executing mipscall %i", callId);
	MipsCall *call = mipsCalls.get(callId);

	// Grab some MIPS stack space.
	u32 &sp = currentMIPS->r[MIPS_REG_SP];
	if (!Memory::IsValidAddress(sp - 32 * 4)) {
		ERROR_LOG_REPORT(SCEKERNEL, "__KernelExecuteMipsCallOnCurrentThread(): Not enough free stack");
		return false;
	}

	// Let's just save regs generously. Better to be safe.
	sp -= 32 * 4;
	for (int i = MIPS_REG_A0; i <= MIPS_REG_T7; ++i) {
		Memory::Write_U32(currentMIPS->r[i], sp + i * 4);
	}
	Memory::Write_U32(currentMIPS->r[MIPS_REG_T8], sp + MIPS_REG_T8 * 4);
	Memory::Write_U32(currentMIPS->r[MIPS_REG_T9], sp + MIPS_REG_T9 * 4);
	Memory::Write_U32(currentMIPS->r[MIPS_REG_RA], sp + MIPS_REG_RA * 4);

	// Save the few regs that need saving
	call->savedPc = currentMIPS->pc;
	call->savedV0 = currentMIPS->r[MIPS_REG_V0];
	call->savedV1 = currentMIPS->r[MIPS_REG_V1];
	call->savedId = cur->currentMipscallId;
	call->reschedAfter = reschedAfter;

	// Set up the new state
	currentMIPS->pc = call->entryPoint;
	currentMIPS->r[MIPS_REG_RA] = __KernelCallbackReturnAddress();
	cur->currentMipscallId = callId;
	for (int i = 0; i < call->numArgs; i++) {
		currentMIPS->r[MIPS_REG_A0 + i] = call->args[i];
	}

	// Only calls with a callback id count toward callback nesting.
	if (call->cbId != 0)
		g_inCbCount++;
	currentCallbackThreadID = currentThread;

	return true;
}
2012-11-07 15:44:48 +01:00
void __KernelReturnFromMipsCall ( )
2012-11-06 15:46:21 +01:00
{
2013-10-29 22:53:25 -07:00
hleSkipDeadbeef ( ) ;
2020-03-15 08:33:40 -07:00
PSPThread * cur = __GetCurrentThread ( ) ;
2012-12-27 17:43:44 -08:00
if ( cur = = NULL )
{
2013-09-07 22:02:55 +02:00
ERROR_LOG ( SCEKERNEL , " __KernelReturnFromMipsCall(): Bad current thread " ) ;
2012-12-27 17:43:44 -08:00
return ;
}
2013-03-27 00:51:46 -07:00
u32 callId = cur - > currentMipscallId ;
2012-11-07 15:44:48 +01:00
MipsCall * call = mipsCalls . pop ( callId ) ;
2012-11-06 15:46:21 +01:00
// Value returned by the callback function
u32 retVal = currentMIPS - > r [ MIPS_REG_V0 ] ;
2020-03-15 23:05:28 +08:00
DEBUG_LOG ( SCEKERNEL , " __KernelReturnFromMipsCall(), returned %08x " , retVal ) ;
2012-11-06 15:46:21 +01:00
2012-11-07 15:44:48 +01:00
// Should also save/restore wait state here.
if ( call - > doAfter )
2012-12-27 19:45:00 -08:00
{
2013-01-06 10:54:33 -08:00
call - > doAfter - > run ( * call ) ;
2012-12-27 19:45:00 -08:00
delete call - > doAfter ;
}
2012-11-07 15:44:48 +01:00
2016-05-28 16:59:19 -07:00
u32 & sp = currentMIPS - > r [ MIPS_REG_SP ] ;
for ( int i = MIPS_REG_A0 ; i < = MIPS_REG_T7 ; + + i ) {
currentMIPS - > r [ i ] = Memory : : Read_U32 ( sp + i * 4 ) ;
}
currentMIPS - > r [ MIPS_REG_T8 ] = Memory : : Read_U32 ( sp + MIPS_REG_T8 * 4 ) ;
currentMIPS - > r [ MIPS_REG_T9 ] = Memory : : Read_U32 ( sp + MIPS_REG_T9 * 4 ) ;
currentMIPS - > r [ MIPS_REG_RA ] = Memory : : Read_U32 ( sp + MIPS_REG_RA * 4 ) ;
sp + = 32 * 4 ;
2012-11-07 15:44:48 +01:00
currentMIPS - > pc = call - > savedPc ;
2016-05-28 16:59:19 -07:00
// This is how we set the return value.
2012-11-07 15:44:48 +01:00
currentMIPS - > r [ MIPS_REG_V0 ] = call - > savedV0 ;
currentMIPS - > r [ MIPS_REG_V1 ] = call - > savedV1 ;
2013-03-27 00:51:46 -07:00
cur - > currentMipscallId = call - > savedId ;
2012-11-07 15:44:48 +01:00
2014-07-12 13:48:30 -07:00
// If the thread called ExitDelete, we might've already decreased g_inCbCount.
2014-07-12 09:56:03 -07:00
if ( call - > cbId ! = 0 & & g_inCbCount > 0 ) {
2013-02-02 17:57:44 -08:00
g_inCbCount - - ;
2014-07-12 09:56:03 -07:00
}
2013-02-02 19:14:00 -08:00
currentCallbackThreadID = 0 ;
2012-11-07 15:44:48 +01:00
2013-03-27 00:51:46 -07:00
if ( cur - > nt . waitType ! = WAITTYPE_NONE )
{
2013-10-05 21:05:29 -07:00
if ( call - > cbId > 0 )
{
2014-02-15 19:39:13 -05:00
if ( waitTypeFuncs [ cur - > nt . waitType ] . endFunc ! = NULL )
2013-10-05 21:05:29 -07:00
waitTypeFuncs [ cur - > nt . waitType ] . endFunc ( cur - > GetUID ( ) , cur - > currentCallbackId ) ;
else
ERROR_LOG_REPORT ( HLE , " Missing begin/restore funcs for wait type %d " , cur - > nt . waitType ) ;
}
2013-03-27 00:51:46 -07:00
}
2012-11-07 15:44:48 +01:00
// yeah! back in the real world, let's keep going. Should we process more callbacks?
2016-05-30 13:00:23 -07:00
if ( ! __KernelExecutePendingMipsCalls ( cur , call - > reschedAfter ) ) {
2012-12-08 20:09:20 -08:00
// Sometimes, we want to stay on the thread.
2012-12-27 17:43:44 -08:00
int threadReady = cur - > nt . status & ( THREADSTATUS_READY | THREADSTATUS_RUNNING ) ;
2012-12-09 21:10:55 -08:00
if ( call - > reschedAfter | | threadReady = = 0 )
__KernelReSchedule ( " return from callback " ) ;
2016-05-30 13:00:23 -07:00
// Now seems like a good time to clear out any pending deletes.
for ( SceUID delThread : pendingDeleteThreads ) {
2020-03-15 08:33:40 -07:00
kernelObjects . Destroy < PSPThread > ( delThread ) ;
2016-05-30 13:00:23 -07:00
}
pendingDeleteThreads . clear ( ) ;
2012-11-06 15:46:21 +01:00
}
2012-12-27 19:30:36 -08:00
2012-12-27 19:45:00 -08:00
delete call ;
2012-11-07 15:44:48 +01:00
}
2012-11-06 15:46:21 +01:00
2013-03-09 14:21:21 -08:00
// First arg must be current thread, passed to avoid perf cost of a lookup.
2020-03-15 08:33:40 -07:00
bool __KernelExecutePendingMipsCalls ( PSPThread * thread , bool reschedAfter ) {
2013-09-07 22:02:55 +02:00
_dbg_assert_msg_ ( SCEKERNEL , thread - > GetUID ( ) = = __KernelGetCurThread ( ) , " __KernelExecutePendingMipsCalls() should be called only with the current thread. " ) ;
2012-11-06 15:46:21 +01:00
2012-11-07 15:44:48 +01:00
if ( thread - > pendingMipsCalls . empty ( ) ) {
// Nothing to do
return false ;
}
if ( __CanExecuteCallbackNow ( thread ) )
2012-11-06 15:46:21 +01:00
{
2012-11-07 15:44:48 +01:00
// Pop off the first pending mips call
2013-03-02 14:58:58 -08:00
u32 callId = thread - > pendingMipsCalls . front ( ) ;
2012-11-07 15:44:48 +01:00
thread - > pendingMipsCalls . pop_front ( ) ;
2016-05-28 16:59:19 -07:00
if ( __KernelExecuteMipsCallOnCurrentThread ( callId , reschedAfter ) ) {
return true ;
}
2012-11-06 15:46:21 +01:00
}
2012-11-07 15:44:48 +01:00
return false ;
}
2012-11-06 15:46:21 +01:00
2012-11-07 15:44:48 +01:00
// Executes the callback, when it next is context switched to.
2020-03-15 08:33:40 -07:00
static void __KernelRunCallbackOnThread ( SceUID cbId , PSPThread * thread , bool reschedAfter ) {
2012-11-07 15:44:48 +01:00
u32 error ;
2020-03-15 08:33:40 -07:00
PSPCallback * cb = kernelObjects . Get < PSPCallback > ( cbId , error ) ;
2012-11-07 15:44:48 +01:00
if ( ! cb ) {
2013-09-07 22:02:55 +02:00
ERROR_LOG ( SCEKERNEL , " __KernelRunCallbackOnThread: Bad cbId %i " , cbId ) ;
2012-11-07 15:44:48 +01:00
return ;
}
2013-09-07 22:02:55 +02:00
DEBUG_LOG ( SCEKERNEL , " __KernelRunCallbackOnThread: Turning callback %i into pending mipscall " , cbId ) ;
2012-11-07 15:44:48 +01:00
// Alright, we're on the right thread
// Should save/restore wait state?
2013-02-02 18:01:34 -08:00
const u32 args [ ] = { ( u32 ) cb - > nc . notifyCount , ( u32 ) cb - > nc . notifyArg , cb - > nc . commonArgument } ;
2012-11-07 15:44:48 +01:00
// Clear the notify count / arg
cb - > nc . notifyCount = 0 ;
cb - > nc . notifyArg = 0 ;
2012-12-27 19:30:36 -08:00
ActionAfterCallback * action = ( ActionAfterCallback * ) __KernelCreateAction ( actionAfterCallback ) ;
if ( action ! = NULL )
action - > setCallback ( cbId ) ;
else
2013-09-07 22:02:55 +02:00
ERROR_LOG ( SCEKERNEL , " Something went wrong creating a restore action for a callback. " ) ;
2012-12-27 19:30:36 -08:00
2013-02-02 18:03:55 -08:00
__KernelCallAddress ( thread , cb - > nc . entrypoint , action , args , 3 , reschedAfter , cbId ) ;
2012-11-07 15:44:48 +01:00
}
2013-01-06 10:54:33 -08:00
void ActionAfterCallback : : run ( MipsCall & call ) {
2012-11-07 15:44:48 +01:00
if ( cbId ! = - 1 ) {
u32 error ;
2020-03-15 08:33:40 -07:00
PSPCallback * cb = kernelObjects . Get < PSPCallback > ( cbId , error ) ;
if ( cb ) {
PSPThread * t = kernelObjects . Get < PSPThread > ( cb - > nc . threadId , error ) ;
if ( t ) {
2013-01-27 18:43:12 -08:00
// Check for other callbacks to run (including ones this callback scheduled.)
__KernelCheckThreadCallbacks ( t , true ) ;
}
2013-09-07 22:02:55 +02:00
DEBUG_LOG ( SCEKERNEL , " Left callback %i - %s " , cbId , cb - > nc . name ) ;
2012-11-07 15:44:48 +01:00
// Callbacks that don't return 0 are deleted. But should this be done here?
2020-03-15 08:33:40 -07:00
if ( currentMIPS - > r [ MIPS_REG_V0 ] ! = 0 ) {
2013-09-07 22:02:55 +02:00
DEBUG_LOG ( SCEKERNEL , " ActionAfterCallback::run(): Callback returned non-zero, gets deleted! " ) ;
2020-03-15 08:33:40 -07:00
kernelObjects . Destroy < PSPCallback > ( cbId ) ;
2012-11-07 15:44:48 +01:00
}
}
}
2012-11-06 15:46:21 +01:00
}
2013-03-29 00:27:33 -07:00
bool __KernelCurHasReadyCallbacks ( ) {
2013-09-08 18:42:40 -07:00
if ( readyCallbacksCount = = 0 ) {
2013-03-29 00:27:33 -07:00
return false ;
2013-09-08 18:42:40 -07:00
}
2013-03-29 00:27:33 -07:00
2020-03-15 08:33:40 -07:00
PSPThread * thread = __GetCurrentThread ( ) ;
2013-09-08 18:42:40 -07:00
u32 error ;
for ( auto it = thread - > callbacks . begin ( ) , end = thread - > callbacks . end ( ) ; it ! = end ; + + it ) {
2020-03-15 08:33:40 -07:00
PSPCallback * callback = kernelObjects . Get < PSPCallback > ( * it , error ) ;
2013-09-08 18:42:40 -07:00
if ( callback & & callback - > nc . notifyCount ! = 0 ) {
return true ;
}
2013-03-29 00:27:33 -07:00
}
return false ;
}
2012-11-06 15:46:21 +01:00
// Check callbacks on the current thread only.
// Returns true if any callbacks were processed on the current thread.
2020-03-15 08:33:40 -07:00
bool __KernelCheckThreadCallbacks ( PSPThread * thread , bool force ) {
2013-09-08 18:42:40 -07:00
if ( ! thread | | ( ! thread - > isProcessingCallbacks & & ! force ) ) {
2012-11-06 15:46:21 +01:00
return false ;
2013-09-08 18:42:40 -07:00
}
2012-11-06 15:46:21 +01:00
2013-09-08 18:42:40 -07:00
if ( ! thread - > callbacks . empty ( ) ) {
u32 error ;
for ( auto it = thread - > callbacks . begin ( ) , end = thread - > callbacks . end ( ) ; it ! = end ; + + it ) {
2020-03-15 08:33:40 -07:00
PSPCallback * callback = kernelObjects . Get < PSPCallback > ( * it , error ) ;
2013-09-08 18:42:40 -07:00
if ( callback & & callback - > nc . notifyCount ! = 0 ) {
__KernelRunCallbackOnThread ( callback - > GetUID ( ) , thread , ! force ) ;
readyCallbacksCount - - ;
return true ;
}
2012-11-06 15:46:21 +01:00
}
}
return false ;
}
// Checks for callbacks on all threads
bool __KernelCheckCallbacks ( ) {
2013-01-27 17:01:17 -08:00
// Let's not check every thread all the time, callbacks are fairly uncommon.
if ( readyCallbacksCount = = 0 ) {
return false ;
}
if ( readyCallbacksCount < 0 ) {
2013-09-07 22:02:55 +02:00
ERROR_LOG_REPORT ( SCEKERNEL , " readyCallbacksCount became negative: %i " , readyCallbacksCount ) ;
2013-01-27 17:01:17 -08:00
}
2014-06-28 16:17:52 -07:00
if ( __IsInInterrupt ( ) | | ! __KernelIsDispatchEnabled ( ) | | __KernelInCallback ( ) ) {
// TODO: Technically, other callbacks can run when a thread within a callback is waiting.
// However, callbacks that were pending before the current callback started won't be run.
// This is pretty uncommon, and not yet handled correctly.
return false ;
}
2013-01-27 17:01:17 -08:00
2013-09-08 18:42:40 -07:00
bool processed = false ;
2012-11-06 15:46:21 +01:00
2013-09-08 18:42:40 -07:00
u32 error ;
2014-12-24 22:20:21 -05:00
for ( auto iter = threadqueue . begin ( ) ; iter ! = threadqueue . end ( ) ; + + iter ) {
2020-03-15 08:33:40 -07:00
PSPThread * thread = kernelObjects . Get < PSPThread > ( * iter , error ) ;
2013-09-08 18:42:40 -07:00
if ( thread & & __KernelCheckThreadCallbacks ( thread , false ) ) {
processed = true ;
2012-11-06 15:46:21 +01:00
}
2013-09-08 18:42:40 -07:00
}
2012-12-08 18:40:20 -08:00
2013-09-08 18:42:40 -07:00
if ( processed ) {
2013-03-09 14:21:21 -08:00
return __KernelExecutePendingMipsCalls ( __GetCurrentThread ( ) , true ) ;
2013-09-08 18:42:40 -07:00
}
return false ;
2012-11-06 15:46:21 +01:00
}
2012-12-01 18:43:45 -08:00
bool __KernelForceCallbacks ( )
{
2013-03-09 14:11:53 -08:00
// Let's not check every thread all the time, callbacks are fairly uncommon.
if ( readyCallbacksCount = = 0 ) {
return false ;
}
if ( readyCallbacksCount < 0 ) {
2013-09-07 22:02:55 +02:00
ERROR_LOG_REPORT ( SCEKERNEL , " readyCallbacksCount became negative: %i " , readyCallbacksCount ) ;
2013-03-09 14:11:53 -08:00
}
2020-03-15 08:33:40 -07:00
PSPThread * curThread = __GetCurrentThread ( ) ;
2012-11-06 15:46:21 +01:00
2012-12-08 15:28:54 -08:00
bool callbacksProcessed = __KernelCheckThreadCallbacks ( curThread , true ) ;
2012-12-08 18:40:20 -08:00
if ( callbacksProcessed )
2013-03-09 14:21:21 -08:00
__KernelExecutePendingMipsCalls ( curThread , false ) ;
2012-11-06 15:46:21 +01:00
2012-12-01 18:43:45 -08:00
return callbacksProcessed ;
}
2013-04-14 23:45:46 -07:00
// Not wrapped because it has special return logic.
2012-12-08 18:40:20 -08:00
void sceKernelCheckCallback ( )
{
// Start with yes.
RETURN ( 1 ) ;
2012-11-06 15:46:21 +01:00
2012-12-01 18:43:45 -08:00
bool callbacksProcessed = __KernelForceCallbacks ( ) ;
2012-11-06 15:46:21 +01:00
if ( callbacksProcessed ) {
2013-10-21 23:03:11 +02:00
DEBUG_LOG ( SCEKERNEL , " sceKernelCheckCallback() - processed a callback. " ) ;
// The RETURN(1) above is still active here, unless __KernelForceCallbacks changed it.
2012-11-06 15:46:21 +01:00
} else {
2012-11-07 15:44:48 +01:00
RETURN ( 0 ) ;
2012-11-06 15:46:21 +01:00
}
2013-05-28 01:34:00 -07:00
hleEatCycles ( 230 ) ;
2012-11-06 15:46:21 +01:00
}
// True while a guest callback is executing (nesting depth is tracked).
bool __KernelInCallback()
{
	return g_inCbCount != 0;
}
2013-09-02 01:25:41 -07:00
void __KernelNotifyCallback ( SceUID cbId , int notifyArg )
2012-11-06 15:46:21 +01:00
{
u32 error ;
2020-03-15 08:33:40 -07:00
PSPCallback * cb = kernelObjects . Get < PSPCallback > ( cbId , error ) ;
2012-11-10 10:15:11 +01:00
if ( ! cb ) {
// Yeah, we're screwed, this shouldn't happen.
2013-09-07 22:02:55 +02:00
ERROR_LOG ( SCEKERNEL , " __KernelNotifyCallback - invalid callback %08x " , cbId ) ;
2012-11-10 10:15:11 +01:00
return ;
}
2013-09-08 18:42:40 -07:00
if ( cb - > nc . notifyCount = = 0 ) {
2013-01-27 17:01:17 -08:00
readyCallbacksCount + + ;
}
2013-09-08 18:42:40 -07:00
cb - > nc . notifyCount + + ;
cb - > nc . notifyArg = notifyArg ;
2012-11-06 15:46:21 +01:00
}
2013-03-27 00:51:46 -07:00
void __KernelRegisterWaitTypeFuncs ( WaitType type , WaitBeginCallbackFunc beginFunc , WaitEndCallbackFunc endFunc )
{
waitTypeFuncs [ type ] . beginFunc = beginFunc ;
waitTypeFuncs [ type ] . endFunc = endFunc ;
}
2018-05-08 17:23:14 -07:00
std : : vector < DebugThreadInfo > GetThreadsInfo ( ) {
std : : lock_guard < std : : mutex > guard ( threadqueueLock ) ;
2013-02-10 16:36:06 +01:00
std : : vector < DebugThreadInfo > threadList ;
u32 error ;
2018-05-09 17:51:27 -07:00
for ( const auto uid : threadqueue ) {
2020-03-15 08:33:40 -07:00
PSPThread * t = kernelObjects . Get < PSPThread > ( uid , error ) ;
2013-02-10 16:36:06 +01:00
if ( ! t )
continue ;
DebugThreadInfo info ;
2018-05-09 17:51:27 -07:00
info . id = uid ;
2013-02-10 16:36:06 +01:00
strncpy ( info . name , t - > GetName ( ) , KERNELOBJECT_MAX_NAME_LENGTH ) ;
2013-02-24 10:23:31 -08:00
info . name [ KERNELOBJECT_MAX_NAME_LENGTH ] = 0 ;
2013-02-10 16:36:06 +01:00
info . status = t - > nt . status ;
info . entrypoint = t - > nt . entrypoint ;
2013-08-12 01:54:14 -07:00
info . initialStack = t - > nt . initialStack ;
info . stackSize = ( u32 ) t - > nt . stackSize ;
2013-07-06 21:40:41 +02:00
info . priority = t - > nt . currentPriority ;
2013-08-29 11:48:03 +02:00
info . waitType = ( WaitType ) ( u32 ) t - > nt . waitType ;
2018-05-09 17:51:27 -07:00
info . isCurrent = uid = = currentThread ;
if ( info . isCurrent )
2013-02-17 13:10:40 +01:00
info . curPC = currentMIPS - > pc ;
else
info . curPC = t - > context . pc ;
2013-02-10 16:36:06 +01:00
threadList . push_back ( info ) ;
}
return threadList ;
}
2018-05-09 17:51:27 -07:00
DebugInterface * KernelDebugThread ( SceUID threadID ) {
if ( threadID = = currentThread ) {
return currentDebugMIPS ;
}
u32 error ;
2020-03-15 08:33:40 -07:00
PSPThread * t = kernelObjects . Get < PSPThread > ( threadID , error ) ;
2018-05-09 17:51:27 -07:00
if ( t ) {
return & t - > debug ;
}
return nullptr ;
}
2020-03-15 08:33:40 -07:00
void __KernelChangeThreadState ( SceUID threadId , ThreadStatus newStatus ) {
2013-02-10 16:36:06 +01:00
u32 error ;
2020-03-15 08:33:40 -07:00
PSPThread * t = kernelObjects . Get < PSPThread > ( threadId , error ) ;
2013-02-10 16:36:06 +01:00
if ( ! t )
return ;
__KernelChangeThreadState ( t , newStatus ) ;
}
2013-09-29 01:10:07 -07:00
int sceKernelRegisterExitCallback ( SceUID cbId )
{
u32 error ;
2020-03-15 08:33:40 -07:00
PSPCallback * cb = kernelObjects . Get < PSPCallback > ( cbId , error ) ;
2013-09-29 01:10:07 -07:00
if ( ! cb )
{
WARN_LOG ( SCEKERNEL , " sceKernelRegisterExitCallback(%i): invalid callback id " , cbId ) ;
if ( sceKernelGetCompiledSdkVersion ( ) > = 0x3090500 )
return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT ;
return 0 ;
}
DEBUG_LOG ( SCEKERNEL , " sceKernelRegisterExitCallback(%i) " , cbId ) ;
registeredExitCbId = cbId ;
return 0 ;
}
2013-09-29 00:19:54 -07:00
int LoadExecForUser_362A956B ( )
2013-05-04 20:34:33 +09:00
{
2013-09-29 00:19:54 -07:00
WARN_LOG_REPORT ( SCEKERNEL , " LoadExecForUser_362A956B() " ) ;
2013-05-04 20:34:33 +09:00
u32 error ;
2020-03-15 08:33:40 -07:00
PSPCallback * cb = kernelObjects . Get < PSPCallback > ( registeredExitCbId , error ) ;
2013-05-04 20:34:33 +09:00
if ( ! cb ) {
2013-09-07 22:02:55 +02:00
WARN_LOG ( SCEKERNEL , " LoadExecForUser_362A956B() : registeredExitCbId not found 0x%x " , registeredExitCbId ) ;
2013-05-04 20:34:33 +09:00
return SCE_KERNEL_ERROR_UNKNOWN_CBID ;
}
int cbArg = cb - > nc . commonArgument ;
if ( ! Memory : : IsValidAddress ( cbArg ) ) {
2013-09-07 22:02:55 +02:00
WARN_LOG ( SCEKERNEL , " LoadExecForUser_362A956B() : invalid address for cbArg (0x%08X) " , cbArg ) ;
2013-05-04 20:34:33 +09:00
return SCE_KERNEL_ERROR_ILLEGAL_ADDR ;
}
2013-09-29 01:10:07 -07:00
u32 unknown1 = Memory : : Read_U32 ( cbArg - 8 ) ;
if ( unknown1 > = 4 ) {
2013-09-07 22:02:55 +02:00
WARN_LOG ( SCEKERNEL , " LoadExecForUser_362A956B() : invalid value unknown1 (0x%08X) " , unknown1 ) ;
2013-05-04 20:34:33 +09:00
return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT ;
}
2013-09-29 01:10:07 -07:00
u32 parameterArea = Memory : : Read_U32 ( cbArg - 4 ) ;
2013-05-04 20:34:33 +09:00
if ( ! Memory : : IsValidAddress ( parameterArea ) ) {
2013-09-07 22:02:55 +02:00
WARN_LOG ( SCEKERNEL , " LoadExecForUser_362A956B() : invalid address for parameterArea on userMemory (0x%08X) " , parameterArea ) ;
2013-05-04 20:34:33 +09:00
return SCE_KERNEL_ERROR_ILLEGAL_ADDR ;
}
2013-09-29 01:10:07 -07:00
u32 size = Memory : : Read_U32 ( parameterArea ) ;
2013-05-04 20:34:33 +09:00
if ( size < 12 ) {
2013-09-07 22:02:55 +02:00
WARN_LOG ( SCEKERNEL , " LoadExecForUser_362A956B() : invalid parameterArea size %d " , size ) ;
2013-05-04 20:34:33 +09:00
return SCE_KERNEL_ERROR_ILLEGAL_SIZE ;
}
Memory : : Write_U32 ( 0 , parameterArea + 4 ) ;
Memory : : Write_U32 ( - 1 , parameterArea + 8 ) ;
return 0 ;
}
2014-01-04 22:41:37 -08:00
// Pseudo thread id: an event handler registered with this id applies to
// all user threads rather than one specific thread.
static const SceUID SCE_TE_THREADID_ALL_USER = 0xFFFFFFF0;

// Raw thread event handler record. This struct is written directly to guest
// memory (sceKernelReferThreadEventHandlerStatus) and serialized into
// savestates (ThreadEventHandler::DoState), so its layout must not change.
struct NativeThreadEventHandler {
	u32 size;                                      // Filled with sizeof(NativeThreadEventHandler) on creation.
	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];   // NUL-terminated handler name.
	SceUID threadID;                               // Target thread, or SCE_TE_THREADID_ALL_USER.
	u32 mask;                                      // ThreadEventType bits this handler fires on.
	u32 handlerPtr;                                // Guest address of the handler function.
	u32 commonArg;                                 // Extra argument passed to the handler.
};
struct ThreadEventHandler : public KernelObject {
const char * GetName ( ) { return nteh . name ; }
const char * GetTypeName ( ) { return " ThreadEventHandler " ; }
static u32 GetMissingErrorCode ( ) { return SCE_KERNEL_ERROR_UNKNOWN_TEID ; }
static int GetStaticIDType ( ) { return SCE_KERNEL_TMID_ThreadEventHandler ; }
int GetIDType ( ) const { return SCE_KERNEL_TMID_ThreadEventHandler ; }
virtual void DoState ( PointerWrap & p ) {
auto s = p . Section ( " ThreadEventHandler " , 1 ) ;
if ( ! s )
return ;
p . Do ( nteh ) ;
}
NativeThreadEventHandler nteh ;
} ;
// Factory used by the savestate system to instantiate this object type.
KernelObject *__KernelThreadEventHandlerObject() {
	// Default object to load from state.
	return new ThreadEventHandler();
}
2016-05-30 13:00:23 -07:00
bool __KernelThreadTriggerEvent ( const ThreadEventHandlerList & handlers , SceUID threadID , ThreadEventType type ) {
2020-03-15 08:33:40 -07:00
PSPThread * thread = __GetCurrentThread ( ) ;
2016-05-29 00:12:36 -07:00
if ( ! thread | | thread - > isStopped ( ) ) {
SceUID nextThreadID = threadReadyQueue . peek_first ( ) ;
2020-03-15 08:33:40 -07:00
thread = kernelObjects . GetFast < PSPThread > ( nextThreadID ) ;
2016-05-28 21:14:19 -07:00
}
2016-05-30 13:00:23 -07:00
bool hadHandlers = false ;
2014-01-05 20:20:56 -08:00
for ( auto it = handlers . begin ( ) , end = handlers . end ( ) ; it ! = end ; + + it ) {
u32 error ;
const auto teh = kernelObjects . Get < ThreadEventHandler > ( * it , error ) ;
if ( ! teh | | ( teh - > nteh . mask & type ) = = 0 ) {
continue ;
}
const u32 args [ ] = { ( u32 ) type , ( u32 ) threadID , teh - > nteh . commonArg } ;
2016-05-28 21:14:19 -07:00
__KernelCallAddress ( thread , teh - > nteh . handlerPtr , nullptr , args , ARRAY_SIZE ( args ) , true , 0 ) ;
2016-05-30 13:00:23 -07:00
hadHandlers = true ;
2014-01-05 20:20:56 -08:00
}
2016-05-30 13:00:23 -07:00
return hadHandlers ;
2014-01-05 20:20:56 -08:00
}
2016-05-30 13:00:23 -07:00
bool __KernelThreadTriggerEvent ( bool isKernel , SceUID threadID , ThreadEventType type ) {
bool hadExactHandlers = false ;
2014-01-05 20:20:56 -08:00
auto exactHandlers = threadEventHandlers . find ( threadID ) ;
if ( exactHandlers ! = threadEventHandlers . end ( ) ) {
2016-05-30 13:00:23 -07:00
hadExactHandlers = __KernelThreadTriggerEvent ( exactHandlers - > second , threadID , type ) ;
2014-01-05 20:20:56 -08:00
}
2016-05-30 13:00:23 -07:00
bool hadKindHandlers = false ;
2014-01-05 20:20:56 -08:00
if ( isKernel ) {
auto kernelHandlers = threadEventHandlers . find ( SCE_TE_THREADID_ALL_USER ) ;
if ( kernelHandlers ! = threadEventHandlers . end ( ) ) {
2016-05-30 13:00:23 -07:00
hadKindHandlers = __KernelThreadTriggerEvent ( kernelHandlers - > second , threadID , type ) ;
2014-01-05 20:20:56 -08:00
}
} else {
auto userHandlers = threadEventHandlers . find ( SCE_TE_THREADID_ALL_USER ) ;
if ( userHandlers ! = threadEventHandlers . end ( ) ) {
2016-05-30 13:00:23 -07:00
hadKindHandlers = __KernelThreadTriggerEvent ( userHandlers - > second , threadID , type ) ;
2014-01-05 20:20:56 -08:00
}
}
2016-05-30 13:00:23 -07:00
return hadKindHandlers | | hadExactHandlers ;
2014-01-05 20:20:56 -08:00
}
2014-01-04 22:41:37 -08:00
SceUID sceKernelRegisterThreadEventHandler ( const char * name , SceUID threadID , u32 mask , u32 handlerPtr , u32 commonArg ) {
if ( ! name ) {
return hleReportError ( SCEKERNEL , SCE_KERNEL_ERROR_ERROR , " invalid name " ) ;
}
if ( threadID = = 0 ) {
2016-05-28 16:17:33 -07:00
// "atexit"?
if ( mask ! = THREADEVENT_EXIT ) {
return hleReportError ( SCEKERNEL , SCE_KERNEL_ERROR_ILLEGAL_ATTR , " invalid thread id " ) ;
}
2014-01-04 22:41:37 -08:00
}
u32 error ;
2020-03-15 08:33:40 -07:00
if ( kernelObjects . Get < PSPThread > ( threadID , error ) = = NULL & & threadID ! = SCE_TE_THREADID_ALL_USER ) {
2014-01-04 22:41:37 -08:00
return hleReportError ( SCEKERNEL , error , " bad thread id " ) ;
}
2014-01-05 20:20:56 -08:00
if ( ( mask & ~ THREADEVENT_SUPPORTED ) ! = 0 ) {
2014-01-04 22:41:37 -08:00
return hleReportError ( SCEKERNEL , SCE_KERNEL_ERROR_ILLEGAL_MASK , " invalid event mask " ) ;
}
auto teh = new ThreadEventHandler ;
teh - > nteh . size = sizeof ( teh - > nteh ) ;
strncpy ( teh - > nteh . name , name , KERNELOBJECT_MAX_NAME_LENGTH ) ;
teh - > nteh . name [ KERNELOBJECT_MAX_NAME_LENGTH ] = ' \0 ' ;
teh - > nteh . threadID = threadID ;
teh - > nteh . mask = mask ;
teh - > nteh . handlerPtr = handlerPtr ;
teh - > nteh . commonArg = commonArg ;
SceUID uid = kernelObjects . Create ( teh ) ;
2014-01-05 20:20:56 -08:00
threadEventHandlers [ threadID ] . push_back ( uid ) ;
2014-01-04 22:41:37 -08:00
return hleLogSuccessI ( SCEKERNEL , uid ) ;
}
int sceKernelReleaseThreadEventHandler(SceUID uid) {
	u32 error;
	const auto teh = kernelObjects.Get<ThreadEventHandler>(uid, error);
	if (!teh)
		return hleReportError(SCEKERNEL, error, "bad handler id");

	// Unregister from the per-thread list before destroying the object.
	auto &handlers = threadEventHandlers[teh->nteh.threadID];
	handlers.erase(std::remove(handlers.begin(), handlers.end(), uid), handlers.end());
	return hleLogSuccessI(SCEKERNEL, kernelObjects.Destroy<ThreadEventHandler>(uid));
}
int sceKernelReferThreadEventHandlerStatus(SceUID uid, u32 infoPtr) {
	u32 error;
	const auto teh = kernelObjects.Get<ThreadEventHandler>(uid, error);
	if (!teh)
		return hleReportError(SCEKERNEL, error, "bad handler id");

	// The first word at infoPtr is the caller-provided struct size; 0 means "don't write".
	if (!Memory::IsValidAddress(infoPtr) || Memory::Read_U32(infoPtr) == 0)
		return hleLogDebug(SCEKERNEL, 0, "struct size was 0");

	Memory::WriteStruct(infoPtr, &teh->nteh);
	return hleLogSuccessI(SCEKERNEL, 0);
}