2012-11-01 16:19:01 +01:00
// Copyright (c) 2012- PPSSPP Project.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
2012-11-04 23:01:49 +01:00
// the Free Software Foundation, version 2.0 or later versions.
2012-11-01 16:19:01 +01:00
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.
// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
2012-11-06 15:46:21 +01:00
# include <set>
2012-11-08 18:02:33 +01:00
# include <map>
2012-11-06 15:46:21 +01:00
# include <queue>
2013-01-28 00:48:59 -08:00
# include <algorithm>
2012-11-06 15:46:21 +01:00
2013-03-10 22:25:03 -07:00
# include "Common/LogManager.h"
2012-11-01 16:19:01 +01:00
# include "HLE.h"
# include "HLETables.h"
# include "../MIPS/MIPSInt.h"
# include "../MIPS/MIPSCodeUtils.h"
# include "../MIPS/MIPS.h"
2013-03-24 20:52:18 -07:00
# include "Core/CoreTiming.h"
# include "Core/MemMap.h"
# include "Core/Reporting.h"
2013-02-03 22:10:06 -08:00
# include "ChunkFile.h"
2012-11-01 16:19:01 +01:00
# include "sceAudio.h"
# include "sceKernel.h"
# include "sceKernelMemory.h"
# include "sceKernelThread.h"
# include "sceKernelModule.h"
# include "sceKernelInterrupt.h"
// Thread attribute flags, stored in NativeThread::attr.
enum
{
	PSP_THREAD_ATTR_KERNEL       = 0x00001000,  // Kernel thread: stack is allocated in kernel RAM (see Thread::AllocateStack).
	PSP_THREAD_ATTR_VFPU         = 0x00004000,
	PSP_THREAD_ATTR_SCRATCH_SRAM = 0x00008000,  // Save/restore scratch as part of context???
	PSP_THREAD_ATTR_NO_FILLSTACK = 0x00100000,  // TODO: No filling of 0xff (only with PSP_THREAD_ATTR_LOW_STACK?)
	PSP_THREAD_ATTR_CLEAR_STACK  = 0x00200000,  // TODO: Clear thread stack when deleted
	PSP_THREAD_ATTR_LOW_STACK    = 0x00400000,  // TODO: Allocate stack from bottom not top.
	PSP_THREAD_ATTR_USER         = 0x80000000,
	PSP_THREAD_ATTR_USBWLAN      = 0xa0000000,
	PSP_THREAD_ATTR_VSH          = 0xc0000000,
};
2012-11-07 15:44:48 +01:00
// Guest-visible callback record. Field order/sizes are serialized (see
// Callback::DoState), so don't reorder or resize the fields.
struct NativeCallback
{
	SceUInt size;
	char name[32];
	// Thread the callback executes on.
	SceUID threadId;
	// Guest address of the callback function.
	u32 entrypoint;
	// Extra argument passed through to the callback.
	u32 commonArgument;

	// Number of times the callback has been notified, and the last notify argument.
	int notifyCount;
	int notifyArg;
};
class Callback : public KernelObject
{
public :
const char * GetName ( ) { return nc . name ; }
const char * GetTypeName ( ) { return " CallBack " ; }
void GetQuickInfo ( char * ptr , int size )
{
sprintf ( ptr , " thread=%i, argument= %08x " ,
//hackAddress,
nc . threadId ,
nc . commonArgument ) ;
}
~ Callback ( )
{
}
static u32 GetMissingErrorCode ( ) { return SCE_KERNEL_ERROR_UNKNOWN_CBID ; }
int GetIDType ( ) const { return SCE_KERNEL_TMID_Callback ; }
2012-12-26 22:45:19 -08:00
virtual void DoState ( PointerWrap & p )
{
p . Do ( nc ) ;
p . Do ( savedPC ) ;
p . Do ( savedRA ) ;
p . Do ( savedV0 ) ;
p . Do ( savedV1 ) ;
p . Do ( savedIdRegister ) ;
p . DoMarker ( " Callback " ) ;
}
2012-11-07 15:44:48 +01:00
NativeCallback nc ;
u32 savedPC ;
u32 savedRA ;
u32 savedV0 ;
u32 savedV1 ;
u32 savedIdRegister ;
} ;
2012-11-06 15:46:21 +01:00
// Real PSP struct, don't change the fields
struct NativeThread
{
	u32 nativeSize;
	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];

	// Threading stuff
	u32 attr;            // PSP_THREAD_ATTR_* flags.
	u32 status;          // THREADSTATUS_* bitmask.
	u32 entrypoint;
	u32 initialStack;    // Base address of the allocated stack block.
	u32 stackSize;
	u32 gpreg;           // Saved $gp for this thread's module.

	int initialPriority;
	int currentPriority;
	WaitType waitType;
	SceUID waitID;       // Object the thread is waiting on (meaning depends on waitType).
	int wakeupCount;
	int exitStatus;
	SceKernelSysClock runForClocks;
	int numInterruptPreempts;
	int numThreadPreempts;
	int numReleases;
};
2012-11-07 15:44:48 +01:00
// Extra data for the wait a thread is currently in.
struct ThreadWaitInfo {
	// Wait-type specific value (e.g. the value being waited for).
	u32 waitValue;
	// Guest pointer to the timeout value, or 0 if no timeout.
	u32 timeoutPtr;
};
2012-12-23 21:49:44 -08:00
// Owns outstanding MIPS calls and provides a way to get them by ID.
class MipsCallManager {
public :
MipsCallManager ( ) : idGen_ ( 0 ) { }
2013-03-02 14:58:58 -08:00
u32 add ( MipsCall * call ) {
u32 id = genId ( ) ;
2012-12-23 21:49:44 -08:00
calls_ . insert ( std : : pair < int , MipsCall * > ( id , call ) ) ;
return id ;
}
2013-03-02 14:58:58 -08:00
MipsCall * get ( u32 id ) {
2013-03-31 12:09:59 -07:00
auto iter = calls_ . find ( id ) ;
if ( iter = = calls_ . end ( ) )
return NULL ;
return iter - > second ;
2012-12-23 21:49:44 -08:00
}
2013-03-02 14:58:58 -08:00
MipsCall * pop ( u32 id ) {
2012-12-23 21:49:44 -08:00
MipsCall * temp = calls_ [ id ] ;
calls_ . erase ( id ) ;
return temp ;
}
void clear ( ) {
2013-03-02 14:58:58 -08:00
for ( auto it = calls_ . begin ( ) , end = calls_ . end ( ) ; it ! = end ; + + it ) {
2012-12-27 19:45:00 -08:00
delete it - > second ;
}
2012-12-23 21:49:44 -08:00
calls_ . clear ( ) ;
idGen_ = 0 ;
}
2012-12-27 19:45:00 -08:00
int registerActionType ( ActionCreator creator ) {
2012-12-27 19:30:36 -08:00
types_ . push_back ( creator ) ;
2013-01-19 13:48:20 -08:00
return ( int ) types_ . size ( ) - 1 ;
2012-12-27 19:30:36 -08:00
}
2012-12-27 19:45:00 -08:00
void restoreActionType ( int actionType , ActionCreator creator ) {
2012-12-27 19:30:36 -08:00
if ( actionType > = ( int ) types_ . size ( ) )
types_ . resize ( actionType + 1 , NULL ) ;
types_ [ actionType ] = creator ;
}
2012-12-27 19:45:00 -08:00
Action * createActionByType ( int actionType ) {
2012-12-27 19:30:36 -08:00
if ( actionType < ( int ) types_ . size ( ) & & types_ [ actionType ] ! = NULL ) {
Action * a = types_ [ actionType ] ( ) ;
a - > actionTypeID = actionType ;
return a ;
}
return NULL ;
}
void DoState ( PointerWrap & p ) {
2013-02-04 01:31:02 -08:00
p . Do ( calls_ ) ;
2012-12-27 19:45:00 -08:00
p . Do ( idGen_ ) ;
2012-12-27 19:30:36 -08:00
p . DoMarker ( " MipsCallManager " ) ;
}
2012-12-23 21:49:44 -08:00
private :
2013-03-02 14:58:58 -08:00
u32 genId ( ) { return + + idGen_ ; }
std : : map < u32 , MipsCall * > calls_ ;
2012-12-27 19:30:36 -08:00
std : : vector < ActionCreator > types_ ;
2013-03-02 14:58:58 -08:00
u32 idGen_ ;
2012-12-23 21:49:44 -08:00
} ;
// Restores a thread's pre-call state (status, wait info, callback id) after a
// MIPS call injected into it completes. May chain another Action to run next.
class ActionAfterMipsCall : public Action
{
	ActionAfterMipsCall()
	{
		chainedAction = NULL;
	}

public:
	virtual void run(MipsCall &call);

	// Factory registered with MipsCallManager for savestate restore.
	static Action *Create()
	{
		return new ActionAfterMipsCall();
	}

	// Serialization order defines the savestate format - keep it stable.
	virtual void DoState(PointerWrap &p)
	{
		p.Do(threadID);
		p.Do(status);
		p.Do(waitType);
		p.Do(waitID);
		p.Do(waitInfo);
		p.Do(isProcessingCallbacks);
		p.Do(currentCallbackId);
		p.DoMarker("ActionAfterMipsCall");

		// The chained action is stored as its registered type id (0 = none).
		int chainedActionType = 0;
		if (chainedAction != NULL)
			chainedActionType = chainedAction->actionTypeID;
		p.Do(chainedActionType);

		if (chainedActionType != 0)
		{
			if (p.mode == p.MODE_READ)
				chainedAction = __KernelCreateAction(chainedActionType);
			// NOTE(review): if the type wasn't re-registered before load,
			// __KernelCreateAction returns NULL and this would crash - verify
			// all modules register their action types before state load.
			chainedAction->DoState(p);
		}
	}

	SceUID threadID;

	// Saved thread state
	int status;
	WaitType waitType;
	int waitID;
	ThreadWaitInfo waitInfo;
	bool isProcessingCallbacks;
	SceUID currentCallbackId;

	Action *chainedAction;
};
2012-12-09 16:56:16 -08:00
2012-12-27 19:30:36 -08:00
class ActionAfterCallback : public Action
{
public :
ActionAfterCallback ( ) { }
2013-01-06 10:54:33 -08:00
virtual void run ( MipsCall & call ) ;
2012-12-27 19:30:36 -08:00
static Action * Create ( )
{
return new ActionAfterCallback ;
}
void setCallback ( SceUID cbId_ )
{
cbId = cbId_ ;
}
void DoState ( PointerWrap & p )
{
p . Do ( cbId ) ;
p . DoMarker ( " ActionAfterCallback " ) ;
}
SceUID cbId ;
} ;
2012-12-09 16:56:16 -08:00
2012-11-01 16:19:01 +01:00
class Thread : public KernelObject
{
public :
const char * GetName ( ) { return nt . name ; }
const char * GetTypeName ( ) { return " Thread " ; }
void GetQuickInfo ( char * ptr , int size )
{
sprintf ( ptr , " pc= %08x sp= %08x %s %s %s %s %s %s (wt=%i wid=%i wv= %08x ) " ,
context . pc , context . r [ MIPS_REG_SP ] ,
( nt . status & THREADSTATUS_RUNNING ) ? " RUN " : " " ,
( nt . status & THREADSTATUS_READY ) ? " READY " : " " ,
( nt . status & THREADSTATUS_WAIT ) ? " WAIT " : " " ,
( nt . status & THREADSTATUS_SUSPEND ) ? " SUSPEND " : " " ,
2012-12-06 22:27:52 -08:00
( nt . status & THREADSTATUS_DORMANT ) ? " DORMANT " : " " ,
2012-11-01 16:19:01 +01:00
( nt . status & THREADSTATUS_DEAD ) ? " DEAD " : " " ,
nt . waitType ,
nt . waitID ,
2012-11-07 15:44:48 +01:00
waitInfo . waitValue ) ;
2012-11-01 16:19:01 +01:00
}
2012-12-23 11:16:32 +01:00
2012-11-06 15:46:21 +01:00
static u32 GetMissingErrorCode ( ) { return SCE_KERNEL_ERROR_UNKNOWN_THID ; }
2012-12-23 11:16:32 +01:00
2012-11-06 15:46:21 +01:00
int GetIDType ( ) const { return SCE_KERNEL_TMID_Thread ; }
bool AllocateStack ( u32 & stackSize )
2012-11-01 16:19:01 +01:00
{
2012-12-06 22:27:52 -08:00
FreeStack ( ) ;
if ( nt . attr & PSP_THREAD_ATTR_KERNEL )
{
// Allocate stacks for kernel threads (idle) in kernel RAM
stackBlock = kernelMemory . Alloc ( stackSize , true , ( std : : string ( " stack/ " ) + nt . name ) . c_str ( ) ) ;
}
else
{
stackBlock = userMemory . Alloc ( stackSize , true , ( std : : string ( " stack/ " ) + nt . name ) . c_str ( ) ) ;
}
2012-12-30 23:22:30 -08:00
if ( stackBlock = = ( u32 ) - 1 )
2012-12-06 22:27:52 -08:00
{
stackBlock = 0 ;
ERROR_LOG ( HLE , " Failed to allocate stack for thread " ) ;
return false ;
}
2013-01-04 00:57:30 +01:00
nt . initialStack = stackBlock ;
2012-12-06 22:27:52 -08:00
nt . stackSize = stackSize ;
2013-04-27 13:58:59 -07:00
return true ;
}
bool FillStack ( ) {
// Fill the stack.
Memory : : Memset ( stackBlock , 0xFF , nt . stackSize ) ;
context . r [ MIPS_REG_SP ] = stackBlock + nt . stackSize ;
stackEnd = context . r [ MIPS_REG_SP ] ;
2013-04-27 23:35:36 -07:00
// The k0 section is 256 bytes at the top of the stack.
context . r [ MIPS_REG_SP ] - = 256 ;
context . r [ MIPS_REG_K0 ] = context . r [ MIPS_REG_SP ] ;
2013-01-04 17:06:36 +01:00
u32 k0 = context . r [ MIPS_REG_K0 ] ;
Memory : : Memset ( k0 , 0 , 0x100 ) ;
2013-04-27 23:35:36 -07:00
Memory : : Write_U32 ( GetUID ( ) , k0 + 0xc0 ) ;
Memory : : Write_U32 ( nt . initialStack , k0 + 0xc8 ) ;
2013-01-04 17:06:36 +01:00
Memory : : Write_U32 ( 0xffffffff , k0 + 0xf8 ) ;
Memory : : Write_U32 ( 0xffffffff , k0 + 0xfc ) ;
2013-04-27 23:35:36 -07:00
// After k0 comes the arguments, which is done by sceKernelStartThread().
2013-01-04 17:06:36 +01:00
Memory : : Write_U32 ( GetUID ( ) , nt . initialStack ) ;
2012-12-06 22:27:52 -08:00
return true ;
2012-11-01 16:19:01 +01:00
}
2012-11-06 15:46:21 +01:00
2012-11-09 00:03:46 +01:00
void FreeStack ( ) {
if ( stackBlock ! = 0 ) {
DEBUG_LOG ( HLE , " Freeing thread stack %s " , nt . name ) ;
if ( nt . attr & PSP_THREAD_ATTR_KERNEL ) {
kernelMemory . Free ( stackBlock ) ;
} else {
userMemory . Free ( stackBlock ) ;
}
stackBlock = 0 ;
}
}
2012-12-06 22:27:52 -08:00
Thread ( ) : stackBlock ( 0 )
{
}
2012-11-01 16:19:01 +01:00
~ Thread ( )
{
2012-11-09 00:03:46 +01:00
FreeStack ( ) ;
2012-11-01 16:19:01 +01:00
}
2012-12-09 16:56:16 -08:00
ActionAfterMipsCall * getRunningCallbackAction ( ) ;
2012-11-10 10:15:11 +01:00
void setReturnValue ( u32 retval ) ;
2013-03-10 10:59:59 -07:00
void setReturnValue ( u64 retval ) ;
2012-12-09 16:56:16 -08:00
void resumeFromWait ( ) ;
bool isWaitingFor ( WaitType type , int id ) ;
int getWaitID ( WaitType type ) ;
ThreadWaitInfo getWaitInfo ( ) ;
2012-11-10 10:15:11 +01:00
2012-11-07 15:44:48 +01:00
// Utils
2013-02-03 12:26:09 -08:00
inline bool isRunning ( ) const { return ( nt . status & THREADSTATUS_RUNNING ) ! = 0 ; }
inline bool isStopped ( ) const { return ( nt . status & THREADSTATUS_DORMANT ) ! = 0 ; }
inline bool isReady ( ) const { return ( nt . status & THREADSTATUS_READY ) ! = 0 ; }
inline bool isWaiting ( ) const { return ( nt . status & THREADSTATUS_WAIT ) ! = 0 ; }
inline bool isSuspended ( ) const { return ( nt . status & THREADSTATUS_SUSPEND ) ! = 0 ; }
2012-12-23 11:16:32 +01:00
2012-12-26 22:45:19 -08:00
virtual void DoState ( PointerWrap & p )
{
p . Do ( nt ) ;
p . Do ( waitInfo ) ;
p . Do ( moduleId ) ;
p . Do ( isProcessingCallbacks ) ;
2013-03-27 00:51:46 -07:00
p . Do ( currentMipscallId ) ;
2012-12-26 22:45:19 -08:00
p . Do ( currentCallbackId ) ;
p . Do ( context ) ;
u32 numCallbacks = THREAD_CALLBACK_NUM_TYPES ;
p . Do ( numCallbacks ) ;
if ( numCallbacks ! = THREAD_CALLBACK_NUM_TYPES )
2013-04-13 01:39:17 -07:00
{
p . SetError ( p . ERROR_FAILURE ) ;
ERROR_LOG ( HLE , " Unable to load state: different thread callback storage. " ) ;
return ;
}
2012-12-26 22:45:19 -08:00
for ( size_t i = 0 ; i < THREAD_CALLBACK_NUM_TYPES ; + + i )
{
p . Do ( registeredCallbacks [ i ] ) ;
p . Do ( readyCallbacks [ i ] ) ;
}
p . Do ( pendingMipsCalls ) ;
p . Do ( stackBlock ) ;
p . DoMarker ( " Thread " ) ;
}
2012-11-01 16:19:01 +01:00
NativeThread nt ;
2012-11-07 15:44:48 +01:00
ThreadWaitInfo waitInfo ;
2012-11-17 14:20:04 +01:00
SceUID moduleId ;
2012-11-01 16:19:01 +01:00
bool isProcessingCallbacks ;
2013-03-27 00:51:46 -07:00
u32 currentMipscallId ;
SceUID currentCallbackId ;
2012-11-01 16:19:01 +01:00
ThreadContext context ;
2012-11-06 15:46:21 +01:00
std : : set < SceUID > registeredCallbacks [ THREAD_CALLBACK_NUM_TYPES ] ;
std : : list < SceUID > readyCallbacks [ THREAD_CALLBACK_NUM_TYPES ] ;
2012-12-23 11:16:32 +01:00
2013-03-02 14:58:58 -08:00
std : : list < u32 > pendingMipsCalls ;
2012-11-01 16:19:01 +01:00
u32 stackBlock ;
2013-03-23 14:26:54 +01:00
u32 stackEnd ;
2012-11-01 16:19:01 +01:00
} ;
2013-03-23 23:54:46 -07:00
// Ready-queue for the scheduler: one FIFO of thread ids per priority level.
// Queues that have ever been used are chained via Queue::next in ascending
// priority-number order, so scans start from the best priority ('first').
// Each queue keeps its elements centered in an oversized array so both
// push_front and push_back are O(1) until a rebalance is needed.
struct ThreadQueueList
{
	// Number of queues (number of priority levels starting at 0.)
	static const int NUM_QUEUES = 128;
	// Initial number of threads a single queue can handle.
	static const int INITIAL_CAPACITY = 32;

	struct Queue
	{
		// Next ever-been-used queue (worse priority.)
		Queue *next;
		// First valid item in data.
		int first;
		// One after last valid item in data.
		int end;
		// A too-large array with room on the front and end.
		SceUID *data;
		// Size of data array.
		int capacity;
	};

	ThreadQueueList()
	{
		memset(queues, 0, sizeof(queues));
		first = invalid();
	}

	~ThreadQueueList()
	{
		for (int i = 0; i < NUM_QUEUES; ++i)
		{
			if (queues[i].data != NULL)
				free(queues[i].data);
		}
	}

	// Pops the highest-priority ready thread. Asserts if all queues are empty.
	inline SceUID pop_first()
	{
		Queue *cur = first;
		while (cur != invalid())
		{
			if (cur->end - cur->first > 0)
				return cur->data[cur->first++];
			cur = cur->next;
		}

		_dbg_assert_msg_(HLE, false, "ThreadQueueList should not be empty.");
		return 0;
	}

	// Pops a thread strictly better than 'priority', or 0 if there is none.
	// Pointer comparison works because all queues live in one array and the
	// 'next' chain is ordered by ascending priority number.
	inline SceUID pop_first_better(u32 priority)
	{
		Queue *cur = first;
		Queue *stop = &queues[priority];
		while (cur < stop)
		{
			if (cur->end - cur->first > 0)
				return cur->data[cur->first++];
			cur = cur->next;
		}

		return 0;
	}

	// Queue must already be linked (see prepare()); rebalance keeps first >= 1
	// so the pre-decrement below never underruns the array.
	inline void push_front(u32 priority, const SceUID threadID)
	{
		Queue *cur = &queues[priority];
		cur->data[--cur->first] = threadID;
		if (cur->first == 0)
			rebalance(priority);
	}

	inline void push_back(u32 priority, const SceUID threadID)
	{
		Queue *cur = &queues[priority];
		cur->data[cur->end++] = threadID;
		if (cur->end == cur->capacity)
			rebalance(priority);
	}

	// Removes one occurrence of threadID from its priority queue; quietly does
	// nothing if it isn't there.
	inline void remove(u32 priority, const SceUID threadID)
	{
		Queue *cur = &queues[priority];
		_dbg_assert_msg_(HLE, cur->next != NULL, "ThreadQueueList::Queue should already be linked up.");

		for (int i = cur->first; i < cur->end; ++i)
		{
			if (cur->data[i] == threadID)
			{
				int remaining = --cur->end - i;
				if (remaining > 0)
					memmove(&cur->data[i], &cur->data[i + 1], remaining * sizeof(SceUID));
				return;
			}
		}

		// Wasn't there.
	}

	// Moves the front thread to the back of its priority queue (round-robin.)
	inline void rotate(u32 priority)
	{
		Queue *cur = &queues[priority];
		_dbg_assert_msg_(HLE, cur->next != NULL, "ThreadQueueList::Queue should already be linked up.");

		if (cur->end - cur->first > 1)
		{
			cur->data[cur->end++] = cur->data[cur->first++];
			if (cur->end == cur->capacity)
				rebalance(priority);
		}
	}

	// Frees all queue storage and resets to the initial empty state.
	inline void clear()
	{
		for (int i = 0; i < NUM_QUEUES; ++i)
		{
			if (queues[i].data != NULL)
				free(queues[i].data);
		}
		memset(queues, 0, sizeof(queues));
		first = invalid();
	}

	inline bool empty(u32 priority) const
	{
		const Queue *cur = &queues[priority];
		return cur->first == cur->end;
	}

	// Ensures the queue for this priority is allocated and linked into the chain.
	inline void prepare(u32 priority)
	{
		Queue *cur = &queues[priority];
		if (cur->next == NULL)
			link(priority, INITIAL_CAPACITY);
	}

	// Savestate serialization: per-queue size + capacity, then the live slice.
	void DoState(PointerWrap &p)
	{
		int numQueues = NUM_QUEUES;
		p.Do(numQueues);
		if (numQueues != NUM_QUEUES)
		{
			p.SetError(p.ERROR_FAILURE);
			ERROR_LOG(HLE, "Savestate loading error: invalid data");
			return;
		}

		if (p.mode == p.MODE_READ)
			clear();

		for (int i = 0; i < NUM_QUEUES; ++i)
		{
			Queue *cur = &queues[i];
			int size = cur->end - cur->first;
			p.Do(size);
			int capacity = cur->capacity;
			p.Do(capacity);

			// Never-used queues round-trip as capacity 0.
			if (capacity == 0)
				continue;

			if (p.mode == p.MODE_READ)
			{
				link(i, capacity);
				cur->first = (cur->capacity - size) / 2;
				cur->end = cur->first + size;
			}

			if (size != 0)
				p.DoArray(&cur->data[cur->first], size);
		}
		p.DoMarker("ThreadQueueList");
	}

private:
	// Sentinel terminating the 'next' chain (distinct from NULL = never used.)
	Queue *invalid() const
	{
		return (Queue *)-1;
	}

	// Allocates a queue (capacity rounded up to a power-of-two multiple of
	// INITIAL_CAPACITY) and splices it into the chain in priority order.
	void link(u32 priority, int size)
	{
		_dbg_assert_msg_(HLE, queues[priority].data == NULL, "ThreadQueueList::Queue should only be initialized once.");

		if (size <= INITIAL_CAPACITY)
			size = INITIAL_CAPACITY;
		else
		{
			int goal = size;
			size = INITIAL_CAPACITY;
			while (size < goal)
				size *= 2;
		}
		Queue *cur = &queues[priority];
		cur->data = (SceUID *)malloc(sizeof(SceUID) * size);
		cur->capacity = size;
		// Start in the middle so both ends have headroom.
		cur->first = size / 2;
		cur->end = size / 2;

		// Insert after the nearest better-priority queue already linked.
		for (int i = (int)priority - 1; i >= 0; --i)
		{
			if (queues[i].next != NULL)
			{
				cur->next = queues[i].next;
				queues[i].next = cur;
				return;
			}
		}

		// No better-priority queue in use yet: this becomes the chain head.
		cur->next = first;
		first = cur;
	}

	// Re-centers the live elements (growing the array if nearly full) so both
	// push_front and push_back regain headroom.
	// NOTE(review): realloc/malloc results are not checked for NULL here.
	void rebalance(u32 priority)
	{
		Queue *cur = &queues[priority];
		int size = cur->end - cur->first;
		if (size >= cur->capacity - 2)
		{
			cur->capacity *= 2;
			cur->data = (SceUID *)realloc(cur->data, cur->capacity * sizeof(SceUID));
		}

		int newFirst = (cur->capacity - size) / 2;
		if (newFirst != cur->first)
		{
			memmove(&cur->data[newFirst], &cur->data[cur->first], size * sizeof(SceUID));
			cur->first = newFirst;
			cur->end = newFirst + size;
		}
	}

	// The first queue that's ever been used.
	Queue *first;
	// The priority level queues of thread ids.
	Queue queues[NUM_QUEUES];
};
2013-03-27 00:51:46 -07:00
// Per-wait-type begin/end hooks (registered in waitTypeFuncs below).
struct WaitTypeFuncs
{
	WaitBeginCallbackFunc beginFunc;
	WaitEndCallbackFunc endFunc;
};
2012-11-06 15:46:21 +01:00
2013-03-27 00:51:46 -07:00
// Forward declarations for internals defined later in this file.
void __KernelExecuteMipsCallOnCurrentThread(u32 callId, bool reschedAfter);

Thread *__KernelCreateThread(SceUID &id, SceUID moduleID, const char *name, u32 entryPoint, u32 priority, int stacksize, u32 attr);
void __KernelResetThread(Thread *t);
void __KernelCancelWakeup(SceUID threadID);
void __KernelCancelThreadEndTimeout(SceUID threadID);
bool __KernelCheckThreadCallbacks(Thread *thread, bool force);
2012-11-01 16:19:01 +01:00
//////////////////////////////////////////////////////////////////////////
//STATE BEGIN
//////////////////////////////////////////////////////////////////////////
// Nesting depth of callback execution.
int g_inCbCount = 0;
// Normally, the same as currentThread. In an interrupt, remembers the callback's thread id.
SceUID currentCallbackThreadID = 0;
int readyCallbacksCount = 0;
// The currently executing thread's kernel object id (0 = none).
SceUID currentThread;
// Addresses of the fake "return stub" code blocks written into kernel RAM
// by __KernelThreadingInit.
u32 idleThreadHackAddr;
u32 threadReturnHackAddr;
u32 cbReturnHackAddr;
u32 intReturnHackAddr;
std::vector<ThreadCallback> threadEndListeners;

// Lists all thread ids that aren't deleted/etc.
std::vector<SceUID> threadqueue;

// Lists only ready thread ids.
ThreadQueueList threadReadyQueue;

SceUID threadIdleID[2];
// CoreTiming event ids.
int eventScheduledWakeup;
int eventThreadEndTimeout;

bool dispatchEnabled = true;

MipsCallManager mipsCalls;
// Action type ids assigned by __KernelRegisterActionType at init.
int actionAfterCallback;
int actionAfterMipsCall;

// Doesn't need state saving.
WaitTypeFuncs waitTypeFuncs[NUM_WAITTYPES];

//////////////////////////////////////////////////////////////////////////
//STATE END
//////////////////////////////////////////////////////////////////////////
2012-12-27 19:30:36 -08:00
// Registers a creator for a savestate-restorable Action type; returns its type id.
int __KernelRegisterActionType(ActionCreator creator) {
	return mipsCalls.registerActionType(creator);
}
void __KernelRestoreActionType ( int actionType , ActionCreator creator )
{
2012-12-27 19:45:00 -08:00
mipsCalls . restoreActionType ( actionType , creator ) ;
2012-12-27 19:30:36 -08:00
}
// Instantiates an Action by registered type id; returns NULL for unknown ids.
Action *__KernelCreateAction(int actionType) {
	return mipsCalls.createActionByType(actionType);
}
// Savestate serialization for a pending MIPS call. Field order defines the
// stream format - keep it stable.
void MipsCall::DoState(PointerWrap &p)
{
	p.Do(entryPoint);
	p.Do(cbId);
	p.DoArray(args, ARRAY_SIZE(args));
	p.Do(numArgs);
	p.Do(savedIdRegister);
	p.Do(savedRa);
	p.Do(savedPc);
	p.Do(savedV0);
	p.Do(savedV1);
	p.Do(tag);
	p.Do(savedId);
	p.Do(reschedAfter);
	p.DoMarker("MipsCall");

	// The follow-up action is stored as its registered type id (0 = none).
	int actionTypeID = 0;
	if (doAfter != NULL)
		actionTypeID = doAfter->actionTypeID;
	p.Do(actionTypeID);
	if (actionTypeID != 0)
	{
		if (p.mode == p.MODE_READ)
			doAfter = __KernelCreateAction(actionTypeID);
		// NOTE(review): doAfter may be NULL if the action type wasn't
		// registered before loading - verify registration order.
		doAfter->DoState(p);
	}
}
2012-11-10 10:15:11 +01:00
2013-01-06 15:53:44 -08:00
// Overrides the v0 value that will be restored when this MIPS call unwinds.
void MipsCall::setReturnValue(u32 value)
{
	savedV0 = value;
}
2013-03-10 10:59:59 -07:00
void MipsCall : : setReturnValue ( u64 value )
{
savedV0 = value & 0xFFFFFFFF ;
savedV1 = ( value > > 32 ) & 0xFFFFFFFF ;
}
2012-11-06 15:46:21 +01:00
// Returns the currently scheduled Thread object, or NULL if no thread is set.
Thread *__GetCurrentThread() {
	if (currentThread == 0)
		return NULL;
	return kernelObjects.GetFast<Thread>(currentThread);
}
2012-11-07 15:44:48 +01:00
// Address a MIPS callback call returns to (the CALLBACKRETURN fake syscall stub).
u32 __KernelMipsCallReturnAddress() {
	return cbReturnHackAddr;
}
// Address an interrupt handler returns to (the INTERRUPTRETURN fake syscall stub).
u32 __KernelInterruptReturnAddress() {
	return intReturnHackAddr;
}
// CoreTiming event handlers, registered in __KernelThreadingInit below.
void hleScheduledWakeup(u64 userdata, int cyclesLate);
void hleThreadEndTimeout(u64 userdata, int cyclesLate);
2012-11-01 16:19:01 +01:00
// Sets up threading state: writes the fake return stubs and the idle-thread
// code into kernel RAM, registers timing events and action types, and creates
// the two idle threads.
void __KernelThreadingInit()
{
	u32 blockSize = 4 * 4 + 4 * 2 * 3;  // One 16-byte thread plus 3 8-byte "hacks"
	// NOTE(review): the idle stub below writes 5 words (20 bytes) and the three
	// stubs start at +20/+28/+36 - that's 44 bytes against a 40-byte request.
	// Verify kernelMemory.Alloc rounds up enough to cover it.

	dispatchEnabled = true;
	memset(waitTypeFuncs, 0, sizeof(waitTypeFuncs));

	currentThread = 0;
	g_inCbCount = 0;
	currentCallbackThreadID = 0;
	readyCallbacksCount = 0;
	idleThreadHackAddr = kernelMemory.Alloc(blockSize, false, "threadrethack");
	// Make sure it got allocated where we expect it... at the very start of kernel RAM
	//CHECK_EQ(idleThreadHackAddr & 0x3FFFFFFF, 0x08000000);

	// Yeah, this is straight out of JPCSP, I should be ashamed.
	// Idle loop: a0 = 0, then jump into the _sceKernelIdle fake syscall.
	Memory::Write_U32(MIPS_MAKE_ADDIU(MIPS_REG_A0, MIPS_REG_ZERO, 0), idleThreadHackAddr);
	Memory::Write_U32(MIPS_MAKE_LUI(MIPS_REG_RA, 0x0800), idleThreadHackAddr + 4);
	Memory::Write_U32(MIPS_MAKE_JR_RA(), idleThreadHackAddr + 8);
	//Memory::Write_U32(MIPS_MAKE_SYSCALL("ThreadManForUser", "sceKernelDelayThread"), idleThreadHackAddr + 12);
	Memory::Write_U32(MIPS_MAKE_SYSCALL("FakeSysCalls", "_sceKernelIdle"), idleThreadHackAddr + 12);
	Memory::Write_U32(MIPS_MAKE_BREAK(), idleThreadHackAddr + 16);

	// Fake syscall stubs that thread/callback/interrupt returns jump to.
	threadReturnHackAddr = idleThreadHackAddr + 20;
	WriteSyscall("FakeSysCalls", NID_THREADRETURN, threadReturnHackAddr);

	cbReturnHackAddr = threadReturnHackAddr + 8;
	WriteSyscall("FakeSysCalls", NID_CALLBACKRETURN, cbReturnHackAddr);

	intReturnHackAddr = cbReturnHackAddr + 8;
	WriteSyscall("FakeSysCalls", NID_INTERRUPTRETURN, intReturnHackAddr);

	eventScheduledWakeup = CoreTiming::RegisterEvent("ScheduledWakeup", &hleScheduledWakeup);
	eventThreadEndTimeout = CoreTiming::RegisterEvent("ThreadEndTimeout", &hleThreadEndTimeout);
	actionAfterMipsCall = __KernelRegisterActionType(ActionAfterMipsCall::Create);
	actionAfterCallback = __KernelRegisterActionType(ActionAfterCallback::Create);

	// Create the two idle threads, as well. With the absolute minimal possible priority.
	// 4096 stack size - don't know what the right value is. Hm, if callbacks are ever to run on these threads...
	__KernelResetThread(__KernelCreateThread(threadIdleID[0], 0, "idle0", idleThreadHackAddr, 0x7f, 4096, PSP_THREAD_ATTR_KERNEL));
	__KernelResetThread(__KernelCreateThread(threadIdleID[1], 0, "idle1", idleThreadHackAddr, 0x7f, 4096, PSP_THREAD_ATTR_KERNEL));
	// These idle threads are later started in LoadExec, which calls __KernelStartIdleThreads below.

	__KernelListenThreadEnd(__KernelCancelWakeup);
	__KernelListenThreadEnd(__KernelCancelThreadEndTimeout);
}
2012-12-27 19:30:36 -08:00
// Savestate serialization for global threading state. Field order defines the
// stream format - keep it stable. Also re-registers timing events and action
// types, which store function pointers that can't be serialized.
void __KernelThreadingDoState(PointerWrap &p)
{
	p.Do(g_inCbCount);
	p.Do(currentCallbackThreadID);
	p.Do(readyCallbacksCount);
	p.Do(idleThreadHackAddr);
	p.Do(threadReturnHackAddr);
	p.Do(cbReturnHackAddr);
	p.Do(intReturnHackAddr);

	p.Do(currentThread);
	SceUID dv = 0;
	p.Do(threadqueue, dv);
	p.DoArray(threadIdleID, ARRAY_SIZE(threadIdleID));
	p.Do(dispatchEnabled);

	p.Do(threadReadyQueue);

	p.Do(eventScheduledWakeup);
	CoreTiming::RestoreRegisterEvent(eventScheduledWakeup, "ScheduledWakeup", &hleScheduledWakeup);
	p.Do(eventThreadEndTimeout);
	CoreTiming::RestoreRegisterEvent(eventThreadEndTimeout, "ThreadEndTimeout", &hleThreadEndTimeout);
	p.Do(actionAfterMipsCall);
	__KernelRestoreActionType(actionAfterMipsCall, ActionAfterMipsCall::Create);
	p.Do(actionAfterCallback);
	__KernelRestoreActionType(actionAfterCallback, ActionAfterCallback::Create);

	hleCurrentThreadName = __KernelGetThreadName(currentThread);

	p.DoMarker("sceKernelThread");
}
// Second-phase savestate pass for pending MIPS calls.
void __KernelThreadingDoStateLate(PointerWrap &p)
{
	// We do this late to give modules time to register actions.
	mipsCalls.DoState(p);
	p.DoMarker("sceKernelThread Late");
}
2012-12-26 22:45:19 -08:00
// Factory for the kernel-object registry: makes a blank Thread for state load.
KernelObject *__KernelThreadObject() {
	return new Thread;
}
// Factory for the kernel-object registry: makes a blank Callback for state load.
KernelObject *__KernelCallbackObject() {
	return new Callback;
}
2012-11-20 00:18:11 -08:00
// Registers a listener to be notified whenever a thread ends.
void __KernelListenThreadEnd(ThreadCallback callback) {
	threadEndListeners.push_back(callback);
}
2013-01-27 16:55:43 -08:00
// Notifies every registered thread-end listener about threadID.
void __KernelFireThreadEnd(SceUID threadID)
{
	for (size_t i = 0; i < threadEndListeners.size(); ++i)
	{
		ThreadCallback cb = threadEndListeners[i];
		cb(threadID);
	}
}
2013-02-03 12:26:09 -08:00
// TODO: Use __KernelChangeThreadState instead? It has other affects...
2013-02-09 19:02:38 -08:00
void __KernelChangeReadyState ( Thread * thread , SceUID threadID , bool ready )
2013-02-03 12:26:09 -08:00
{
int prio = thread - > nt . currentPriority ;
if ( thread - > isReady ( ) )
{
if ( ! ready )
2013-03-23 23:54:46 -07:00
threadReadyQueue . remove ( prio , threadID ) ;
2013-02-03 12:26:09 -08:00
}
else if ( ready )
2013-02-03 13:27:23 -08:00
{
2013-02-09 19:02:38 -08:00
if ( thread - > isRunning ( ) )
2013-03-23 23:54:46 -07:00
threadReadyQueue . push_front ( prio , threadID ) ;
2013-02-03 13:27:23 -08:00
else
2013-03-23 23:54:46 -07:00
threadReadyQueue . push_back ( prio , threadID ) ;
2013-02-09 15:16:37 -08:00
thread - > nt . status = THREADSTATUS_READY ;
2013-02-03 13:27:23 -08:00
}
2013-02-03 12:26:09 -08:00
}
// Convenience overload: looks the thread up by ID first.
void __KernelChangeReadyState(SceUID threadID, bool ready)
{
	u32 error;
	Thread *thread = kernelObjects.Get<Thread>(threadID, error);
	if (!thread)
	{
		WARN_LOG(HLE, "Trying to change the ready state of an unknown thread?");
		return;
	}
	__KernelChangeReadyState(thread, threadID, ready);
}
2013-04-10 21:16:31 -07:00
// Makes both idle threads runnable, giving them the module's GP so their
// code runs with a sane global pointer.
void __KernelStartIdleThreads(SceUID moduleId)
{
	for (int i = 0; i < 2; i++)
	{
		u32 error;
		Thread *t = kernelObjects.Get<Thread>(threadIdleID[i], error);
		// Fix: guard against a failed lookup instead of dereferencing NULL.
		// The idle threads are created during init, so this shouldn't happen.
		if (!t)
		{
			ERROR_LOG(HLE, "__KernelStartIdleThreads: idle thread %i missing", i);
			continue;
		}
		t->nt.gpreg = __KernelGetModuleGP(moduleId);
		t->context.r[MIPS_REG_GP] = t->nt.gpreg;
		//t->context.pc += 4;	// ADJUSTPC
		threadReadyQueue.prepare(t->nt.currentPriority);
		__KernelChangeReadyState(t, threadIdleID[i], true);
	}
}
2012-12-18 00:58:46 -08:00
bool __KernelSwitchOffThread ( const char * reason )
{
if ( ! reason )
reason = " switch off thread " ;
2012-12-27 17:43:44 -08:00
SceUID threadID = currentThread ;
2012-12-18 00:58:46 -08:00
if ( threadID ! = threadIdleID [ 0 ] & & threadID ! = threadIdleID [ 1 ] )
{
2013-02-02 19:14:00 -08:00
Thread * current = __GetCurrentThread ( ) ;
if ( current & & current - > isRunning ( ) )
2013-02-03 12:26:09 -08:00
__KernelChangeReadyState ( current , threadID , true ) ;
2013-02-02 19:14:00 -08:00
2012-12-18 00:58:46 -08:00
// Idle 0 chosen entirely arbitrarily.
2013-03-16 11:17:44 -07:00
Thread * t = kernelObjects . GetFast < Thread > ( threadIdleID [ 0 ] ) ;
2012-12-18 00:58:46 -08:00
if ( t )
{
__KernelSwitchContext ( t , reason ) ;
return true ;
}
else
ERROR_LOG ( HLE , " Unable to switch to idle thread. " ) ;
}
return false ;
2012-11-01 16:19:01 +01:00
}
2013-04-06 17:03:39 -07:00
// Switches from an idle thread to the given thread. Only valid while an
// idle thread is running. Returns true when a switch actually happened.
bool __KernelSwitchToThread(SceUID threadID, const char *reason)
{
	if (!reason)
		reason = "switch to thread";

	if (currentThread != threadIdleID[0] && currentThread != threadIdleID[1])
	{
		ERROR_LOG_REPORT(HLE, "__KernelSwitchToThread used when already on a thread.");
		return false;
	}

	if (currentThread == threadID)
		return false;

	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "__KernelSwitchToThread: %x doesn't exist", threadID);
		return false;
	}

	Thread *current = __GetCurrentThread();
	if (current && current->isRunning())
	{
		// Bugfix: requeue the *current* thread under its own ID. This
		// previously passed threadID (the switch target), which queued the
		// wrong ID at the idle thread's priority.
		__KernelChangeReadyState(current, currentThread, true);
	}
	__KernelSwitchContext(t, reason);
	return true;
}
2012-11-17 14:20:04 +01:00
// Body of the idle threads: burns host time until the next scheduled event,
// runs pending events, then reschedules. Also handles resuming the thread
// that owns an in-progress callback if we idle while one is active.
void __KernelIdle ( )
2012-11-01 16:19:01 +01:00
{
2012-12-23 11:16:32 +01:00
// Skip ahead to the next scheduled CoreTiming event.
CoreTiming : : Idle ( ) ;
// Advance must happen between Idle and Reschedule, so that threads that were waiting for something
// that was triggered at the end of the Idle period must get a chance to be scheduled.
2013-02-22 00:38:22 -08:00
CoreTiming : : AdvanceQuick ( ) ;
2012-11-01 16:19:01 +01:00
2013-01-27 18:43:38 -08:00
// We must've exited a callback?
if ( __KernelInCallback ( ) )
{
u32 error ;
Thread * t = kernelObjects . Get < Thread > ( currentCallbackThreadID , error ) ;
if ( t )
2013-02-02 19:14:00 -08:00
{
2013-02-03 12:26:09 -08:00
// Pull the callback thread out of the ready queue and mark it running
// (set RUNNING, clear READY) before switching back into it.
__KernelChangeReadyState ( t , currentCallbackThreadID , false ) ;
2013-02-02 19:14:00 -08:00
t - > nt . status = ( t - > nt . status | THREADSTATUS_RUNNING ) & ~ THREADSTATUS_READY ;
2013-01-27 18:43:38 -08:00
__KernelSwitchContext ( t , " idle " ) ;
2013-02-02 19:14:00 -08:00
}
2013-01-27 18:43:38 -08:00
else
{
2013-03-26 00:54:00 -07:00
WARN_LOG_REPORT ( HLE , " UNTESTED - Callback thread deleted during interrupt? " ) ;
2013-01-27 18:43:38 -08:00
// The callback's owner is gone - clear the callback state entirely.
g_inCbCount = 0 ;
currentCallbackThreadID = 0 ;
}
}
2012-12-23 11:16:32 +01:00
// In Advance, we might trigger an interrupt such as vblank.
// If we end up in an interrupt, we don't want to reschedule.
// However, we have to reschedule... damn.
__KernelReSchedule ( " idle " ) ;
2012-11-01 16:19:01 +01:00
}
void __KernelThreadingShutdown ( )
{
kernelMemory . Free ( threadReturnHackAddr ) ;
2012-12-23 11:16:32 +01:00
threadqueue . clear ( ) ;
2013-02-03 12:09:22 -08:00
threadReadyQueue . clear ( ) ;
2012-12-23 21:27:26 -08:00
threadEndListeners . clear ( ) ;
2012-12-23 21:49:44 -08:00
mipsCalls . clear ( ) ;
2012-11-01 16:19:01 +01:00
threadReturnHackAddr = 0 ;
2012-12-23 11:16:32 +01:00
cbReturnHackAddr = 0 ;
2012-11-01 16:19:01 +01:00
currentThread = 0 ;
2012-11-08 16:28:45 +01:00
intReturnHackAddr = 0 ;
2013-03-10 22:25:03 -07:00
hleCurrentThreadName = NULL ;
2012-11-01 16:19:01 +01:00
}
2012-12-09 16:56:16 -08:00
// Returns the thread's name, or "ERROR" for an unknown thread ID.
const char *__KernelGetThreadName(SceUID threadID)
{
	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	return t ? t->nt.name : "ERROR";
}
2012-11-01 16:19:01 +01:00
u32 __KernelGetWaitValue ( SceUID threadID , u32 & error )
{
Thread * t = kernelObjects . Get < Thread > ( threadID , error ) ;
if ( t )
{
2012-12-09 16:56:16 -08:00
return t - > getWaitInfo ( ) . waitValue ;
2012-11-01 16:19:01 +01:00
}
else
{
ERROR_LOG ( HLE , " __KernelGetWaitValue ERROR: thread %i " , threadID ) ;
return 0 ;
}
}
2012-11-18 19:13:39 -08:00
u32 __KernelGetWaitTimeoutPtr ( SceUID threadID , u32 & error )
{
Thread * t = kernelObjects . Get < Thread > ( threadID , error ) ;
if ( t )
{
2012-12-09 16:56:16 -08:00
return t - > getWaitInfo ( ) . timeoutPtr ;
2012-11-18 19:13:39 -08:00
}
else
{
2012-11-18 19:57:08 -08:00
ERROR_LOG ( HLE , " __KernelGetWaitTimeoutPtr ERROR: thread %i " , threadID ) ;
return 0 ;
}
}
2012-11-20 00:18:11 -08:00
SceUID __KernelGetWaitID ( SceUID threadID , WaitType type , u32 & error )
2012-11-18 19:57:08 -08:00
{
Thread * t = kernelObjects . Get < Thread > ( threadID , error ) ;
if ( t )
{
2012-12-09 16:56:16 -08:00
return t - > getWaitID ( type ) ;
2012-11-18 19:57:08 -08:00
}
else
{
ERROR_LOG ( HLE , " __KernelGetWaitID ERROR: thread %i " , threadID ) ;
2012-11-18 19:13:39 -08:00
return 0 ;
}
}
2013-03-27 00:51:46 -07:00
SceUID __KernelGetCurrentCallbackID ( SceUID threadID , u32 & error )
{
Thread * t = kernelObjects . Get < Thread > ( threadID , error ) ;
if ( t )
return t - > currentCallbackId ;
else
{
ERROR_LOG ( HLE , " __KernelGetCurrentCallbackID ERROR: thread %i " , threadID ) ;
return 0 ;
}
}
2013-01-05 23:06:28 +01:00
// Copies the native thread info struct to guest memory. The guest passes
// the struct size it expects in the first word; SDKs after 2.60 validate it.
u32 sceKernelReferThreadStatus(u32 threadID, u32 statusPtr)
{
	static const u32 THREADINFO_SIZE = 104;
	static const u32 THREADINFO_SIZE_AFTER_260 = 108;

	if (threadID == 0)
		threadID = __KernelGetCurThread();

	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "%08x=sceKernelReferThreadStatus(%i, %08x): bad thread", error, threadID, statusPtr);
		return error;
	}

	// Fix: validate the pointer before reading the requested size from it,
	// for consistency with sceKernelReferThreadRunStatus below.
	if (!Memory::IsValidAddress(statusPtr))
	{
		ERROR_LOG(HLE, "sceKernelReferThreadStatus(%i, %08x): bad pointer", threadID, statusPtr);
		return -1;
	}

	u32 wantedSize = Memory::Read_U32(statusPtr);
	if (sceKernelGetCompiledSdkVersion() > 0x2060010)
	{
		if (wantedSize > THREADINFO_SIZE_AFTER_260)
		{
			ERROR_LOG(HLE, "%08x=sceKernelReferThreadStatus(%i, %08x): bad size %d", SCE_KERNEL_ERROR_ILLEGAL_SIZE, threadID, statusPtr, wantedSize);
			return SCE_KERNEL_ERROR_ILLEGAL_SIZE;
		}

		DEBUG_LOG(HLE, "sceKernelReferThreadStatus(%i, %08x)", threadID, statusPtr);
		t->nt.nativeSize = THREADINFO_SIZE_AFTER_260;
		// Fix: never copy more than sizeof(t->nt) from the host struct -
		// the old code passed wantedSize straight to Memcpy, overreading
		// t->nt when the guest asked for a larger struct.
		u32 copySize = std::min(wantedSize, (u32)sizeof(t->nt));
		if (copySize != 0)
			Memory::Memcpy(statusPtr, &t->nt, copySize);
		// TODO: What is this value?  Basic tests show 0...
		if (wantedSize > sizeof(t->nt))
			Memory::Memset(statusPtr + sizeof(t->nt), 0, wantedSize - sizeof(t->nt));
	}
	else
	{
		DEBUG_LOG(HLE, "sceKernelReferThreadStatus(%i, %08x)", threadID, statusPtr);
		t->nt.nativeSize = THREADINFO_SIZE;
		u32 sz = std::min(THREADINFO_SIZE, wantedSize);
		if (sz != 0)
			Memory::Memcpy(statusPtr, &t->nt, sz);
	}

	return 0;
}
// Thanks JPCSP
// Writes the thread's run-status fields (ten consecutive u32 words) to
// guest memory.
u32 sceKernelReferThreadRunStatus(u32 threadID, u32 statusPtr)
{
	if (threadID == 0)
		threadID = __KernelGetCurThread();

	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "sceKernelReferThreadRunStatus Error %08x", error);
		return error;
	}

	DEBUG_LOG(HLE, "sceKernelReferThreadRunStatus(%i, %08x)", threadID, statusPtr);
	if (!Memory::IsValidAddress(statusPtr))
		return -1;

	// Same field order as before: status, priority, wait info, counters.
	u32 at = statusPtr;
	Memory::Write_U32(t->nt.status, at); at += 4;
	Memory::Write_U32(t->nt.currentPriority, at); at += 4;
	Memory::Write_U32(t->nt.waitType, at); at += 4;
	Memory::Write_U32(t->nt.waitID, at); at += 4;
	Memory::Write_U32(t->nt.wakeupCount, at); at += 4;
	Memory::Write_U32(t->nt.runForClocks.lo, at); at += 4;
	Memory::Write_U32(t->nt.runForClocks.hi, at); at += 4;
	Memory::Write_U32(t->nt.numInterruptPreempts, at); at += 4;
	Memory::Write_U32(t->nt.numThreadPreempts, at); at += 4;
	Memory::Write_U32(t->nt.numReleases, at);
	return 0;
}
void sceKernelGetThreadExitStatus ( )
{
2012-12-23 11:16:32 +01:00
SceUID threadID = PARAM ( 0 ) ;
if ( threadID = = 0 )
threadID = __KernelGetCurThread ( ) ;
u32 error ;
Thread * t = kernelObjects . Get < Thread > ( threadID , error ) ;
if ( t )
{
if ( t - > nt . status = = THREADSTATUS_DORMANT ) // TODO: can be dormant before starting, too, need to avoid that
{
DEBUG_LOG ( HLE , " sceKernelGetThreadExitStatus(%i) " , threadID ) ;
RETURN ( t - > nt . exitStatus ) ;
}
else
{
RETURN ( SCE_KERNEL_ERROR_NOT_DORMANT ) ;
}
}
else
{
ERROR_LOG ( HLE , " sceKernelGetThreadExitStatus Error %08x " , error ) ;
RETURN ( SCE_KERNEL_ERROR_UNKNOWN_THID ) ;
}
2012-11-01 16:19:01 +01:00
}
2012-12-17 22:20:32 +01:00
// Returns the threadman object type of a UID, or ILLEGAL_ARGUMENT when the
// UID is unknown.
u32 sceKernelGetThreadmanIdType(u32 uid) {
	int type;
	if (!kernelObjects.GetIDType(uid, &type)) {
		ERROR_LOG(HLE, "sceKernelGetThreadmanIdType(%i) - FAILED", uid);
		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
	}
	DEBUG_LOG(HLE, "%i=sceKernelGetThreadmanIdType(%i)", type, uid);
	return type;
}
// Fills readBufPtr with up to readBufSize thread UIDs and writes the total
// thread count to idCountPtr. Only SCE_KERNEL_TMID_Thread is supported.
u32 sceKernelGetThreadmanIdList(u32 type, u32 readBufPtr, u32 readBufSize, u32 idCountPtr)
{
	DEBUG_LOG(HLE, "sceKernelGetThreadmanIdList(%i, %08x, %i, %08x)",
		type, readBufPtr, readBufSize, idCountPtr);
	if (!Memory::IsValidAddress(readBufPtr))
		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;

	if (type != SCE_KERNEL_TMID_Thread) {
		ERROR_LOG_REPORT(HLE, "sceKernelGetThreadmanIdList only implemented for threads");
		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
	}

	const size_t copyCount = std::min((size_t)readBufSize, threadqueue.size());
	for (size_t i = 0; i < copyCount; i++)
		Memory::Write_U32(threadqueue[i], readBufPtr + (u32)i * 4);

	Memory::Write_U32((u32)threadqueue.size(), idCountPtr);
	return 0;
}
// Saves the current CPU context
2013-04-07 11:28:37 -07:00
void __KernelSaveContext ( ThreadContext * ctx , bool vfpuEnabled )
2012-11-01 16:19:01 +01:00
{
2013-01-26 11:27:06 -08:00
memcpy ( ctx - > r , currentMIPS - > r , sizeof ( ctx - > r ) ) ;
memcpy ( ctx - > f , currentMIPS - > f , sizeof ( ctx - > f ) ) ;
2013-04-07 11:28:37 -07:00
if ( vfpuEnabled )
{
memcpy ( ctx - > v , currentMIPS - > v , sizeof ( ctx - > v ) ) ;
memcpy ( ctx - > vfpuCtrl , currentMIPS - > vfpuCtrl , sizeof ( ctx - > vfpuCtrl ) ) ;
}
2013-01-26 11:27:06 -08:00
ctx - > pc = currentMIPS - > pc ;
2012-11-01 16:19:01 +01:00
ctx - > hi = currentMIPS - > hi ;
ctx - > lo = currentMIPS - > lo ;
2013-01-07 13:42:05 +01:00
ctx - > fcr0 = currentMIPS - > fcr0 ;
ctx - > fcr31 = currentMIPS - > fcr31 ;
2012-11-01 16:19:01 +01:00
ctx - > fpcond = currentMIPS - > fpcond ;
}
// Loads a CPU context
2013-04-07 11:28:37 -07:00
void __KernelLoadContext ( ThreadContext * ctx , bool vfpuEnabled )
2012-11-01 16:19:01 +01:00
{
2013-01-26 11:27:06 -08:00
memcpy ( currentMIPS - > r , ctx - > r , sizeof ( ctx - > r ) ) ;
memcpy ( currentMIPS - > f , ctx - > f , sizeof ( ctx - > f ) ) ;
2013-04-07 11:28:37 -07:00
if ( vfpuEnabled )
{
memcpy ( currentMIPS - > v , ctx - > v , sizeof ( ctx - > v ) ) ;
memcpy ( currentMIPS - > vfpuCtrl , ctx - > vfpuCtrl , sizeof ( ctx - > vfpuCtrl ) ) ;
}
2013-01-26 11:27:06 -08:00
currentMIPS - > pc = ctx - > pc ;
2012-11-01 16:19:01 +01:00
currentMIPS - > hi = ctx - > hi ;
currentMIPS - > lo = ctx - > lo ;
2013-01-07 13:42:05 +01:00
currentMIPS - > fcr0 = ctx - > fcr0 ;
currentMIPS - > fcr31 = ctx - > fcr31 ;
2012-11-01 16:19:01 +01:00
currentMIPS - > fpcond = ctx - > fpcond ;
2013-02-18 10:27:15 -08:00
// Reset the llBit, the other thread may have touched memory.
currentMIPS - > llBit = 0 ;
2012-11-01 16:19:01 +01:00
}
2012-11-18 17:54:55 -08:00
// Wakes a thread from its wait without touching its return value.
u32 __KernelResumeThreadFromWait(SceUID threadID)
{
	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
		return error;
	}
	t->resumeFromWait();
	return 0;
}
2013-03-10 10:59:59 -07:00
// Wakes a thread from its wait and sets its syscall return value (u32).
u32 __KernelResumeThreadFromWait(SceUID threadID, u32 retval)
{
	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
		return error;
	}
	t->resumeFromWait();
	t->setReturnValue(retval);
	return 0;
}
// Wakes a thread from its wait and sets a 64-bit syscall return value.
u32 __KernelResumeThreadFromWait(SceUID threadID, u64 retval)
{
	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
		return error;
	}
	t->resumeFromWait();
	t->setReturnValue(retval);
	return 0;
}
2012-11-01 16:19:01 +01:00
// Only run when you can safely accept a context switch
// Triggers a waitable event, that is, it wakes up all threads that waits for it
2012-12-08 22:39:36 -08:00
// If any changes were made, it will context switch after the syscall
2013-01-26 10:59:17 -08:00
bool __KernelTriggerWait ( WaitType type , int id , bool useRetVal , int retVal , const char * reason , bool dontSwitch )
2012-11-01 16:19:01 +01:00
{
2012-11-05 10:05:09 +01:00
bool doneAnything = false ;
2012-11-01 16:19:01 +01:00
2012-12-27 17:43:44 -08:00
u32 error ;
for ( std : : vector < SceUID > : : iterator iter = threadqueue . begin ( ) ; iter ! = threadqueue . end ( ) ; iter + + )
2012-11-01 16:19:01 +01:00
{
2012-12-27 17:43:44 -08:00
Thread * t = kernelObjects . Get < Thread > ( * iter , error ) ;
if ( t & & t - > isWaitingFor ( type , id ) )
2012-11-01 16:19:01 +01:00
{
2012-12-09 16:56:16 -08:00
// This thread was waiting for the triggered object.
t - > resumeFromWait ( ) ;
if ( useRetVal )
2013-03-10 10:59:59 -07:00
t - > setReturnValue ( ( u32 ) retVal ) ;
2012-12-09 16:56:16 -08:00
doneAnything = true ;
2013-03-25 08:15:33 -07:00
if ( type = = WAITTYPE_THREADEND )
__KernelCancelThreadEndTimeout ( * iter ) ;
2012-11-01 16:19:01 +01:00
}
}
2012-11-18 17:54:55 -08:00
// if (doneAnything) // lumines?
{
if ( ! dontSwitch )
{
// TODO: time waster
2013-01-26 10:59:17 -08:00
hleReSchedule ( reason ) ;
2012-11-18 17:54:55 -08:00
}
}
2013-04-05 22:38:19 -07:00
return doneAnything ;
2012-11-01 16:19:01 +01:00
}
2013-01-26 10:59:17 -08:00
// Convenience overload: trigger without setting a return value.
bool __KernelTriggerWait(WaitType type, int id, const char *reason, bool dontSwitch)
{
	return __KernelTriggerWait(type, id, false, 0, reason, dontSwitch);
}
2013-01-26 10:59:17 -08:00
// Convenience overload: trigger and set retVal on each woken thread.
bool __KernelTriggerWait(WaitType type, int id, int retVal, const char *reason, bool dontSwitch)
{
	return __KernelTriggerWait(type, id, true, retVal, reason, dontSwitch);
}
// makes the current thread wait for an event
2013-01-26 10:44:04 -08:00
void __KernelWaitCurThread ( WaitType type , SceUID waitID , u32 waitValue , u32 timeoutPtr , bool processCallbacks , const char * reason )
2012-11-01 16:19:01 +01:00
{
2013-03-13 23:49:39 -07:00
if ( ! dispatchEnabled )
{
2013-03-26 00:54:00 -07:00
WARN_LOG_REPORT ( HLE , " Ignoring wait, dispatching disabled... right thing to do? " ) ;
2013-03-13 23:49:39 -07:00
return ;
}
2012-12-09 16:56:16 -08:00
// TODO: Need to defer if in callback?
if ( g_inCbCount > 0 )
2013-03-26 00:54:00 -07:00
WARN_LOG_REPORT ( HLE , " UNTESTED - waiting within a callback, probably bad mojo. " ) ;
2012-12-09 16:56:16 -08:00
2012-12-27 17:43:44 -08:00
Thread * thread = __GetCurrentThread ( ) ;
thread - > nt . waitID = waitID ;
thread - > nt . waitType = type ;
__KernelChangeThreadState ( thread , THREADSTATUS_WAIT ) ;
thread - > nt . numReleases + + ;
thread - > waitInfo . waitValue = waitValue ;
thread - > waitInfo . timeoutPtr = timeoutPtr ;
2012-11-01 16:19:01 +01:00
2012-12-09 16:56:16 -08:00
// TODO: Remove this once all callers are cleaned up.
2012-11-01 16:19:01 +01:00
RETURN ( 0 ) ; //pretend all went OK
2012-11-18 17:54:55 -08:00
// TODO: time waster
2013-01-26 10:44:04 -08:00
if ( ! reason )
reason = " started wait " ;
2012-12-23 11:16:32 +01:00
2013-01-26 10:44:04 -08:00
hleReSchedule ( processCallbacks , reason ) ;
2012-11-01 16:19:01 +01:00
// TODO: Remove thread from Ready queue?
}
2013-03-30 13:48:29 -07:00
// Puts the current thread into a wait and immediately forces callbacks to
// run (used by the *CB wait variants.)
void __KernelWaitCallbacksCurThread(WaitType type, SceUID waitID, u32 waitValue, u32 timeoutPtr)
{
	if (!dispatchEnabled)
	{
		WARN_LOG_REPORT(HLE, "Ignoring wait, dispatching disabled... right thing to do?");
		return;
	}

	Thread *cur = __GetCurrentThread();
	cur->nt.waitID = waitID;
	cur->nt.waitType = type;
	__KernelChangeThreadState(cur, THREADSTATUS_WAIT);
	// TODO: Probably not...?
	cur->nt.numReleases++;
	cur->waitInfo.waitValue = waitValue;
	cur->waitInfo.timeoutPtr = timeoutPtr;

	__KernelForceCallbacks();
}
2012-11-01 16:19:01 +01:00
// CoreTiming event handler for sceKernelDelayThread-style wakeups.
void hleScheduledWakeup(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID)userdata;
	u32 error;
	// Only wake the thread if it's still in its delay wait - it may have
	// been released some other way in the meantime.
	if (__KernelGetWaitID(threadID, WAITTYPE_DELAY, error) == threadID)
		__KernelResumeThreadFromWait(threadID);
}
2013-01-05 23:24:05 +01:00
// Schedules a wakeup event for the thread usFromNow microseconds from now.
void __KernelScheduleWakeup(SceUID threadID, s64 usFromNow)
{
	CoreTiming::ScheduleEvent(usToCycles(usFromNow), eventScheduledWakeup, threadID);
}
2012-12-01 14:36:56 -08:00
// Cancels any pending scheduled wakeup for the thread.
void __KernelCancelWakeup(SceUID threadID)
{
	CoreTiming::UnscheduleEvent(eventScheduledWakeup, threadID);
}
2013-01-07 11:11:23 -08:00
// CoreTiming event handler: a sceKernelWaitThreadEnd timeout expired.
void hleThreadEndTimeout(u64 userdata, int cyclesLate)
{
	const SceUID threadID = (SceUID)userdata;

	u32 error;
	// Just in case it was woken on its own.
	if (__KernelGetWaitID(threadID, WAITTYPE_THREADEND, error) == 0)
		return;

	// Report 0 us remaining and wake with the timeout error.
	const u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (Memory::IsValidAddress(timeoutPtr))
		Memory::Write_U32(0, timeoutPtr);
	__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
2013-03-25 08:15:33 -07:00
// Schedules a timeout for a thread-end wait. Note: waitForID is currently
// unused (the event is keyed on the waiting thread only.)
void __KernelScheduleThreadEndTimeout(SceUID threadID, SceUID waitForID, s64 usFromNow)
{
	CoreTiming::ScheduleEvent(usToCycles(usFromNow), eventThreadEndTimeout, threadID);
}
// Cancels any pending thread-end wait timeout for the thread.
void __KernelCancelThreadEndTimeout(SceUID threadID)
{
	CoreTiming::UnscheduleEvent(eventThreadEndTimeout, threadID);
}
2013-01-27 16:55:43 -08:00
// Removes a thread from both the ready queue and the global thread list.
void __KernelRemoveFromThreadQueue(SceUID threadID)
{
	// Priority 0 indicates the thread isn't in any ready bucket.
	const int prio = __KernelGetThreadPrio(threadID);
	if (prio != 0)
		threadReadyQueue.remove(prio, threadID);

	threadqueue.erase(std::remove(threadqueue.begin(), threadqueue.end(), threadID), threadqueue.end());
}
2013-01-27 16:55:43 -08:00
// Fully deletes a thread: fires end listeners, unhooks it from the scheduler
// queues, wakes any WAITTYPE_THREADEND waiters with exitStatus, clears the
// current-thread / callback bookkeeping if it pointed here, then destroys
// the kernel object. Returns the Destroy result.
u32 __KernelDeleteThread ( SceUID threadID , int exitStatus , const char * reason , bool dontSwitch )
{
__KernelFireThreadEnd ( threadID ) ;
__KernelRemoveFromThreadQueue ( threadID ) ;
__KernelTriggerWait ( WAITTYPE_THREADEND , threadID , exitStatus , reason , dontSwitch ) ;
// If we're deleting the running thread, there is no current thread anymore.
if ( currentThread = = threadID )
2013-03-10 22:25:03 -07:00
{
2013-01-27 16:55:43 -08:00
currentThread = 0 ;
2013-03-10 22:25:03 -07:00
hleCurrentThreadName = NULL ;
}
2013-01-27 18:43:38 -08:00
// If this thread owned an in-progress callback, drop that state too.
if ( currentCallbackThreadID = = threadID )
{
currentCallbackThreadID = 0 ;
g_inCbCount = 0 ;
}
2013-01-27 16:55:43 -08:00
2013-01-27 17:01:17 -08:00
u32 error ;
Thread * t = kernelObjects . Get < Thread > ( threadID , error ) ;
if ( t )
{
// Keep the global ready-callback counter in sync with the callbacks
// that die along with this thread.
// TODO: Unless they should be run before deletion?
for ( int i = 0 ; i < THREAD_CALLBACK_NUM_TYPES ; i + + )
2013-02-24 10:38:30 -08:00
readyCallbacksCount - = ( int ) t - > readyCallbacks [ i ] . size ( ) ;
2013-01-27 17:01:17 -08:00
}
2013-01-27 16:55:43 -08:00
return kernelObjects . Destroy < Thread > ( threadID ) ;
}
2013-04-07 11:03:16 -07:00
// Returns NULL if the current thread is fine.
// Scheduler core: picks the next thread to run from the ready queue.
// If the current thread is still running, only a strictly better (higher)
// priority thread may preempt it; otherwise the best ready thread is taken.
2012-11-07 15:44:48 +01:00
Thread * __KernelNextThread ( ) {
2013-04-07 11:03:16 -07:00
SceUID bestThread ;
2013-02-10 08:22:23 -08:00
// If the current thread is running, it's a valid candidate.
2013-02-10 22:01:36 -08:00
Thread * cur = __GetCurrentThread ( ) ;
if ( cur & & cur - > isRunning ( ) )
2013-04-07 11:03:16 -07:00
{
bestThread = threadReadyQueue . pop_first_better ( cur - > nt . currentPriority ) ;
// Only requeue the current thread when we actually found a better one.
if ( bestThread ! = 0 )
__KernelChangeReadyState ( cur , currentThread , true ) ;
}
else
bestThread = threadReadyQueue . pop_first ( ) ;
2013-02-10 08:22:23 -08:00
2013-03-16 11:17:44 -07:00
// Assume threadReadyQueue has not become corrupt.
2013-04-07 11:03:16 -07:00
if ( bestThread ! = 0 )
2013-03-16 11:17:44 -07:00
return kernelObjects . GetFast < Thread > ( bestThread ) ;
2012-11-07 15:44:48 +01:00
else
return 0 ;
}
// Picks and switches to the next ready thread, unless we're inside an
// interrupt/callback or dispatch is disabled. NOTE: the `reason` writes in
// the early-return paths only modify the local parameter (debug leftover);
// they have no effect on callers.
void __KernelReSchedule ( const char * reason )
{
2012-12-08 19:13:33 -08:00
// cancel rescheduling when in interrupt or callback, otherwise everything will be fucked up
2013-03-24 23:30:32 -07:00
if ( __IsInInterrupt ( ) | | __KernelInCallback ( ) | | ! __KernelIsDispatchEnabled ( ) )
2012-12-08 19:13:33 -08:00
{
reason = " In Interrupt Or Callback " ;
return ;
}
2012-11-07 15:44:48 +01:00
2012-12-08 19:13:33 -08:00
// This may get us running a callback, don't reschedule out of it.
if ( __KernelCheckCallbacks ( ) )
{
reason = " Began interrupt or callback. " ;
return ;
}
// Execute any pending events while we're doing scheduling.
2013-02-22 00:38:22 -08:00
CoreTiming : : AdvanceQuick ( ) ;
2013-03-24 23:30:32 -07:00
// AdvanceQuick may have fired an interrupt/callback - re-check the guards.
if ( __IsInInterrupt ( ) | | __KernelInCallback ( ) | | ! __KernelIsDispatchEnabled ( ) )
2012-12-08 19:13:33 -08:00
{
reason = " In Interrupt Or Callback " ;
return ;
}
2012-11-07 15:44:48 +01:00
Thread * nextThread = __KernelNextThread ( ) ;
if ( nextThread )
__KernelSwitchContext ( nextThread , reason ) ;
2013-04-07 11:03:16 -07:00
// Otherwise, no need to switch.
2012-11-01 16:19:01 +01:00
}
2012-11-07 15:44:48 +01:00
void __KernelReSchedule ( bool doCallbacks , const char * reason )
{
2012-12-27 17:43:44 -08:00
Thread * thread = __GetCurrentThread ( ) ;
2012-11-07 15:44:48 +01:00
if ( doCallbacks )
{
if ( thread )
thread - > isProcessingCallbacks = doCallbacks ;
}
__KernelReSchedule ( reason ) ;
2012-12-27 17:43:44 -08:00
if ( doCallbacks & & thread ! = NULL & & thread - > GetUID ( ) = = currentThread ) {
2012-11-07 15:44:48 +01:00
if ( thread - > isRunning ( ) ) {
thread - > isProcessingCallbacks = false ;
}
}
}
2012-11-01 16:19:01 +01:00
//////////////////////////////////////////////////////////////////////////
// Thread Management
//////////////////////////////////////////////////////////////////////////
2013-03-23 14:41:13 +01:00
int sceKernelCheckThreadStack ( )
2012-11-01 16:19:01 +01:00
{
2012-12-23 11:16:32 +01:00
u32 error ;
Thread * t = kernelObjects . Get < Thread > ( __KernelGetCurThread ( ) , error ) ;
2013-01-02 20:56:30 +01:00
if ( t ) {
2013-03-23 14:41:13 +01:00
u32 diff = labs ( ( long ) ( ( s64 ) t - > stackEnd - ( s64 ) currentMIPS - > r [ MIPS_REG_SP ] ) ) ;
2013-01-02 20:56:30 +01:00
WARN_LOG ( HLE , " %i=sceKernelCheckThreadStack() " , diff ) ;
2013-03-23 14:41:13 +01:00
return diff ;
2013-01-02 20:56:30 +01:00
} else {
// WTF?
2013-04-09 23:16:23 -07:00
ERROR_LOG_REPORT ( HLE , " sceKernelCheckThreadStack() - not on thread " ) ;
2013-03-23 14:41:13 +01:00
return - 1 ;
2013-01-02 20:56:30 +01:00
}
2012-11-01 16:19:01 +01:00
}
void ThreadContext : : reset ( )
{
2012-12-23 11:16:32 +01:00
for ( int i = 0 ; i < 32 ; i + + )
{
r [ i ] = 0 ;
f [ i ] = 0.0f ;
}
for ( int i = 0 ; i < 128 ; i + + )
{
v [ i ] = 0.0f ;
}
for ( int i = 0 ; i < 15 ; i + + )
{
vfpuCtrl [ i ] = 0x00000000 ;
}
vfpuCtrl [ VFPU_CTRL_SPREFIX ] = 0xe4 ; // neutral
vfpuCtrl [ VFPU_CTRL_TPREFIX ] = 0xe4 ; // neutral
vfpuCtrl [ VFPU_CTRL_DPREFIX ] = 0x0 ; // neutral
vfpuCtrl [ VFPU_CTRL_CC ] = 0x3f ;
vfpuCtrl [ VFPU_CTRL_INF4 ] = 0 ;
vfpuCtrl [ VFPU_CTRL_RCX0 ] = 0x3f800001 ;
vfpuCtrl [ VFPU_CTRL_RCX1 ] = 0x3f800002 ;
vfpuCtrl [ VFPU_CTRL_RCX2 ] = 0x3f800004 ;
vfpuCtrl [ VFPU_CTRL_RCX3 ] = 0x3f800008 ;
vfpuCtrl [ VFPU_CTRL_RCX4 ] = 0x3f800000 ;
vfpuCtrl [ VFPU_CTRL_RCX5 ] = 0x3f800000 ;
vfpuCtrl [ VFPU_CTRL_RCX6 ] = 0x3f800000 ;
vfpuCtrl [ VFPU_CTRL_RCX7 ] = 0x3f800000 ;
fpcond = 0 ;
fcr0 = 0 ;
fcr31 = 0 ;
hi = 0 ;
lo = 0 ;
2012-11-01 16:19:01 +01:00
}
2012-12-06 23:03:09 -08:00
// Returns a thread to its "just created" state so it can be (re)started:
// fresh context at the entry point, no wait, no pending mips calls, and a
// freshly filled stack.
void __KernelResetThread(Thread *t)
{
	// CPU context back to defaults, PC at the entry point.
	t->context.reset();
	t->context.hi = 0;
	t->context.lo = 0;
	t->context.pc = t->nt.entrypoint;

	// TODO: Reset the priority?
	t->nt.waitType = WAITTYPE_NONE;
	t->nt.waitID = 0;
	memset(&t->waitInfo, 0, sizeof(t->waitInfo));
	t->nt.exitStatus = SCE_KERNEL_ERROR_NOT_DORMANT;

	// No callback/mips-call state carries over into the new run.
	t->isProcessingCallbacks = false;
	t->currentCallbackId = 0;
	t->currentMipscallId = 0;
	t->pendingMipsCalls.clear();

	t->context.r[MIPS_REG_RA] = threadReturnHackAddr; //hack! TODO fix
	// TODO: Not sure if it's reset here, but this makes sense.
	t->context.r[MIPS_REG_GP] = t->nt.gpreg;
	t->FillStack();
}
2012-11-17 14:20:04 +01:00
// Internal thread construction: registers a new Thread kernel object,
// initializes its native struct, and allocates its stack. The returned
// thread is DORMANT; callers must check stackBlock for allocation failure.
Thread *__KernelCreateThread(SceUID &id, SceUID moduleId, const char *name, u32 entryPoint, u32 priority, int stacksize, u32 attr)
{
	Thread *t = new Thread;
	id = kernelObjects.Create(t);

	threadqueue.push_back(id);
	threadReadyQueue.prepare(priority);

	// Poison first so uninitialized-field bugs are obvious.
	memset(&t->nt, 0xCD, sizeof(t->nt));

	t->nt.entrypoint = entryPoint;
	t->nt.nativeSize = sizeof(t->nt);
	t->nt.attr = attr;
	t->nt.initialPriority = t->nt.currentPriority = priority;
	t->nt.stackSize = stacksize;
	t->nt.status = THREADSTATUS_DORMANT;

	// Counters and wait state start cleared.
	t->nt.numInterruptPreempts = 0;
	t->nt.numReleases = 0;
	t->nt.numThreadPreempts = 0;
	t->nt.runForClocks.lo = 0;
	t->nt.runForClocks.hi = 0;
	t->nt.wakeupCount = 0;
	t->nt.initialStack = 0;
	t->nt.waitID = 0;
	t->nt.exitStatus = SCE_KERNEL_ERROR_DORMANT;
	t->nt.waitType = WAITTYPE_NONE;

	if (moduleId)
		t->nt.gpreg = __KernelGetModuleGP(moduleId);
	else
		t->nt.gpreg = 0;  // sceKernelStartThread will take care of this.
	t->moduleId = moduleId;

	strncpy(t->nt.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	t->nt.name[KERNELOBJECT_MAX_NAME_LENGTH] = '\0';

	t->AllocateStack(t->nt.stackSize);  // can change the stacksize!
	return t;
}
// Creates the "root" thread for a freshly loaded module, makes it current,
// and loads its context, passing (args, argp-copy) in a0/a1 on the stack.
void __KernelSetupRootThread(SceUID moduleID, int args, const char *argp, int prio, int stacksize, int attr)
{
	//grab mips regs
	SceUID id;
	Thread *thread = __KernelCreateThread(id, moduleID, "root", currentMIPS->pc, prio, stacksize, attr);
	if (thread->stackBlock == 0)
		ERROR_LOG_REPORT(HLE, "Unable to allocate stack for root thread.");
	__KernelResetThread(thread);

	// If anything was running (shouldn't normally be), requeue it as ready.
	Thread *prevThread = __GetCurrentThread();
	if (prevThread && prevThread->isRunning())
		__KernelChangeReadyState(currentThread, true);
	currentThread = id;
	hleCurrentThreadName = "root";
	thread->nt.status = THREADSTATUS_RUNNING; // do not schedule

	strcpy(thread->nt.name, "root");

	__KernelLoadContext(&thread->context, (attr & PSP_THREAD_ATTR_VFPU) != 0);

	// Copy the argument blob just below SP and point a0/a1 at it.
	mipsr4k.r[MIPS_REG_A0] = args;
	mipsr4k.r[MIPS_REG_SP] -= 256;
	u32 location = mipsr4k.r[MIPS_REG_SP];
	mipsr4k.r[MIPS_REG_A1] = location;
	for (int i = 0; i < args; i++)
		Memory::Write_U8(argp[i], location + i);
}
2013-01-01 17:04:06 -08:00
// Shared implementation behind sceKernelCreateThread: validates arguments,
// clamps bogus values (for game compatibility) and creates the Thread kernel
// object.  Returns the new thread's UID, or a negative SCE_KERNEL_ERROR_*.
int __KernelCreateThread(const char *threadName, SceUID moduleID, u32 entry, u32 prio, int stacksize, u32 attr, u32 optionAddr)
{
	if (threadName == NULL)
	{
		ERROR_LOG_REPORT(HLE, "SCE_KERNEL_ERROR_ERROR=sceKernelCreateThread(): NULL name");
		return SCE_KERNEL_ERROR_ERROR;
	}

	// TODO: PSP actually fails for many of these cases, but trying for compat.
	if (stacksize < 0x200 || stacksize >= 0x20000000)
	{
		WARN_LOG_REPORT(HLE, "sceKernelCreateThread(name=%s): bogus stack size %08x, using 0x4000", threadName, stacksize);
		stacksize = 0x4000;
	}
	if (prio < 0x08 || prio > 0x77)
	{
		WARN_LOG_REPORT(HLE, "sceKernelCreateThread(name=%s): bogus priority %08x", threadName, prio);
		// Clamp to the nearest end of the valid 0x08..0x77 range.
		prio = prio < 0x08 ? 0x08 : 0x77;
	}
	if (!Memory::IsValidAddress(entry))
	{
		ERROR_LOG_REPORT(HLE, "sceKernelCreateThread(name=%s): invalid entry %08x", threadName, entry);
		// The PSP firmware seems to allow NULL...?
		if (entry != 0)
			return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
	}

	// We're assuming all threads created are user threads.
	if ((attr & PSP_THREAD_ATTR_KERNEL) == 0)
		attr |= PSP_THREAD_ATTR_USER;

	SceUID id;
	Thread *newThread = __KernelCreateThread(id, moduleID, threadName, entry, prio, stacksize, attr);
	// A null stackBlock signals that stack allocation failed.
	if (newThread->stackBlock == 0)
	{
		ERROR_LOG_REPORT(HLE, "sceKernelCreateThread(name=%s): out of memory, %08x stack requested", threadName, stacksize);
		return SCE_KERNEL_ERROR_NO_MEMORY;
	}

	INFO_LOG(HLE, "%i=sceKernelCreateThread(name=%s, entry=%08x, prio=%x, stacksize=%i)", id, threadName, entry, prio, stacksize);
	if (optionAddr != 0)
		WARN_LOG_REPORT(HLE, "sceKernelCreateThread(name=%s): unsupported options parameter %08x", threadName, optionAddr);
	return id;
}
2013-01-01 17:04:06 -08:00
int sceKernelCreateThread ( const char * threadName , u32 entry , u32 prio , int stacksize , u32 attr , u32 optionAddr )
{
2013-04-10 21:16:31 -07:00
return __KernelCreateThread ( threadName , __KernelGetCurThreadModuleId ( ) , entry , prio , stacksize , attr , optionAddr ) ;
2013-01-01 17:04:06 -08:00
}
2012-11-01 16:19:01 +01:00
2012-11-18 16:26:43 -08:00
// int sceKernelStartThread(SceUID threadToStartID, SceSize argSize, void *argBlock)
2013-04-27 21:26:50 -07:00
// int sceKernelStartThread(SceUID threadToStartID, SceSize argSize, void *argBlock)
// Starts a dormant thread: resets it, copies the argument block onto its
// stack (16-byte aligned), and switches to it if its priority beats the
// current thread's.
int sceKernelStartThread(SceUID threadToStartID, int argSize, u32 argBlockPtr)
{
	u32 error = 0;
	if (threadToStartID == 0)
	{
		error = SCE_KERNEL_ERROR_ILLEGAL_THID;
		ERROR_LOG_REPORT(HLE, "%08x=sceKernelStartThread(thread=%i, argSize=%i, argPtr=%08x): NULL thread", error, threadToStartID, argSize, argBlockPtr);
		return error;
	}
	if (argSize < 0 || argBlockPtr & 0x80000000)
	{
		error = SCE_KERNEL_ERROR_ILLEGAL_ADDR;
		ERROR_LOG_REPORT(HLE, "%08x=sceKernelStartThread(thread=%i, argSize=%i, argPtr=%08x): bad argument pointer/length", error, threadToStartID, argSize, argBlockPtr);
		return error;
	}

	Thread *startThread = kernelObjects.Get<Thread>(threadToStartID, error);
	if (startThread == 0)
	{
		ERROR_LOG_REPORT(HLE, "%08x=sceKernelStartThread(thread=%i, argSize=%i, argPtr=%08x): thread does not exist!", error, threadToStartID, argSize, argBlockPtr);
		return error;
	}

	if (startThread->nt.status != THREADSTATUS_DORMANT)
	{
		error = SCE_KERNEL_ERROR_NOT_DORMANT;
		WARN_LOG_REPORT(HLE, "%08x=sceKernelStartThread(thread=%i, argSize=%i, argPtr=%08x): thread already running", error, threadToStartID, argSize, argBlockPtr);
		return error;
	}

	INFO_LOG(HLE, "sceKernelStartThread(thread=%i, argSize=%i, argPtr=%08x)", threadToStartID, argSize, argBlockPtr);

	__KernelResetThread(startThread);

	// sp is a reference: adjustments below modify the target thread's SP.
	u32 &sp = startThread->context.r[MIPS_REG_SP];
	if (argBlockPtr && argSize > 0)
	{
		// Make room for the arguments, always 0x10 aligned.
		sp -= (argSize + 0xf) & ~0xf;
		startThread->context.r[MIPS_REG_A0] = argSize;
		startThread->context.r[MIPS_REG_A1] = sp;
	}
	else
	{
		if (argSize > 0)
			WARN_LOG_REPORT(HLE, "%08x=sceKernelStartThread(thread=%i, argSize=%i, argPtr=%08x): NULL argument with size (should crash?)", error, threadToStartID, argSize, argBlockPtr);

		startThread->context.r[MIPS_REG_A0] = 0;
		startThread->context.r[MIPS_REG_A1] = 0;
	}

	// Now copy argument to stack.
	if (Memory::IsValidAddress(argBlockPtr))
		Memory::Memcpy(sp, Memory::GetPointer(argBlockPtr), argSize);

	// On the PSP, there's an extra 64 bytes of stack eaten after the args.
	// This could be stack overflow safety, or just stack eaten by the kernel entry func.
	sp -= 64;

	Thread *cur = __GetCurrentThread();
	// Smaller is better for priority.  Only switch if the new thread is better.
	if (cur && cur->nt.currentPriority > startThread->nt.currentPriority)
	{
		// Starting a thread automatically resumes the dispatch thread.
		// TODO: Maybe this happens even for worse-priority started threads?
		dispatchEnabled = true;

		if (cur && cur->isRunning())
			cur->nt.status &= ~THREADSTATUS_RUNNING;
		__KernelChangeReadyState(cur, currentThread, true);
		hleReSchedule("thread started");
	}
	else if (!dispatchEnabled)
		WARN_LOG_REPORT(HLE, "UNTESTED Dispatch disabled while starting worse-priority thread");

	__KernelChangeReadyState(startThread, threadToStartID, true);
	return 0;
}
void sceKernelGetThreadStackFreeSize ( )
{
2012-11-11 20:07:02 -08:00
SceUID threadID = PARAM ( 0 ) ;
Thread * thread ;
INFO_LOG ( HLE , " sceKernelGetThreadStackFreeSize(%i) " , threadID ) ;
if ( threadID = = 0 )
2012-12-27 17:43:44 -08:00
thread = __GetCurrentThread ( ) ;
2012-11-11 20:07:02 -08:00
else
{
u32 error ;
thread = kernelObjects . Get < Thread > ( threadID , error ) ;
if ( thread = = 0 )
{
ERROR_LOG ( HLE , " sceKernelGetThreadStackFreeSize: invalid thread id %i " , threadID ) ;
RETURN ( error ) ;
return ;
}
}
2012-11-01 16:19:01 +01:00
2012-12-23 11:16:32 +01:00
// Scan the stack for 0xFF
int sz = 0 ;
for ( u32 addr = thread - > stackBlock ; addr < thread - > stackBlock + thread - > nt . stackSize ; addr + + )
{
if ( Memory : : Read_U8 ( addr ) ! = 0xFF )
break ;
sz + + ;
}
2012-11-01 16:19:01 +01:00
2012-12-23 11:16:32 +01:00
RETURN ( sz & ~ 3 ) ;
2012-11-01 16:19:01 +01:00
}
2012-11-07 15:44:48 +01:00
// Internal function
// Called when a thread's entry function returns normally.  The exit status is
// taken from MIPS register 2 ($v0, the function's return value); the thread
// goes dormant and THREADEND waiters are released.
void __KernelReturnFromThread()
{
	int exitStatus = currentMIPS->r[2];
	Thread *thread = __GetCurrentThread();
	_dbg_assert_msg_(HLE, thread != NULL, "Returned from a NULL thread.");

	INFO_LOG(HLE, "__KernelReturnFromThread: %d", exitStatus);

	// TEMPORARY HACK: kill the stack of the root thread early:
	if (!strcmp(thread->GetName(), "root")) {
		thread->FreeStack();
	}

	thread->nt.exitStatus = exitStatus;
	// Pull the thread off the ready queue before marking it dormant.
	__KernelChangeReadyState(thread, currentThread, false);
	thread->nt.status = THREADSTATUS_DORMANT;
	__KernelFireThreadEnd(currentThread);

	__KernelTriggerWait(WAITTYPE_THREADEND, __KernelGetCurThread(), thread->nt.exitStatus, "thread returned", true);
	hleReSchedule("thread returned");

	// The stack will be deallocated when the thread is deleted.
}
2013-04-09 23:16:23 -07:00
// Explicit thread exit: records the exit status, marks the current thread
// dormant, and releases anyone waiting on THREADEND for it.
void sceKernelExitThread(int exitStatus)
{
	Thread *thread = __GetCurrentThread();
	_dbg_assert_msg_(HLE, thread != NULL, "Exited from a NULL thread.");

	INFO_LOG(HLE, "sceKernelExitThread(%d)", exitStatus);
	// Pull the thread off the ready queue before marking it dormant.
	__KernelChangeReadyState(thread, currentThread, false);
	thread->nt.status = THREADSTATUS_DORMANT;
	thread->nt.exitStatus = exitStatus;
	__KernelFireThreadEnd(currentThread);

	__KernelTriggerWait(WAITTYPE_THREADEND, __KernelGetCurThread(), thread->nt.exitStatus, "thread exited", true);
	hleReSchedule("thread exited");

	// The stack will be deallocated when the thread is deleted.
}
2013-04-09 23:16:23 -07:00
void _sceKernelExitThread ( int exitStatus )
2012-11-01 16:19:01 +01:00
{
2012-12-27 17:43:44 -08:00
Thread * thread = __GetCurrentThread ( ) ;
_dbg_assert_msg_ ( HLE , thread ! = NULL , " _Exited from a NULL thread. " ) ;
2012-11-20 00:18:11 -08:00
2013-04-09 23:16:23 -07:00
ERROR_LOG_REPORT ( HLE , " _sceKernelExitThread(%d): should not be called directly " , exitStatus ) ;
2012-12-27 17:43:44 -08:00
thread - > nt . status = THREADSTATUS_DORMANT ;
2013-04-09 23:16:23 -07:00
thread - > nt . exitStatus = exitStatus ;
__KernelFireThreadEnd ( currentThread ) ;
2012-11-20 00:18:11 -08:00
2013-01-26 10:59:17 -08:00
__KernelTriggerWait ( WAITTYPE_THREADEND , __KernelGetCurThread ( ) , thread - > nt . exitStatus , " thread _exited " , true ) ;
2013-01-07 10:31:19 -08:00
hleReSchedule ( " thread _exited " ) ;
2012-11-06 15:46:21 +01:00
// The stack will be deallocated when the thread is deleted.
2012-11-01 16:19:01 +01:00
}
2013-04-09 23:16:23 -07:00
// Exits the current thread and deletes its kernel object in one call.
void sceKernelExitDeleteThread(int exitStatus)
{
	Thread *thread = __GetCurrentThread();
	if (thread)
	{
		INFO_LOG(HLE, "sceKernelExitDeleteThread(%d)", exitStatus);
		__KernelChangeReadyState(thread, currentThread, false);
		thread->nt.status = THREADSTATUS_DORMANT;
		thread->nt.exitStatus = exitStatus;
		// Unlike sceKernelExitThread, the thread object is destroyed too.
		__KernelDeleteThread(currentThread, exitStatus, "thread exited with delete", true);

		hleReSchedule("thread exited with delete");
	}
	else
		ERROR_LOG_REPORT(HLE, "sceKernelExitDeleteThread(%d) ERROR - could not find myself!", exitStatus);
}
2012-11-01 16:19:01 +01:00
2012-11-06 19:22:14 +01:00
// Disables thread dispatch and returns the previous dispatch state so the
// caller can restore it later with sceKernelResumeDispatchThread().
u32 sceKernelSuspendDispatchThread()
{
	if (!__InterruptsEnabled())
		return SCE_KERNEL_ERROR_CPUDI;

	const u32 previousState = dispatchEnabled;
	dispatchEnabled = false;

	DEBUG_LOG(HLE, "%i=sceKernelSuspendDispatchThread()", previousState);
	return previousState;
}
2013-03-24 19:16:20 -07:00
// Restores the dispatch state saved by sceKernelSuspendDispatchThread().
// Always returns 0 on success (not the old state).
u32 sceKernelResumeDispatchThread(u32 enabled)
{
	if (!__InterruptsEnabled())
		return SCE_KERNEL_ERROR_CPUDI;

	const u32 previousState = dispatchEnabled;
	dispatchEnabled = enabled != 0;

	DEBUG_LOG(HLE, "sceKernelResumeDispatchThread(%i) - from %i", enabled, previousState);
	hleReSchedule("dispatch resumed");
	return 0;
}
2012-11-01 16:19:01 +01:00
2013-03-24 23:30:32 -07:00
// Dispatch can never be enabled when interrupts are disabled.
bool __KernelIsDispatchEnabled()
{
	if (!__InterruptsEnabled())
		return false;
	return dispatchEnabled;
}
2013-02-09 02:17:19 -08:00
// Rotates the ready queue of the given priority so other ready threads of
// that priority get a turn.  Priority 0 means "the caller's own priority".
int sceKernelRotateThreadReadyQueue(int priority)
{
	DEBUG_LOG(HLE, "sceKernelRotateThreadReadyQueue(%x)", priority);

	Thread *cur = __GetCurrentThread();
	// 0 is special, it means "my current priority."
	// NOTE(review): cur is dereferenced without a null check - confirm a
	// current thread is guaranteed to exist whenever this syscall runs.
	if (priority == 0)
		priority = cur->nt.currentPriority;

	if (priority <= 0x07 || priority > 0x77)
		return SCE_KERNEL_ERROR_ILLEGAL_PRIORITY;

	if (!threadReadyQueue.empty(priority))
	{
		// In other words, yield to everyone else.
		if (cur->nt.currentPriority == priority)
		{
			threadReadyQueue.push_back(priority, currentThread);
			cur->nt.status = THREADSTATUS_READY;
		}
		// Yield the next thread of this priority to all other threads of same priority.
		else
			threadReadyQueue.rotate(priority);

		hleReSchedule("rotatethreadreadyqueue");
	}
	return 0;
}
2012-12-09 00:29:10 -08:00
int sceKernelDeleteThread ( int threadHandle )
2012-11-01 16:19:01 +01:00
{
2012-12-27 17:43:44 -08:00
if ( threadHandle ! = currentThread )
2012-11-01 16:19:01 +01:00
{
DEBUG_LOG ( HLE , " sceKernelDeleteThread(%i) " , threadHandle ) ;
2012-12-23 11:16:32 +01:00
2012-12-09 00:29:10 -08:00
u32 error ;
Thread * t = kernelObjects . Get < Thread > ( threadHandle , error ) ;
if ( t )
{
2013-01-07 10:31:19 -08:00
// TODO: Should this reschedule ever? Probably no?
2013-01-27 16:55:43 -08:00
return __KernelDeleteThread ( threadHandle , SCE_KERNEL_ERROR_THREAD_TERMINATED , " thread deleted " , true ) ;
2012-12-09 00:29:10 -08:00
}
2012-11-01 16:19:01 +01:00
2012-12-09 16:57:52 -08:00
// TODO: Error when doesn't exist?
return 0 ;
2012-11-01 16:19:01 +01:00
}
else
{
2013-04-09 23:16:23 -07:00
ERROR_LOG_REPORT ( HLE , " Thread \" %s \" tries to delete itself! :( " , __GetCurrentThread ( ) ? __GetCurrentThread ( ) - > GetName ( ) : " NULL " ) ;
2012-12-09 00:29:10 -08:00
return - 1 ;
2012-11-01 16:19:01 +01:00
}
}
2012-12-09 00:29:10 -08:00
int sceKernelTerminateDeleteThread ( int threadno )
2012-11-01 16:19:01 +01:00
{
2012-12-27 17:43:44 -08:00
if ( threadno ! = currentThread )
2012-11-01 16:19:01 +01:00
{
2012-12-06 23:03:09 -08:00
INFO_LOG ( HLE , " sceKernelTerminateDeleteThread(%i) " , threadno ) ;
2012-11-01 16:19:01 +01:00
2013-01-15 19:46:52 -08:00
u32 error ;
Thread * t = kernelObjects . Get < Thread > ( threadno , error ) ;
if ( t )
{
//TODO: should we really reschedule here?
2013-01-27 16:55:43 -08:00
error = __KernelDeleteThread ( threadno , SCE_KERNEL_ERROR_THREAD_TERMINATED , " thread terminated with delete " , false ) ;
2013-01-26 10:59:17 -08:00
hleReSchedule ( " thread terminated with delete " ) ;
2013-01-15 19:46:52 -08:00
2013-01-27 16:55:43 -08:00
return error ;
2013-01-15 19:46:52 -08:00
}
// TODO: Error when doesn't exist?
return 0 ;
2012-11-01 16:19:01 +01:00
}
else
{
2013-04-09 23:16:23 -07:00
ERROR_LOG_REPORT ( HLE , " Thread \" %s \" trying to delete itself! :( " , __GetCurrentThread ( ) ? __GetCurrentThread ( ) - > GetName ( ) : " NULL " ) ;
2012-12-09 00:29:10 -08:00
return - 1 ;
2012-12-06 23:03:09 -08:00
}
}
2013-03-02 14:58:58 -08:00
// Forcibly terminates another (non-current) thread.  The target goes dormant
// with exit status SCE_KERNEL_ERROR_THREAD_TERMINATED, and anyone waiting on
// its end is released.
int sceKernelTerminateThread(SceUID threadID)
{
	if (threadID != currentThread)
	{
		INFO_LOG(HLE, "sceKernelTerminateThread(%i)", threadID);

		u32 error;
		Thread *t = kernelObjects.Get<Thread>(threadID, error);
		if (t)
		{
			t->nt.exitStatus = SCE_KERNEL_ERROR_THREAD_TERMINATED;
			// Remove from the ready queue before marking dormant.
			__KernelChangeReadyState(t, threadID, false);
			t->nt.status = THREADSTATUS_DORMANT;
			__KernelFireThreadEnd(threadID);
			// TODO: Should this really reschedule?
			__KernelTriggerWait(WAITTYPE_THREADEND, threadID, t->nt.exitStatus, "thread terminated", true);
		}
		// TODO: Return an error if it doesn't exist?
		return 0;
	}
	else
	{
		// Fix: message used to say "delete" - this is terminate, not delete.
		ERROR_LOG_REPORT(HLE, "Thread \"%s\" trying to terminate itself! :(", __GetCurrentThread() ? __GetCurrentThread()->GetName() : "NULL");
		return -1;
	}
}
// Returns the UID of the currently executing thread.
SceUID __KernelGetCurThread()
{
	return currentThread;
}
2012-11-17 14:20:04 +01:00
// Returns the module UID that owns the current thread, or 0 if there is no
// current thread.
SceUID __KernelGetCurThreadModuleId()
{
	Thread *current = __GetCurrentThread();
	return current ? current->moduleId : 0;
}
2013-03-23 14:26:54 +01:00
// Returns the stack end address of the current thread, or 0 if there is no
// current thread.
u32 __KernelGetCurThreadStack()
{
	Thread *current = __GetCurrentThread();
	return current ? current->stackEnd : 0;
}
2012-11-17 14:20:04 +01:00
2013-04-09 23:16:23 -07:00
// Returns the UID of the calling thread.
SceUID sceKernelGetThreadId()
{
	const SceUID result = currentThread;
	VERBOSE_LOG(HLE, "%i = sceKernelGetThreadId()", result);
	return result;
}
2012-11-06 16:20:13 +01:00
void sceKernelGetThreadCurrentPriority ( )
{
u32 retVal = __GetCurrentThread ( ) - > nt . currentPriority ;
DEBUG_LOG ( HLE , " %i = sceKernelGetThreadCurrentPriority() " , retVal ) ;
RETURN ( retVal ) ;
}
2012-11-01 16:19:01 +01:00
void sceKernelChangeCurrentThreadAttr ( )
{
int clearAttr = PARAM ( 0 ) ;
int setAttr = PARAM ( 1 ) ;
DEBUG_LOG ( HLE , " 0 = sceKernelChangeCurrentThreadAttr(clear = %08x, set = %08x " , clearAttr , setAttr ) ;
2012-12-27 17:43:44 -08:00
Thread * t = __GetCurrentThread ( ) ;
if ( t )
t - > nt . attr = ( t - > nt . attr & ~ clearAttr ) | setAttr ;
else
ERROR_LOG ( HLE , " %s(): No current thread? " , __FUNCTION__ ) ;
2012-11-01 16:19:01 +01:00
RETURN ( 0 ) ;
}
// Changes a thread's current priority (id 0 = the calling thread).  The
// thread must be moved between ready-queue priority buckets to keep the
// scheduler's queues consistent with nt.currentPriority.
void sceKernelChangeThreadPriority()
{
	int id = PARAM(0);
	if (id == 0) id = currentThread; //special

	u32 error;
	Thread *thread = kernelObjects.Get<Thread>(id, error);
	if (thread)
	{
		DEBUG_LOG(HLE, "sceKernelChangeThreadPriority(%i, %i)", id, PARAM(1));

		// Remove from the old priority's bucket before changing it...
		int prio = thread->nt.currentPriority;
		threadReadyQueue.remove(prio, id);

		thread->nt.currentPriority = PARAM(1);
		threadReadyQueue.prepare(thread->nt.currentPriority);
		// ...and re-queue under the new priority if the thread was ready.
		if (thread->isReady())
			threadReadyQueue.push_back(thread->nt.currentPriority, id);

		RETURN(0);
	}
	else
	{
		ERROR_LOG(HLE, "%08x=sceKernelChangeThreadPriority(%i, %i) failed - no such thread", error, id, PARAM(1));
		RETURN(error);
	}
}
2013-03-06 07:40:34 -08:00
// Delays the current thread for usec microseconds (min 200), allowing
// callbacks to run during the wait.
int sceKernelDelayThreadCB(u32 usec)
{
	if (usec < 200)
		usec = 200;
	DEBUG_LOG(HLE, "sceKernelDelayThreadCB(%i usec)", usec);

	const SceUID threadID = __KernelGetCurThread();
	__KernelScheduleWakeup(threadID, usec);
	__KernelWaitCurThread(WAITTYPE_DELAY, threadID, 0, 0, true, "thread delayed");
	return 0;
}
2013-03-06 07:40:34 -08:00
// Delays the current thread for usec microseconds (min 200), without
// processing callbacks.
int sceKernelDelayThread(u32 usec)
{
	if (usec < 200)
		usec = 200;
	DEBUG_LOG(HLE, "sceKernelDelayThread(%i usec)", usec);

	const SceUID threadID = __KernelGetCurThread();
	__KernelScheduleWakeup(threadID, usec);
	__KernelWaitCurThread(WAITTYPE_DELAY, threadID, 0, 0, false, "thread delayed");
	return 0;
}
void sceKernelDelaySysClockThreadCB ( )
{
u32 sysclockAddr = PARAM ( 0 ) ;
if ( ! Memory : : IsValidAddress ( sysclockAddr ) ) {
ERROR_LOG ( HLE , " sceKernelDelaySysClockThread(%08x) - bad pointer " , sysclockAddr ) ;
RETURN ( - 1 ) ;
return ;
}
SceKernelSysClock sysclock ;
Memory : : ReadStruct ( sysclockAddr , & sysclock ) ;
// TODO: Which unit?
u64 usec = sysclock . lo | ( ( u64 ) sysclock . hi < < 32 ) ;
2013-01-05 23:54:55 +01:00
if ( usec < 200 ) usec = 200 ;
DEBUG_LOG ( HLE , " sceKernelDelaySysClockThread(%08x (%llu)) " , sysclockAddr , usec ) ;
2013-01-05 23:24:05 +01:00
SceUID curThread = __KernelGetCurThread ( ) ;
__KernelScheduleWakeup ( curThread , usec ) ;
2013-01-26 10:44:04 -08:00
__KernelWaitCurThread ( WAITTYPE_DELAY , curThread , 0 , 0 , true , " thread delayed " ) ;
2013-01-05 23:24:05 +01:00
}
void sceKernelDelaySysClockThread ( )
{
u32 sysclockAddr = PARAM ( 0 ) ;
if ( ! Memory : : IsValidAddress ( sysclockAddr ) ) {
ERROR_LOG ( HLE , " sceKernelDelaySysClockThread(%08x) - bad pointer " , sysclockAddr ) ;
RETURN ( - 1 ) ;
return ;
}
SceKernelSysClock sysclock ;
Memory : : ReadStruct ( sysclockAddr , & sysclock ) ;
// TODO: Which unit?
u64 usec = sysclock . lo | ( ( u64 ) sysclock . hi < < 32 ) ;
2013-01-05 23:54:55 +01:00
if ( usec < 200 ) usec = 200 ;
DEBUG_LOG ( HLE , " sceKernelDelaySysClockThread(%08x (%llu)) " , sysclockAddr , usec ) ;
2013-01-05 23:24:05 +01:00
2012-11-01 16:19:01 +01:00
SceUID curThread = __KernelGetCurThread ( ) ;
__KernelScheduleWakeup ( curThread , usec ) ;
2013-01-26 10:44:04 -08:00
__KernelWaitCurThread ( WAITTYPE_DELAY , curThread , 0 , 0 , false , " thread delayed " ) ;
2012-11-01 16:19:01 +01:00
}
2012-11-08 14:24:51 +01:00
// Returns a thread's current priority, or 0 if the UID is not a thread.
u32 __KernelGetThreadPrio(SceUID id)
{
	u32 error;
	Thread *thread = kernelObjects.Get<Thread>(id, error);
	return thread ? thread->nt.currentPriority : 0;
}
2013-01-17 00:45:13 -08:00
// Comparator for sorting thread UIDs by priority (smaller value = better).
bool __KernelThreadSortPriority(SceUID thread1, SceUID thread2)
{
	const u32 prio1 = __KernelGetThreadPrio(thread1);
	const u32 prio2 = __KernelGetThreadPrio(thread2);
	return prio1 < prio2;
}
2012-11-01 16:19:01 +01:00
//////////////////////////////////////////////////////////////////////////
// WAIT/SLEEP ETC
//////////////////////////////////////////////////////////////////////////
// Wakes a sleeping thread.  If the target is not currently in a SLEEP wait,
// the wakeup is banked in wakeupCount and consumed by a later sleep call.
void sceKernelWakeupThread()
{
	SceUID uid = PARAM(0);
	u32 error;
	Thread *t = kernelObjects.Get<Thread>(uid, error);
	if (t)
	{
		if (!t->isWaitingFor(WAITTYPE_SLEEP, 1)) {
			// Not sleeping: bank the wakeup for later.
			t->nt.wakeupCount++;
			DEBUG_LOG(HLE, "sceKernelWakeupThread(%i) - wakeupCount incremented to %i", uid, t->nt.wakeupCount);
			RETURN(0);
		} else {
			VERBOSE_LOG(HLE, "sceKernelWakeupThread(%i) - woke thread at %i", uid, t->nt.wakeupCount);
			__KernelResumeThreadFromWait(uid);
			hleReSchedule("thread woken up");
		}
	}
	else {
		ERROR_LOG(HLE, "sceKernelWakeupThread(%i) - bad thread id", uid);
		RETURN(error);
	}
}
void sceKernelCancelWakeupThread ( )
{
SceUID uid = PARAM ( 0 ) ;
u32 error ;
2012-11-13 20:07:28 +01:00
if ( uid = = 0 ) uid = __KernelGetCurThread ( ) ;
2012-11-12 00:04:57 +01:00
Thread * t = kernelObjects . Get < Thread > ( uid , error ) ;
if ( t )
{
int wCount = t - > nt . wakeupCount ;
t - > nt . wakeupCount = 0 ;
DEBUG_LOG ( HLE , " sceKernelCancelWakeupThread(%i) - wakeupCount reset from %i " , uid , wCount ) ;
RETURN ( wCount ) ;
2012-12-23 11:16:32 +01:00
}
2012-11-12 00:04:57 +01:00
else {
2012-11-18 23:35:02 +01:00
ERROR_LOG ( HLE , " sceKernelCancelWakeupThread(%i) - bad thread id " , uid ) ;
2012-11-10 10:15:11 +01:00
RETURN ( error ) ;
2012-11-01 16:19:01 +01:00
}
}
2012-11-07 15:44:48 +01:00
// Shared sleep implementation.  If a wakeup has been banked (wakeupCount > 0)
// consume it and return immediately; otherwise block on a SLEEP wait.
static void __KernelSleepThread(bool doCallbacks) {
	Thread *thread = __GetCurrentThread();
	if (!thread)
	{
		ERROR_LOG(HLE, "sceKernelSleepThread*(): bad current thread");
		return;
	}

	if (thread->nt.wakeupCount > 0) {
		thread->nt.wakeupCount--;
		DEBUG_LOG(HLE, "sceKernelSleepThread() - wakeupCount decremented to %i", thread->nt.wakeupCount);
		RETURN(0);
	} else {
		VERBOSE_LOG(HLE, "sceKernelSleepThread()");
		// Set the syscall return value *before* blocking - the wait switches
		// contexts, so RETURN must happen while this thread is still current.
		RETURN(0);
		__KernelWaitCurThread(WAITTYPE_SLEEP, 1, 0, 0, doCallbacks, "thread slept");
	}
}
2012-11-07 15:44:48 +01:00
// Sleep without processing callbacks during the wait.
void sceKernelSleepThread()
{
	__KernelSleepThread(false);
}
2012-11-01 16:19:01 +01:00
//the homebrew PollCallbacks
// Sleep with callback processing enabled; also checks pending callbacks
// right away.
void sceKernelSleepThreadCB()
{
	VERBOSE_LOG(HLE, "sceKernelSleepThreadCB()");
	__KernelSleepThread(true);
	__KernelCheckCallbacks();
}
2013-01-07 10:02:11 -08:00
// Blocks the current thread until the target thread becomes dormant.
// timeoutPtr, when valid, points to a timeout value read via Read_U32.
// Returns the target's exit status, or an error code.
int sceKernelWaitThreadEnd(SceUID threadID, u32 timeoutPtr)
{
	DEBUG_LOG(HLE, "sceKernelWaitThreadEnd(%i, %08x)", threadID, timeoutPtr);
	// Can't wait on thread 0 or on yourself.
	if (threadID == 0 || threadID == currentThread)
		return SCE_KERNEL_ERROR_ILLEGAL_THID;

	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (t)
	{
		if (t->nt.status != THREADSTATUS_DORMANT)
		{
			if (Memory::IsValidAddress(timeoutPtr))
				__KernelScheduleThreadEndTimeout(currentThread, threadID, Memory::Read_U32(timeoutPtr));
			__KernelWaitCurThread(WAITTYPE_THREADEND, threadID, 0, timeoutPtr, false, "thread wait end");
		}

		return t->nt.exitStatus;
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelWaitThreadEnd - bad thread %i", threadID);
		return error;
	}
}
2013-01-07 10:02:11 -08:00
// Same as sceKernelWaitThreadEnd, but processes callbacks while waiting
// (and checks current callbacks immediately).
int sceKernelWaitThreadEndCB(SceUID threadID, u32 timeoutPtr)
{
	DEBUG_LOG(HLE, "sceKernelWaitThreadEndCB(%i, 0x%X)", threadID, timeoutPtr);
	// Can't wait on thread 0 or on yourself.
	if (threadID == 0 || threadID == currentThread)
		return SCE_KERNEL_ERROR_ILLEGAL_THID;

	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (t)
	{
		hleCheckCurrentCallbacks();
		if (t->nt.status != THREADSTATUS_DORMANT)
		{
			if (Memory::IsValidAddress(timeoutPtr))
				__KernelScheduleThreadEndTimeout(currentThread, threadID, Memory::Read_U32(timeoutPtr));
			__KernelWaitCurThread(WAITTYPE_THREADEND, threadID, 0, timeoutPtr, true, "thread wait end");
		}

		return t->nt.exitStatus;
	}
	else
	{
		ERROR_LOG(HLE, "sceKernelWaitThreadEndCB - bad thread %i", threadID);
		return error;
	}
}
// Forces a waiting thread out of its wait with SCE_KERNEL_ERROR_RELEASE_WAIT.
int sceKernelReleaseWaitThread(SceUID threadID)
{
	DEBUG_LOG(HLE, "sceKernelReleaseWaitThread(%i)", threadID);
	if (__KernelInCallback())
		WARN_LOG_REPORT(HLE, "UNTESTED sceKernelReleaseWaitThread() might not do the right thing in a callback");

	if (threadID == 0 || threadID == currentThread)
		return SCE_KERNEL_ERROR_ILLEGAL_THID;

	u32 error;
	Thread *target = kernelObjects.Get<Thread>(threadID, error);
	if (target == NULL)
	{
		ERROR_LOG(HLE, "sceKernelReleaseWaitThread - bad thread %i", threadID);
		return error;
	}

	if (!target->isWaiting())
		return SCE_KERNEL_ERROR_NOT_WAIT;

	__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_RELEASE_WAIT);
	hleReSchedule("thread released from wait");
	return 0;
}
// UNIMPL stub: logs and reports success without suspending anything.
void sceKernelSuspendThread()
{
	WARN_LOG_REPORT(HLE, "UNIMPL sceKernelSuspendThread");
	RETURN(0);
}
// UNIMPL stub: logs and reports success without resuming anything.
void sceKernelResumeThread()
{
	WARN_LOG_REPORT(HLE, "UNIMPL sceKernelResumeThread");
	RETURN(0);
}
2012-11-06 15:46:21 +01:00
2012-11-07 15:44:48 +01:00
2012-11-06 15:46:21 +01:00
//////////////////////////////////////////////////////////////////////////
// CALLBACKS
//////////////////////////////////////////////////////////////////////////
// Internal API
// Internal API: allocates a Callback kernel object bound to the current
// thread and returns its UID.
u32 __KernelCreateCallback(const char *name, u32 entrypoint, u32 commonArg)
{
	Callback *cb = new Callback;
	SceUID id = kernelObjects.Create(cb);

	cb->nc.size = sizeof(NativeCallback);
	strncpy(cb->nc.name, name, 32);
	// Fix: strncpy() leaves the buffer unterminated when name is 32+ chars.
	cb->nc.name[31] = '\0';

	cb->nc.entrypoint = entrypoint;
	cb->nc.threadId = __KernelGetCurThread();
	cb->nc.commonArgument = commonArg;
	cb->nc.notifyCount = 0;
	cb->nc.notifyArg = 0;

	return id;
}
2013-04-14 23:45:46 -07:00
// HLE entry point: create a callback object for the current thread.
SceUID sceKernelCreateCallback(const char *name, u32 entrypoint, u32 signalArg)
{
	const SceUID newId = __KernelCreateCallback(name, entrypoint, signalArg);
	DEBUG_LOG(HLE, "%i=sceKernelCreateCallback(name=%s, entry=%08x, callbackArg=%08x)", newId, name, entrypoint, signalArg);
	return newId;
}
2013-04-14 23:45:46 -07:00
// Destroys a callback object by UID.
int sceKernelDeleteCallback(SceUID cbId)
{
	DEBUG_LOG(HLE, "sceKernelDeleteCallback(%i)", cbId);

	// TODO: Make sure it's gone from all threads first!
	const int result = kernelObjects.Destroy<Callback>(cbId);
	return result;
}
2013-04-14 23:45:46 -07:00
// Generally very rarely used, but Numblast uses it like candy.
// Manually fires a callback with the given notify argument.
// Generally very rarely used, but Numblast uses it like candy.
int sceKernelNotifyCallback(SceUID cbId, int notifyArg)
{
	DEBUG_LOG(HLE, "sceKernelNotifyCallback(%i, %i)", cbId, notifyArg);
	u32 error;
	Callback *cb = kernelObjects.Get<Callback>(cbId, error);
	if (cb) {
		// TODO: Should this notify other existing callbacks too?
		__KernelNotifyCallback(THREAD_CALLBACK_USER_DEFINED, cbId, notifyArg);
		return 0;
	} else {
		// Fix: this log previously named the wrong function (sceKernelCancelCallback).
		ERROR_LOG(HLE, "sceKernelNotifyCallback(%i) - bad cbId", cbId);
		return error;
	}
}
2013-04-14 23:45:46 -07:00
// HLE entry: cancels a pending callback notification.
int sceKernelCancelCallback(SceUID cbId)
{
	DEBUG_LOG(HLE, "sceKernelCancelCallback(%i)", cbId);
	u32 error;
	Callback *cb = kernelObjects.Get<Callback>(cbId, error);
	if (cb) {
		// This just resets the notify count.
		// NOTE(review): the comment above says "notify count" but the code
		// clears notifyArg and leaves nc.notifyCount untouched — confirm
		// against real PSP behavior before changing either.
		cb->nc.notifyArg = 0;
		return 0;
	} else {
		ERROR_LOG(HLE, "sceKernelCancelCallback(%i) - bad cbId", cbId);
		return error;
	}
}
2013-04-14 23:45:46 -07:00
// HLE entry: returns the pending notify count of a callback,
// or an error code for an invalid UID.
int sceKernelGetCallbackCount(SceUID cbId)
{
	u32 error;
	Callback *cb = kernelObjects.Get<Callback>(cbId, error);
	if (!cb) {
		ERROR_LOG(HLE, "sceKernelGetCallbackCount(%i) - bad cbId", cbId);
		return error;
	}
	return cb->nc.notifyCount;
}
2013-04-14 23:45:46 -07:00
// HLE entry: copies the callback's NativeCallback struct to guest memory.
int sceKernelReferCallbackStatus(SceUID cbId, u32 statusAddr)
{
	u32 error;
	Callback *c = kernelObjects.Get<Callback>(cbId, error);
	if (!c) {
		ERROR_LOG(HLE, "sceKernelReferCallbackStatus(%i, %08x) - bad cbId", cbId, statusAddr);
		return error;
	}

	DEBUG_LOG(HLE, "sceKernelReferCallbackStatus(%i, %08x)", cbId, statusAddr);
	// TODO: Maybe check size parameter?
	if (Memory::IsValidAddress(statusAddr)) {
		Memory::WriteStruct(statusAddr, &c->nc);
	} // else TODO
	return 0;
}
2013-04-19 22:22:35 +08:00
// HLE entry, partial implementation: runs the entry point via a direct mips
// call but does not actually switch to a newly allocated stack.
u32 sceKernelExtendThreadStack(u32 size, u32 entryAddr, u32 entryParameter)
{
	ERROR_LOG_REPORT(HLE, "sceKernelExtendThreadStack(%08x, %08x, %08x) - Not fully supported", size, entryAddr, entryParameter);

	u32 callArgs[1] = {entryParameter};
	__KernelDirectMipsCall(entryAddr, 0, callArgs, 1, false);
	return 0;
}
2013-01-06 10:54:33 -08:00
void ActionAfterMipsCall : : run ( MipsCall & call ) {
2012-12-27 19:30:36 -08:00
u32 error ;
Thread * thread = kernelObjects . Get < Thread > ( threadID , error ) ;
if ( thread ) {
2013-02-03 12:26:09 -08:00
__KernelChangeReadyState ( thread , threadID , ( status & THREADSTATUS_READY ) ! = 0 ) ;
2012-12-27 19:30:36 -08:00
thread - > nt . status = status ;
thread - > nt . waitType = waitType ;
thread - > nt . waitID = waitID ;
thread - > waitInfo = waitInfo ;
thread - > isProcessingCallbacks = isProcessingCallbacks ;
2013-03-27 00:51:46 -07:00
thread - > currentCallbackId = currentCallbackId ;
2012-12-27 19:30:36 -08:00
}
2012-11-07 15:44:48 +01:00
if ( chainedAction ) {
2013-01-06 10:54:33 -08:00
chainedAction - > run ( call ) ;
2012-11-07 15:44:48 +01:00
delete chainedAction ;
2012-11-06 15:46:21 +01:00
}
2012-11-07 15:44:48 +01:00
}
2012-11-06 15:46:21 +01:00
2012-12-09 16:56:16 -08:00
// If this thread is the current thread and is currently inside a callback,
// returns the ActionAfterMipsCall that holds its saved (pre-callback) state.
// Returns NULL when not in a callback, or on lookup failure.
ActionAfterMipsCall *Thread::getRunningCallbackAction()
{
	if (this->GetUID() == currentThread && g_inCbCount > 0)
	{
		MipsCall *call = mipsCalls.get(this->currentMipscallId);
		ActionAfterMipsCall *action = 0;
		if (call)
			// The saved state lives in the call's after-action; other action
			// types mean we can't recover it.
			action = dynamic_cast<ActionAfterMipsCall *>(call->doAfter);
		if (!call || !action)
		{
			ERROR_LOG(HLE, "Failed to access deferred info for thread: %s", this->nt.name);
			return NULL;
		}
		return action;
	}
	return NULL;
}
2012-11-11 22:38:19 +01:00
void Thread : : setReturnValue ( u32 retval )
{
2012-12-27 17:43:44 -08:00
if ( this - > GetUID ( ) = = currentThread ) {
2012-11-11 22:38:19 +01:00
if ( g_inCbCount ) {
2013-03-27 00:51:46 -07:00
u32 callId = this - > currentMipscallId ;
2012-11-11 22:38:19 +01:00
MipsCall * call = mipsCalls . get ( callId ) ;
if ( call ) {
2013-01-06 15:53:44 -08:00
call - > setReturnValue ( retval ) ;
2012-11-11 22:38:19 +01:00
} else {
ERROR_LOG ( HLE , " Failed to inject return value %08x in thread " , retval ) ;
}
} else {
currentMIPS - > r [ 2 ] = retval ;
}
} else {
context . r [ 2 ] = retval ;
}
}
2013-03-10 10:59:59 -07:00
void Thread : : setReturnValue ( u64 retval )
{
if ( this - > GetUID ( ) = = currentThread ) {
if ( g_inCbCount ) {
2013-03-27 00:51:46 -07:00
u32 callId = this - > currentMipscallId ;
2013-03-10 10:59:59 -07:00
MipsCall * call = mipsCalls . get ( callId ) ;
if ( call ) {
call - > setReturnValue ( retval ) ;
} else {
ERROR_LOG ( HLE , " Failed to inject return value %08llx in thread " , retval ) ;
}
} else {
currentMIPS - > r [ 2 ] = retval & 0xFFFFFFFF ;
currentMIPS - > r [ 3 ] = ( retval > > 32 ) & 0xFFFFFFFF ;
}
} else {
context . r [ 2 ] = retval & 0xFFFFFFFF ;
context . r [ 3 ] = ( retval > > 32 ) & 0xFFFFFFFF ;
}
}
2012-12-09 16:56:16 -08:00
// Clears this thread's wait state and makes it ready.  If the thread is
// currently inside a callback, its real state is parked in an
// ActionAfterMipsCall, so the saved copy is updated instead.
void Thread::resumeFromWait()
{
	// Do we need to "inject" it?
	ActionAfterMipsCall *action = getRunningCallbackAction();
	if (action)
	{
		action->status &= ~THREADSTATUS_WAIT;
		// TODO: What if DORMANT or DEAD?
		if (!(action->status & THREADSTATUS_WAITSUSPEND))
			action->status = THREADSTATUS_READY;

		// Non-waiting threads do not process callbacks.
		action->isProcessingCallbacks = false;
	}
	else
	{
		this->nt.status &= ~THREADSTATUS_WAIT;
		// TODO: What if DORMANT or DEAD?
		if (!(this->nt.status & THREADSTATUS_WAITSUSPEND))
			// Also sets nt.status to READY via the ready-queue bookkeeping.
			__KernelChangeReadyState(this, this->GetUID(), true);

		// Non-waiting threads do not process callbacks.
		this->isProcessingCallbacks = false;
	}
}
bool Thread : : isWaitingFor ( WaitType type , int id )
{
// Thread might be in a callback right now.
ActionAfterMipsCall * action = getRunningCallbackAction ( ) ;
if ( action )
{
if ( action - > status & THREADSTATUS_WAIT )
return action - > waitType = = type & & action - > waitID = = id ;
return false ;
}
if ( this - > nt . status & THREADSTATUS_WAIT )
return this - > nt . waitType = = type & & this - > nt . waitID = = id ;
return false ;
}
int Thread : : getWaitID ( WaitType type )
{
// Thread might be in a callback right now.
ActionAfterMipsCall * action = getRunningCallbackAction ( ) ;
if ( action )
{
if ( action - > waitType = = type )
return action - > waitID ;
return 0 ;
}
if ( this - > nt . waitType = = type )
return this - > nt . waitID ;
return 0 ;
}
// Returns the active wait info, preferring the state saved across a callback.
ThreadWaitInfo Thread::getWaitInfo()
{
	// Thread might be in a callback right now.
	ActionAfterMipsCall *action = getRunningCallbackAction();
	return action ? action->waitInfo : this->waitInfo;
}
2012-11-07 15:44:48 +01:00
// Switches execution from the current thread (if any) to target.  target may
// be NULL when nothing is runnable (e.g. the current thread was just deleted);
// previously that case dereferenced target unconditionally and crashed.
void __KernelSwitchContext(Thread *target, const char *reason)
{
	u32 oldPC = 0;
	SceUID oldUID = 0;
	const char *oldName = "(none)";

	Thread *cur = __GetCurrentThread();
	if (cur)  // It might just have been deleted.
	{
		__KernelSaveContext(&cur->context, (cur->nt.attr & PSP_THREAD_ATTR_VFPU) != 0);
		oldPC = currentMIPS->pc;
		oldUID = cur->GetUID();

		// Profile on Windows shows this takes time, skip it.
		if (DEBUG_LEVEL <= MAX_LOGLEVEL)
			oldName = cur->GetName();

		// Normally this is taken care of in __KernelNextThread().
		if (cur->isRunning())
			__KernelChangeReadyState(cur, oldUID, true);
	}

	if (target)
	{
		currentThread = target->GetUID();
		hleCurrentThreadName = target->nt.name;
		__KernelChangeReadyState(target, currentThread, false);
		target->nt.status = (target->nt.status | THREADSTATUS_RUNNING) & ~THREADSTATUS_READY;

		// Only load a context when there's actually a thread to switch to.
		__KernelLoadContext(&target->context, (target->nt.attr & PSP_THREAD_ATTR_VFPU) != 0);
	}
	else
	{
		currentThread = 0;
		hleCurrentThreadName = NULL;
	}

	bool fromIdle = oldUID == threadIdleID[0] || oldUID == threadIdleID[1];
	bool toIdle = currentThread == threadIdleID[0] || currentThread == threadIdleID[1];
	if (!(fromIdle && toIdle))
	{
		DEBUG_LOG(HLE, "Context switched: %s -> %s (%s) (%i - pc: %08x -> %i - pc: %08x)",
			oldName, hleCurrentThreadName,
			reason,
			oldUID, oldPC, currentThread, currentMIPS->pc);
	}

	if (target)
	{
		// No longer waiting.
		target->nt.waitType = WAITTYPE_NONE;
		target->nt.waitID = 0;

		__KernelExecutePendingMipsCalls(target, true);
	}
}
// Moves a thread to a new scheduler state, keeping the ready queue in sync.
// Refuses to stop the current thread while dispatch is disabled.
void __KernelChangeThreadState(Thread *thread, ThreadStatus newStatus) {
	if (!thread || thread->nt.status == newStatus)
		return;

	if (!dispatchEnabled && thread == __GetCurrentThread() && newStatus != THREADSTATUS_RUNNING) {
		ERROR_LOG(HLE, "Dispatching suspended, not changing thread state");
		return;
	}

	// TODO: JPSCP has many conditions here, like removing wait timeout actions etc.
	// if (thread->nt.status == THREADSTATUS_WAIT && newStatus != THREADSTATUS_WAITSUSPEND) {

	// Add to / remove from the ready queue to match the new status.
	__KernelChangeReadyState(thread, thread->GetUID(), (newStatus & THREADSTATUS_READY) != 0);
	thread->nt.status = newStatus;

	if (newStatus == THREADSTATUS_WAIT) {
		if (thread->nt.waitType == WAITTYPE_NONE) {
			// Callers must set a wait type before waiting a thread.
			ERROR_LOG(HLE, "Waittype none not allowed here");
		}

		// Schedule deletion of stopped threads here.  if (thread->isStopped())
	}
}
// Callbacks may only start when we're not already inside one.
// (The thread parameter is currently unused.)
bool __CanExecuteCallbackNow(Thread *thread) {
	if (g_inCbCount != 0)
		return false;
	return true;
}
2013-02-02 18:03:55 -08:00
// Queues (or, when possible, immediately runs) a mips call to entryPoint on
// the given thread, saving the thread's wait/scheduling state so it can be
// restored when the call returns.  cbId != 0 marks a callback invocation.
void __KernelCallAddress(Thread *thread, u32 entryPoint, Action *afterAction, const u32 args[], int numargs, bool reschedAfter, SceUID cbId)
{
	_dbg_assert_msg_(HLE, numargs <= 6, "MipsCalls can only take 6 args.");

	if (thread) {
		// Snapshot the thread's state; ActionAfterMipsCall::run() restores it
		// (and then runs the caller's afterAction, chained below).
		ActionAfterMipsCall *after = (ActionAfterMipsCall *) __KernelCreateAction(actionAfterMipsCall);
		after->chainedAction = afterAction;
		after->threadID = thread->GetUID();
		after->status = thread->nt.status;
		after->waitType = thread->nt.waitType;
		after->waitID = thread->nt.waitID;
		after->waitInfo = thread->waitInfo;
		after->isProcessingCallbacks = thread->isProcessingCallbacks;
		after->currentCallbackId = thread->currentCallbackId;

		afterAction = after;

		if (thread->nt.waitType != WAITTYPE_NONE) {
			// If it's a callback, tell the wait to stop.
			if (waitTypeFuncs[thread->nt.waitType].beginFunc != NULL && cbId > 0) {
				waitTypeFuncs[thread->nt.waitType].beginFunc(after->threadID, thread->currentCallbackId);
			}

			// Release thread from waiting
			thread->nt.waitType = WAITTYPE_NONE;
		}

		__KernelChangeThreadState(thread, THREADSTATUS_READY);
	}

	// Build the call record; args beyond numargs are left unset.
	MipsCall *call = new MipsCall();
	call->entryPoint = entryPoint;
	for (int i = 0; i < numargs; i++) {
		call->args[i] = args[i];
	}
	call->numArgs = (int) numargs;
	call->doAfter = afterAction;
	call->tag = "callAddress";
	call->cbId = cbId;

	u32 callId = mipsCalls.add(call);

	bool called = false;
	// Run immediately when targeting the current thread (or no thread) and
	// we're not already inside a callback.
	if (!thread || thread == __GetCurrentThread()) {
		if (__CanExecuteCallbackNow(thread)) {
			thread = __GetCurrentThread();
			__KernelChangeThreadState(thread, THREADSTATUS_RUNNING);
			__KernelExecuteMipsCallOnCurrentThread(callId, reschedAfter);
			called = true;
		}
	}

	if (!called) {
		// Otherwise it runs when the thread is next context-switched to.
		if (thread) {
			DEBUG_LOG(HLE, "Making mipscall pending on thread");
			thread->pendingMipsCalls.push_back(callId);
		} else {
			WARN_LOG(HLE, "Ignoring mispcall on NULL/deleted thread");
		}
	}
}
2012-11-13 18:05:26 +01:00
2013-02-02 18:03:55 -08:00
// Convenience wrapper: queue/run a mips call on the current thread with no
// associated callback id.
void __KernelDirectMipsCall(u32 entryPoint, Action *afterAction, u32 args[], int numargs, bool reschedAfter)
{
	Thread *curThread = __GetCurrentThread();
	__KernelCallAddress(curThread, entryPoint, afterAction, args, numargs, reschedAfter, 0);
}
2013-03-02 14:58:58 -08:00
// Redirects the current mips state into the call identified by callId,
// saving every register it clobbers so __KernelReturnFromMipsCall() can
// restore them afterwards.
void __KernelExecuteMipsCallOnCurrentThread(u32 callId, bool reschedAfter)
{
	Thread *cur = __GetCurrentThread();
	if (cur == NULL)
	{
		ERROR_LOG(HLE, "__KernelExecuteMipsCallOnCurrentThread(): Bad current thread");
		return;
	}

	if (g_inCbCount > 0) {
		WARN_LOG_REPORT(HLE, "__KernelExecuteMipsCallOnCurrentThread(): Already in a callback!");
	}
	DEBUG_LOG(HLE, "Executing mipscall %i", callId);
	MipsCall *call = mipsCalls.get(callId);

	// Save the few regs that need saving
	call->savedPc = currentMIPS->pc;
	call->savedRa = currentMIPS->r[MIPS_REG_RA];
	call->savedV0 = currentMIPS->r[MIPS_REG_V0];
	call->savedV1 = currentMIPS->r[MIPS_REG_V1];
	call->savedIdRegister = currentMIPS->r[MIPS_REG_CALL_ID];
	call->savedId = cur->currentMipscallId;
	call->reschedAfter = reschedAfter;

	// Set up the new state
	currentMIPS->pc = call->entryPoint;
	// Returning to this magic address triggers __KernelReturnFromMipsCall().
	currentMIPS->r[MIPS_REG_RA] = __KernelMipsCallReturnAddress();
	// We put this two places in case the game overwrites it.
	// We may want it later to "inject" return values.
	currentMIPS->r[MIPS_REG_CALL_ID] = callId;
	cur->currentMipscallId = callId;
	for (int i = 0; i < call->numArgs; i++) {
		currentMIPS->r[MIPS_REG_A0 + i] = call->args[i];
	}

	// Only callback invocations (cbId != 0) count toward callback nesting.
	if (call->cbId != 0)
		g_inCbCount++;
	currentCallbackThreadID = currentThread;
}
2012-11-07 15:44:48 +01:00
// Invoked when a mips call returns (via the special return address): runs the
// after-action, restores the saved registers, notifies the interrupted wait,
// and reschedules or continues pending calls as appropriate.
void __KernelReturnFromMipsCall()
{
	Thread *cur = __GetCurrentThread();
	if (cur == NULL)
	{
		ERROR_LOG(HLE, "__KernelReturnFromMipsCall(): Bad current thread");
		return;
	}

	u32 callId = cur->currentMipscallId;
	// The call id register should still hold the id unless the game clobbered it.
	if (currentMIPS->r[MIPS_REG_CALL_ID] != callId)
		WARN_LOG_REPORT(HLE, "__KernelReturnFromMipsCall(): s0 is %08x != %08x", currentMIPS->r[MIPS_REG_CALL_ID], callId);

	MipsCall *call = mipsCalls.pop(callId);

	// Value returned by the callback function
	u32 retVal = currentMIPS->r[MIPS_REG_V0];
	DEBUG_LOG(HLE, "__KernelReturnFromMipsCall(), returned %08x", retVal);

	// Should also save/restore wait state here.
	if (call->doAfter)
	{
		// Runs before register restore so the action can inspect/patch v0 etc.
		call->doAfter->run(*call);
		delete call->doAfter;
	}

	currentMIPS->pc = call->savedPc;
	currentMIPS->r[MIPS_REG_RA] = call->savedRa;
	currentMIPS->r[MIPS_REG_V0] = call->savedV0;
	currentMIPS->r[MIPS_REG_V1] = call->savedV1;
	currentMIPS->r[MIPS_REG_CALL_ID] = call->savedIdRegister;
	cur->currentMipscallId = call->savedId;

	if (call->cbId != 0)
		g_inCbCount--;
	currentCallbackThreadID = 0;

	// If the thread went back to waiting, tell the wait its callback finished.
	if (cur->nt.waitType != WAITTYPE_NONE)
	{
		if (waitTypeFuncs[cur->nt.waitType].endFunc != NULL && call->cbId > 0)
			waitTypeFuncs[cur->nt.waitType].endFunc(cur->GetUID(), cur->currentCallbackId, currentMIPS->r[MIPS_REG_V0]);
	}

	// yeah! back in the real world, let's keep going. Should we process more callbacks?
	if (!__KernelExecutePendingMipsCalls(cur, call->reschedAfter))
	{
		// Sometimes, we want to stay on the thread.
		int threadReady = cur->nt.status & (THREADSTATUS_READY | THREADSTATUS_RUNNING);
		if (call->reschedAfter || threadReady == 0)
			__KernelReSchedule("return from callback");
	}

	delete call;
}
2012-11-06 15:46:21 +01:00
2013-03-09 14:21:21 -08:00
// First arg must be current thread, passed to avoid perf cost of a lookup.
// Runs the oldest pending mips call if one exists and we're not in a callback;
// returns true if a call was started.
bool __KernelExecutePendingMipsCalls(Thread *thread, bool reschedAfter)
{
	_dbg_assert_msg_(HLE, thread->GetUID() == __KernelGetCurThread(), "__KernelExecutePendingMipsCalls() should be called only with the current thread.");

	if (thread->pendingMipsCalls.empty()) {
		// Nothing to do
		return false;
	}

	if (!__CanExecuteCallbackNow(thread))
		return false;

	// Pop off the first pending mips call and run it right away.
	u32 callId = thread->pendingMipsCalls.front();
	thread->pendingMipsCalls.pop_front();
	__KernelExecuteMipsCallOnCurrentThread(callId, reschedAfter);
	return true;
}
2012-11-06 15:46:21 +01:00
2012-11-07 15:44:48 +01:00
// Executes the callback, when it next is context switched to.
// Converts a ready callback into a pending mips call on the given thread,
// passing (notifyCount, notifyArg, commonArgument) and clearing the pending
// notification.
void __KernelRunCallbackOnThread(SceUID cbId, Thread *thread, bool reschedAfter)
{
	u32 error;
	Callback *cb = kernelObjects.Get<Callback>(cbId, error);
	if (!cb) {
		ERROR_LOG(HLE, "__KernelRunCallbackOnThread: Bad cbId %i", cbId);
		return;
	}

	DEBUG_LOG(HLE, "__KernelRunCallbackOnThread: Turning callback %i into pending mipscall", cbId);

	// Alright, we're on the right thread
	// Should save/restore wait state?

	const u32 args[] = {(u32) cb->nc.notifyCount, (u32) cb->nc.notifyArg, cb->nc.commonArgument};

	// Clear the notify count / arg
	cb->nc.notifyCount = 0;
	cb->nc.notifyArg = 0;

	// The after-action re-checks callbacks and may delete this one if the
	// callback function returns non-zero.
	ActionAfterCallback *action = (ActionAfterCallback *) __KernelCreateAction(actionAfterCallback);
	if (action != NULL)
		action->setCallback(cbId);
	else
		ERROR_LOG(HLE, "Something went wrong creating a restore action for a callback.");

	__KernelCallAddress(thread, cb->nc.entrypoint, action, args, 3, reschedAfter, cbId);
}
2013-01-06 10:54:33 -08:00
// Runs after a callback's mips call returns: checks the owning thread for more
// ready callbacks, and deletes the callback object if it returned non-zero.
void ActionAfterCallback::run(MipsCall &call) {
	if (cbId != -1) {
		u32 error;
		Callback *cb = kernelObjects.Get<Callback>(cbId, error);
		if (cb)
		{
			Thread *t = kernelObjects.Get<Thread>(cb->nc.threadId, error);
			if (t)
			{
				// Check for other callbacks to run (including ones this callback scheduled.)
				__KernelCheckThreadCallbacks(t, true);
			}

			DEBUG_LOG(HLE, "Left callback %i - %s", cbId, cb->nc.name);
			// Callbacks that don't return 0 are deleted. But should this be done here?
			// (v0 still holds the callback's return value at this point.)
			if (currentMIPS->r[MIPS_REG_V0] != 0)
			{
				DEBUG_LOG(HLE, "ActionAfterCallback::run(): Callback returned non-zero, gets deleted!");
				kernelObjects.Destroy<Callback>(cbId);
			}
		}
	}
}
2013-03-29 00:27:33 -07:00
// Returns true if the current thread has any ready (pending) callbacks.
bool __KernelCurHasReadyCallbacks() {
	if (readyCallbacksCount == 0)
		return false;

	Thread *thread = __GetCurrentThread();
	// The current thread might just have been deleted.
	if (!thread)
		return false;

	for (int i = 0; i < THREAD_CALLBACK_NUM_TYPES; i++) {
		if (thread->readyCallbacks[i].size()) {
			return true;
		}
	}

	return false;
}
2012-11-06 15:46:21 +01:00
// Check callbacks on the current thread only.
// Returns true if any callbacks were processed on the current thread.
// force runs callbacks even when the thread isn't in a callback-processing
// wait (used by sceKernelCheckCallback).
bool __KernelCheckThreadCallbacks(Thread *thread, bool force)
{
	if (!thread || (!thread->isProcessingCallbacks && !force))
		return false;

	// Fire at most one ready callback per invocation, in type-priority order.
	for (int i = 0; i < THREAD_CALLBACK_NUM_TYPES; i++) {
		if (thread->readyCallbacks[i].size()) {
			SceUID readyCallback = thread->readyCallbacks[i].front();
			thread->readyCallbacks[i].pop_front();
			readyCallbacksCount--;

			// If the callback was deleted, we're good.  Just skip it.
			if (kernelObjects.IsValid(readyCallback))
			{
				__KernelRunCallbackOnThread(readyCallback, thread, !force);   // makes pending
				return true;
			}
			else
			{
				WARN_LOG(HLE, "Ignoring deleted callback %08x", readyCallback);
			}
		}
	}
	return false;
}
// Checks for callbacks on all threads
// Returns true if a callback was turned into a running mips call.
bool __KernelCheckCallbacks() {
	// Let's not check every thread all the time, callbacks are fairly uncommon.
	if (readyCallbacksCount == 0) {
		return false;
	}
	if (readyCallbacksCount < 0) {
		ERROR_LOG_REPORT(HLE, "readyCallbacksCount became negative: %i", readyCallbacksCount);
	}

	// SceUID currentThread = __KernelGetCurThread();
	// __GetCurrentThread()->isProcessingCallbacks = true;
	// do {
	bool processed = false;

	u32 error;
	for (std::vector<SceUID>::iterator iter = threadqueue.begin(); iter != threadqueue.end(); iter++) {
		Thread *thread = kernelObjects.Get<Thread>(*iter, error);
		if (thread && __KernelCheckThreadCallbacks(thread, false)) {
			processed = true;
		}
	}
	// } while (processed && currentThread == __KernelGetCurThread());

	// Actually start the first pending call on the current thread, if any.
	if (processed)
		return __KernelExecutePendingMipsCalls(__GetCurrentThread(), true);
	return processed;
}
2012-12-01 18:43:45 -08:00
bool __KernelForceCallbacks ( )
{
2013-03-09 14:11:53 -08:00
// Let's not check every thread all the time, callbacks are fairly uncommon.
if ( readyCallbacksCount = = 0 ) {
return false ;
}
if ( readyCallbacksCount < 0 ) {
2013-04-09 23:16:23 -07:00
ERROR_LOG_REPORT ( HLE , " readyCallbacksCount became negative: %i " , readyCallbacksCount ) ;
2013-03-09 14:11:53 -08:00
}
2012-11-06 15:46:21 +01:00
Thread * curThread = __GetCurrentThread ( ) ;
2012-12-08 15:28:54 -08:00
bool callbacksProcessed = __KernelCheckThreadCallbacks ( curThread , true ) ;
2012-12-08 18:40:20 -08:00
if ( callbacksProcessed )
2013-03-09 14:21:21 -08:00
__KernelExecutePendingMipsCalls ( curThread , false ) ;
2012-11-06 15:46:21 +01:00
2012-12-01 18:43:45 -08:00
return callbacksProcessed ;
}
2013-04-14 23:45:46 -07:00
// Not wrapped because it has special return logic.
2012-12-08 18:40:20 -08:00
void sceKernelCheckCallback ( )
{
// Start with yes.
RETURN ( 1 ) ;
2012-11-06 15:46:21 +01:00
2012-12-01 18:43:45 -08:00
bool callbacksProcessed = __KernelForceCallbacks ( ) ;
2012-11-06 15:46:21 +01:00
if ( callbacksProcessed ) {
2012-12-18 14:12:57 +01:00
DEBUG_LOG ( HLE , " sceKernelCheckCallback() - processed a callback. " ) ;
2012-11-06 15:46:21 +01:00
} else {
2012-11-07 15:44:48 +01:00
RETURN ( 0 ) ;
2012-11-06 15:46:21 +01:00
}
}
// True while any callback-originated mips call is in progress.
bool __KernelInCallback()
{
	return g_inCbCount != 0;
}
// Registers a callback of the given type on the current thread.
// Fails with SCE_KERNEL_ERROR_INVAL for bad ids or duplicates.
u32 __KernelRegisterCallback(RegisteredCallbackType type, SceUID cbId)
{
	Thread *t = __GetCurrentThread();
	std::set<SceUID> &regs = t->registeredCallbacks[type];
	if (cbId <= 0 || regs.find(cbId) != regs.end())
		return SCE_KERNEL_ERROR_INVAL;

	regs.insert(cbId);
	return 0;
}
// Unregisters a callback from the current thread; 0x80010016 when it wasn't
// registered.
u32 __KernelUnregisterCallback(RegisteredCallbackType type, SceUID cbId)
{
	Thread *t = __GetCurrentThread();
	// set::erase returns the number of elements removed (0 or 1 here).
	if (t->registeredCallbacks[type].erase(cbId) == 0)
		return 0x80010016;
	return 0;
}
2012-12-07 22:25:42 -08:00
// Marks a callback as notified and queues it on its owning thread's ready
// list (at most once per type).
void __KernelNotifyCallback(RegisteredCallbackType type, SceUID cbId, int notifyArg)
{
	u32 error;
	Callback *cb = kernelObjects.Get<Callback>(cbId, error);
	if (!cb) {
		// Yeah, we're screwed, this shouldn't happen.
		ERROR_LOG(HLE, "__KernelNotifyCallback - invalid callback %08x", cbId);
		return;
	}
	cb->nc.notifyCount++;
	cb->nc.notifyArg = notifyArg;

	Thread *t = kernelObjects.Get<Thread>(cb->nc.threadId, error);
	// The owning thread may have been deleted while the callback lived on;
	// previously this dereferenced t unchecked.
	if (!t) {
		WARN_LOG(HLE, "__KernelNotifyCallback - callback %08x has no thread", cbId);
		return;
	}
	std::list<SceUID> &readyCallbacks = t->readyCallbacks[type];
	auto iter = std::find(readyCallbacks.begin(), readyCallbacks.end(), cbId);
	if (iter == readyCallbacks.end())
	{
		t->readyCallbacks[type].push_back(cbId);
		readyCallbacksCount++;
	}
}
// TODO: If cbId == -1, notify the callback ID on all threads that have it.
// Notifies every registered callback of this type across all threads
// (cbId == -1 matches all registered ids).
u32 __KernelNotifyCallbackType(RegisteredCallbackType type, SceUID cbId, int notifyArg)
{
	u32 error;
	for (size_t i = 0; i < threadqueue.size(); i++) {
		Thread *t = kernelObjects.Get<Thread>(threadqueue[i], error);
		if (!t)
			continue;

		const std::set<SceUID> &regs = t->registeredCallbacks[type];
		for (std::set<SceUID>::const_iterator citer = regs.begin(); citer != regs.end(); ++citer) {
			if (cbId == -1 || cbId == *citer)
				__KernelNotifyCallback(type, *citer, notifyArg);
		}
	}

	// checkCallbacks on other threads?
	return 0;
}
2013-03-27 00:51:46 -07:00
// Installs the begin/end hook functions invoked when a wait of this type is
// interrupted by / resumed after a callback.
void __KernelRegisterWaitTypeFuncs(WaitType type, WaitBeginCallbackFunc beginFunc, WaitEndCallbackFunc endFunc)
{
	waitTypeFuncs[type].beginFunc = beginFunc;
	waitTypeFuncs[type].endFunc = endFunc;
}
2013-02-10 16:36:06 +01:00
std : : vector < DebugThreadInfo > GetThreadsInfo ( )
{
std : : vector < DebugThreadInfo > threadList ;
u32 error ;
for ( std : : vector < SceUID > : : iterator iter = threadqueue . begin ( ) ; iter ! = threadqueue . end ( ) ; iter + + )
{
Thread * t = kernelObjects . Get < Thread > ( * iter , error ) ;
if ( ! t )
continue ;
DebugThreadInfo info ;
info . id = * iter ;
strncpy ( info . name , t - > GetName ( ) , KERNELOBJECT_MAX_NAME_LENGTH ) ;
2013-02-24 10:23:31 -08:00
info . name [ KERNELOBJECT_MAX_NAME_LENGTH ] = 0 ;
2013-02-10 16:36:06 +01:00
info . status = t - > nt . status ;
info . entrypoint = t - > nt . entrypoint ;
2013-02-17 13:10:40 +01:00
if ( * iter = = currentThread )
info . curPC = currentMIPS - > pc ;
else
info . curPC = t - > context . pc ;
2013-02-10 16:36:06 +01:00
info . isCurrent = ( * iter = = currentThread ) ;
threadList . push_back ( info ) ;
}
return threadList ;
}
// UID-based convenience overload; silently ignores unknown thread ids.
void __KernelChangeThreadState(SceUID threadId, ThreadStatus newStatus)
{
	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadId, error);
	if (t)
		__KernelChangeThreadState(t, newStatus);
}