indent is really bad at handling assembly

--HG--
extra : convert_revision : svn%3Ac70aab31-4412-0410-b14c-859654838e24/trunk%403646
Sam Lantinga 2009-06-10 13:38:19 +00:00
parent d03a7700eb
commit 388df73e0d
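For context on what the reformatting below preserves: GNU indent honors special control comments that switch formatting off and on, and this header wraps its inline assembly in them so the hand-aligned constraint lists survive an indent pass. A minimal sketch of the pattern (the function name is hypothetical; it mirrors the x86 add in the first hunk):

/* *INDENT-OFF* */                      /* GNU indent: leave this region alone */
static __inline__ void
example_locked_add(volatile int *atomic, int value)
{
    /* GCC extended asm: instruction template, then outputs, inputs, and
       (optionally) clobbers, separated by colons -- the layout indent mangles. */
    __asm__ __volatile__("lock;"
                         "addl %1, %0"
                         : "=m" (*atomic)
                         : "ir" (value),
                           "m" (*atomic));
}
/* *INDENT-ON* */                       /* normal formatting resumes below */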


@@ -42,20 +42,30 @@ extern "C" {
/* *INDENT-ON* */
#endif
/* indent is really bad at handling assembly */
/* *INDENT-OFF* */
#if defined(__GNUC__) && (defined(i386) || defined(__i386__) || defined(__x86_64__))
static __inline__ void
SDL_atomic_int_add(volatile int* atomic, int value)
{
__asm__ __volatile__("lock;" "addl %1, %0":"=m"(*atomic)
:"ir"(value), "m"(*atomic));
__asm__ __volatile__("lock;"
"addl %1, %0"
: "=m" (*atomic)
: "ir" (value),
"m" (*atomic));
}
static __inline__ int
SDL_atomic_int_xchg_add(volatile int* atomic, int value)
{
int rv;
__asm__ __volatile__("lock;" "xaddl %0, %1":"=r"(rv), "=m"(*atomic)
:"0"(value), "m"(*atomic));
__asm__ __volatile__("lock;"
"xaddl %0, %1"
: "=r" (rv),
"=m" (*atomic)
: "0" (value),
"m" (*atomic));
return rv;
}
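A short usage sketch of the two x86 primitives above (the reference-count helpers are hypothetical, not part of SDL or of this commit): SDL_atomic_int_add is a fire-and-forget locked add, while SDL_atomic_int_xchg_add also returns the value the counter held before the addition, which is exactly what a release path needs.

/* Hypothetical reference-count helpers built on the primitives above. */
static volatile int refcount = 1;

static void
object_ref(void)
{
    SDL_atomic_int_add(&refcount, 1);               /* atomic increment */
}

static SDL_bool
object_unref(void)
{
    /* xchg_add returns the previous value; 1 means this was the last reference */
    return (SDL_bool)(SDL_atomic_int_xchg_add(&refcount, -1) == 1);
}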
@@ -63,14 +73,18 @@ static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
int rv;
__asm__ __volatile__("lock;" "cmpxchgl %2, %1":"=a"(rv), "=m"(*atomic)
:"r"(newvalue), "m"(*atomic), "0"(oldvalue));
__asm__ __volatile__("lock;"
"cmpxchgl %2, %1"
: "=a" (rv),
"=m" (*atomic)
: "r" (newvalue),
"m" (*atomic),
"0" (oldvalue));
return (SDL_bool)(rv == oldvalue);
}
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
void* rv;
__asm__ __volatile__("lock;"
@@ -79,8 +93,11 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
# else
"cmpxchgl %2, %1"
# endif
:"=a"(rv), "=m"(*atomic)
:"r"(newvalue), "m"(*atomic), "0"(oldvalue));
: "=a" (rv),
"=m" (*atomic)
: "r" (newvalue),
"m" (*atomic),
"0" (oldvalue));
return (SDL_bool)(rv == oldvalue);
}
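SDL_atomic_int_cmp_xchg above returns SDL_TRUE only when *atomic still held oldvalue and was atomically replaced by newvalue; otherwise it leaves the value alone and returns SDL_FALSE. A minimal (hypothetical) spinlock shows the usual retry pattern around it:

/* Hypothetical spinlock built on the compare-and-exchange primitive above. */
static volatile int lock_word = 0;

static void
spin_lock(void)
{
    /* retry until this thread is the one that flips 0 -> 1 */
    while (!SDL_atomic_int_cmp_xchg(&lock_word, 0, 1)) {
        /* spin */
    }
}

static void
spin_unlock(void)
{
    lock_word = 0;      /* a production lock would pair this with a release barrier */
}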
#elif defined(__GNUC__) && defined(__alpha__)
@@ -108,8 +125,7 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
int rv;
void* prev;
@@ -120,15 +136,19 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
" mov %4,%1\n"
" stl_c %1,%2\n"
" beq %1,1b\n"
" mb\n" "2:":"=&r"(prev), "=&r"(rv)
:"m"(*atomic), "Ir"(oldvalue), "Ir"(newvalue)
" mb\n"
"2:"
: "=&r" (prev),
"=&r" (rv)
: "m" (*atomic),
"Ir" (oldvalue),
"Ir" (newvalue)
: "memory");
return (SDL_bool)(rv != 0);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
int rv;
void* prev;
@@ -139,8 +159,13 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
" mov %4,%1\n"
" stq_c %1,%2\n"
" beq %1,1b\n"
" mb\n" "2:":"=&r"(prev), "=&r"(rv)
:"m"(*atomic), "Ir"(oldvalue), "Ir"(newvalue)
" mb\n"
"2:"
: "=&r" (prev),
"=&r" (rv)
: "m" (*atomic),
"Ir" (oldvalue),
"Ir" (newvalue)
: "memory");
return (SDL_bool)(rv != 0);
}
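Unlike x86, Alpha has no single compare-and-swap instruction, so the code above loops on a load-locked/store-conditional pair (ldl_l/stl_c or ldq_l/stq_c) until the conditional store succeeds, with mb serving as the memory barrier; the PowerPC and MIPS sections further down follow the same pattern with lwarx/stwcx. and ll/sc. For comparison only, GCC 4.1 and later expose equivalent semantics as a builtin (a sketch, not something this header uses):

/* Illustrative only: the same compare-and-swap via a GCC builtin. */
static __inline__ SDL_bool
cmp_xchg_builtin_sketch(volatile void **atomic, void *oldvalue, void *newvalue)
{
    return (SDL_bool)__sync_bool_compare_and_swap(atomic, oldvalue, newvalue);
}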
@@ -163,24 +188,31 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
void* rv;
__asm__ __volatile__("cas [%4], %2, %0":"=r"(rv), "=m"(*atomic)
__asm__ __volatile__("cas [%4], %2, %0"
: "=r" (rv),
"=m" (*atomic)
: "r" (oldvalue),
"m"(*atomic), "r"(atomic), "0"(newvalue));
"m" (*atomic),
"r" (atomic),
"0" (newvalue));
return (SDL_bool)(rv == oldvalue);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
void* rv;
void** a = atomic;
__asm__ __volatile__("casx [%4], %2, %0":"=r"(rv), "=m"(*a)
:"r"(oldvalue), "m"(*a), "r"(a), "0"(newvalue));
__asm__ __volatile__("casx [%4], %2, %0"
: "=r" (rv),
"=m" (*a)
: "r" (oldvalue),
"m" (*a),
"r" (a),
"0" (newvalue));
return (SDL_bool)(rv == oldvalue);
}
# else
@@ -196,9 +228,15 @@ SDL_atomic_int_add(volatile int *atomic, int value)
__asm__ __volatile__("1: lwarx %0, 0, %3\n"
" add %1, %0, %4\n"
" stwcx. %1, 0, %3\n"
" bne- 1b":"=&b"(rv), "=&r"(tmp), "=m"(*atomic)
:"b"(atomic), "r"(value), "m"(*atomic)
:"cr0", "memory");
" bne- 1b"
: "=&b" (rv),
"=&r" (tmp),
"=m" (*atomic)
: "b" (atomic),
"r" (value),
"m" (*atomic)
: "cr0",
"memory");
}
static __inline__ int
@@ -208,9 +246,15 @@ SDL_atomic_int_xchg_add(volatile int *atomic, int value)
__asm__ __volatile__("1: lwarx %0, 0, %3\n"
" add %1, %0, %4\n"
" stwcx. %1, 0, %3\n"
" bne- 1b":"=&b"(rv), "=&r"(tmp), "=m"(*atomic)
:"b"(atomic), "r"(value), "m"(*atomic)
:"cr0", "memory");
" bne- 1b"
: "=&b" (rv),
"=&r" (tmp),
"=m" (*atomic)
: "b" (atomic),
"r" (value),
"m" (*atomic)
: "cr0",
"memory");
return rv;
}
@@ -224,14 +268,19 @@ SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
" subf. %0, %2, %0\n"
" bne 2f\n"
" stwcx. %3, 0, %1\n"
" bne- 1b\n" "2: isync":"=&r"(rv)
:"b"(atomic), "r"(oldvalue), "r":"cr0", "memory");
" bne- 1b\n"
"2: isync"
: "=&r" (rv)
: "b" (atomic),
"r" (oldvalue),
"r"
: "cr0",
"memory");
return (SDL_bool)(rv == 0);
}
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
void* rv;
__asm__ __volatile__("sync\n"
@@ -239,9 +288,14 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
" subf. %0, %2, %0\n"
" bne 2f\n"
" stwcx. %3, 0, %1\n"
" bne- 1b\n" "2: isync":"=&r"(rv)
:"b"(atomic), "r"(oldvalue), "r"(newvalue)
:"cr0", "memory");
" bne- 1b\n"
"2: isync"
: "=&r" (rv)
: "b" (atomic),
"r" (oldvalue),
"r" (newvalue)
: "cr0",
"memory");
return (SDL_bool)(rv == 0);
}
# elif (SIZEOF_VOIDP == 8)
@@ -255,14 +309,19 @@ SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
" subf. %0, %2, %0\n"
" bne 2f\n"
" stwcx. %3, 0, %1\n"
" bne- 1b\n" "2: isync":"=&r"(rv)
:"b"(atomic), "r"(oldvalue), "r":"cr0", "memory");
" bne- 1b\n"
"2: isync"
: "=&r" (rv)
: "b" (atomic),
"r" (oldvalue),
"r"
: "cr0",
"memory");
return (SDL_bool)(rv == 0);
}
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
void* rv;
__asm__ __volatile__("sync\n"
@@ -270,9 +329,14 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
" subf. %0, %2, %0\n"
" bne 2f\n"
" stdcx. %3, 0, %1\n"
" bne- 1b\n" "2: isync":"=&r"(rv)
:"b"(atomic), "r"(oldvalue), "r"(newvalue)
:"cr0", "memory");
" bne- 1b\n"
"2: isync"
: "=&r" (rv)
: "b" (atomic),
"r" (oldvalue),
"r" (newvalue)
: "cr0",
"memory");
return (SDL_bool)(rv == 0);
}
# else
@@ -300,9 +364,12 @@ SDL_atomic_int_xchg_add(volatile int *atomic, int value)
"addu %1,%4,%0 \n"
"sc %1,%2 \n"
".set pop \n"
"beqz %1,1b \n":"=&r"(rv),
"=&r"(tmp), "=m"(*atomic)
:"m"(*atomic), "r"(value)
"beqz %1,1b \n"
: "=&r" (rv),
"=&r" (tmp),
"=m" (*atomic)
: "m" (*atomic),
"r" (value)
: "memory");
return rv;
}
@@ -318,8 +385,11 @@ SDL_atomic_int_add(volatile int *atomic, int value)
"addu %0,%3,%0 \n"
"sc %0,%1 \n"
".set pop \n"
"beqz %0,1b \n":"=&r"(rv), "=m"(*atomic)
:"m"(*atomic), "r"(value)
"beqz %0,1b \n"
: "=&r" (rv),
"=m" (*atomic)
: "m" (*atomic),
"r" (value)
: "memory");
}
@@ -339,25 +409,30 @@ SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
" beqz $1, 1b \n"
" sync \n"
"2: \n"
" .set pop \n":"=&r"(rv), "=R"(*atomic)
:"R"(*atomic), "Jr"(oldvalue), "Jr"(newvalue)
" .set pop \n"
: "=&r" (rv),
"=R" (*atomic)
: "R" (*atomic),
"Jr" (oldvalue),
"Jr" (newvalue)
: "memory");
return (SDL_bool)rv;
}
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
int rv;
__asm__ __volatile__(" .set push \n"
" .set noat \n" " .set mips3 \n"
" .set noat \n"
" .set mips3 \n"
# if defined(__mips64)
"1: lld %0, %2 \n"
# else
"1: ll %0, %2 \n"
# endif
" bne %0, %z3, 2f \n" " move $1, %z4 \n"
" bne %0, %z3, 2f \n"
" move $1, %z4 \n"
# if defined(__mips64)
" sc $1, %1 \n"
# else
@@ -366,8 +441,12 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
" beqz $1, 1b \n"
" sync \n"
"2: \n"
" .set pop \n":"=&r"(rv), "=R"(*atomic)
:"R"(*atomic), "Jr"(oldvalue), "Jr"(newvalue)
" .set pop \n"
: "=&r" (rv),
"=R" (*atomic)
: "R" (*atomic),
"Jr" (oldvalue),
"Jr" (newvalue)
: "memory");
return (SDL_bool)rv;
}
@@ -380,8 +459,12 @@ SDL_atomic_int_xchg_add(volatile int *atomic, int value)
__asm__ __volatile__("1: move%.l %0,%1 \n"
" add%.l %2,%1 \n"
" cas%.l %0,%1,%3 \n"
" jbne 1b \n":"=d"(rv), "=&d"(tmp)
:"d"(value), "m"(*atomic), "0"(rv)
" jbne 1b \n"
: "=d" (rv),
"=&d" (tmp)
: "d" (value),
"m" (*atomic),
"0" (rv)
: "memory");
return (SDL_bool)rv;
}
@@ -389,7 +472,10 @@ SDL_atomic_int_xchg_add(volatile int *atomic, int value)
static __inline__ void
SDL_atomic_int_add(volatile int* atomic, int value)
{
__asm__ __volatile__("add%.l %0,%1"::"id"(value), "m"(*atomic)
__asm__ __volatile__("add%.l %0,%1"
:
: "id" (value),
"m" (*atomic)
: "memory");
}
@@ -399,20 +485,29 @@ SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
char rv;
int readvalue;
__asm__ __volatile__("cas%.l %2,%3,%1\n"
"seq %0":"=dm"(rv), "=m"(*atomic), "=d"(readvalue)
:"d"(newvalue), "m"(*atomic), "2"(oldvalue));
"seq %0"
: "=dm" (rv),
"=m" (*atomic),
"=d" (readvalue)
: "d" (newvalue),
"m" (*atomic),
"2" (oldvalue));
return (SDL_bool)rv;
}
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
char rv;
int readvalue;
__asm__ __volatile__("cas%.l %2,%3,%1\n"
"seq %0":"=dm"(rv), "=m"(*atomic), "=d"(readvalue)
:"d"(newvalue), "m"(*atomic), "2"(oldvalue));
"seq %0"
: "=dm" (rv),
"=m" (*atomic),
"=d" (readvalue)
: "d" (newvalue),
"m" (*atomic),
"2" (oldvalue));
return (SDL_bool)rv;
}
#elif defined(__GNUC__) && defined(__s390__)
@@ -429,24 +524,28 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
})
# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
void* rv = oldvalue;
__asm__ __volatile__("cs %0, %2, %1":"+d"(rv), "=Q"(*atomic)
:"d"(newvalue), "m"(*atomic)
__asm__ __volatile__("cs %0, %2, %1"
: "+d" (rv),
"=Q" (*atomic)
: "d" (newvalue),
"m" (*atomic)
: "cc");
return (SDL_bool)(rv == oldvalue);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
void* rv = oldvalue;
void** a = atomic;
__asm__ __volatile__("csg %0, %2, %1":"+d"(rv), "=Q"(*a)
:"d"((long) (newvalue)), "m"(*a)
__asm__ __volatile__("csg %0, %2, %1"
: "+d" (rv),
"=Q" (*a)
: "d" ((long)(newvalue)),
"m" (*a)
: "cc");
return (SDL_bool)(rv == oldvalue);
}
@@ -473,17 +572,14 @@ SDL_atmoic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
return (SDL_bool)(InterlockedCompareExchangePointer((PVOID*)atomic,
(PVOID)newvalue,
(PVOID) oldvalue) ==
oldvalue);
(PVOID)oldvalue) == oldvalue);
}
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
return (InterlockedCompareExchangePointer(atomic, newvalue, oldvalue) ==
oldvalue);
return (InterlockedCompareExchangePointer(atomic, newvalue, oldvalue) == oldvalue);
}
# else /* WINVER <= 0x0400 */
# if (SIZEOF_VOIDP != 4)
@@ -493,16 +589,13 @@ SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
{
return (InterlockedCompareExchange(atomic, newvalue, oldvalue) ==
oldvalue);
return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue);
}
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
void *newvalue)
SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
{
return (InterlockedCompareExchange(atomic, newvalue, oldvalue) ==
oldvalue);
return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue);
}
# endif
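The Windows paths above rely on a documented convention of the Interlocked API: InterlockedCompareExchange and InterlockedCompareExchangePointer return the value the destination held before the call, so comparing that return value against oldvalue tells the caller whether the swap actually happened. A tiny hypothetical illustration:

/* Hypothetical illustration of the return-value check used above. */
#include <windows.h>

static BOOL
try_claim(volatile LONG *flag)
{
    /* returns the previous value of *flag; equality with 0 means we won */
    return (InterlockedCompareExchange(flag, 1, 0) == 0);
}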
#else /* when all else fails */
@@ -530,6 +623,8 @@ SDL_atomic_int_add(volatile int *atomic, int value)
}
#endif /* arch & platforms */
/* *INDENT-ON* */
#ifdef ATOMIC_INT_CMP_XCHG
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)