1.1 --- a/include/SDL_atomic.h Wed Jun 10 13:34:20 2009 +0000
1.2 +++ b/include/SDL_atomic.h Wed Jun 10 13:38:19 2009 +0000
1.3 @@ -42,46 +42,63 @@
1.4 /* *INDENT-ON* */
1.5 #endif
1.6
1.7 +/* indent is really bad at handling assembly */
1.8 +/* *INDENT-OFF* */
1.9 +
1.10 #if defined(__GNUC__) && (defined(i386) || defined(__i386__) || defined(__x86_64__))
1.11 static __inline__ void
1.12 -SDL_atomic_int_add(volatile int *atomic, int value)
1.13 +SDL_atomic_int_add(volatile int* atomic, int value)
1.14 {
1.15 - __asm__ __volatile__("lock;" "addl %1, %0":"=m"(*atomic)
1.16 - :"ir"(value), "m"(*atomic));
1.17 + __asm__ __volatile__("lock;"
1.18 + "addl %1, %0"
1.19 + : "=m" (*atomic)
1.20 + : "ir" (value),
1.21 + "m" (*atomic));
1.22 }
1.23
1.24 static __inline__ int
1.25 -SDL_atomic_int_xchg_add(volatile int *atomic, int value)
1.26 -{
1.27 - int rv;
1.28 - __asm__ __volatile__("lock;" "xaddl %0, %1":"=r"(rv), "=m"(*atomic)
1.29 - :"0"(value), "m"(*atomic));
1.30 - return rv;
1.31 +SDL_atomic_int_xchg_add(volatile int* atomic, int value)
1.32 +{
1.33 + int rv;
1.34 + __asm__ __volatile__("lock;"
1.35 + "xaddl %0, %1"
1.36 + : "=r" (rv),
1.37 + "=m" (*atomic)
1.38 + : "0" (value),
1.39 + "m" (*atomic));
1.40 + return rv;
1.41 }
1.42
1.43 static __inline__ SDL_bool
1.44 -SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
1.45 +SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
1.46 {
1.47 - int rv;
1.48 - __asm__ __volatile__("lock;" "cmpxchgl %2, %1":"=a"(rv), "=m"(*atomic)
1.49 - :"r"(newvalue), "m"(*atomic), "0"(oldvalue));
1.50 - return (SDL_bool) (rv == oldvalue);
1.51 + int rv;
1.52 + __asm__ __volatile__("lock;"
1.53 + "cmpxchgl %2, %1"
1.54 + : "=a" (rv),
1.55 + "=m" (*atomic)
1.56 + : "r" (newvalue),
1.57 + "m" (*atomic),
1.58 + "0" (oldvalue));
1.59 + return (SDL_bool)(rv == oldvalue);
1.60 }
1.61
1.62 static __inline__ SDL_bool
1.63 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.64 - void *newvalue)
1.65 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.66 {
1.67 - void *rv;
1.68 - __asm__ __volatile__("lock;"
1.69 -# if defined(__x86_64__)
1.70 - "cmpxchgq %q2, %1"
1.71 + void* rv;
1.72 + __asm__ __volatile__("lock;"
1.73 +# if defined(__x86_64__)
1.74 + "cmpxchgq %q2, %1"
1.75 # else
1.76 - "cmpxchgl %2, %1"
1.77 -# endif
1.78 - :"=a"(rv), "=m"(*atomic)
1.79 - :"r"(newvalue), "m"(*atomic), "0"(oldvalue));
1.80 - return (SDL_bool) (rv == oldvalue);
1.81 + "cmpxchgl %2, %1"
1.82 +# endif
1.83 + : "=a" (rv),
1.84 + "=m" (*atomic)
1.85 + : "r" (newvalue),
1.86 + "m" (*atomic),
1.87 + "0" (oldvalue));
1.88 + return (SDL_bool)(rv == oldvalue);
1.89 }
1.90 #elif defined(__GNUC__) && defined(__alpha__)
1.91 # define ATOMIC_MEMORY_BARRIER (__asm__ __volatile__ ("mb" : : : "memory"))
1.92 @@ -108,45 +125,53 @@
1.93
1.94 # if (SIZEOF_VOIDP == 4)
1.95 static __inline__ SDL_bool
1.96 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.97 - void *newvalue)
1.98 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.99 {
1.100 - int rv;
1.101 - void *prev;
1.102 - __asm__ __volatile__(" mb\n"
1.103 - "1: ldl_l %0,%2\n"
1.104 - " cmpeq %0,%3,%1\n"
1.105 - " beq $1,2f\n"
1.106 - " mov %4,%1\n"
1.107 - " stl_c %1,%2\n"
1.108 - " beq %1,1b\n"
1.109 - " mb\n" "2:":"=&r"(prev), "=&r"(rv)
1.110 - :"m"(*atomic), "Ir"(oldvalue), "Ir"(newvalue)
1.111 - :"memory");
1.112 - return (SDL_bool) (rv != 0);
1.113 + int rv;
1.114 + void* prev;
1.115 + __asm__ __volatile__(" mb\n"
1.116 + "1: ldl_l %0,%2\n"
1.117 + " cmpeq %0,%3,%1\n"
1.118 + " beq %1,2f\n"
1.119 + " mov %4,%1\n"
1.120 + " stl_c %1,%2\n"
1.121 + " beq %1,1b\n"
1.122 + " mb\n"
1.123 + "2:"
1.124 + : "=&r" (prev),
1.125 + "=&r" (rv)
1.126 + : "m" (*atomic),
1.127 + "Ir" (oldvalue),
1.128 + "Ir" (newvalue)
1.129 + : "memory");
1.130 + return (SDL_bool)(rv != 0);
1.131 }
1.132 # elif (SIZEOF_VOIDP == 8)
1.133 static __inline__ SDL_bool
1.134 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.135 - void *newvalue)
1.136 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.137 {
1.138 - int rv;
1.139 - void *prev;
1.140 - __asm__ __volatile__(" mb\n"
1.141 - "1: ldq_l %0,%2\n"
1.142 - " cmpeq %0,%3,%1\n"
1.143 - " beq %1,2f\n"
1.144 - " mov %4,%1\n"
1.145 - " stq_c %1,%2\n"
1.146 - " beq %1,1b\n"
1.147 - " mb\n" "2:":"=&r"(prev), "=&r"(rv)
1.148 - :"m"(*atomic), "Ir"(oldvalue), "Ir"(newvalue)
1.149 - :"memory");
1.150 - return (SDL_bool) (rv != 0);
1.151 + int rv;
1.152 + void* prev;
1.153 + __asm__ __volatile__(" mb\n"
1.154 + "1: ldq_l %0,%2\n"
1.155 + " cmpeq %0,%3,%1\n"
1.156 + " beq %1,2f\n"
1.157 + " mov %4,%1\n"
1.158 + " stq_c %1,%2\n"
1.159 + " beq %1,1b\n"
1.160 + " mb\n"
1.161 + "2:"
1.162 + : "=&r" (prev),
1.163 + "=&r" (rv)
1.164 + : "m" (*atomic),
1.165 + "Ir" (oldvalue),
1.166 + "Ir" (newvalue)
1.167 + : "memory");
1.168 + return (SDL_bool)(rv != 0);
1.169 }
1.170 # else
1.171 -# error "Your system has an unsupported pointer size"
1.172 -# endif /* SIZEOF_VOIDP */
1.173 +# error "Your system has an unsupported pointer size"
1.174 +# endif /* SIZEOF_VOIDP */
1.175 #elif defined(__GNUC__) && defined(__sparc__)
1.176 # define ATOMIC_MEMORY_BARRIER \
1.177 (__asm__ __volatile__("membar #LoadLoad | #LoadStore" \
1.178 @@ -163,25 +188,32 @@
1.179
1.180 # if (SIZEOF_VOIDP == 4)
1.181 static __inline__ SDL_bool
1.182 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.183 - void *newvalue)
1.184 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.185 {
1.186 - void *rv;
1.187 - __asm__ __volatile__("cas [%4], %2, %0":"=r"(rv), "=m"(*atomic)
1.188 - :"r"(oldvalue),
1.189 - "m"(*atomic), "r"(atomic), "0"(newvalue));
1.190 - return (SDL_bool) (rv == oldvalue);
1.191 + void* rv;
1.192 + __asm__ __volatile__("cas [%4], %2, %0"
1.193 + : "=r" (rv),
1.194 + "=m" (*atomic)
1.195 + : "r" (oldvalue),
1.196 + "m" (*atomic),
1.197 + "r" (atomic),
1.198 + "0" (newvalue));
1.199 + return (SDL_bool)(rv == oldvalue);
1.200 }
1.201 # elif (SIZEOF_VOIDP == 8)
1.202 static __inline__ SDL_bool
1.203 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.204 - void *newvalue)
1.205 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.206 {
1.207 - void *rv;
1.208 - void **a = atomic;
1.209 - __asm__ __volatile__("casx [%4], %2, %0":"=r"(rv), "=m"(*a)
1.210 - :"r"(oldvalue), "m"(*a), "r"(a), "0"(newvalue));
1.211 - return (SDL_bool) (rv == oldvalue);
1.212 + void* rv;
1.213 + void** a = atomic;
1.214 + __asm__ __volatile__("casx [%4], %2, %0"
1.215 + : "=r" (rv),
1.216 + "=m" (*a)
1.217 + : "r" (oldvalue),
1.218 + "m" (*a),
1.219 + "r" (a),
1.220 + "0" (newvalue));
1.221 + return (SDL_bool)(rv == oldvalue);
1.222 }
1.223 # else
1.224 # error "Your system has an unsupported pointer size"
1.225 @@ -190,90 +222,122 @@
1.226 # define ATOMIC_MEMORY_BARRIER \
1.227 (__asm__ __volatile__ ("sync" : : : "memory"))
1.228 static __inline__ void
1.229 -SDL_atomic_int_add(volatile int *atomic, int value)
1.230 -{
1.231 - int rv, tmp;
1.232 - __asm__ __volatile__("1: lwarx %0, 0, %3\n"
1.233 - " add %1, %0, %4\n"
1.234 - " stwcx. %1, 0, %3\n"
1.235 - " bne- 1b":"=&b"(rv), "=&r"(tmp), "=m"(*atomic)
1.236 - :"b"(atomic), "r"(value), "m"(*atomic)
1.237 - :"cr0", "memory");
1.238 +SDL_atomic_int_add(volatile int* atomic, int value)
1.239 +{
1.240 + int rv,tmp;
1.241 + __asm__ __volatile__("1: lwarx %0, 0, %3\n"
1.242 + " add %1, %0, %4\n"
1.243 + " stwcx. %1, 0, %3\n"
1.244 + " bne- 1b"
1.245 + : "=&b" (rv),
1.246 + "=&r" (tmp),
1.247 + "=m" (*atomic)
1.248 + : "b" (atomic),
1.249 + "r" (value),
1.250 + "m" (*atomic)
1.251 + : "cr0",
1.252 + "memory");
1.253 }
1.254
1.255 static __inline__ int
1.256 -SDL_atomic_int_xchg_add(volatile int *atomic, int value)
1.257 -{
1.258 - int rv, tmp;
1.259 - __asm__ __volatile__("1: lwarx %0, 0, %3\n"
1.260 - " add %1, %0, %4\n"
1.261 - " stwcx. %1, 0, %3\n"
1.262 - " bne- 1b":"=&b"(rv), "=&r"(tmp), "=m"(*atomic)
1.263 - :"b"(atomic), "r"(value), "m"(*atomic)
1.264 - :"cr0", "memory");
1.265 - return rv;
1.266 +SDL_atomic_int_xchg_add(volatile int* atomic, int value)
1.267 +{
1.268 + int rv,tmp;
1.269 + __asm__ __volatile__("1: lwarx %0, 0, %3\n"
1.270 + " add %1, %0, %4\n"
1.271 + " stwcx. %1, 0, %3\n"
1.272 + " bne- 1b"
1.273 + : "=&b" (rv),
1.274 + "=&r" (tmp),
1.275 + "=m" (*atomic)
1.276 + : "b" (atomic),
1.277 + "r" (value),
1.278 + "m" (*atomic)
1.279 + : "cr0",
1.280 + "memory");
1.281 + return rv;
1.282 }
1.283
1.284 # if (SIZEOF_VOIDP == 4)
1.285 static __inline__ SDL_bool
1.286 -SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
1.287 -{
1.288 - int rv;
1.289 - __asm__ __volatile__(" sync\n"
1.290 - "1: lwarx %0, 0, %1\n"
1.291 - " subf. %0, %2, %0\n"
1.292 - " bne 2f\n"
1.293 - " stwcx. %3, 0, %1\n"
1.294 - " bne- 1b\n" "2: isync":"=&r"(rv)
1.295 - :"b"(atomic), "r"(oldvalue), "r":"cr0", "memory");
1.296 - return (SDL_bool) (rv == 0);
1.297 +SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
1.298 +{
1.299 + int rv;
1.300 + __asm__ __volatile__(" sync\n"
1.301 + "1: lwarx %0, 0, %1\n"
1.302 + " subf. %0, %2, %0\n"
1.303 + " bne 2f\n"
1.304 + " stwcx. %3, 0, %1\n"
1.305 + " bne- 1b\n"
1.306 + "2: isync"
1.307 + : "=&r" (rv)
1.308 + : "b" (atomic),
1.309 + "r" (oldvalue),
1.310 + "r" (newvalue)
1.311 + : "cr0",
1.312 + "memory");
1.313 + return (SDL_bool)(rv == 0);
1.314 }
1.315
1.316 static __inline__ SDL_bool
1.317 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.318 - void *newvalue)
1.319 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.320 {
1.321 - void *rv;
1.322 - __asm__ __volatile__("sync\n"
1.323 - "1: lwarx %0, 0, %1\n"
1.324 - " subf. %0, %2, %0\n"
1.325 - " bne 2f\n"
1.326 - " stwcx. %3, 0, %1\n"
1.327 - " bne- 1b\n" "2: isync":"=&r"(rv)
1.328 - :"b"(atomic), "r"(oldvalue), "r"(newvalue)
1.329 - :"cr0", "memory");
1.330 - return (SDL_bool) (rv == 0);
1.331 + void* rv;
1.332 + __asm__ __volatile__("sync\n"
1.333 + "1: lwarx %0, 0, %1\n"
1.334 + " subf. %0, %2, %0\n"
1.335 + " bne 2f\n"
1.336 + " stwcx. %3, 0, %1\n"
1.337 + " bne- 1b\n"
1.338 + "2: isync"
1.339 + : "=&r" (rv)
1.340 + : "b" (atomic),
1.341 + "r" (oldvalue),
1.342 + "r" (newvalue)
1.343 + : "cr0",
1.344 + "memory");
1.345 + return (SDL_bool)(rv == 0);
1.346 }
1.347 # elif (SIZEOF_VOIDP == 8)
1.348 static __inline__ SDL_bool
1.349 -SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
1.350 -{
1.351 - int rv;
1.352 - __asm__ __volatile__(" sync\n"
1.353 - "1: lwarx %0, 0, %1\n"
1.354 - " extsw %0, %0\n"
1.355 - " subf. %0, %2, %0\n"
1.356 - " bne 2f\n"
1.357 - " stwcx. %3, 0, %1\n"
1.358 - " bne- 1b\n" "2: isync":"=&r"(rv)
1.359 - :"b"(atomic), "r"(oldvalue), "r":"cr0", "memory");
1.360 - return (SDL_bool) (rv == 0);
1.361 +SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
1.362 +{
1.363 + int rv;
1.364 + __asm__ __volatile__(" sync\n"
1.365 + "1: lwarx %0, 0, %1\n"
1.366 + " extsw %0, %0\n"
1.367 + " subf. %0, %2, %0\n"
1.368 + " bne 2f\n"
1.369 + " stwcx. %3, 0, %1\n"
1.370 + " bne- 1b\n"
1.371 + "2: isync"
1.372 + : "=&r" (rv)
1.373 + : "b" (atomic),
1.374 + "r" (oldvalue),
1.375 + "r" (newvalue)
1.376 + : "cr0",
1.377 + "memory");
1.378 + return (SDL_bool)(rv == 0);
1.379 }
1.380
1.381 static __inline__ SDL_bool
1.382 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.383 - void *newvalue)
1.384 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.385 {
1.386 - void *rv;
1.387 - __asm__ __volatile__("sync\n"
1.388 - "1: ldarx %0, 0, %1\n"
1.389 - " subf. %0, %2, %0\n"
1.390 - " bne 2f\n"
1.391 - " stdcx. %3, 0, %1\n"
1.392 - " bne- 1b\n" "2: isync":"=&r"(rv)
1.393 - :"b"(atomic), "r"(oldvalue), "r"(newvalue)
1.394 - :"cr0", "memory");
1.395 - return (SDL_bool) (rv == 0);
1.396 + void* rv;
1.397 + __asm__ __volatile__("sync\n"
1.398 + "1: ldarx %0, 0, %1\n"
1.399 + " subf. %0, %2, %0\n"
1.400 + " bne 2f\n"
1.401 + " stdcx. %3, 0, %1\n"
1.402 + " bne- 1b\n"
1.403 + "2: isync"
1.404 + : "=&r" (rv)
1.405 + : "b" (atomic),
1.406 + "r" (oldvalue),
1.407 + "r" (newvalue)
1.408 + : "cr0",
1.409 + "memory");
1.410 + return (SDL_bool)(rv == 0);
1.411 }
1.412 # else
1.413 # error "Your system has an unsupported pointer size"
1.414 @@ -290,130 +354,161 @@
1.415 (__sync_bool_compare_and_swap((long*)(atomic),(long)(oldvalue),(long)(newvalue)))
1.416 #elif defined(__GNUC__) && defined(__LINUX__) && (defined(__mips__) || defined(__MIPS__))
1.417 static __inline__ int
1.418 -SDL_atomic_int_xchg_add(volatile int *atomic, int value)
1.419 -{
1.420 - int rv, tmp;
1.421 - __asm__ __volatile__("1: \n"
1.422 - ".set push \n"
1.423 - ".set mips2 \n"
1.424 - "ll %0,%3 \n"
1.425 - "addu %1,%4,%0 \n"
1.426 - "sc %1,%2 \n"
1.427 - ".set pop \n"
1.428 - "beqz %1,1b \n":"=&r"(rv),
1.429 - "=&r"(tmp), "=m"(*atomic)
1.430 - :"m"(*atomic), "r"(value)
1.431 - :"memory");
1.432 - return rv;
1.433 +SDL_atomic_int_xchg_add(volatile int* atomic, int value)
1.434 +{
1.435 + int rv,tmp;
1.436 + __asm__ __volatile__("1: \n"
1.437 + ".set push \n"
1.438 + ".set mips2 \n"
1.439 + "ll %0,%3 \n"
1.440 + "addu %1,%4,%0 \n"
1.441 + "sc %1,%2 \n"
1.442 + ".set pop \n"
1.443 + "beqz %1,1b \n"
1.444 + : "=&r" (rv),
1.445 + "=&r" (tmp),
1.446 + "=m" (*atomic)
1.447 + : "m" (*atomic),
1.448 + "r" (value)
1.449 + : "memory");
1.450 + return rv;
1.451 }
1.452
1.453 static __inline__ void
1.454 -SDL_atomic_int_add(volatile int *atomic, int value)
1.455 -{
1.456 - int rv;
1.457 - __asm__ __volatile__("1: \n"
1.458 - ".set push \n"
1.459 - ".set mips2 \n"
1.460 - "ll %0,%2 \n"
1.461 - "addu %0,%3,%0 \n"
1.462 - "sc %0,%1 \n"
1.463 - ".set pop \n"
1.464 - "beqz %0,1b \n":"=&r"(rv), "=m"(*atomic)
1.465 - :"m"(*atomic), "r"(value)
1.466 - :"memory");
1.467 +SDL_atomic_int_add(volatile int* atomic, int value)
1.468 +{
1.469 + int rv;
1.470 + __asm__ __volatile__("1: \n"
1.471 + ".set push \n"
1.472 + ".set mips2 \n"
1.473 + "ll %0,%2 \n"
1.474 + "addu %0,%3,%0 \n"
1.475 + "sc %0,%1 \n"
1.476 + ".set pop \n"
1.477 + "beqz %0,1b \n"
1.478 + : "=&r" (rv),
1.479 + "=m" (*atomic)
1.480 + : "m" (*atomic),
1.481 + "r" (value)
1.482 + : "memory");
1.483 }
1.484
1.485 static __inline__ SDL_bool
1.486 -SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
1.487 +SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
1.488 {
1.489 - int rv;
1.490 - __asm__ __volatile__(" .set push \n"
1.491 - " .set noat \n"
1.492 - " .set mips3 \n"
1.493 - "1: ll %0, %2 \n"
1.494 - " bne %0, %z3, 2f \n"
1.495 - " .set mips0 \n"
1.496 - " move $1, %z4 \n"
1.497 - " .set mips3 \n"
1.498 - " sc $1, %1 \n"
1.499 - " beqz $1, 1b \n"
1.500 - " sync \n"
1.501 - "2: \n"
1.502 - " .set pop \n":"=&r"(rv), "=R"(*atomic)
1.503 - :"R"(*atomic), "Jr"(oldvalue), "Jr"(newvalue)
1.504 - :"memory");
1.505 - return (SDL_bool) rv;
1.506 + int rv;
1.507 + __asm__ __volatile__(" .set push \n"
1.508 + " .set noat \n"
1.509 + " .set mips3 \n"
1.510 + "1: ll %0, %2 \n"
1.511 + " bne %0, %z3, 2f \n"
1.512 + " .set mips0 \n"
1.513 + " move $1, %z4 \n"
1.514 + " .set mips3 \n"
1.515 + " sc $1, %1 \n"
1.516 + " beqz $1, 1b \n"
1.517 + " sync \n"
1.518 + "2: \n"
1.519 + " .set pop \n"
1.520 + : "=&r" (rv),
1.521 + "=R" (*atomic)
1.522 + : "R" (*atomic),
1.523 + "Jr" (oldvalue),
1.524 + "Jr" (newvalue)
1.525 + : "memory");
1.526 + return (SDL_bool)rv;
1.527 }
1.528
1.529 static __inline__ SDL_bool
1.530 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.531 - void *newvalue)
1.532 -{
1.533 - int rv;
1.534 - __asm__ __volatile__(" .set push \n"
1.535 - " .set noat \n" " .set mips3 \n"
1.536 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.537 +{
1.538 + int rv;
1.539 + __asm__ __volatile__(" .set push \n"
1.540 + " .set noat \n"
1.541 + " .set mips3 \n"
1.542 # if defined(__mips64)
1.543 - "1: lld %0, %2 \n"
1.544 + "1: lld %0, %2 \n"
1.545 # else
1.546 - "1: ll %0, %2 \n"
1.547 -# endif
1.548 - " bne %0, %z3, 2f \n" " move $1, %z4 \n"
1.549 + "1: ll %0, %2 \n"
1.550 +# endif
1.551 + " bne %0, %z3, 2f \n"
1.552 + " move $1, %z4 \n"
1.553 # if defined(__mips64)
1.554 - " sc $1, %1 \n"
1.555 + " scd $1, %1 \n"
1.556 # else
1.557 - " scd $1, %1 \n"
1.558 -# endif
1.559 - " beqz $1, 1b \n"
1.560 - " sync \n"
1.561 - "2: \n"
1.562 - " .set pop \n":"=&r"(rv), "=R"(*atomic)
1.563 - :"R"(*atomic), "Jr"(oldvalue), "Jr"(newvalue)
1.564 - :"memory");
1.565 - return (SDL_bool) rv;
1.566 + " sc $1, %1 \n"
1.567 +# endif
1.568 + " beqz $1, 1b \n"
1.569 + " sync \n"
1.570 + "2: \n"
1.571 + " .set pop \n"
1.572 + : "=&r" (rv),
1.573 + "=R" (*atomic)
1.574 + : "R" (*atomic),
1.575 + "Jr" (oldvalue),
1.576 + "Jr" (newvalue)
1.577 + : "memory");
1.578 + return (SDL_bool)rv;
1.579 }
1.580 #elif defined(__GNUC__) && defined(__m68k__)
1.581 static __inline__ int
1.582 -SDL_atomic_int_xchg_add(volatile int *atomic, int value)
1.583 -{
1.584 - int rv = *atomic;
1.585 - int tmp;
1.586 - __asm__ __volatile__("1: move%.l %0,%1 \n"
1.587 - " add%.l %2,%1 \n"
1.588 - " cas%.l %0,%1,%3 \n"
1.589 - " jbne 1b \n":"=d"(rv), "=&d"(tmp)
1.590 - :"d"(value), "m"(*atomic), "0"(rv)
1.591 - :"memory");
1.592 - return (SDL_bool) rv;
1.593 +SDL_atomic_int_xchg_add(volatile int* atomic, int value)
1.594 +{
1.595 + int rv = *atomic;
1.596 + int tmp;
1.597 + __asm__ __volatile__("1: move%.l %0,%1 \n"
1.598 + " add%.l %2,%1 \n"
1.599 + " cas%.l %0,%1,%3 \n"
1.600 + " jbne 1b \n"
1.601 + : "=d" (rv),
1.602 + "=&d" (tmp)
1.603 + : "d" (value),
1.604 + "m" (*atomic),
1.605 + "0" (rv)
1.606 + : "memory");
1.607 + return rv;
1.608 }
1.609
1.610 static __inline__ void
1.611 -SDL_atomic_int_add(volatile int *atomic, int value)
1.612 -{
1.613 - __asm__ __volatile__("add%.l %0,%1"::"id"(value), "m"(*atomic)
1.614 - :"memory");
1.615 +SDL_atomic_int_add(volatile int* atomic, int value)
1.616 +{
1.617 + __asm__ __volatile__("add%.l %0,%1"
1.618 + :
1.619 + : "id" (value),
1.620 + "m" (*atomic)
1.621 + : "memory");
1.622 }
1.623
1.624 static __inline__ SDL_bool
1.625 -SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
1.626 -{
1.627 - char rv;
1.628 - int readvalue;
1.629 - __asm__ __volatile__("cas%.l %2,%3,%1\n"
1.630 - "seq %0":"=dm"(rv), "=m"(*atomic), "=d"(readvalue)
1.631 - :"d"(newvalue), "m"(*atomic), "2"(oldvalue));
1.632 - return (SDL_bool) rv;
1.633 +SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
1.634 +{
1.635 + char rv;
1.636 + int readvalue;
1.637 + __asm__ __volatile__("cas%.l %2,%3,%1\n"
1.638 + "seq %0"
1.639 + : "=dm" (rv),
1.640 + "=m" (*atomic),
1.641 + "=d" (readvalue)
1.642 + : "d" (newvalue),
1.643 + "m" (*atomic),
1.644 + "2" (oldvalue));
1.645 + return (SDL_bool)rv;
1.646 }
1.647
1.648 static __inline__ SDL_bool
1.649 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.650 - void *newvalue)
1.651 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.652 {
1.653 - char rv;
1.654 - int readvalue;
1.655 - __asm__ __volatile__("cas%.l %2,%3,%1\n"
1.656 - "seq %0":"=dm"(rv), "=m"(*atomic), "=d"(readvalue)
1.657 - :"d"(newvalue), "m"(*atomic), "2"(oldvalue));
1.658 - return (SDL_bool) rv;
1.659 + char rv;
1.660 + int readvalue;
1.661 + __asm__ __volatile__("cas%.l %2,%3,%1\n"
1.662 + "seq %0"
1.663 + : "=dm" (rv),
1.664 + "=m" (*atomic),
1.665 + "=d" (readvalue)
1.666 + : "d" (newvalue),
1.667 + "m" (*atomic),
1.668 + "2" (oldvalue));
1.669 + return (SDL_bool)rv;
1.670 }
1.671 #elif defined(__GNUC__) && defined(__s390__)
1.672 # define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue) \
1.673 @@ -429,26 +524,30 @@
1.674 })
1.675 # if (SIZEOF_VOIDP == 4)
1.676 static __inline__ SDL_bool
1.677 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.678 - void *newvalue)
1.679 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.680 {
1.681 - void *rv = oldvalue;
1.682 - __asm__ __volatile__("cs %0, %2, %1":"+d"(rv), "=Q"(*atomic)
1.683 - :"d"(newvalue), "m"(*atomic)
1.684 - :"cc");
1.685 - return (SDL_bool) (rv == oldvalue);
1.686 + void* rv = oldvalue;
1.687 + __asm__ __volatile__("cs %0, %2, %1"
1.688 + : "+d" (rv),
1.689 + "=Q" (*atomic)
1.690 + : "d" (newvalue),
1.691 + "m" (*atomic)
1.692 + : "cc");
1.693 + return (SDL_bool)(rv == oldvalue);
1.694 }
1.695 # elif (SIZEOF_VOIDP == 8)
1.696 static __inline__ SDL_bool
1.697 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.698 - void *newvalue)
1.699 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.700 {
1.701 - void *rv = oldvalue;
1.702 - void **a = atomic;
1.703 - __asm__ __volatile__("csg %0, %2, %1":"+d"(rv), "=Q"(*a)
1.704 - :"d"((long) (newvalue)), "m"(*a)
1.705 - :"cc");
1.706 - return (SDL_bool) (rv == oldvalue);
1.707 + void* rv = oldvalue;
1.708 + void** a = atomic;
1.709 + __asm__ __volatile__("csg %0, %2, %1"
1.710 + : "+d" (rv),
1.711 + "=Q" (*a)
1.712 + : "d" ((long)(newvalue)),
1.713 + "m" (*a)
1.714 + : "cc");
1.715 + return (SDL_bool)(rv == oldvalue);
1.716 }
1.717 # else
1.718 # error "Your system has an unsupported pointer size"
1.719 @@ -456,34 +555,31 @@
1.720 #elif defined(__WIN32__)
1.721 # include <windows.h>
1.722 static __inline__ int
1.723 -SDL_atomic_int_xchg_add(volatile int *atomic, int value)
1.724 +SDL_atomic_int_xchg_add(volatile int* atomic, int value)
1.725 {
1.726 - return InterlockedExchangeAdd(atomic, value);
1.727 + return InterlockedExchangeAdd(atomic, value);
1.728 }
1.729
1.730 static __inline__ void
1.731 -SDL_atomic_int_add(volatile int *atomic, int value)
1.732 +SDL_atomic_int_add(volatile int* atomic, int value)
1.733 {
1.734 - InterlockedExchangeAdd(atomic, value);
1.735 + InterlockedExchangeAdd(atomic, value);
1.736 }
1.737
1.738 # if (WINVER > 0X0400)
1.739 static __inline__ SDL_bool
1.740 -SDL_atmoic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
1.741 +SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
1.742 {
1.743 - return (SDL_bool) (InterlockedCompareExchangePointer((PVOID *) atomic,
1.744 - (PVOID) newvalue,
1.745 - (PVOID) oldvalue) ==
1.746 - oldvalue);
1.747 + return (SDL_bool)(InterlockedCompareExchangePointer((PVOID*)atomic,
1.748 + (PVOID)newvalue,
1.749 + (PVOID)oldvalue) == oldvalue);
1.750 }
1.751
1.752
1.753 static __inline__ SDL_bool
1.754 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.755 - void *newvalue)
1.756 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.757 {
1.758 - return (InterlockedCompareExchangePointer(atomic, newvalue, oldvalue) ==
1.759 - oldvalue);
1.760 + return (InterlockedCompareExchangePointer(atomic, newvalue, oldvalue) == oldvalue);
1.761 }
1.762 # else /* WINVER <= 0x0400 */
1.763 # if (SIZEOF_VOIDP != 4)
1.764 @@ -491,44 +587,43 @@
1.765 # endif
1.766
1.767 static __inline__ SDL_bool
1.768 -SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
1.769 +SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
1.770 {
1.771 - return (InterlockedCompareExchange(atomic, newvalue, oldvalue) ==
1.772 - oldvalue);
1.773 + return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue);
1.774 }
1.775
1.776 static __inline__ SDL_bool
1.777 -SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
1.778 - void *newvalue)
1.779 +SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
1.780 {
1.781 - return (InterlockedCompareExchange(atomic, newvalue, oldvalue) ==
1.782 - oldvalue);
1.783 + return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue);
1.784 }
1.785 # endif
1.786 #else /* when all else fails */
1.787 # define SDL_ATOMIC_OPS_NOT_SUPPORTED
1.788 # warning "Atomic Ops for this platform not supported!"
1.789 static __inline__ int
1.790 -SDL_atomic_int_xchg_add(volatile int *atomic, int value)
1.791 -{
1.792 - int rv = *atomic;
1.793 - *(atomic) += value;
1.794 - return rv;
1.795 +SDL_atomic_int_xchg_add(volatile int* atomic, int value)
1.796 +{
1.797 + int rv = *atomic;
1.798 + *(atomic) += value;
1.799 + return rv;
1.800 }
1.801
1.802 static __inline__ SDL_bool
1.803 -SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
1.804 +SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
1.805 {
1.806 - return (*atomic == oldvalue) ?
1.807 - ((*atomic = newvalue), SDL_TRUE) : SDL_FALSE;
1.808 + return (*atomic == oldvalue) ?
1.809 + ((*atomic = newvalue), SDL_TRUE) : SDL_FALSE;
1.810 }
1.811
1.812 static __inline__ void
1.813 -SDL_atomic_int_add(volatile int *atomic, int value)
1.814 +SDL_atomic_int_add(volatile int* atomic, int value)
1.815 {
1.816 - *atomic += value;
1.817 + *atomic += value;
1.818 }
1.819 #endif /* arch & platforms */
1.820 +
1.821 +/* *INDENT-ON* */
1.822
1.823 #ifdef ATOMIC_INT_CMP_XCHG
1.824 static __inline__ SDL_bool