include/SDL_atomic.h
author Sam Lantinga <slouken@libsdl.org>
Wed, 10 Jun 2009 13:34:20 +0000
changeset 3186 51750b7a966f
parent 3181 030899df1af5
child 3187 e041d2c603fe
permissions -rw-r--r--
/*
    SDL - Simple DirectMedia Layer
    Copyright (C) 1997-2006 Sam Lantinga

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    Sam Lantinga
    slouken@libsdl.org
 */

/**
 * \file SDL_atomic.h
 *
 * Atomic int and pointer magic
 */
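
/*
 * Overview of the operations provided below, per architecture, either as
 * inline assembly or as thin wrappers over compiler/OS primitives:
 *
 *   SDL_atomic_int_add(atomic, value)          -- *atomic += value
 *   SDL_atomic_int_xchg_add(atomic, value)     -- *atomic += value, returns the old value
 *   SDL_atomic_int_cmp_xchg(atomic, old, new)  -- compare-and-swap, SDL_TRUE on success
 *   SDL_atomic_ptr_cmp_xchg(atomic, old, new)  -- pointer compare-and-swap
 *   SDL_atomic_int_get/set, _inc, _dec_test    -- convenience wrappers (end of file)
 *
 * A minimal, illustrative reference-count sketch (not part of this header;
 * the MyObject type and its functions are hypothetical):
 *
 *   typedef struct { volatile int refcount; } MyObject;
 *
 *   void MyObject_AddRef(MyObject *obj)  { SDL_atomic_int_inc(&obj->refcount); }
 *   void MyObject_Release(MyObject *obj) {
 *       if (SDL_atomic_int_dec_test(&obj->refcount)) {
 *           free(obj);   // old value was 1, so this was the last reference
 *       }
 *   }
 */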

#ifndef _SDL_atomic_h_
#define _SDL_atomic_h_


#include "SDL_stdinc.h"
#include "SDL_platform.h"

#include "begin_code.h"

/* Set up for C function definitions, even when using C++ */
#ifdef __cplusplus
/* *INDENT-OFF* */
extern "C" {
/* *INDENT-ON* */
#endif

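/*
 * Each architecture/platform block below provides the same set of operations.
 * Blocks that only define ATOMIC_INT_CMP_XCHG and/or ATOMIC_MEMORY_BARRIER
 * rely on the generic implementations near the end of this file to build the
 * remaining integer operations and the get/set wrappers on top of those
 * primitives.
 */
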
#if defined(__GNUC__) && (defined(i386) || defined(__i386__) || defined(__x86_64__))
static __inline__ void
SDL_atomic_int_add(volatile int *atomic, int value)
{
    __asm__ __volatile__("lock;" "addl %1, %0":"=m"(*atomic)
                         :"ir"(value), "m"(*atomic));
}

static __inline__ int
SDL_atomic_int_xchg_add(volatile int *atomic, int value)
{
    int rv;
    __asm__ __volatile__("lock;" "xaddl %0, %1":"=r"(rv), "=m"(*atomic)
                         :"0"(value), "m"(*atomic));
    return rv;
}

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
    int rv;
    __asm__ __volatile__("lock;" "cmpxchgl %2, %1":"=a"(rv), "=m"(*atomic)
                         :"r"(newvalue), "m"(*atomic), "0"(oldvalue));
    return (SDL_bool) (rv == oldvalue);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    void *rv;
    __asm__ __volatile__("lock;"
# if defined(__x86_64__)
                         "cmpxchgq %q2, %1"
# else
                         "cmpxchgl %2, %1"
# endif
                         :"=a"(rv), "=m"(*atomic)
                         :"r"(newvalue), "m"(*atomic), "0"(oldvalue));
    return (SDL_bool) (rv == oldvalue);
}
#elif defined(__GNUC__) && defined(__alpha__)
# define ATOMIC_MEMORY_BARRIER (__asm__ __volatile__ ("mb" : : : "memory"))
# define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)  \
  ({                                                    \
    int rv,prev;                                        \
    __asm__ __volatile__("   mb\n"                      \
                         "1: ldl_l   %0,%2\n"           \
                         "   cmpeq   %0,%3,%1\n"        \
                         "   beq     %1,2f\n"           \
                         "   mov     %4,%1\n"           \
                         "   stl_c   %1,%2\n"           \
                         "   beq     %1,1b\n"           \
                         "   mb\n"                      \
                         "2:"                           \
                         : "=&r" (prev),                \
                           "=&r" (rv)                   \
                         : "m" (*(atomic)),             \
                           "Ir" (oldvalue),             \
                           "Ir" (newvalue)              \
                         : "memory");                   \
    (rv != 0);                                          \
  })

# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    int rv;
    void *prev;
    __asm__ __volatile__("   mb\n"
                         "1: ldl_l %0,%2\n"
                         "   cmpeq %0,%3,%1\n"
                         "   beq   %1,2f\n"
                         "   mov   %4,%1\n"
                         "   stl_c %1,%2\n"
                         "   beq   %1,1b\n"
                         "   mb\n" "2:":"=&r"(prev), "=&r"(rv)
                         :"m"(*atomic), "Ir"(oldvalue), "Ir"(newvalue)
                         :"memory");
    return (SDL_bool) (rv != 0);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    int rv;
    void *prev;
    __asm__ __volatile__("   mb\n"
                         "1: ldq_l %0,%2\n"
                         "   cmpeq %0,%3,%1\n"
                         "   beq   %1,2f\n"
                         "   mov   %4,%1\n"
                         "   stq_c %1,%2\n"
                         "   beq   %1,1b\n"
                         "   mb\n" "2:":"=&r"(prev), "=&r"(rv)
                         :"m"(*atomic), "Ir"(oldvalue), "Ir"(newvalue)
                         :"memory");
    return (SDL_bool) (rv != 0);
}
# else
#  error "Your system has an unsupported pointer size"
# endif /* SIZEOF_VOIDP */
#elif defined(__GNUC__) && defined(__sparc__)
# define ATOMIC_MEMORY_BARRIER                                          \
  (__asm__ __volatile__("membar #LoadLoad | #LoadStore"                 \
                        " | #StoreLoad | #StoreStore" : : : "memory"))
# define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)                  \
  ({                                                                    \
    int rv;                                                             \
    __asm__ __volatile__("cas [%4], %2, %0"                             \
                         : "=r" (rv), "=m" (*(atomic))                  \
                         : "r" (oldvalue), "m" (*(atomic)),             \
                         "r" (atomic), "0" (newvalue));                 \
    rv == oldvalue;                                                     \
  })

# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    void *rv;
    __asm__ __volatile__("cas [%4], %2, %0":"=r"(rv), "=m"(*atomic)
                         :"r"(oldvalue),
                         "m"(*atomic), "r"(atomic), "0"(newvalue));
    return (SDL_bool) (rv == oldvalue);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    void *rv;
    void **a = (void **) atomic;
    __asm__ __volatile__("casx [%4], %2, %0":"=r"(rv), "=m"(*a)
                         :"r"(oldvalue), "m"(*a), "r"(a), "0"(newvalue));
    return (SDL_bool) (rv == oldvalue);
}
# else
#  error "Your system has an unsupported pointer size"
# endif /* SIZEOF_VOIDP */
#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC))
# define ATOMIC_MEMORY_BARRIER \
  (__asm__ __volatile__ ("sync" : : : "memory"))
static __inline__ void
SDL_atomic_int_add(volatile int *atomic, int value)
{
    int rv, tmp;
    __asm__ __volatile__("1: lwarx   %0,  0, %3\n"
                         "   add     %1, %0, %4\n"
                         "   stwcx.  %1,  0, %3\n"
                         "   bne-    1b":"=&b"(rv), "=&r"(tmp), "=m"(*atomic)
                         :"b"(atomic), "r"(value), "m"(*atomic)
                         :"cr0", "memory");
}

static __inline__ int
SDL_atomic_int_xchg_add(volatile int *atomic, int value)
{
    int rv, tmp;
    __asm__ __volatile__("1: lwarx  %0, 0, %3\n"
                         "   add    %1, %0, %4\n"
                         "   stwcx. %1, 0, %3\n"
                         "   bne-   1b":"=&b"(rv), "=&r"(tmp), "=m"(*atomic)
                         :"b"(atomic), "r"(value), "m"(*atomic)
                         :"cr0", "memory");
    return rv;
}

# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
    int rv;
    __asm__ __volatile__("   sync\n"
                         "1: lwarx   %0, 0, %1\n"
                         "   subf.   %0, %2, %0\n"
                         "   bne     2f\n"
                         "   stwcx.  %3, 0, %1\n"
                         "   bne-    1b\n" "2: isync":"=&r"(rv)
                         :"b"(atomic), "r"(oldvalue), "r"(newvalue)
                         :"cr0", "memory");
    return (SDL_bool) (rv == 0);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    void *rv;
    __asm__ __volatile__("sync\n"
                         "1: lwarx  %0,  0, %1\n"
                         "   subf.  %0, %2, %0\n"
                         "   bne    2f\n"
                         "   stwcx. %3,  0, %1\n"
                         "   bne-   1b\n" "2: isync":"=&r"(rv)
                         :"b"(atomic), "r"(oldvalue), "r"(newvalue)
                         :"cr0", "memory");
    return (SDL_bool) (rv == 0);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
    int rv;
    __asm__ __volatile__("   sync\n"
                         "1: lwarx   %0,  0, %1\n"
                         "   extsw   %0, %0\n"
                         "   subf.   %0, %2, %0\n"
                         "   bne     2f\n"
                         "   stwcx.  %3,  0, %1\n"
                         "   bne-    1b\n" "2: isync":"=&r"(rv)
                         :"b"(atomic), "r"(oldvalue), "r"(newvalue)
                         :"cr0", "memory");
    return (SDL_bool) (rv == 0);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    void *rv;
    __asm__ __volatile__("sync\n"
                         "1: ldarx  %0,  0, %1\n"
                         "   subf.  %0, %2, %0\n"
                         "   bne    2f\n"
                         "   stdcx. %3,  0, %1\n"
                         "   bne-   1b\n" "2: isync":"=&r"(rv)
                         :"b"(atomic), "r"(oldvalue), "r"(newvalue)
                         :"cr0", "memory");
    return (SDL_bool) (rv == 0);
}
# else
#  error "Your system has an unsupported pointer size"
# endif /* SIZEOF_VOIDP */
#elif defined(__GNUC__) && (defined(__IA64__) || defined(__ia64__))
# define ATOMIC_MEMORY_BARRIER (__sync_synchronize())
# define SDL_atomic_int_xchg_add(atomic, value)     \
  (__sync_fetch_and_add((atomic),(value)))
# define SDL_atomic_int_add(atomic, value)                  \
  ((void)__sync_fetch_and_add((atomic),(value)))
# define SDL_atomic_int_cmp_xchg(atomic,oldvalue,newvalue)  \
  (__sync_bool_compare_and_swap((atomic),(oldvalue),(newvalue)))
# define SDL_atomic_ptr_cmp_xchg(atomic,oldvalue,newvalue)              \
  (__sync_bool_compare_and_swap((long*)(atomic),(long)(oldvalue),(long)(newvalue)))
#elif defined(__GNUC__) && defined(__LINUX__) && (defined(__mips__) || defined(__MIPS__))
static __inline__ int
SDL_atomic_int_xchg_add(volatile int *atomic, int value)
{
    int rv, tmp;
    __asm__ __volatile__("1:              \n"
                         ".set  push      \n"
                         ".set  mips2     \n"
                         "ll    %0,%3     \n"
                         "addu  %1,%4,%0  \n"
                         "sc    %1,%2     \n"
                         ".set  pop       \n"
                         "beqz  %1,1b     \n":"=&r"(rv),
                         "=&r"(tmp), "=m"(*atomic)
                         :"m"(*atomic), "r"(value)
                         :"memory");
    return rv;
}

static __inline__ void
SDL_atomic_int_add(volatile int *atomic, int value)
{
    int rv;
    __asm__ __volatile__("1:               \n"
                         ".set  push       \n"
                         ".set  mips2      \n"
                         "ll    %0,%2      \n"
                         "addu  %0,%3,%0   \n"
                         "sc    %0,%1      \n"
                         ".set  pop        \n"
                         "beqz  %0,1b      \n":"=&r"(rv), "=m"(*atomic)
                         :"m"(*atomic), "r"(value)
                         :"memory");
}

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
    int rv;
    __asm__ __volatile__("     .set push        \n"
                         "     .set noat        \n"
                         "     .set mips3       \n"
                         "1:   ll   %0, %2      \n"
                         "     bne  %0, %z3, 2f \n"
                         "     .set mips0       \n"
                         "     move $1, %z4     \n"
                         "     .set mips3       \n"
                         "     sc   $1, %1      \n"
                         "     beqz $1, 1b      \n"
                         "     sync             \n"
                         "2:                    \n"
                         "     .set pop         \n":"=&r"(rv), "=R"(*atomic)
                         :"R"(*atomic), "Jr"(oldvalue), "Jr"(newvalue)
                         :"memory");
    return (SDL_bool) (rv == oldvalue);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    void *rv;
    __asm__ __volatile__("     .set push        \n"
                         "     .set noat        \n" "     .set mips3       \n"
# if defined(__mips64)
                         "1:   lld  %0, %2      \n"
# else
                         "1:   ll   %0, %2      \n"
# endif
                         "     bne  %0, %z3, 2f \n" "     move $1, %z4     \n"
# if defined(__mips64)
                         "     scd  $1, %1      \n"
# else
                         "     sc   $1, %1      \n"
# endif
                         "     beqz $1, 1b      \n"
                         "     sync             \n"
                         "2:                    \n"
                         "     .set pop         \n":"=&r"(rv), "=R"(*atomic)
                         :"R"(*atomic), "Jr"(oldvalue), "Jr"(newvalue)
                         :"memory");
    return (SDL_bool) (rv == oldvalue);
}
#elif defined(__GNUC__) && defined(__m68k__)
static __inline__ int
SDL_atomic_int_xchg_add(volatile int *atomic, int value)
{
    int rv = *atomic;
    int tmp;
    __asm__ __volatile__("1: move%.l %0,%1    \n"
                         "   add%.l  %2,%1    \n"
                         "   cas%.l  %0,%1,%3 \n"
                         "   jbne    1b       \n":"=d"(rv), "=&d"(tmp)
                         :"d"(value), "m"(*atomic), "0"(rv)
                         :"memory");
    return rv;
}

static __inline__ void
SDL_atomic_int_add(volatile int *atomic, int value)
{
    __asm__ __volatile__("add%.l %0,%1"::"id"(value), "m"(*atomic)
                         :"memory");
}

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
    char rv;
    int readvalue;
    __asm__ __volatile__("cas%.l %2,%3,%1\n"
                         "seq    %0":"=dm"(rv), "=m"(*atomic), "=d"(readvalue)
                         :"d"(newvalue), "m"(*atomic), "2"(oldvalue));
    return (SDL_bool) rv;
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    char rv;
    int readvalue;
    __asm__ __volatile__("cas%.l %2,%3,%1\n"
                         "seq    %0":"=dm"(rv), "=m"(*atomic), "=d"(readvalue)
                         :"d"(newvalue), "m"(*atomic), "2"(oldvalue));
    return (SDL_bool) rv;
}
#elif defined(__GNUC__) && defined(__s390__)
# define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)  \
  ({                                                    \
    int rv = oldvalue;                                  \
    __asm__ __volatile__("cs %0, %2, %1"                \
                         : "+d" (rv),                   \
                           "=Q" (*(atomic))             \
                         : "d" (newvalue),              \
                           "m" (*(atomic))              \
                         : "cc");                       \
    rv == oldvalue;                                     \
  })
# if (SIZEOF_VOIDP == 4)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    void *rv = oldvalue;
    __asm__ __volatile__("cs %0, %2, %1":"+d"(rv), "=Q"(*atomic)
                         :"d"(newvalue), "m"(*atomic)
                         :"cc");
    return (SDL_bool) (rv == oldvalue);
}
# elif (SIZEOF_VOIDP == 8)
static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    void *rv = oldvalue;
    void **a = (void **) atomic;
    __asm__ __volatile__("csg %0, %2, %1":"+d"(rv), "=Q"(*a)
                         :"d"((long) (newvalue)), "m"(*a)
                         :"cc");
    return (SDL_bool) (rv == oldvalue);
}
# else
#  error "Your system has an unsupported pointer size"
# endif /* SIZEOF_VOIDP */
#elif defined(__WIN32__)
# include <windows.h>
static __inline__ int
SDL_atomic_int_xchg_add(volatile int *atomic, int value)
{
    return InterlockedExchangeAdd(atomic, value);
}

static __inline__ void
SDL_atomic_int_add(volatile int *atomic, int value)
{
    InterlockedExchangeAdd(atomic, value);
}

# if (WINVER > 0x0400)
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
    return (SDL_bool) (InterlockedCompareExchangePointer((PVOID *) atomic,
                                                         (PVOID) newvalue,
                                                         (PVOID) oldvalue) ==
                       (PVOID) oldvalue);
}


static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    return (SDL_bool) (InterlockedCompareExchangePointer((PVOID *) atomic,
                                                         newvalue,
                                                         oldvalue) == oldvalue);
}
# else /* WINVER <= 0x0400 */
#  if (SIZEOF_VOIDP != 4)
#   error "InterlockedCompareExchangePointer needed"
#  endif

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
    return (InterlockedCompareExchange(atomic, newvalue, oldvalue) ==
            oldvalue);
}

static __inline__ SDL_bool
SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
                        void *newvalue)
{
    return (InterlockedCompareExchange(atomic, newvalue, oldvalue) ==
            oldvalue);
}
# endif
#else /* when all else fails */
# define SDL_ATOMIC_OPS_NOT_SUPPORTED
# warning "Atomic Ops for this platform not supported!"
static __inline__ int
SDL_atomic_int_xchg_add(volatile int *atomic, int value)
{
    int rv = *atomic;
    *(atomic) += value;
    return rv;
}

static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
    return (*atomic == oldvalue) ?
        ((*atomic = newvalue), SDL_TRUE) : SDL_FALSE;
}

static __inline__ void
SDL_atomic_int_add(volatile int *atomic, int value)
{
    *atomic += value;
}
#endif /* arch & platforms */

#ifdef ATOMIC_INT_CMP_XCHG
static __inline__ SDL_bool
SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
{
    return (SDL_bool) ATOMIC_INT_CMP_XCHG(atomic, oldvalue, newvalue);
}

static __inline__ int
SDL_atomic_int_xchg_add(volatile int *atomic, int value)
{
    int rv;
    do
        rv = *atomic;
    while (!ATOMIC_INT_CMP_XCHG(atomic, rv, rv + value));
    return rv;
}

static __inline__ void
SDL_atomic_int_add(volatile int *atomic, int value)
{
    int rv;
    do
        rv = *atomic;
    while (!ATOMIC_INT_CMP_XCHG(atomic, rv, rv + value));
}
#endif /* ATOMIC_INT_CMP_XCHG */

#ifdef ATOMIC_MEMORY_BARRIER
# define SDL_atomic_int_get(atomic) \
  (ATOMIC_MEMORY_BARRIER,*(atomic))
# define SDL_atomic_int_set(atomic,value) \
  (*(atomic)=(value),ATOMIC_MEMORY_BARRIER)
#else
# define SDL_atomic_int_get(atomic) (*(atomic))
# define SDL_atomic_int_set(atomic, newvalue) ((void)(*(atomic) = (newvalue)))
#endif /* ATOMIC_MEMORY_BARRIER */

#define SDL_atomic_int_inc(atomic) (SDL_atomic_int_add((atomic),1))
#define SDL_atomic_int_dec_test(atomic) (SDL_atomic_int_xchg_add((atomic),-1) == 1)
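
/*
 * Illustrative compare-and-swap retry loop (a sketch only; the node type and
 * list head below are hypothetical and not part of this header).  This is the
 * usual pattern for lock-free updates built on SDL_atomic_ptr_cmp_xchg:
 *
 *   struct node { struct node *next; };
 *   static volatile void *list_head = NULL;
 *
 *   static void list_push(struct node *n)
 *   {
 *       void *old;
 *       do {
 *           old = (void *) list_head;       // snapshot the current head
 *           n->next = (struct node *) old;  // link the new node in front of it
 *       } while (!SDL_atomic_ptr_cmp_xchg(&list_head, old, n));
 *   }
 */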

/* Ends C function definitions when using C++ */
#ifdef __cplusplus
/* *INDENT-OFF* */
}
/* *INDENT-ON* */
#endif

#include "close_code.h"

#endif /* _SDL_atomic_h_ */

/* vi: set ts=4 sw=4 expandtab: */