include/SDL_atomic.h
changeset 3187 e041d2c603fe
parent 3186 51750b7a966f
child 3199 3e1bf2b8bd81
equal deleted inserted replaced
3186:51750b7a966f 3187:e041d2c603fe
    40 /* *INDENT-OFF* */
    40 /* *INDENT-OFF* */
    41 extern "C" {
    41 extern "C" {
    42 /* *INDENT-ON* */
    42 /* *INDENT-ON* */
    43 #endif
    43 #endif
    44 
    44 
       
    45 /* indent is really bad at handling assembly */
       
    46 /* *INDENT-OFF* */
       
    47 
    45 #if defined(__GNUC__) && (defined(i386) || defined(__i386__)  || defined(__x86_64__))
    48 #if defined(__GNUC__) && (defined(i386) || defined(__i386__)  || defined(__x86_64__))
    46 static __inline__ void
    49 static __inline__ void
    47 SDL_atomic_int_add(volatile int *atomic, int value)
    50 SDL_atomic_int_add(volatile int* atomic, int value)
    48 {
    51 {
    49     __asm__ __volatile__("lock;" "addl %1, %0":"=m"(*atomic)
    52   __asm__ __volatile__("lock;"
    50                          :"ir"(value), "m"(*atomic));
    53                        "addl %1, %0"
       
    54                        : "=m" (*atomic)
       
    55                        : "ir" (value),
       
    56                          "m" (*atomic));
    51 }
    57 }
    52 
    58 
    53 static __inline__ int
    59 static __inline__ int
    54 SDL_atomic_int_xchg_add(volatile int *atomic, int value)
    60 SDL_atomic_int_xchg_add(volatile int* atomic, int value)
    55 {
    61 {                                              
    56     int rv;
    62   int rv;                                    
    57     __asm__ __volatile__("lock;" "xaddl %0, %1":"=r"(rv), "=m"(*atomic)
    63   __asm__ __volatile__("lock;"               
    58                          :"0"(value), "m"(*atomic));
    64                        "xaddl %0, %1"        
    59     return rv;
    65                        : "=r" (rv),          
    60 }
    66                          "=m" (*atomic)    
    61 
    67                        : "0" (value),        
    62 static __inline__ SDL_bool
    68                          "m" (*atomic));   
    63 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
    69   return rv;                                        
    64 {
    70 }
    65     int rv;
    71 
    66     __asm__ __volatile__("lock;" "cmpxchgl %2, %1":"=a"(rv), "=m"(*atomic)
    72 static __inline__ SDL_bool
    67                          :"r"(newvalue), "m"(*atomic), "0"(oldvalue));
    73 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
    68     return (SDL_bool) (rv == oldvalue);
    74 {
    69 }
    75   int rv;                                                      
    70 
    76   __asm__ __volatile__("lock;"                               
    71 static __inline__ SDL_bool
    77                        "cmpxchgl %2, %1"                     
    72 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
    78                        : "=a" (rv),                          
    73                         void *newvalue)
    79                          "=m" (*atomic)             
    74 {
    80                        : "r" (newvalue),                     
    75     void *rv;
    81                          "m" (*atomic),                    
    76     __asm__ __volatile__("lock;"
    82                          "0" (oldvalue));
    77 # if defined(__x86_64__)
    83   return (SDL_bool)(rv == oldvalue);                                          
    78                          "cmpxchgq %q2, %1"
    84 }
       
    85 
       
    86 static __inline__ SDL_bool
       
    87 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
       
    88 {
       
    89   void* rv;
       
    90   __asm__ __volatile__("lock;"
       
    91 # if defined(__x86_64__)                       
       
    92                        "cmpxchgq %q2, %1"
    79 # else
    93 # else
    80                          "cmpxchgl %2, %1"
    94                        "cmpxchgl %2, %1"
    81 # endif
    95 # endif                       
    82                          :"=a"(rv), "=m"(*atomic)
    96                        : "=a" (rv),
    83                          :"r"(newvalue), "m"(*atomic), "0"(oldvalue));
    97                          "=m" (*atomic)
    84     return (SDL_bool) (rv == oldvalue);
    98                        : "r" (newvalue),
       
    99                          "m" (*atomic),
       
   100                          "0" (oldvalue));
       
   101   return (SDL_bool)(rv == oldvalue);
    85 }
   102 }
    86 #elif defined(__GNUC__) && defined(__alpha__)
   103 #elif defined(__GNUC__) && defined(__alpha__)
    87 # define ATOMIC_MEMORY_BARRIER (__asm__ __volatile__ ("mb" : : : "memory"))
   104 # define ATOMIC_MEMORY_BARRIER (__asm__ __volatile__ ("mb" : : : "memory"))
    88 # define ATOMIC_INT_CMP_XCHG(atomic,value)              \
   105 # define ATOMIC_INT_CMP_XCHG(atomic,value)              \
    89   ({                                                    \
   106   ({                                                    \
   106     (rv != 0);                                          \
   123     (rv != 0);                                          \
   107   })
   124   })
   108 
   125 
   109 # if (SIZEOF_VOIDP == 4)
   126 # if (SIZEOF_VOIDP == 4)
   110 static __inline__ SDL_bool
   127 static __inline__ SDL_bool
   111 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   128 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   112                         void *newvalue)
   129 {
   113 {
   130   int rv;
   114     int rv;
   131   void* prev;
   115     void *prev;
   132   __asm__ __volatile__("   mb\n"
   116     __asm__ __volatile__("   mb\n"
   133                        "1: ldl_l %0,%2\n"
   117                          "1: ldl_l %0,%2\n"
   134                        "   cmpeq %0,%3,%1\n"
   118                          "   cmpeq %0,%3,%1\n"
   135                        "   beq   %1,2f\n"
   119                          "   beq   %1,2f\n"
   136                        "   mov   %4,%1\n"
   120                          "   mov   %4,%1\n"
   137                        "   stl_c %1,%2\n"
   121                          "   stl_c %1,%2\n"
   138                        "   beq   %1,1b\n"
   122                          "   beq   %1,1b\n"
   139                        "   mb\n"
   123                          "   mb\n" "2:":"=&r"(prev), "=&r"(rv)
   140                        "2:"
   124                          :"m"(*atomic), "Ir"(oldvalue), "Ir"(newvalue)
   141                        : "=&r" (prev),
   125                          :"memory");
   142                          "=&r" (rv)
   126     return (SDL_bool) (rv != 0);
   143                        : "m" (*atomic),
       
   144                          "Ir" (oldvalue),
       
   145                          "Ir" (newvalue)
       
   146                        : "memory");
       
   147   return (SDL_bool)(rv != 0);
   127 }
   148 }
   128 # elif (SIZEOF_VOIDP == 8)
   149 # elif (SIZEOF_VOIDP == 8)
   129 static __inline__ SDL_bool
   150 static __inline__ SDL_bool
   130 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   151 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   131                         void *newvalue)
   152 {
   132 {
   153   int rv;
   133     int rv;
   154   void* prev;
   134     void *prev;
   155   __asm__ __volatile__("   mb\n"
   135     __asm__ __volatile__("   mb\n"
   156                        "1: ldq_l %0,%2\n"
   136                          "1: ldq_l %0,%2\n"
   157                        "   cmpeq %0,%3,%1\n"
   137                          "   cmpeq %0,%3,%1\n"
   158                        "   beq   %1,2f\n"
   138                          "   beq   %1,2f\n"
   159                        "   mov   %4,%1\n"
   139                          "   mov   %4,%1\n"
   160                        "   stq_c %1,%2\n"
   140                          "   stq_c %1,%2\n"
   161                        "   beq   %1,1b\n"
   141                          "   beq   %1,1b\n"
   162                        "   mb\n"
   142                          "   mb\n" "2:":"=&r"(prev), "=&r"(rv)
   163                        "2:"
   143                          :"m"(*atomic), "Ir"(oldvalue), "Ir"(newvalue)
   164                        : "=&r" (prev),
   144                          :"memory");
   165                          "=&r" (rv)
   145     return (SDL_bool) (rv != 0);
   166                        : "m" (*atomic),
       
   167                          "Ir" (oldvalue),
       
   168                          "Ir" (newvalue)
       
   169                        : "memory");
       
   170   return (SDL_bool)(rv != 0);
   146 }
   171 }
   147 # else
   172 # else
   148 #  error "Your system has an unsupported pointer size"
   173 #  error "Your system has an unsupported pointer size"  
   149 # endif /* SIZEOF_VOIDP */
   174 # endif  /* SIZEOF_VOIDP */
   150 #elif defined(__GNUC__) && defined(__sparc__)
   175 #elif defined(__GNUC__) && defined(__sparc__)
   151 # define ATOMIC_MEMORY_BARRIER                                          \
   176 # define ATOMIC_MEMORY_BARRIER                                          \
   152   (__asm__ __volatile__("membar #LoadLoad | #LoadStore"                 \
   177   (__asm__ __volatile__("membar #LoadLoad | #LoadStore"                 \
   153                         " | #StoreLoad | #StoreStore" : : : "memory"))
   178                         " | #StoreLoad | #StoreStore" : : : "memory"))
   154 # define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)                  \
   179 # define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)                  \
   161     rv == oldvalue;                                                     \
   186     rv == oldvalue;                                                     \
   162   })
   187   })
   163 
   188 
   164 # if (SIZEOF_VOIDP == 4)
   189 # if (SIZEOF_VOIDP == 4)
   165 static __inline__ SDL_bool
   190 static __inline__ SDL_bool
   166 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   191 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   167                         void *newvalue)
   192 {
   168 {
   193   void* rv;
   169     void *rv;
   194   __asm__ __volatile__("cas [%4], %2, %0"
   170     __asm__ __volatile__("cas [%4], %2, %0":"=r"(rv), "=m"(*atomic)
   195                        : "=r" (rv),
   171                          :"r"(oldvalue),
   196                          "=m" (*atomic)
   172                          "m"(*atomic), "r"(atomic), "0"(newvalue));
   197                        : "r" (oldvalue),
   173     return (SDL_bool) (rv == oldvalue);
   198                          "m" (*atomic),
       
   199                          "r" (atomic),
       
   200                          "0" (newvalue));
       
   201   return (SDL_bool)(rv == oldvalue);
   174 }
   202 }
   175 # elif (SIZEOF_VOIDP == 8)
   203 # elif (SIZEOF_VOIDP == 8)
   176 static __inline__ SDL_bool
   204 static __inline__ SDL_bool
   177 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   205 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   178                         void *newvalue)
   206 {
   179 {
   207   void* rv;
   180     void *rv;
   208   void** a = atomic;
   181     void **a = atomic;
   209   __asm__ __volatile__("casx [%4], %2, %0"
   182     __asm__ __volatile__("casx [%4], %2, %0":"=r"(rv), "=m"(*a)
   210                        : "=r" (rv),
   183                          :"r"(oldvalue), "m"(*a), "r"(a), "0"(newvalue));
   211                          "=m" (*a)
   184     return (SDL_bool) (rv == oldvalue);
   212                        : "r" (oldvalue),
       
   213                          "m" (*a),
       
   214                          "r" (a),
       
   215                          "0" (newvalue));
       
   216   return (SDL_bool)(rv == oldvalue);
   185 }
   217 }
   186 # else
   218 # else
   187 #  error "Your system has an unsupported pointer size"
   219 #  error "Your system has an unsupported pointer size"
   188 # endif /* SIZEOF_VOIDP */
   220 # endif /* SIZEOF_VOIDP */
   189 #elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC))
   221 #elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC))
   190 # define ATOMIC_MEMORY_BARRIER \
   222 # define ATOMIC_MEMORY_BARRIER \
   191   (__asm__ __volatile__ ("sync" : : : "memory"))
   223   (__asm__ __volatile__ ("sync" : : : "memory"))
   192 static __inline__ void
   224 static __inline__ void
   193 SDL_atomic_int_add(volatile int *atomic, int value)
   225 SDL_atomic_int_add(volatile int* atomic, int value)
   194 {
   226 {                                           
   195     int rv, tmp;
   227   int rv,tmp;                                   
   196     __asm__ __volatile__("1: lwarx   %0,  0, %3\n"
   228   __asm__ __volatile__("1: lwarx   %0,  0, %3\n" 
   197                          "   add     %1, %0, %4\n"
   229                        "   add     %1, %0, %4\n"
   198                          "   stwcx.  %1,  0, %3\n"
   230                        "   stwcx.  %1,  0, %3\n" 
   199                          "   bne-    1b":"=&b"(rv), "=&r"(tmp), "=m"(*atomic)
   231                        "   bne-    1b"          
   200                          :"b"(atomic), "r"(value), "m"(*atomic)
   232                        : "=&b" (rv),            
   201                          :"cr0", "memory");
   233                          "=&r" (tmp),           
       
   234                          "=m" (*atomic)       
       
   235                        : "b" (atomic),          
       
   236                          "r" (value),           
       
   237                          "m" (*atomic)        
       
   238                        : "cr0",                 
       
   239                          "memory");             
   202 }
   240 }
   203 
   241 
   204 static __inline__ int
   242 static __inline__ int
   205 SDL_atomic_int_xchg_add(volatile int *atomic, int value)
   243 SDL_atomic_int_xchg_add(volatile int* atomic, int value)
   206 {
   244 {                                          
   207     int rv, tmp;
   245   int rv,tmp;                               
   208     __asm__ __volatile__("1: lwarx  %0, 0, %3\n"
   246   __asm__ __volatile__("1: lwarx  %0, 0, %3\n"        
   209                          "   add    %1, %0, %4\n"
   247                        "   add    %1, %0, %4\n"       
   210                          "   stwcx. %1, 0, %3\n"
   248                        "   stwcx. %1, 0, %3\n"        
   211                          "   bne-   1b":"=&b"(rv), "=&r"(tmp), "=m"(*atomic)
   249                        "   bne-   1b"                 
   212                          :"b"(atomic), "r"(value), "m"(*atomic)
   250                        : "=&b" (rv),                  
   213                          :"cr0", "memory");
   251                          "=&r" (tmp),                 
   214     return rv;
   252                          "=m" (*atomic)
       
   253                        : "b" (atomic),                
       
   254                          "r" (value),                 
       
   255                          "m" (*atomic)
       
   256                        : "cr0",                       
       
   257                          "memory");
       
   258   return rv;                                                 
   215 }
   259 }
   216 
   260 
   217 # if (SIZEOF_VOIDP == 4)
   261 # if (SIZEOF_VOIDP == 4)
   218 static __inline__ SDL_bool
   262 static __inline__ SDL_bool
   219 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
   263 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
   220 {
   264 {                                                        
   221     int rv;
   265   int rv;                                                 
   222     __asm__ __volatile__("   sync\n"
   266   __asm__ __volatile__("   sync\n"                         
   223                          "1: lwarx   %0, 0, %1\n"
   267                        "1: lwarx   %0, 0, %1\n"           
   224                          "   subf.   %0, %2, %0\n"
   268                        "   subf.   %0, %2, %0\n"          
   225                          "   bne     2f\n"
   269                        "   bne     2f\n"                  
   226                          "   stwcx.  %3, 0, %1\n"
   270                        "   stwcx.  %3, 0, %1\n"           
   227                          "   bne-    1b\n" "2: isync":"=&r"(rv)
   271                        "   bne-    1b\n"                  
   228                          :"b"(atomic), "r"(oldvalue), "r"(newvalue):"cr0", "memory");
   272                        "2: isync"                         
   229     return (SDL_bool) (rv == 0);
   273                        : "=&r" (rv)                       
   230 }
   274                        : "b" (atomic),                    
   231 
   275                          "r" (oldvalue),                  
   232 static __inline__ SDL_bool
   276                          "r" (newvalue)
   233 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   277                        : "cr0",                           
   234                         void *newvalue)
   278                          "memory");                         
   235 {
   279   return (SDL_bool)(rv == 0);                                              
   236     void *rv;
   280 }
   237     __asm__ __volatile__("sync\n"
   281 
   238                          "1: lwarx  %0,  0, %1\n"
   282 static __inline__ SDL_bool
   239                          "   subf.  %0, %2, %0\n"
   283 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   240                          "   bne    2f\n"
   284 {
   241                          "   stwcx. %3,  0, %1\n"
   285   void* rv;
   242                          "   bne-   1b\n" "2: isync":"=&r"(rv)
   286   __asm__ __volatile__("sync\n"
   243                          :"b"(atomic), "r"(oldvalue), "r"(newvalue)
   287                        "1: lwarx  %0,  0, %1\n"
   244                          :"cr0", "memory");
   288                        "   subf.  %0, %2, %0\n"
   245     return (SDL_bool) (rv == 0);
   289                        "   bne    2f\n"
       
   290                        "   stwcx. %3,  0, %1\n"
       
   291                        "   bne-   1b\n"
       
   292                        "2: isync"
       
   293                        : "=&r" (rv)
       
   294                        : "b" (atomic),
       
   295                          "r" (oldvalue),
       
   296                          "r" (newvalue)
       
   297                        : "cr0",
       
   298                          "memory");
       
   299   return (SDL_bool)(rv == 0);
   246 }
   300 }
   247 # elif (SIZEOF_VOIDP == 8)
   301 # elif (SIZEOF_VOIDP == 8)
   248 static __inline__ SDL_bool
   302 static __inline__ SDL_bool
   249 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
   303 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
   250 {
   304 {                                                        
   251     int rv;
   305   int rv;                                                 
   252     __asm__ __volatile__("   sync\n"
   306   __asm__ __volatile__("   sync\n"                         
   253                          "1: lwarx   %0,  0, %1\n"
   307                        "1: lwarx   %0,  0, %1\n"
   254                          "   extsw   %0, %0\n"
   308                        "   extsw   %0, %0\n"
   255                          "   subf.   %0, %2, %0\n"
   309                        "   subf.   %0, %2, %0\n"          
   256                          "   bne     2f\n"
   310                        "   bne     2f\n"                  
   257                          "   stwcx.  %3,  0, %1\n"
   311                        "   stwcx.  %3,  0, %1\n"           
   258                          "   bne-    1b\n" "2: isync":"=&r"(rv)
   312                        "   bne-    1b\n"                  
   259                          :"b"(atomic), "r"(oldvalue), "r"(newvalue):"cr0", "memory");
   313                        "2: isync"                         
   260     return (SDL_bool) (rv == 0);
   314                        : "=&r" (rv)                       
   261 }
   315                        : "b" (atomic),                    
   262 
   316                          "r" (oldvalue),                  
   263 static __inline__ SDL_bool
   317                          "r" (newvalue)
   264 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   318                        : "cr0",                           
   265                         void *newvalue)
   319                          "memory");                         
   266 {
   320   return (SDL_bool)(rv == 0);                                              
   267     void *rv;
   321 }
   268     __asm__ __volatile__("sync\n"
   322 
   269                          "1: ldarx  %0,  0, %1\n"
   323 static __inline__ SDL_bool
   270                          "   subf.  %0, %2, %0\n"
   324 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   271                          "   bne    2f\n"
   325 {
   272                          "   stdcx. %3,  0, %1\n"
   326   void* rv;
   273                          "   bne-   1b\n" "2: isync":"=&r"(rv)
   327   __asm__ __volatile__("sync\n"
   274                          :"b"(atomic), "r"(oldvalue), "r"(newvalue)
   328                        "1: ldarx  %0,  0, %1\n"
   275                          :"cr0", "memory");
   329                        "   subf.  %0, %2, %0\n"
   276     return (SDL_bool) (rv == 0);
   330                        "   bne    2f\n"
       
   331                        "   stdcx. %3,  0, %1\n"
       
   332                        "   bne-   1b\n"
       
   333                        "2: isync"
       
   334                        : "=&r" (rv)
       
   335                        : "b" (atomic),
       
   336                          "r" (oldvalue),
       
   337                          "r" (newvalue)
       
   338                        : "cr0",
       
   339                          "memory");
       
   340   return (SDL_bool)(rv == 0);
   277 }
   341 }
   278 # else
   342 # else
   279 #  error "Your system has an unsupported pointer size"
   343 #  error "Your system has an unsupported pointer size"
   280 # endif /* SIZEOF_VOIDP */
   344 # endif /* SIZEOF_VOIDP */
   281 #elif defined(__GNUC__) && (defined(__IA64__) || defined(__ia64__))
   345 #elif defined(__GNUC__) && (defined(__IA64__) || defined(__ia64__))
   288   (__sync_bool_compare_and_swap((atomic),(oldvalue),(newvalue)))
   352   (__sync_bool_compare_and_swap((atomic),(oldvalue),(newvalue)))
   289 # define SDL_atomic_ptr_cmp_xchg(atomic,oldvalue,newvalue)              \
   353 # define SDL_atomic_ptr_cmp_xchg(atomic,oldvalue,newvalue)              \
   290   (__sync_bool_compare_and_swap((long*)(atomic),(long)(oldvalue),(long)(newvalue)))
   354   (__sync_bool_compare_and_swap((long*)(atomic),(long)(oldvalue),(long)(newvalue)))
   291 #elif defined(__GNUC__) && defined(__LINUX__) && (defined(__mips__) || defined(__MIPS__))
   355 #elif defined(__GNUC__) && defined(__LINUX__) && (defined(__mips__) || defined(__MIPS__))
   292 static __inline__ int
   356 static __inline__ int
   293 SDL_atomic_int_xchg_add(volatile int *atomic, int value)
   357 SDL_atomic_int_xchg_add(volatile int* atomic, int value)
   294 {
   358 {                                            
   295     int rv, tmp;
   359   int rv,tmp;                                 
   296     __asm__ __volatile__("1:              \n"
   360   __asm__ __volatile__("1:              \n"                 
   297                          ".set  push      \n"
   361                        ".set  push      \n"         
   298                          ".set  mips2     \n"
   362                        ".set  mips2     \n"        
   299                          "ll    %0,%3     \n"
   363                        "ll    %0,%3     \n"        
   300                          "addu  %1,%4,%0  \n"
   364                        "addu  %1,%4,%0  \n"     
   301                          "sc    %1,%2     \n"
   365                        "sc    %1,%2     \n"        
   302                          ".set  pop       \n"
   366                        ".set  pop       \n"          
   303                          "beqz  %1,1b     \n":"=&r"(rv),
   367                        "beqz  %1,1b     \n"        
   304                          "=&r"(tmp), "=m"(*atomic)
   368                        : "=&r" (rv),          
   305                          :"m"(*atomic), "r"(value)
   369                          "=&r" (tmp),         
   306                          :"memory");
   370                          "=m" (*atomic)     
   307     return rv;
   371                        : "m" (*atomic),     
       
   372                          "r" (value)          
       
   373                        : "memory");           
       
   374   return rv;                                         
   308 }
   375 }
   309 
   376 
   310 static __inline__ void
   377 static __inline__ void
   311 SDL_atomic_int_add(volatile int *atomic, int value)
   378 SDL_atomic_int_add(volatile int* atomic, int value)
   312 {
   379 {                                           
   313     int rv;
   380   int rv;                                    
   314     __asm__ __volatile__("1:               \n"
   381   __asm__ __volatile__("1:               \n"                
   315                          ".set  push       \n"
   382                        ".set  push       \n"        
   316                          ".set  mips2      \n"
   383                        ".set  mips2      \n"       
   317                          "ll    %0,%2      \n"
   384                        "ll    %0,%2      \n"       
   318                          "addu  %0,%3,%0   \n"
   385                        "addu  %0,%3,%0   \n"    
   319                          "sc    %0,%1      \n"
   386                        "sc    %0,%1      \n"       
   320                          ".set  pop        \n"
   387                        ".set  pop        \n"         
   321                          "beqz  %0,1b      \n":"=&r"(rv), "=m"(*atomic)
   388                        "beqz  %0,1b      \n"       
   322                          :"m"(*atomic), "r"(value)
   389                        : "=&r" (rv),         
   323                          :"memory");
   390                          "=m" (*atomic)    
   324 }
   391                        : "m" (*atomic),    
   325 
   392                          "r" (value)         
   326 static __inline__ SDL_bool
   393                        : "memory");          
   327 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
   394 }
   328 {
   395 
   329     int rv;
   396 static __inline__ SDL_bool
   330     __asm__ __volatile__("     .set push        \n"
   397 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
   331                          "     .set noat        \n"
   398 {
   332                          "     .set mips3       \n"
   399   int rv;
   333                          "1:   ll   %0, %2      \n"
   400   __asm__ __volatile__("     .set push        \n"
   334                          "     bne  %0, %z3, 2f \n"
   401                        "     .set noat        \n"
   335                          "     .set mips0       \n"
   402                        "     .set mips3       \n"
   336                          "     move $1, %z4     \n"
   403                        "1:   ll   %0, %2      \n"
   337                          "     .set mips3       \n"
   404                        "     bne  %0, %z3, 2f \n"
   338                          "     sc   $1, %1      \n"
   405                        "     .set mips0       \n"
   339                          "     beqz $1, 1b      \n"
   406                        "     move $1, %z4     \n"
   340                          "     sync             \n"
   407                        "     .set mips3       \n"
   341                          "2:                    \n"
   408                        "     sc   $1, %1      \n"
   342                          "     .set pop         \n":"=&r"(rv), "=R"(*atomic)
   409                        "     beqz $1, 1b      \n"
   343                          :"R"(*atomic), "Jr"(oldvalue), "Jr"(newvalue)
   410                        "     sync             \n"
   344                          :"memory");
   411                        "2:                    \n"
   345     return (SDL_bool) rv;
   412                        "     .set pop         \n"
   346 }
   413                        : "=&r" (rv),
   347 
   414                          "=R" (*atomic)
   348 static __inline__ SDL_bool
   415                        : "R" (*atomic),
   349 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   416                          "Jr" (oldvalue),
   350                         void *newvalue)
   417                          "Jr" (newvalue)
   351 {
   418                        : "memory");
   352     int rv;
   419   return (SDL_bool)rv;                  
   353     __asm__ __volatile__("     .set push        \n"
   420 }
   354                          "     .set noat        \n" "     .set mips3       \n"
   421 
       
   422 static __inline__ SDL_bool
       
   423 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
       
   424 {                                                     
       
   425   int rv;
       
   426   __asm__ __volatile__("     .set push        \n"
       
   427                        "     .set noat        \n"
       
   428                        "     .set mips3       \n"
   355 # if defined(__mips64)
   429 # if defined(__mips64)
   356                          "1:   lld  %0, %2      \n"
   430                        "1:   lld  %0, %2      \n"
   357 # else
   431 # else
   358                          "1:   ll   %0, %2      \n"
   432                        "1:   ll   %0, %2      \n"
   359 # endif
   433 # endif                       
   360                          "     bne  %0, %z3, 2f \n" "     move $1, %z4     \n"
   434                        "     bne  %0, %z3, 2f \n"
       
   435                        "     move $1, %z4     \n"
   361 # if defined(__mips64)
   436 # if defined(__mips64)
   362                          "     scd  $1, %1      \n"
   437                        "     scd  $1, %1      \n"
   363 # else
   438 # else
   364                          "     sc   $1, %1      \n"
   439                        "     sc   $1, %1      \n"
   365 # endif
   440 # endif                       
   366                          "     beqz $1, 1b      \n"
   441                        "     beqz $1, 1b      \n"
   367                          "     sync             \n"
   442                        "     sync             \n"
   368                          "2:                    \n"
   443                        "2:                    \n"
   369                          "     .set pop         \n":"=&r"(rv), "=R"(*atomic)
   444                        "     .set pop         \n"
   370                          :"R"(*atomic), "Jr"(oldvalue), "Jr"(newvalue)
   445                        : "=&r" (rv),
   371                          :"memory");
   446                          "=R" (*atomic)
   372     return (SDL_bool) rv;
   447                        : "R" (*atomic),
       
   448                          "Jr" (oldvalue),
       
   449                          "Jr" (newvalue)
       
   450                        : "memory");
       
   451   return (SDL_bool)rv;                                                  
   373 }
   452 }
   374 #elif defined(__GNUC__) && defined(__m68k__)
   453 #elif defined(__GNUC__) && defined(__m68k__)
   375 static __inline__ int
   454 static __inline__ int
   376 SDL_atomic_int_xchg_add(volatile int *atomic, int value)
   455 SDL_atomic_int_xchg_add(volatile int* atomic, int value)
   377 {
   456 {                                          
   378     int rv = *atomic;
   457   int rv = *atomic;
   379     int tmp;
   458   int tmp;
   380     __asm__ __volatile__("1: move%.l %0,%1    \n"
   459   __asm__ __volatile__("1: move%.l %0,%1    \n"
   381                          "   add%.l  %2,%1    \n"
   460                        "   add%.l  %2,%1    \n"
   382                          "   cas%.l  %0,%1,%3 \n"
   461                        "   cas%.l  %0,%1,%3 \n"
   383                          "   jbne    1b       \n":"=d"(rv), "=&d"(tmp)
   462                        "   jbne    1b       \n"
   384                          :"d"(value), "m"(*atomic), "0"(rv)
   463                        : "=d" (rv),
   385                          :"memory");
   464                          "=&d" (tmp)
   386     return rv;
   465                        : "d" (value),
       
   466                          "m" (*atomic),
       
   467                          "0" (rv)
       
   468                        : "memory");
       
   469   return rv;
   387 }
   470 }
   388 
   471 
   389 static __inline__ void
   472 static __inline__ void
   390 SDL_atomic_int_add(volatile int *atomic, int value)
   473 SDL_atomic_int_add(volatile int* atomic, int value)
   391 {
   474 {                                           
   392     __asm__ __volatile__("add%.l %0,%1"::"id"(value), "m"(*atomic)
   475   __asm__ __volatile__("add%.l %0,%1"        
   393                          :"memory");
   476                        :                     
   394 }
   477                        : "id" (value),       
   395 
   478                          "m" (*atomic)
   396 static __inline__ SDL_bool
   479                        : "memory");          
   397 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
   480 }
   398 {
   481 
   399     char rv;
   482 static __inline__ SDL_bool
   400     int readvalue;
   483 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
   401     __asm__ __volatile__("cas%.l %2,%3,%1\n"
   484 {                                           
   402                          "seq    %0":"=dm"(rv), "=m"(*atomic), "=d"(readvalue)
   485   char rv;                                   
   403                          :"d"(newvalue), "m"(*atomic), "2"(oldvalue));
   486   int readvalue;                             
   404     return (SDL_bool) rv;
   487   __asm__ __volatile__("cas%.l %2,%3,%1\n"   
   405 }
   488                        "seq    %0"           
   406 
   489                        : "=dm" (rv),         
   407 static __inline__ SDL_bool
   490                          "=m" (*atomic),   
   408 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   491                          "=d" (readvalue)    
   409                         void *newvalue)
   492                        : "d" (newvalue),     
   410 {
   493                          "m" (*atomic),    
   411     char rv;
   494                          "2" (oldvalue));    
   412     int readvalue;
   495   return (SDL_bool)rv;
   413     __asm__ __volatile__("cas%.l %2,%3,%1\n"
   496 }
   414                          "seq    %0":"=dm"(rv), "=m"(*atomic), "=d"(readvalue)
   497 
   415                          :"d"(newvalue), "m"(*atomic), "2"(oldvalue));
   498 static __inline__ SDL_bool
   416     return (SDL_bool) rv;
   499 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
       
   500 {
       
   501   char rv;                                   
       
   502   int readvalue;                             
       
   503   __asm__ __volatile__("cas%.l %2,%3,%1\n"   
       
   504                        "seq    %0"           
       
   505                        : "=dm" (rv),         
       
   506                          "=m" (*atomic),   
       
   507                          "=d" (readvalue)    
       
   508                        : "d" (newvalue),     
       
   509                          "m" (*atomic),    
       
   510                          "2" (oldvalue));    
       
   511   return (SDL_bool)rv;
   417 }
   512 }
   418 #elif defined(__GNUC__) && defined(__s390__)
   513 #elif defined(__GNUC__) && defined(__s390__)
   419 # define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)  \
   514 # define ATOMIC_INT_CMP_XCHG(atomic,oldvalue,newvalue)  \
   420   ({                                                    \
   515   ({                                                    \
   421     int rv = oldvalue;                                  \
   516     int rv = oldvalue;                                  \
   427                          : "cc");                       \
   522                          : "cc");                       \
   428     rv == oldvalue;                                     \
   523     rv == oldvalue;                                     \
   429   })
   524   })
   430 # if (SIZEOF_VOIDP == 4)
   525 # if (SIZEOF_VOIDP == 4)
   431 static __inline__ SDL_bool
   526 static __inline__ SDL_bool
   432 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   527 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   433                         void *newvalue)
   528 {
   434 {
   529   void* rv = oldvalue;
   435     void *rv = oldvalue;
   530   __asm__ __volatile__("cs %0, %2, %1"
   436     __asm__ __volatile__("cs %0, %2, %1":"+d"(rv), "=Q"(*atomic)
   531                        : "+d" (rv),
   437                          :"d"(newvalue), "m"(*atomic)
   532                          "=Q" (*atomic)
   438                          :"cc");
   533                        : "d" (newvalue),
   439     return (SDL_bool) (rv == oldvalue);
   534                          "m" (*atomic)
       
   535                        : "cc");
       
   536   return (SDL_bool)(rv == oldvalue);
   440 }
   537 }
   441 # elif (SIZEOF_VOIDP == 8)
   538 # elif (SIZEOF_VOIDP == 8)
   442 static __inline__ SDL_bool
   539 static __inline__ SDL_bool
   443 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   540 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   444                         void *newvalue)
   541 {
   445 {
   542   void* rv = oldvalue;
   446     void *rv = oldvalue;
   543   void** a = atomic;
   447     void **a = atomic;
   544   __asm__ __volatile__("csg %0, %2, %1"
   448     __asm__ __volatile__("csg %0, %2, %1":"+d"(rv), "=Q"(*a)
   545                        : "+d" (rv),
   449                          :"d"((long) (newvalue)), "m"(*a)
   546                          "=Q" (*a)
   450                          :"cc");
   547                        : "d" ((long)(newvalue)),
   451     return (SDL_bool) (rv == oldvalue);
   548                          "m" (*a)
       
   549                        : "cc");
       
   550   return (SDL_bool)(rv == oldvalue);
   452 }
   551 }
   453 # else
   552 # else
   454 #  error "Your system has an unsupported pointer size"
   553 #  error "Your system has an unsupported pointer size"
   455 # endif /* SIZEOF_VOIDP */
   554 # endif /* SIZEOF_VOIDP */
   456 #elif defined(__WIN32__)
   555 #elif defined(__WIN32__)
   457 # include <windows.h>
   556 # include <windows.h>
   458 static __inline__ int
   557 static __inline__ int
   459 SDL_atomic_int_xchg_add(volatile int *atomic, int value)
   558 SDL_atomic_int_xchg_add(volatile int* atomic, int value)
   460 {
   559 {
   461     return InterlockedExchangeAdd(atomic, value);
   560   return InterlockedExchangeAdd(atomic, value);
   462 }
   561 }
   463 
   562 
   464 static __inline__ void
   563 static __inline__ void
   465 SDL_atomic_int_add(volatile int *atomic, int value)
   564 SDL_atomic_int_add(volatile int* atomic, int value)
   466 {
   565 {
   467     InterlockedExchangeAdd(atomic, value);
   566   InterlockedExchangeAdd(atomic, value);
   468 }
   567 }
   469 
   568 
   470 # if (WINVER > 0X0400)
   569 # if (WINVER > 0X0400)
   471 static __inline__ SDL_bool
   570 static __inline__ SDL_bool
   472 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
   571 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
   473 {
   572 {
   474     return (SDL_bool) (InterlockedCompareExchangePointer((PVOID *) atomic,
   573    return (SDL_bool)(InterlockedCompareExchangePointer((PVOID*)atomic,
   475                                                          (PVOID) newvalue,
   574                                                        (PVOID)newvalue,
   476                                                          (PVOID) oldvalue) ==
   575                                                        (PVOID)oldvalue) == oldvalue);
   477                        oldvalue);
   576 }
   478 }
   577 
   479 
   578 
   480 
   579 static __inline__ SDL_bool
   481 static __inline__ SDL_bool
   580 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   482 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   581 {
   483                         void *newvalue)
   582   return (InterlockedCompareExchangePointer(atomic, newvalue, oldvalue) == oldvalue);
   484 {
       
   485     return (InterlockedCompareExchangePointer(atomic, newvalue, oldvalue) ==
       
   486             oldvalue);
       
   487 }
   583 }
   488 # else /* WINVER <= 0x0400 */
   584 # else /* WINVER <= 0x0400 */
   489 #  if (SIZEOF_VOIDP != 4)
   585 #  if (SIZEOF_VOIDP != 4)
   490 #   error "InterlockedCompareExchangePointer needed"
   586 #   error "InterlockedCompareExchangePointer needed"
   491 #  endif
   587 #  endif
   492 
   588 
   493 static __inline__ SDL_bool
   589 static __inline__ SDL_bool
   494 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
   590 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
   495 {
   591 {
   496     return (InterlockedCompareExchange(atomic, newvalue, oldvalue) ==
   592   return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue);
   497             oldvalue);
   593 }
   498 }
   594 
   499 
   595 static __inline__ SDL_bool
   500 static __inline__ SDL_bool
   596 SDL_atomic_ptr_cmp_xchg(volatile void** atomic, void* oldvalue, void* newvalue)
   501 SDL_atomic_ptr_cmp_xchg(volatile void **atomic, void *oldvalue,
   597 {
   502                         void *newvalue)
   598   return (InterlockedCompareExchange(atomic, newvalue, oldvalue) == oldvalue);
   503 {
       
   504     return (InterlockedCompareExchange(atomic, newvalue, oldvalue) ==
       
   505             oldvalue);
       
   506 }
   599 }
   507 # endif
   600 # endif
   508 #else /* when all else fails */
   601 #else /* when all else fails */
   509 # define SDL_ATOMIC_OPS_NOT_SUPPORTED
   602 # define SDL_ATOMIC_OPS_NOT_SUPPORTED
   510 # warning "Atomic Ops for this platform not supported!"
   603 # warning "Atomic Ops for this platform not supported!"
   511 static __inline__ int
   604 static __inline__ int
   512 SDL_atomic_int_xchg_add(volatile int *atomic, int value)
   605 SDL_atomic_int_xchg_add(volatile int* atomic, int value)
   513 {
   606 {                                           
   514     int rv = *atomic;
   607   int rv = *atomic;                          
   515     *(atomic) += value;
   608   *(atomic) += value;                        
   516     return rv;
   609   return rv;                                        
   517 }
   610 }
   518 
   611 
   519 static __inline__ SDL_bool
   612 static __inline__ SDL_bool
   520 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
   613 SDL_atomic_int_cmp_xchg(volatile int* atomic, int oldvalue, int newvalue)
   521 {
   614 {
   522     return (*atomic == oldvalue) ?
   615   return (*atomic == oldvalue) ?  
   523         ((*atomic = newvalue), SDL_TRUE) : SDL_FALSE;
   616     ((*atomic = newvalue), SDL_TRUE) : SDL_FALSE;
   524 }
   617 }
   525 
   618 
   526 static __inline__ void
   619 static __inline__ void
   527 SDL_atomic_int_add(volatile int *atomic, int value)
   620 SDL_atomic_int_add(volatile int* atomic, int value)
   528 {
   621 {
   529     *atomic += value;
   622   *atomic += value;
   530 }
   623 }
   531 #endif /* arch & platforms */
   624 #endif /* arch & platforms */
       
   625   
       
   626 /* *INDENT-ON* */
   532 
   627 
   533 #ifdef ATOMIC_INT_CMP_XCHG
   628 #ifdef ATOMIC_INT_CMP_XCHG
   534 static __inline__ SDL_bool
   629 static __inline__ SDL_bool
   535 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
   630 SDL_atomic_int_cmp_xchg(volatile int *atomic, int oldvalue, int newvalue)
   536 {
   631 {