src/atomic/SDL_atomic.c
changeset 5003 3a95a2b93eb3
child 5004 0c72ae7b7cb2
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/atomic/SDL_atomic.c	Sat Jan 15 12:41:59 2011 -0800
     1.3 @@ -0,0 +1,179 @@
     1.4 +/*
     1.5 +  SDL - Simple DirectMedia Layer
     1.6 +  Copyright (C) 1997-2010 Sam Lantinga
     1.7 +
     1.8 +  This library is free software; you can redistribute it and/or
     1.9 +  modify it under the terms of the GNU Lesser General Public
    1.10 +  License as published by the Free Software Foundation; either
    1.11 +  version 2.1 of the License, or (at your option) any later version.
    1.12 +
    1.13 +  This library is distributed in the hope that it will be useful,
    1.14 +  but WITHOUT ANY WARRANTY; without even the implied warranty of
    1.15 +  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    1.16 +  Lesser General Public License for more details.
    1.17 +
    1.18 +  You should have received a copy of the GNU Lesser General Public
    1.19 +  License along with this library; if not, write to the Free Software
    1.20 +  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    1.21 +
    1.22 +  Sam Lantinga
    1.23 +  slouken@libsdl.org
    1.24 +*/
    1.25 +#include "SDL_stdinc.h"
    1.26 +
    1.27 +#include "SDL_atomic.h"
    1.28 +
    1.29 +/* 
    1.30 +  If any of the operations are not provided then we must emulate some
    1.31 +  of them. That means we need a nice implementation of spin locks
    1.32 +  that avoids the "one big lock" problem. We use a vector of spin
    1.33 +  locks and pick which one to use based on the address of the operand
    1.34 +  of the function.
    1.35 +
    1.36 +  To generate the index of the lock we first shift by 3 bits to get
     1.37 +  rid of the zero bits that result from 32 and 64 bit alignment of
    1.38 +  data. We then mask off all but 5 bits and use those 5 bits as an
    1.39 +  index into the table. 
    1.40 +
     1.41 +  Picking the lock this way ensures that accesses to the same data at
    1.42 +  the same time will go to the same lock. OTOH, accesses to different
    1.43 +  data have only a 1/32 chance of hitting the same lock. That should
    1.44 +  pretty much eliminate the chances of several atomic operations on
    1.45 +  different data from waiting on the same "big lock". If it isn't
    1.46 +  then the table of locks can be expanded to a new size so long as
    1.47 +  the new size is a power of two.
    1.48 +
    1.49 +  Contributed by Bob Pendleton, bob@pendleton.com
    1.50 +*/
    1.51 +
    1.52 +static SDL_SpinLock locks[32];
    1.53 +
    1.54 +static __inline__ void
    1.55 +enterLock(void *a)
    1.56 +{
    1.57 +   uintptr_t index = ((((uintptr_t)a) >> 3) & 0x1f);
    1.58 +
    1.59 +   SDL_AtomicLock(&locks[index]);
    1.60 +}
    1.61 +
    1.62 +static __inline__ void
    1.63 +leaveLock(void *a)
    1.64 +{
    1.65 +   uintptr_t index = ((((uintptr_t)a) >> 3) & 0x1f);
    1.66 +
    1.67 +   SDL_AtomicUnlock(&locks[index]);
    1.68 +}
    1.69 +
    1.70 +#ifndef SDL_AtomicSet
    1.71 +int
    1.72 +SDL_AtomicSet(SDL_atomic_t *a, int value)
    1.73 +{
    1.74 +    int oldvalue;
    1.75 +
    1.76 +    enterLock(a);
    1.77 +    oldvalue = a->value;
    1.78 +    a->value = value;
    1.79 +    leaveLock(a);
    1.80 +
    1.81 +    return oldvalue;
    1.82 +}
    1.83 +#endif
    1.84 +
    1.85 +#ifndef SDL_AtomicGet
    1.86 +int
    1.87 +SDL_AtomicGet(SDL_atomic_t *a)
    1.88 +{
    1.89 +    /* Assuming integral reads on this platform, we're safe here since the
    1.90 +       functions that set the variable have the necessary memory barriers.
    1.91 +    */
    1.92 +    return a->value;
    1.93 +}
    1.94 +#endif
    1.95 +
    1.96 +#ifndef SDL_AtomicAdd
    1.97 +int
    1.98 +SDL_AtomicAdd(SDL_atomic_t *a, int value)
    1.99 +{
   1.100 +    int oldvalue;
   1.101 +
   1.102 +    enterLock(a);
   1.103 +    oldvalue = a->value;
   1.104 +    a->value += value;
   1.105 +    leaveLock(a);
   1.106 +
   1.107 +    return oldvalue;
   1.108 +}
   1.109 +#endif
   1.110 +
   1.111 +#ifndef SDL_AtomicIncRef
   1.112 +void
   1.113 +SDL_AtomicIncRef(SDL_atomic_t *a)
   1.114 +{
   1.115 +    SDL_AtomicAdd(a, 1);
   1.116 +}
   1.117 +#endif
   1.118 +
   1.119 +#ifndef SDL_AtomicDecRef
   1.120 +SDL_bool
   1.121 +SDL_AtomicDecRef(SDL_atomic_t *a)
   1.122 +{
   1.123 +    return SDL_AtomicAdd(a, -1) == 1;
   1.124 +}
   1.125 +#endif
   1.126 +
   1.127 +#ifndef SDL_AtomicCAS
   1.128 +int
   1.129 +SDL_AtomicCAS(SDL_atomic_t *a, int oldval, int newval)
   1.130 +{
   1.131 +    int prevval;
   1.132 +
   1.133 +    enterLock(a);
   1.134 +    prevval = a->value;
   1.135 +    if (prevval == oldval) {
   1.136 +        a->value = newval;
   1.137 +    }
   1.138 +    leaveLock(a);
   1.139 +
   1.140 +    return prevval;
   1.141 +}
   1.142 +#endif
   1.143 +
   1.144 +#ifndef SDL_AtomicSetPtr
   1.145 +void
   1.146 +SDL_AtomicSetPtr(void** a, void* value)
   1.147 +{
   1.148 +    void *prevval;
   1.149 +    do {
   1.150 +        prevval = *a;
   1.151 +    } while (SDL_AtomicCASPtr(a, prevval, value) != prevval);
   1.152 +}
   1.153 +#endif
   1.154 +
   1.155 +#ifndef SDL_AtomicGetPtr
   1.156 +void*
   1.157 +SDL_AtomicGetPtr(void** a)
   1.158 +{
   1.159 +    /* Assuming integral reads on this platform, we're safe here since the
   1.160 +       functions that set the pointer have the necessary memory barriers.
   1.161 +    */
   1.162 +    return *a;
   1.163 +}
   1.164 +#endif
   1.165 +
   1.166 +#ifndef SDL_AtomicCASPtr
   1.167 +void* SDL_AtomicCASPtr(void **a, void *oldval, void *newval)
   1.168 +{
   1.169 +    void *prevval;
   1.170 +
   1.171 +    enterLock(a);
   1.172 +    prevval = *a;
   1.173 +    if (*a == oldval) {
   1.174 +        *a = newval;
   1.175 +    }
   1.176 +    leaveLock(a);
   1.177 +
   1.178 +    return prevval;
   1.179 +}
   1.180 +#endif
   1.181 +
   1.182 +/* vi: set ts=4 sw=4 expandtab: */