/*
Simple DirectMedia Layer
Copyright (C) 1997-2014 Sam Lantinga <slouken@libsdl.org>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "../SDL_internal.h"
#include "SDL_atomic.h"
#if defined(_MSC_VER) && (_MSC_VER >= 1500)
#include <intrin.h>
#define HAVE_MSC_ATOMICS 1
#endif
#if defined(__MACOSX__) /* !!! FIXME: should we favor gcc atomics? */
#include <libkern/OSAtomic.h>
#endif
#if !defined(HAVE_GCC_ATOMICS) && defined(__SOLARIS__)
#include <atomic.h>
#endif

/*
  If any of the operations are not provided then we must emulate some
  of them. That means we need a nice implementation of spin locks
  that avoids the "one big lock" problem. We use a vector of spin
  locks and pick which one to use based on the address of the operand
  of the function.

  To generate the index of the lock we first shift by 3 bits to get
  rid of the zero bits that result from 32 and 64 bit alignment of
  data. We then mask off all but 5 bits and use those 5 bits as an
  index into the table.

  Picking the lock this way ensures that accesses to the same data at
  the same time will go to the same lock. OTOH, accesses to different
  data have only a 1/32 chance of hitting the same lock. That should
  pretty much eliminate the chance of several atomic operations on
  different data waiting on the same "big lock". If it doesn't, the
  table of locks can be expanded to a new size, so long as the new
  size is a power of two.

  Contributed by Bob Pendleton, bob@pendleton.com
*/
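
/*
  A quick illustration of the hashing above, with made-up addresses:
  an operand at 0x1008 gives (0x1008 >> 3) & 0x1f == 1 and so maps to
  locks[1]; an operand at 0x1010 maps to locks[2]. The same address
  always hashes to the same lock, so concurrent operations on one
  datum serialize, while operations on unrelated data usually don't.
*/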
#if !defined(HAVE_MSC_ATOMICS) && !defined(HAVE_GCC_ATOMICS) && !defined(__MACOSX__) && !defined(__SOLARIS__)
#define EMULATE_CAS 1
#endif

#if EMULATE_CAS
static SDL_SpinLock locks[32];

static SDL_INLINE void
enterLock(void *a)
{
    uintptr_t index = ((((uintptr_t)a) >> 3) & 0x1f);

    SDL_AtomicLock(&locks[index]);
}

static SDL_INLINE void
leaveLock(void *a)
{
    uintptr_t index = ((((uintptr_t)a) >> 3) & 0x1f);

    SDL_AtomicUnlock(&locks[index]);
}
#endif

SDL_bool
SDL_AtomicCAS(SDL_atomic_t *a, int oldval, int newval)
{
#ifdef HAVE_MSC_ATOMICS
    return (_InterlockedCompareExchange((long*)&a->value, (long)newval, (long)oldval) == (long)oldval);
#elif defined(__MACOSX__) /* !!! FIXME: should we favor gcc atomics? */
    return (SDL_bool) OSAtomicCompareAndSwap32Barrier(oldval, newval, &a->value);
#elif defined(HAVE_GCC_ATOMICS)
    return (SDL_bool) __sync_bool_compare_and_swap(&a->value, oldval, newval);
#elif defined(__SOLARIS__) && defined(_LP64)
    return (SDL_bool) ((int) atomic_cas_64((volatile uint64_t*)&a->value, (uint64_t)oldval, (uint64_t)newval) == oldval);
#elif defined(__SOLARIS__) && !defined(_LP64)
    return (SDL_bool) ((int) atomic_cas_32((volatile uint32_t*)&a->value, (uint32_t)oldval, (uint32_t)newval) == oldval);
#elif EMULATE_CAS
    SDL_bool retval = SDL_FALSE;

    enterLock(a);
    if (a->value == oldval) {
        a->value = newval;
        retval = SDL_TRUE;
    }
    leaveLock(a);

    return retval;
#else
#error Please define your platform.
#endif
}
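
/*
  The usual pattern on top of SDL_AtomicCAS is a read-modify-write
  retry loop. A minimal sketch (illustrative only, not part of SDL)
  that atomically raises a value to a new maximum:

      static void AtomicMax(SDL_atomic_t *a, int v)
      {
          int old;
          do {
              old = SDL_AtomicGet(a);
          } while (old < v && !SDL_AtomicCAS(a, old, v));
      }

  If another thread updates a->value between the load and the CAS,
  the CAS fails and the loop retries with the fresh value.
*/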

SDL_bool
SDL_AtomicCASPtr(void **a, void *oldval, void *newval)
{
#if defined(HAVE_MSC_ATOMICS) && (_M_IX86)
    return (_InterlockedCompareExchange((long*)a, (long)newval, (long)oldval) == (long)oldval);
#elif defined(HAVE_MSC_ATOMICS) && (!_M_IX86)
    return (_InterlockedCompareExchangePointer(a, newval, oldval) == oldval);
#elif defined(__MACOSX__) && defined(__LP64__) /* !!! FIXME: should we favor gcc atomics? */
    return (SDL_bool) OSAtomicCompareAndSwap64Barrier((int64_t)oldval, (int64_t)newval, (int64_t*) a);
#elif defined(__MACOSX__) && !defined(__LP64__) /* !!! FIXME: should we favor gcc atomics? */
    return (SDL_bool) OSAtomicCompareAndSwap32Barrier((int32_t)oldval, (int32_t)newval, (int32_t*) a);
#elif defined(HAVE_GCC_ATOMICS)
    return __sync_bool_compare_and_swap(a, oldval, newval);
#elif defined(__SOLARIS__)
    return (SDL_bool) (atomic_cas_ptr(a, oldval, newval) == oldval);
#elif EMULATE_CAS
    SDL_bool retval = SDL_FALSE;

    enterLock(a);
    if (*a == oldval) {
        *a = newval;
        retval = SDL_TRUE;
    }
    leaveLock(a);

    return retval;
#else
#error Please define your platform.
#endif
}
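
/*
  SDL_AtomicCASPtr lends itself to one-time lazy initialization. A
  sketch under assumed helper names (CreateObject and DestroyObject
  are hypothetical, not SDL API):

      static void *singleton = NULL;

      void *GetSingleton(void)
      {
          void *p = SDL_AtomicGetPtr(&singleton);
          if (!p) {
              p = CreateObject();
              if (!SDL_AtomicCASPtr(&singleton, NULL, p)) {
                  DestroyObject(p);  // another thread won the race
                  p = SDL_AtomicGetPtr(&singleton);
              }
          }
          return p;
      }
*/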

int
SDL_AtomicSet(SDL_atomic_t *a, int v)
{
#ifdef HAVE_MSC_ATOMICS
    return _InterlockedExchange((long*)&a->value, v);
#elif defined(HAVE_GCC_ATOMICS)
    return __sync_lock_test_and_set(&a->value, v);
#elif defined(__SOLARIS__) && defined(_LP64)
    return (int) atomic_swap_64((volatile uint64_t*)&a->value, (uint64_t)v);
#elif defined(__SOLARIS__) && !defined(_LP64)
    return (int) atomic_swap_32((volatile uint32_t*)&a->value, (uint32_t)v);
#else
    int value;
    do {
        value = a->value;
    } while (!SDL_AtomicCAS(a, value, v));
    return value;
#endif
}
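
/*
  Because SDL_AtomicSet returns the previous value, it behaves as an
  atomic exchange. A sketch of a one-shot flag built on that
  (illustrative only, not SDL API):

      static SDL_atomic_t fired;

      static SDL_bool FireOnce(void)
      {
          // Only the first caller observes the old value 0.
          return (SDL_AtomicSet(&fired, 1) == 0) ? SDL_TRUE : SDL_FALSE;
      }
*/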

void*
SDL_AtomicSetPtr(void **a, void *v)
{
#if defined(HAVE_MSC_ATOMICS) && (_M_IX86)
    return (void *) _InterlockedExchange((long *)a, (long) v);
#elif defined(HAVE_MSC_ATOMICS) && (!_M_IX86)
    return _InterlockedExchangePointer(a, v);
#elif defined(HAVE_GCC_ATOMICS)
    return __sync_lock_test_and_set(a, v);
#elif defined(__SOLARIS__)
    return atomic_swap_ptr(a, v);
#else
    void *value;
    do {
        value = *a;
    } while (!SDL_AtomicCASPtr(a, value, v));
    return value;
#endif
}

int
SDL_AtomicAdd(SDL_atomic_t *a, int v)
{
#ifdef HAVE_MSC_ATOMICS
    return _InterlockedExchangeAdd((long*)&a->value, v);
#elif defined(HAVE_GCC_ATOMICS)
    return __sync_fetch_and_add(&a->value, v);
#elif defined(__SOLARIS__)
    int pv = a->value;
    membar_consumer();
#if defined(_LP64)
    atomic_add_64((volatile uint64_t*)&a->value, v);
#elif !defined(_LP64)
    atomic_add_32((volatile uint32_t*)&a->value, v);
#endif
    return pv;
#else
    int value;
    do {
        value = a->value;
    } while (!SDL_AtomicCAS(a, value, (value + v)));
    return value;
#endif
}
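
/*
  SDL_AtomicAdd returns the value held *before* the addition, which
  makes reference counting straightforward. A sketch (the Object type
  and its allocation are hypothetical, not SDL API):

      typedef struct { SDL_atomic_t refcount; } Object;

      static void Release(Object *obj)
      {
          // The previous count was 1, so this call dropped it to 0.
          if (SDL_AtomicAdd(&obj->refcount, -1) == 1) {
              SDL_free(obj);
          }
      }
*/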

/* A CAS that stores back the value just read acts as a load with a
   full memory barrier, so the generic get paths are built on
   SDL_AtomicCAS / SDL_AtomicCASPtr. */
int
SDL_AtomicGet(SDL_atomic_t *a)
{
    int value;
    do {
        value = a->value;
    } while (!SDL_AtomicCAS(a, value, value));
    return value;
}

void *
SDL_AtomicGetPtr(void **a)
{
    void *value;
    do {
        value = *a;
    } while (!SDL_AtomicCASPtr(a, value, value));
    return value;
}
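
/* The CP15 barrier instruction used below is not encodable in the
   16-bit Thumb instruction set, so on ARMv6 Thumb builds the
   acquire/release barriers are emitted here as real functions;
   "mcr p15, 0, r0, c7, c10, 5" is the ARMv6 data memory barrier. */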
#ifdef __thumb__
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__)
__asm__(
" .align 2\n"
" .globl _SDL_MemoryBarrierRelease\n"
" .globl _SDL_MemoryBarrierAcquire\n"
"_SDL_MemoryBarrierRelease:\n"
"_SDL_MemoryBarrierAcquire:\n"
" mov r0, #0\n"
" mcr p15, 0, r0, c7, c10, 5\n"
" bx lr\n"
);
#endif
#endif
/* vi: set ts=4 sw=4 expandtab: */