/*
    SDL - Simple DirectMedia Layer
    Copyright (C) 1997-2010 Sam Lantinga

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    Sam Lantinga
    slouken@libsdl.org

    Contributed by Bob Pendleton, bob@pendleton.com
*/
#include "SDL_stdinc.h"
#include "SDL_atomic.h"
#include "SDL_error.h"

/*
  This file provides 32, and 64 bit atomic operations. If the
  operations are provided by the native hardware and operating system
  they are used. If they are not then the operations are emulated
  using the SDL spin lock operations. If spin lock can not be
  implemented then these functions must fail.
*/

/*
  DUMMY VERSION.

  This version of the code assumes there is no support for atomic
  operations. Therefore, every function sets the SDL error
  message. Oddly enough, if you only have one thread then this
  version actually works.
*/

/*
  Native spinlock routines. Because this is the dummy implementation
  these will always call SDL_SetError() and do nothing.
*/
/* Acquire a spin lock.  Dummy implementation: atomic operations are not
   available on this platform, so we only record an SDL error. */
void
SDL_AtomicLock(SDL_SpinLock *lock)
{
    SDL_SetError("SDL_atomic.c: is not implemented on this platform");
}
/* Release a spin lock.  Dummy implementation: atomic operations are not
   available on this platform, so we only record an SDL error. */
void
SDL_AtomicUnlock(SDL_SpinLock *lock)
{
    SDL_SetError("SDL_atomic.c: is not implemented on this platform");
}
64
65
66
67
68
/*
Note that platform specific versions can be built from this version
by changing the #undefs to #defines and adding platform specific
code.
*/
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
/* None of the native atomic primitives exist in the dummy build.  A
   platform port turns the relevant #undef into a #define and supplies
   the native code in the matching #ifdef branch of each function below;
   any operation left undefined falls back to the spin-lock emulation. */
#undef nativeTestThenSet32
#undef nativeClear32
#undef nativeFetchThenIncrement32
#undef nativeFetchThenDecrement32
#undef nativeFetchThenAdd32
#undef nativeFetchThenSubtract32
#undef nativeIncrementThenFetch32
#undef nativeDecrementThenFetch32
#undef nativeAddThenFetch32
#undef nativeSubtractThenFetch32
#undef nativeTestThenSet64
#undef nativeClear64
#undef nativeFetchThenIncrement64
#undef nativeFetchThenDecrement64
#undef nativeFetchThenAdd64
#undef nativeFetchThenSubtract64
#undef nativeIncrementThenFetch64
#undef nativeDecrementThenFetch64
#undef nativeAddThenFetch64
#undef nativeSubtractThenFetch64
/*
  If any of the operations are not provided then we must emulate some
  of them. That means we need a nice implementation of spin locks
  that avoids the "one big lock" problem. We use a vector of spin
  locks and pick which one to use based on the address of the operand
  of the function.

  To generate the index of the lock we first shift by 3 bits to get
  rid of the zero bits that result from 32 and 64 bit alignment of
  data. We then mask off all but 5 bits and use those 5 bits as an
  index into the table.

  Picking the lock this way ensures that accesses to the same data at
  the same time will go to the same lock. OTOH, accesses to different
  data have only a 1/32 chance of hitting the same lock. That should
  pretty much eliminate the chances of several atomic operations on
  different data from waiting on the same "big lock". If it isn't
  then the table of locks can be expanded to a new size so long as
  the new size is a power of two.
*/
/* Address-hashed table of spin locks guarding the emulated atomic ops.
   Static storage, so every lock starts out in the unlocked (zero) state. */
static SDL_SpinLock locks[32] = { 0 };
static __inline__ void
privateWaitLock(volatile void *ptr)
122
{
123
124
125
126
#if SIZEOF_VOIDP == 4
Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
#elif SIZEOF_VOIDP == 8
Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
127
128
#endif
129
SDL_AtomicLock(&locks[index]);
130
131
}
132
133
static __inline__ void
privateUnlock(volatile void *ptr)
134
{
135
136
137
138
#if SIZEOF_VOIDP == 4
Uint32 index = ((((Uint32)ptr) >> 3) & 0x1f);
#elif SIZEOF_VOIDP == 8
Uint64 index = ((((Uint64)ptr) >> 3) & 0x1f);
139
140
#endif
141
SDL_AtomicUnlock(&locks[index]);
142
143
144
145
}
/* 32 bit atomic operations */
SDL_bool
147
SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
148
{
149
150
151
152
#ifdef nativeTestThenSet32
#else
SDL_bool result = SDL_FALSE;
153
privateWaitLock(ptr);
154
155
156
157
158
result = (*ptr == 0);
if (result)
{
*ptr = 1;
}
159
privateUnlock(ptr);
160
161
162
return result;
#endif
163
164
165
}
void
166
SDL_AtomicClear32(volatile Uint32 * ptr)
167
{
168
169
#ifdef nativeClear32
#else
170
privateWaitLock(ptr);
171
*ptr = 0;
172
privateUnlock(ptr);
173
174
175
return;
#endif
176
177
178
}
Uint32
179
SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
180
{
181
182
#ifdef nativeFetchThenIncrement32
#else
183
Uint32 tmp = 0;
184
185
privateWaitLock(ptr);
186
187
tmp = *ptr;
(*ptr)+= 1;
188
privateUnlock(ptr);
189
190
191
return tmp;
#endif
192
193
194
}
Uint32
195
SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
196
{
197
198
#ifdef nativeFetchThenDecrement32
#else
199
Uint32 tmp = 0;
200
201
privateWaitLock(ptr);
202
203
tmp = *ptr;
(*ptr) -= 1;
204
privateUnlock(ptr);
205
206
207
return tmp;
#endif
208
209
210
}
Uint32
211
SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
212
{
213
214
#ifdef nativeFetchThenAdd32
#else
215
Uint32 tmp = 0;
216
217
privateWaitLock(ptr);
218
219
tmp = *ptr;
(*ptr)+= value;
220
privateUnlock(ptr);
221
222
223
return tmp;
#endif
224
225
226
}
Uint32
227
SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
228
{
229
230
#ifdef nativeFetchThenSubtract32
#else
231
Uint32 tmp = 0;
232
233
privateWaitLock(ptr);
234
235
tmp = *ptr;
(*ptr)-= value;
236
privateUnlock(ptr);
237
238
239
return tmp;
#endif
240
241
242
}
Uint32
243
SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
244
{
245
246
#ifdef nativeIncrementThenFetch32
#else
247
Uint32 tmp = 0;
248
249
privateWaitLock(ptr);
250
251
(*ptr)+= 1;
tmp = *ptr;
252
privateUnlock(ptr);
253
254
255
return tmp;
#endif
256
257
258
}
Uint32
259
SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
260
{
261
262
#ifdef nativeDecrementThenFetch32
#else
263
Uint32 tmp = 0;
264
265
privateWaitLock(ptr);
266
267
(*ptr)-= 1;
tmp = *ptr;
268
privateUnlock(ptr);
269
270
271
return tmp;
#endif
272
273
274
}
Uint32
275
SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
276
{
277
278
#ifdef nativeAddThenFetch32
#else
279
Uint32 tmp = 0;
280
281
privateWaitLock(ptr);
282
283
(*ptr)+= value;
tmp = *ptr;
284
privateUnlock(ptr);
285
286
287
return tmp;
#endif
288
289
290
}
Uint32
291
SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
292
{
293
294
#ifdef nativeSubtractThenFetch32
#else
295
Uint32 tmp = 0;
296
297
privateWaitLock(ptr);
298
299
(*ptr)-= value;
tmp = *ptr;
300
privateUnlock(ptr);
301
302
303
304
return tmp;
#endif
}
/* 64 bit atomic operations */
#ifdef SDL_HAS_64BIT_TYPE
SDL_bool
310
SDL_AtomicTestThenSet64(volatile Uint64 * ptr)
311
{
312
313
314
315
#ifdef nativeTestThenSet64
#else
SDL_bool result = SDL_FALSE;
316
privateWaitLock(ptr);
317
318
319
320
321
result = (*ptr == 0);
if (result)
{
*ptr = 1;
}
322
privateUnlock(ptr);
323
324
325
return result;
#endif
326
327
328
}
void
329
SDL_AtomicClear64(volatile Uint64 * ptr)
330
{
331
332
#ifdef nativeClear64
#else
333
privateWaitLock(ptr);
334
*ptr = 0;
335
privateUnlock(ptr);
336
337
338
return;
#endif
339
340
341
}
Uint64
342
SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr)
343
{
344
345
#ifdef nativeFetchThenIncrement64
#else
346
Uint64 tmp = 0;
347
348
privateWaitLock(ptr);
349
350
tmp = *ptr;
(*ptr)+= 1;
351
privateUnlock(ptr);
352
353
354
return tmp;
#endif
355
356
357
}
Uint64
358
SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr)
359
{
360
361
#ifdef nativeFetchThenDecrement64
#else
362
Uint64 tmp = 0;
363
364
privateWaitLock(ptr);
365
366
tmp = *ptr;
(*ptr) -= 1;
367
privateUnlock(ptr);
368
369
370
return tmp;
#endif
371
372
373
}
Uint64
374
SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value)
375
{
376
377
#ifdef nativeFetchThenAdd64
#else
378
Uint64 tmp = 0;
379
380
privateWaitLock(ptr);
381
382
tmp = *ptr;
(*ptr)+= value;
383
privateUnlock(ptr);
384
385
386
return tmp;
#endif
387
388
389
}
Uint64
390
SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value)
391
{
392
393
#ifdef nativeFetchThenSubtract64
#else
394
Uint64 tmp = 0;
395
396
privateWaitLock(ptr);
397
398
tmp = *ptr;
(*ptr)-= value;
399
privateUnlock(ptr);
400
401
402
return tmp;
#endif
403
404
405
}
Uint64
406
SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr)
407
{
408
409
#ifdef nativeIncrementThenFetch64
#else
410
Uint64 tmp = 0;
411
412
privateWaitLock(ptr);
413
414
(*ptr)+= 1;
tmp = *ptr;
415
privateUnlock(ptr);
416
417
418
return tmp;
#endif
419
420
421
}
Uint64
422
SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr)
423
{
424
425
#ifdef nativeDecrementThenFetch64
#else
426
Uint64 tmp = 0;
427
428
privateWaitLock(ptr);
429
430
(*ptr)-= 1;
tmp = *ptr;
431
privateUnlock(ptr);
432
433
434
return tmp;
#endif
435
436
437
}
Uint64
438
SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value)
439
{
440
441
#ifdef nativeAddThenFetch64
#else
442
Uint64 tmp = 0;
443
444
privateWaitLock(ptr);
445
446
(*ptr)+= value;
tmp = *ptr;
447
privateUnlock(ptr);
448
449
450
return tmp;
#endif
451
452
453
}
Uint64
454
SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value)
455
{
456
457
#ifdef nativeSubtractThenFetch64
#else
458
Uint64 tmp = 0;
459
460
privateWaitLock(ptr);
461
462
(*ptr)-= value;
tmp = *ptr;
463
privateUnlock(ptr);
464
465
return tmp;
466
#endif
467
}
#endif /* SDL_HAS_64BIT_TYPE */