/*
SDL - Simple DirectMedia Layer
Copyright (C) 1997-2009 Sam Lantinga
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Sam Lantinga
slouken@libsdl.org
*/
23
#include "SDL_stdinc.h"
24
25
#include "SDL_atomic.h"
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
/*
This file provides 8, 16, 32, and 64 bit atomic operations. If the
operations are provided by the native hardware and operating system
they are used. If they are not then the operations are emulated
using the SDL mutex operations.
*/
/*
First, detect whether the operations are supported and create
#defines that indicate that they do exist. The goal is to have all
the system dependent code in the top part of the file so that the
bottom can be use unchanged across all platforms.
Second, #define all the operations in each size class that are
supported. Doing this allows supported operations to be used along
side of emulated operations.
*/
/*
Linux version.
Test for gnu C builtin support for atomic operations. The only way
I know of is to check to see if the
__GCC_HAVE_SYNC_COMPARE_AND_SWAP_* macros are defined.
*/
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1
#define HAVE_ALL_8_BIT_OPS
#define nativeExchange8(ptr, value) (__sync_lock_test_and_set(ptr, value))
#define nativeCompareThenSet8(ptr, oldvalue, newvalue) (oldvalue == __sync_val_compare_and_swap(ptr, oldvalue, newvalue))
#define nativeTestThenSet8(ptr) (0 == __sync_lock_test_and_set(ptr, 1))
#define nativeClear8(ptr) (__sync_lock_release(ptr))
#define nativeFetchThenIncrement8(ptr) (__sync_fetch_and_add(ptr, 1))
#define nativeFetchThenDecrement8(ptr) (__sync_fetch_and_sub(ptr, 1))
#define nativeFetchThenAdd8(ptr, value) (__sync_fetch_and_add(ptr, value))
#define nativeFetchThenSubtract8(ptr, value) (__sync_fetch_and_sub(ptr, value))
#define nativeIncrementThenFetch8(ptr) (__sync_add_and_fetch(ptr, 1))
#define nativeDecrementThenFetch8(ptr) (__sync_sub_and_fetch(ptr, 1))
#define nativeAddThenFetch8(ptr, value) (__sync_add_and_fetch(ptr, value))
#define nativeSubtractThenFetch8(ptr, value) (__sync_sub_and_fetch(ptr, value))
#endif
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2
#define HAVE_ALL_16_BIT_OPS
#define nativeExchange16(ptr, value) (__sync_lock_test_and_set(ptr, value))
#define nativeCompareThenSet16(ptr, oldvalue, newvalue) (oldvalue == __sync_val_compare_and_swap(ptr, oldvalue, newvalue))
#define nativeTestThenSet16(ptr) (0 == __sync_lock_test_and_set(ptr, 1))
#define nativeClear16(ptr) (__sync_lock_release(ptr))
#define nativeFetchThenIncrement16(ptr) (__sync_fetch_and_add(ptr, 1))
#define nativeFetchThenDecrement16(ptr) (__sync_fetch_and_sub(ptr, 1))
#define nativeFetchThenAdd16(ptr, value) (__sync_fetch_and_add(ptr, value))
#define nativeFetchThenSubtract16(ptr, value) (__sync_fetch_and_sub(ptr, value))
#define nativeIncrementThenFetch16(ptr) (__sync_add_and_fetch(ptr, 1))
#define nativeDecrementThenFetch16(ptr) (__sync_sub_and_fetch(ptr, 1))
#define nativeAddThenFetch16(ptr, value) (__sync_add_and_fetch(ptr, value))
#define nativeSubtractThenFetch16(ptr, value) (__sync_sub_and_fetch(ptr, value))
#endif
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
#define HAVE_ALL_32_BIT_OPS
#define nativeExchange32(ptr, value) (__sync_lock_test_and_set(ptr, value))
#define nativeCompareThenSet32(ptr, oldvalue, newvalue) (oldvalue == __sync_val_compare_and_swap(ptr, oldvalue, newvalue))
#define nativeTestThenSet32(ptr) (0 == __sync_lock_test_and_set(ptr, 1))
#define nativeClear32(ptr) (__sync_lock_release(ptr))
#define nativeFetchThenIncrement32(ptr) (__sync_fetch_and_add(ptr, 1))
#define nativeFetchThenDecrement32(ptr) (__sync_fetch_and_sub(ptr, 1))
#define nativeFetchThenAdd32(ptr, value) (__sync_fetch_and_add(ptr, value))
#define nativeFetchThenSubtract32(ptr, value) (__sync_fetch_and_sub(ptr, value))
#define nativeIncrementThenFetch32(ptr) (__sync_add_and_fetch(ptr, 1))
#define nativeDecrementThenFetch32(ptr) (__sync_sub_and_fetch(ptr, 1))
#define nativeAddThenFetch32(ptr, value) (__sync_add_and_fetch(ptr, value))
#define nativeSubtractThenFetch32(ptr, value) (__sync_sub_and_fetch(ptr, value))
#endif
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
#define HAVE_ALL_64_BIT_OPS
#define nativeExchange64(ptr, value) (__sync_lock_test_and_set(ptr, value))
#define nativeCompareThenSet64(ptr, oldvalue, newvalue) (oldvalue == __sync_val_compare_and_swap(ptr, oldvalue, newvalue))
#define nativeTestThenSet64(ptr) (0 == __sync_lock_test_and_set(ptr, 1))
#define nativeClear64(ptr) (__sync_lock_release(ptr))
#define nativeFetchThenIncrement64(ptr) (__sync_fetch_and_add(ptr, 1))
#define nativeFetchThenDecrement64(ptr) (__sync_fetch_and_sub(ptr, 1))
#define nativeFetchThenAdd64(ptr, value) (__sync_fetch_and_add(ptr, value))
#define nativeFetchThenSubtract64(ptr, value) (__sync_fetch_and_sub(ptr, value))
#define nativeIncrementThenFetch64(ptr) (__sync_add_and_fetch(ptr, 1))
#define nativeDecrementThenFetch64(ptr) (__sync_sub_and_fetch(ptr, 1))
#define nativeAddThenFetch64(ptr, value) (__sync_add_and_fetch(ptr, value))
#define nativeSubtractThenFetch64(ptr, value) (__sync_sub_and_fetch(ptr, value))
#endif
/*
If any of the operations are not provided then we must emulate some of
them.
*/
#if !defined(HAVE_ALL_8_BIT_OPS) || !defined(HAVE_ALL_16_BIT_OPS) || !defined(HAVE_ALL_32_BIT_OPS) || !defined(HAVE_ALL_64_BIT_OPS)
static Uint32 lock = 0;
#define privateWaitLock() \
while (nativeTestThenSet32(&lock)) \
{ \
};
#define privateUnlock() (nativeClear32(&lock))
#endif
/* 8 bit atomic operations */
Uint8
140
SDL_AtomicExchange8(volatile Uint8 * ptr, Uint8 value)
141
142
143
144
{
#ifdef nativeExchange8
return nativeExchange8(ptr, value);
#else
145
Uint8 tmp = 0;
146
147
148
149
150
151
152
153
154
155
156
privateWaitLock();
tmp = *ptr;
*ptr = value;
privateUnlock();
return tmp;
#endif
}
SDL_bool
157
SDL_AtomicCompareThenSet8(volatile Uint8 * ptr, Uint8 oldvalue, Uint8 newvalue)
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
{
#ifdef nativeCompareThenSet8
return (SDL_bool)nativeCompareThenSet8(ptr, oldvalue, newvalue);
#else
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == oldvalue);
if (result)
{
*ptr = newvalue;
}
privateUnlock();
return result;
#endif
}
SDL_bool
177
SDL_AtomicTestThenSet8(volatile Uint8 * ptr)
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
{
#ifdef nativeTestThenSet8
return (SDL_bool)nativeTestThenSet8(ptr);
#else
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == 0);
if (result)
{
*ptr = 1;
}
privateUnlock();
return result;
#endif
}
void
197
SDL_AtomicClear8(volatile Uint8 * ptr)
198
199
200
201
202
203
204
205
206
207
208
209
210
{
#ifdef nativeClear8
nativeClear8(ptr);
#else
privateWaitLock();
*ptr = 0;
privateUnlock();
return;
#endif
}
Uint8
211
SDL_AtomicFetchThenIncrement8(volatile Uint8 * ptr)
212
213
214
215
{
#ifdef nativeFetchThenIncrement8
return nativeFetchThenIncrement8(ptr);
#else
216
Uint8 tmp = 0;
217
218
219
220
221
222
223
224
225
226
227
privateWaitLock();
tmp = *ptr;
(*ptr)+= 1;
privateUnlock();
return tmp;
#endif
}
Uint8
228
SDL_AtomicFetchThenDecrement8(volatile Uint8 * ptr)
229
230
231
232
{
#ifdef nativeFetchThenDecrement8
return nativeFetchThenDecrement8(ptr);
#else
233
Uint8 tmp = 0;
234
235
236
237
238
239
240
241
242
243
244
privateWaitLock();
tmp = *ptr;
(*ptr) -= 1;
privateUnlock();
return tmp;
#endif
}
Uint8
245
SDL_AtomicFetchThenAdd8(volatile Uint8 * ptr, Uint8 value)
246
247
248
249
{
#ifdef nativeFetchThenAdd8
return nativeFetchThenAdd8(ptr, value);
#else
250
Uint8 tmp = 0;
251
252
253
254
255
256
257
258
259
260
261
privateWaitLock();
tmp = *ptr;
(*ptr)+= value;
privateUnlock();
return tmp;
#endif
}
Uint8
262
SDL_AtomicFetchThenSubtract8(volatile Uint8 * ptr, Uint8 value)
263
264
265
266
{
#ifdef nativeFetchThenSubtract8
return nativeFetchThenSubtract8(ptr, value);
#else
267
Uint8 tmp = 0;
268
269
270
271
272
273
274
275
276
277
278
privateWaitLock();
tmp = *ptr;
(*ptr)-= value;
privateUnlock();
return tmp;
#endif
}
Uint8
279
SDL_AtomicIncrementThenFetch8(volatile Uint8 * ptr)
280
281
282
283
{
#ifdef nativeIncrementThenFetch8
return nativeIncrementThenFetch8(ptr);
#else
284
Uint8 tmp = 0;
285
286
287
288
289
290
291
292
293
294
295
privateWaitLock();
(*ptr)+= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
Uint8
296
SDL_AtomicDecrementThenFetch8(volatile Uint8 * ptr)
297
298
299
300
{
#ifdef nativeDecrementThenFetch8
return nativeDecrementThenFetch8(ptr);
#else
301
Uint8 tmp = 0;
302
303
304
305
306
307
308
309
310
311
312
privateWaitLock();
(*ptr)-= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
Uint8
313
SDL_AtomicAddThenFetch8(volatile Uint8 * ptr, Uint8 value)
314
315
316
317
{
#ifdef nativeAddThenFetch8
return nativeAddThenFetch8(ptr, value);
#else
318
Uint8 tmp = 0;
319
320
321
322
323
324
325
326
327
328
329
privateWaitLock();
(*ptr)+= value;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
Uint8
330
SDL_AtomicSubtractThenFetch8(volatile Uint8 * ptr, Uint8 value)
331
332
333
334
{
#ifdef nativeSubtractThenFetch8
return nativeSubtractThenFetch8(ptr, value);
#else
335
Uint8 tmp = 0;
336
337
338
339
340
341
342
343
344
345
346
347
348
privateWaitLock();
(*ptr)-= value;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
/* 16 bit atomic operations */
Uint16
349
SDL_AtomicExchange16(volatile Uint16 * ptr, Uint16 value)
350
351
352
353
{
#ifdef nativeExchange16
return nativeExchange16(ptr, value);
#else
354
Uint16 tmp = 0;
355
356
357
358
359
360
361
362
363
364
365
privateWaitLock();
tmp = *ptr;
*ptr = value;
privateUnlock();
return tmp;
#endif
}
SDL_bool
366
SDL_AtomicCompareThenSet16(volatile Uint16 * ptr, Uint16 oldvalue, Uint16 newvalue)
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
{
#ifdef nativeCompareThenSet16
return (SDL_bool)nativeCompareThenSet16(ptr, oldvalue, newvalue);
#else
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == oldvalue);
if (result)
{
*ptr = newvalue;
}
privateUnlock();
return result;
#endif
}
SDL_bool
386
SDL_AtomicTestThenSet16(volatile Uint16 * ptr)
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
{
#ifdef nativeTestThenSet16
return (SDL_bool)nativeTestThenSet16(ptr);
#else
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == 0);
if (result)
{
*ptr = 1;
}
privateUnlock();
return result;
#endif
}
void
406
SDL_AtomicClear16(volatile Uint16 * ptr)
407
408
409
410
411
412
413
414
415
416
417
418
419
{
#ifdef nativeClear16
nativeClear16(ptr);
#else
privateWaitLock();
*ptr = 0;
privateUnlock();
return;
#endif
}
Uint16
420
SDL_AtomicFetchThenIncrement16(volatile Uint16 * ptr)
421
422
423
424
{
#ifdef nativeFetchThenIncrement16
return nativeFetchThenIncrement16(ptr);
#else
425
Uint16 tmp = 0;
426
427
428
429
430
431
432
433
434
435
436
privateWaitLock();
tmp = *ptr;
(*ptr)+= 1;
privateUnlock();
return tmp;
#endif
}
Uint16
437
SDL_AtomicFetchThenDecrement16(volatile Uint16 * ptr)
438
439
440
441
{
#ifdef nativeFetchThenDecrement16
return nativeFetchThenDecrement16(ptr);
#else
442
Uint16 tmp = 0;
443
444
445
446
447
448
449
450
451
452
453
privateWaitLock();
tmp = *ptr;
(*ptr) -= 1;
privateUnlock();
return tmp;
#endif
}
Uint16
454
SDL_AtomicFetchThenAdd16(volatile Uint16 * ptr, Uint16 value)
455
456
457
458
{
#ifdef nativeFetchThenAdd16
return nativeFetchThenAdd16(ptr, value);
#else
459
Uint16 tmp = 0;
460
461
462
463
464
465
466
467
468
469
470
privateWaitLock();
tmp = *ptr;
(*ptr)+= value;
privateUnlock();
return tmp;
#endif
}
Uint16
471
SDL_AtomicFetchThenSubtract16(volatile Uint16 * ptr, Uint16 value)
472
473
474
475
{
#ifdef nativeFetchThenSubtract16
return nativeFetchThenSubtract16(ptr, value);
#else
476
Uint16 tmp = 0;
477
478
479
480
481
482
483
484
485
486
487
privateWaitLock();
tmp = *ptr;
(*ptr)-= value;
privateUnlock();
return tmp;
#endif
}
Uint16
488
SDL_AtomicIncrementThenFetch16(volatile Uint16 * ptr)
489
490
491
492
{
#ifdef nativeIncrementThenFetch16
return nativeIncrementThenFetch16(ptr);
#else
493
Uint16 tmp = 0;
494
495
496
497
498
499
500
501
502
503
504
privateWaitLock();
(*ptr)+= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
Uint16
505
SDL_AtomicDecrementThenFetch16(volatile Uint16 * ptr)
506
507
508
509
{
#ifdef nativeDecrementThenFetch16
return nativeDecrementThenFetch16(ptr);
#else
510
Uint16 tmp = 0;
511
512
513
514
515
516
517
518
519
520
521
privateWaitLock();
(*ptr)-= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
Uint16
522
SDL_AtomicAddThenFetch16(volatile Uint16 * ptr, Uint16 value)
523
524
525
526
{
#ifdef nativeAddThenFetch16
return nativeAddThenFetch16(ptr, value);
#else
527
Uint16 tmp = 0;
528
529
530
531
532
533
534
535
536
537
538
privateWaitLock();
(*ptr)+= value;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
Uint16
539
SDL_AtomicSubtractThenFetch16(volatile Uint16 * ptr, Uint16 value)
540
541
542
543
{
#ifdef nativeSubtractThenFetch16
return nativeSubtractThenFetch16(ptr, value);
#else
544
Uint16 tmp = 0;
545
546
547
548
549
550
551
552
553
554
555
556
privateWaitLock();
(*ptr)-= value;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
}
/* 32 bit atomic operations */
557
Uint32
558
SDL_AtomicExchange32(volatile Uint32 * ptr, Uint32 value)
559
{
560
561
562
#ifdef nativeExchange32
return nativeExchange32(ptr, value);
#else
563
Uint32 tmp = 0;
564
565
566
567
568
569
570
571
privateWaitLock();
tmp = *ptr;
*ptr = value;
privateUnlock();
return tmp;
#endif
572
573
574
}
SDL_bool
575
SDL_AtomicCompareThenSet32(volatile Uint32 * ptr, Uint32 oldvalue, Uint32 newvalue)
576
{
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
#ifdef nativeCompareThenSet32
return (SDL_bool)nativeCompareThenSet32(ptr, oldvalue, newvalue);
#else
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == oldvalue);
if (result)
{
*ptr = newvalue;
}
privateUnlock();
return result;
#endif
592
593
594
}
SDL_bool
595
SDL_AtomicTestThenSet32(volatile Uint32 * ptr)
596
{
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
#ifdef nativeTestThenSet32
return (SDL_bool)nativeTestThenSet32(ptr);
#else
SDL_bool result = SDL_FALSE;
privateWaitLock();
result = (*ptr == 0);
if (result)
{
*ptr = 1;
}
privateUnlock();
return result;
#endif
612
613
614
}
void
615
SDL_AtomicClear32(volatile Uint32 * ptr)
616
{
617
618
619
620
621
622
623
624
625
#ifdef nativeClear32
nativeClear32(ptr);
#else
privateWaitLock();
*ptr = 0;
privateUnlock();
return;
#endif
626
627
628
}
Uint32
629
SDL_AtomicFetchThenIncrement32(volatile Uint32 * ptr)
630
{
631
632
633
#ifdef nativeFetchThenIncrement32
return nativeFetchThenIncrement32(ptr);
#else
634
Uint32 tmp = 0;
635
636
637
638
639
640
641
642
privateWaitLock();
tmp = *ptr;
(*ptr)+= 1;
privateUnlock();
return tmp;
#endif
643
644
645
}
Uint32
646
SDL_AtomicFetchThenDecrement32(volatile Uint32 * ptr)
647
{
648
649
650
#ifdef nativeFetchThenDecrement32
return nativeFetchThenDecrement32(ptr);
#else
651
Uint32 tmp = 0;
652
653
654
655
656
657
658
659
privateWaitLock();
tmp = *ptr;
(*ptr) -= 1;
privateUnlock();
return tmp;
#endif
660
661
662
}
Uint32
663
SDL_AtomicFetchThenAdd32(volatile Uint32 * ptr, Uint32 value)
664
{
665
666
667
#ifdef nativeFetchThenAdd32
return nativeFetchThenAdd32(ptr, value);
#else
668
Uint32 tmp = 0;
669
670
671
672
673
674
675
676
privateWaitLock();
tmp = *ptr;
(*ptr)+= value;
privateUnlock();
return tmp;
#endif
677
678
679
}
Uint32
680
SDL_AtomicFetchThenSubtract32(volatile Uint32 * ptr, Uint32 value)
681
{
682
683
684
#ifdef nativeFetchThenSubtract32
return nativeFetchThenSubtract32(ptr, value);
#else
685
Uint32 tmp = 0;
686
687
688
689
690
691
692
693
privateWaitLock();
tmp = *ptr;
(*ptr)-= value;
privateUnlock();
return tmp;
#endif
694
695
696
}
Uint32
697
SDL_AtomicIncrementThenFetch32(volatile Uint32 * ptr)
698
{
699
700
701
#ifdef nativeIncrementThenFetch32
return nativeIncrementThenFetch32(ptr);
#else
702
Uint32 tmp = 0;
703
704
705
706
707
708
709
710
privateWaitLock();
(*ptr)+= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
711
712
713
}
Uint32
714
SDL_AtomicDecrementThenFetch32(volatile Uint32 * ptr)
715
{
716
717
718
#ifdef nativeDecrementThenFetch32
return nativeDecrementThenFetch32(ptr);
#else
719
Uint32 tmp = 0;
720
721
722
723
724
725
726
727
privateWaitLock();
(*ptr)-= 1;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
728
729
730
}
Uint32
731
SDL_AtomicAddThenFetch32(volatile Uint32 * ptr, Uint32 value)
732
{
733
734
735
#ifdef nativeAddThenFetch32
return nativeAddThenFetch32(ptr, value);
#else
736
Uint32 tmp = 0;
737
738
739
740
741
742
743
744
privateWaitLock();
(*ptr)+= value;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
745
746
747
}
Uint32
748
SDL_AtomicSubtractThenFetch32(volatile Uint32 * ptr, Uint32 value)
749
{
750
751
752
#ifdef nativeSubtractThenFetch32
return nativeSubtractThenFetch32(ptr, value);
#else
753
Uint32 tmp = 0;
754
755
756
757
758
759
760
761
privateWaitLock();
(*ptr)-= value;
tmp = *ptr;
privateUnlock();
return tmp;
#endif
762
763
}
764
765
/* 64 bit atomic operations */
#ifdef SDL_HAS_64BIT_TYPE

/* Atomically store 'value' at *ptr and return the previous contents. */
Uint64
SDL_AtomicExchange64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeExchange64
    return nativeExchange64(ptr, value);
#else
    Uint64 oldval = 0;

    privateWaitLock();
    oldval = *ptr;
    *ptr = value;
    privateUnlock();
    return oldval;
#endif
}

/* Atomically set *ptr to 'newvalue' if it currently equals 'oldvalue'.
   Returns SDL_TRUE on success. */
SDL_bool
SDL_AtomicCompareThenSet64(volatile Uint64 * ptr, Uint64 oldvalue, Uint64 newvalue)
{
#ifdef nativeCompareThenSet64
    return (SDL_bool)nativeCompareThenSet64(ptr, oldvalue, newvalue);
#else
    SDL_bool matched = SDL_FALSE;

    privateWaitLock();
    matched = (*ptr == oldvalue);
    if (matched) {
        *ptr = newvalue;
    }
    privateUnlock();
    return matched;
#endif
}

/* Atomically set *ptr to 1; returns SDL_TRUE if it was previously 0. */
SDL_bool
SDL_AtomicTestThenSet64(volatile Uint64 * ptr)
{
#ifdef nativeTestThenSet64
    return (SDL_bool)nativeTestThenSet64(ptr);
#else
    SDL_bool wasClear = SDL_FALSE;

    privateWaitLock();
    wasClear = (*ptr == 0);
    if (wasClear) {
        *ptr = 1;
    }
    privateUnlock();
    return wasClear;
#endif
}

/* Atomically set *ptr to 0. */
void
SDL_AtomicClear64(volatile Uint64 * ptr)
{
#ifdef nativeClear64
    nativeClear64(ptr);
#else
    privateWaitLock();
    *ptr = 0;
    privateUnlock();
#endif
}

/* Atomically increment *ptr; returns the value BEFORE the increment. */
Uint64
SDL_AtomicFetchThenIncrement64(volatile Uint64 * ptr)
{
#ifdef nativeFetchThenIncrement64
    return nativeFetchThenIncrement64(ptr);
#else
    Uint64 oldval = 0;

    privateWaitLock();
    oldval = *ptr;
    (*ptr) += 1;
    privateUnlock();
    return oldval;
#endif
}

/* Atomically decrement *ptr; returns the value BEFORE the decrement. */
Uint64
SDL_AtomicFetchThenDecrement64(volatile Uint64 * ptr)
{
#ifdef nativeFetchThenDecrement64
    return nativeFetchThenDecrement64(ptr);
#else
    Uint64 oldval = 0;

    privateWaitLock();
    oldval = *ptr;
    (*ptr) -= 1;
    privateUnlock();
    return oldval;
#endif
}

/* Atomically add 'value' to *ptr; returns the value BEFORE the add. */
Uint64
SDL_AtomicFetchThenAdd64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeFetchThenAdd64
    return nativeFetchThenAdd64(ptr, value);
#else
    Uint64 oldval = 0;

    privateWaitLock();
    oldval = *ptr;
    (*ptr) += value;
    privateUnlock();
    return oldval;
#endif
}

/* Atomically subtract 'value' from *ptr; returns the value BEFORE. */
Uint64
SDL_AtomicFetchThenSubtract64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeFetchThenSubtract64
    return nativeFetchThenSubtract64(ptr, value);
#else
    Uint64 oldval = 0;

    privateWaitLock();
    oldval = *ptr;
    (*ptr) -= value;
    privateUnlock();
    return oldval;
#endif
}

/* Atomically increment *ptr; returns the value AFTER the increment. */
Uint64
SDL_AtomicIncrementThenFetch64(volatile Uint64 * ptr)
{
#ifdef nativeIncrementThenFetch64
    return nativeIncrementThenFetch64(ptr);
#else
    Uint64 newval = 0;

    privateWaitLock();
    (*ptr) += 1;
    newval = *ptr;
    privateUnlock();
    return newval;
#endif
}

/* Atomically decrement *ptr; returns the value AFTER the decrement. */
Uint64
SDL_AtomicDecrementThenFetch64(volatile Uint64 * ptr)
{
#ifdef nativeDecrementThenFetch64
    return nativeDecrementThenFetch64(ptr);
#else
    Uint64 newval = 0;

    privateWaitLock();
    (*ptr) -= 1;
    newval = *ptr;
    privateUnlock();
    return newval;
#endif
}

/* Atomically add 'value' to *ptr; returns the value AFTER the add. */
Uint64
SDL_AtomicAddThenFetch64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeAddThenFetch64
    return nativeAddThenFetch64(ptr, value);
#else
    Uint64 newval = 0;

    privateWaitLock();
    (*ptr) += value;
    newval = *ptr;
    privateUnlock();
    return newval;
#endif
}

/* Atomically subtract 'value' from *ptr; returns the value AFTER. */
Uint64
SDL_AtomicSubtractThenFetch64(volatile Uint64 * ptr, Uint64 value)
{
#ifdef nativeSubtractThenFetch64
    return nativeSubtractThenFetch64(ptr, value);
#else
    Uint64 newval = 0;

    privateWaitLock();
    (*ptr) -= value;
    newval = *ptr;
    privateUnlock();
    return newval;
#endif
}

#endif /* SDL_HAS_64BIT_TYPE */