SDL_atomic.c

/*
    SDL - Simple DirectMedia Layer
    Copyright (C) 1997-2010 Sam Lantinga

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    Sam Lantinga
    slouken@libsdl.org
*/

#include "SDL_stdinc.h"
#include "SDL_atomic.h"

/*
  If any of the operations are not provided then we must emulate some
  of them. That means we need a nice implementation of spin locks
  that avoids the "one big lock" problem. We use a vector of spin
  locks and pick which one to use based on the address of the operand
  of the function.

  To generate the index of the lock we first shift by 3 bits to get
  rid of the zero bits that result from the 32 and 64 bit alignment of
  data. We then mask off all but 5 bits and use those 5 bits as an
  index into the table.

  Picking the lock this way ensures that accesses to the same data at
  the same time will go to the same lock. On the other hand, accesses
  to different data have only a 1/32 chance of hitting the same lock.
  That should pretty much eliminate the chance of several atomic
  operations on different data waiting on the same "big lock". If it
  doesn't, the table of locks can be expanded to a new size, so long
  as the new size is a power of two.

  Contributed by Bob Pendleton, bob@pendleton.com
*/
static SDL_SpinLock locks[32];

static __inline__ void
enterLock(void *a)
{
    uintptr_t index = ((((uintptr_t) a) >> 3) & 0x1f);

    SDL_AtomicLock(&locks[index]);
}

static __inline__ void
leaveLock(void *a)
{
    uintptr_t index = ((((uintptr_t) a) >> 3) & 0x1f);

    SDL_AtomicUnlock(&locks[index]);
}
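
/* Worked example (illustrative sketch, not part of the original file): the
   hypothetically named helper below just exposes the same computation that
   enterLock()/leaveLock() perform. An operand at address 0x1008 hashes to
   (0x1008 >> 3) & 0x1f == 1, and 0x100C, which lives in the same 8-byte
   slot, hashes to the same index, while an operand at 0x1010 moves on to
   index 2. */
static __inline__ uintptr_t
lockIndexForAddress(void *a)
{
    return ((((uintptr_t) a) >> 3) & 0x1f);
}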

#ifndef SDL_AtomicSet
/* Emulated with a spin lock; returns the value *a held before the set. */
int
SDL_AtomicSet(SDL_atomic_t *a, int value)
{
    int oldvalue;

    enterLock(a);
    oldvalue = a->value;
    a->value = value;
    leaveLock(a);

    return oldvalue;
}
#endif

#ifndef SDL_AtomicGet
int
SDL_AtomicGet(SDL_atomic_t *a)
{
    /* Assuming integral reads on this platform, we're safe here since the
       functions that set the variable have the necessary memory barriers.
    */
    return a->value;
}
#endif

#ifndef SDL_AtomicAdd
/* Emulated with a spin lock; returns the value *a held before the add. */
int
SDL_AtomicAdd(SDL_atomic_t *a, int value)
{
    int oldvalue;

    enterLock(a);
    oldvalue = a->value;
    a->value += value;
    leaveLock(a);

    return oldvalue;
}
#endif

#ifndef SDL_AtomicIncRef
void
SDL_AtomicIncRef(SDL_atomic_t *a)
{
    SDL_AtomicAdd(a, 1);
}
#endif

#ifndef SDL_AtomicDecRef
/* Returns SDL_TRUE when the value that was decremented was 1, i.e. the
   count has now reached zero. */
SDL_bool
SDL_AtomicDecRef(SDL_atomic_t *a)
{
    return SDL_AtomicAdd(a, -1) == 1;
}
#endif
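
/* Usage sketch (illustrative, not part of SDL): a hypothetical reference-
   counted object might pair SDL_AtomicIncRef() and SDL_AtomicDecRef() like
   this. SDL_AtomicDecRef() returns SDL_TRUE exactly when the caller dropped
   the last reference, so that caller is the one that destroys the object. */
typedef struct ExampleObject
{
    SDL_atomic_t refcount;
    /* ... payload ... */
} ExampleObject;

static void
ExampleObject_AddRef(ExampleObject *obj)
{
    SDL_AtomicIncRef(&obj->refcount);
}

static void
ExampleObject_Release(ExampleObject *obj)
{
    if (SDL_AtomicDecRef(&obj->refcount)) {
        SDL_free(obj);          /* last reference gone, free the object */
    }
}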

#ifndef SDL_AtomicCAS
/* Emulated compare-and-swap: the swap happens only if *a still equals
   oldval, and the value *a held before the call is returned either way. */
int
SDL_AtomicCAS(SDL_atomic_t *a, int oldval, int newval)
{
    int prevval;

    enterLock(a);
    prevval = a->value;
    if (prevval == oldval) {
        a->value = newval;
    }
    leaveLock(a);

    return prevval;
}
#endif
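
/* Usage sketch (illustrative, not part of SDL): the classic compare-and-swap
   retry loop. As implemented above, SDL_AtomicCAS() returns the value *a
   held before the call, so the swap succeeded exactly when that return value
   matches the value we read. The hypothetical helper below raises *a to at
   least 'value' without ever lowering it; SDL_AtomicSetPtr() below uses the
   same retry pattern with SDL_AtomicCASPtr(). */
static __inline__ int
exampleAtomicMax(SDL_atomic_t *a, int value)
{
    int oldvalue;

    do {
        oldvalue = SDL_AtomicGet(a);
        if (oldvalue >= value) {
            return oldvalue;    /* already at least 'value', nothing to do */
        }
    } while (SDL_AtomicCAS(a, oldvalue, value) != oldvalue);

    return oldvalue;
}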

#ifndef SDL_AtomicSetPtr
/* Emulated with a CAS loop so the store goes through SDL_AtomicCASPtr()
   and picks up whatever locking or memory barriers it provides. */
void
SDL_AtomicSetPtr(void **a, void *value)
{
    void *prevval;

    do {
        prevval = *a;
    } while (SDL_AtomicCASPtr(a, prevval, value) != prevval);
}
#endif

#ifndef SDL_AtomicGetPtr
void *
SDL_AtomicGetPtr(void **a)
{
    /* Assuming integral reads on this platform, we're safe here since the
       functions that set the pointer have the necessary memory barriers.
    */
    return *a;
}
#endif

#ifndef SDL_AtomicCASPtr
/* Pointer-sized counterpart of SDL_AtomicCAS(); returns the pointer *a
   held before the call. */
void *
SDL_AtomicCASPtr(void **a, void *oldval, void *newval)
{
    void *prevval;

    enterLock(a);
    prevval = *a;
    if (*a == oldval) {
        *a = newval;
    }
    leaveLock(a);

    return prevval;
}
#endif
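
/* Usage sketch (illustrative, not part of SDL): one common use of
   SDL_AtomicCASPtr() is publishing a lazily created object exactly once.
   Whichever thread swaps its candidate into the NULL slot wins; the losers
   should destroy their candidate and use the published pointer instead. */
static void *
examplePublishOnce(void **slot, void *candidate)
{
    if (SDL_AtomicCASPtr(slot, NULL, candidate) == NULL) {
        return candidate;       /* we won the race; candidate is published */
    }
    return SDL_AtomicGetPtr(slot);      /* somebody else published first */
}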
/* vi: set ts=4 sw=4 expandtab: */