/*
    SDL - Simple DirectMedia Layer
    Copyright (C) 1997-2010 Sam Lantinga

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    Sam Lantinga
    slouken@libsdl.org
*/
#include "SDL_stdinc.h"
#include "SDL_atomic.h"

/* Note that we undefine the atomic operations here, in case they are
   defined as compiler intrinsics while building SDL but the library user
   doesn't have that compiler. That way we always have a working set of
   atomic operations built into the library.
*/
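
/* A minimal sketch of that pattern (the macro body below is hypothetical,
   not the actual SDL_atomic.h definition). The public header might expose
   an operation as a compiler intrinsic:

       #define SDL_AtomicSet(a, v)  __sync_lock_test_and_set(&(a)->value, (v))

   Undefining the macro here and then defining an ordinary function with the
   same name guarantees that a real, linkable symbol exists in the library,
   even for applications built with a compiler that lacks the intrinsic.
*/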

/*
  If any of the operations are not provided, then we must emulate some
  of them. That means we need a nice implementation of spin locks
  that avoids the "one big lock" problem. We use a vector of spin
  locks and pick which one to use based on the address of the operand
  of the function.

  To generate the index of the lock we first shift by 3 bits to get
  rid of the zero bits that result from the 32- and 64-bit alignment of
  data. We then mask off all but 5 bits and use those 5 bits as an
  index into the table.

  Picking the lock this way ensures that accesses to the same data at
  the same time will go to the same lock. OTOH, accesses to different
  data have only a 1/32 chance of hitting the same lock. That should
  pretty much eliminate the chance of several atomic operations on
  different data ending up waiting on the same "big lock". If that
  isn't enough, the table of locks can be expanded to a new size, so
  long as the new size is a power of two.

  Contributed by Bob Pendleton, bob@pendleton.com
*/
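
/* A quick worked example of the index computation (the addresses are
   hypothetical, purely for illustration): an operand at address 0x7ffc1040
   gives (0x7ffc1040 >> 3) & 0x1f == 8, so it uses locks[8], while an
   operand at 0x7ffc1048 maps to locks[9]. The same address always hashes
   to the same lock, and nearby but distinct addresses usually spread
   across different locks.
*/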

static SDL_SpinLock locks[32];

static __inline__ void
enterLock(void *a)
{
    uintptr_t index = ((((uintptr_t)a) >> 3) & 0x1f);

    SDL_AtomicLock(&locks[index]);
}

static __inline__ void
leaveLock(void *a)
{
    uintptr_t index = ((((uintptr_t)a) >> 3) & 0x1f);

    SDL_AtomicUnlock(&locks[index]);
}

#undef SDL_AtomicSet
int
SDL_AtomicSet(SDL_atomic_t *a, int value)
{
    int oldvalue;

    enterLock(a);
    oldvalue = a->value;
    a->value = value;
    leaveLock(a);

    return oldvalue;
}

#undef SDL_AtomicGet
int
SDL_AtomicGet(SDL_atomic_t *a)
{
    /* Assuming integral reads on this platform, we're safe here since the
       functions that set the variable have the necessary memory barriers.
    */
    return a->value;
}

#undef SDL_AtomicAdd
int
SDL_AtomicAdd(SDL_atomic_t *a, int value)
{
    int oldvalue;

    enterLock(a);
    oldvalue = a->value;
    a->value += value;
    leaveLock(a);

    return oldvalue;
}
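
/* Because SDL_AtomicAdd returns the value the counter held before the
   addition, it can hand out unique sequence numbers. A small usage sketch
   (the counter and function names are hypothetical, not part of SDL):

       static SDL_atomic_t next_ticket;

       int take_ticket(void)
       {
           return SDL_AtomicAdd(&next_ticket, 1);  // yields 0, 1, 2, ...
       }
*/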

#undef SDL_AtomicIncRef
void
SDL_AtomicIncRef(SDL_atomic_t *a)
{
    SDL_AtomicAdd(a, 1);
}

#undef SDL_AtomicDecRef
SDL_bool
SDL_AtomicDecRef(SDL_atomic_t *a)
{
    return SDL_AtomicAdd(a, -1) == 1;
}
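
/* SDL_AtomicDecRef returns SDL_TRUE exactly when the decrement takes the
   count from 1 to 0, i.e. when the last reference has been dropped. A
   rough reference-counting sketch (the struct and function names are
   hypothetical):

       typedef struct {
           SDL_atomic_t refcount;
           void *data;
       } Resource;

       void resource_release(Resource *r)
       {
           if (SDL_AtomicDecRef(&r->refcount)) {
               SDL_free(r->data);
               SDL_free(r);
           }
       }
*/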

#undef SDL_AtomicCAS
SDL_bool
SDL_AtomicCAS(SDL_atomic_t *a, int oldval, int newval)
{
    SDL_bool retval = SDL_FALSE;

    enterLock(a);
    if (a->value == oldval) {
        a->value = newval;
        retval = SDL_TRUE;
    }
    leaveLock(a);

    return retval;
}
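
/* Compare-and-swap is the building block for lock-free updates: read the
   current value, compute a new one, and retry if another thread changed
   the value in the meantime (the same pattern SDL_AtomicSetPtr uses
   below). A hedged sketch of an atomic "store maximum" helper, which is
   not part of the SDL API:

       void atomic_store_max(SDL_atomic_t *a, int candidate)
       {
           int seen;
           do {
               seen = SDL_AtomicGet(a);
               if (seen >= candidate) {
                   return;  // already at least as large, nothing to do
               }
           } while (!SDL_AtomicCAS(a, seen, candidate));
       }
*/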

#undef SDL_AtomicSetPtr
void
SDL_AtomicSetPtr(void** a, void* value)
{
    void *prevval;
    do {
        prevval = *a;
    } while (!SDL_AtomicCASPtr(a, prevval, value));
}

#undef SDL_AtomicGetPtr
void*
SDL_AtomicGetPtr(void** a)
{
    /* Assuming integral reads on this platform, we're safe here since the
       functions that set the pointer have the necessary memory barriers.
    */
    return *a;
}

#undef SDL_AtomicCASPtr
SDL_bool SDL_AtomicCASPtr(void **a, void *oldval, void *newval)
{
    SDL_bool retval = SDL_FALSE;

    enterLock(a);
    if (*a == oldval) {
        *a = newval;
        retval = SDL_TRUE;
    }
    leaveLock(a);

    return retval;
}
/* vi: set ts=4 sw=4 expandtab: */