1 |
schoenebeck |
53 |
/*
    Copyright (C) 2001 Paul Davis and others (see below)
    Code derived from various headers from the Linux kernel.
    Copyright attributions maintained where present.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    $Id: atomic.h,v 1.8 2009-03-29 18:43:39 schoenebeck Exp $
*/

/*
    CAUTION: don't ever include this file in header files that are exposed
    to the liblinuxsampler C++ API !!! This file will not be installed along
    with liblinuxsampler's header files! This is due to the fact that
    atomic.h is architecture specific and would in turn require us to include
    and export config.h, which is definitely a bad idea.
*/
30 |
schoenebeck |
880 |
|
31 |
schoenebeck |
53 |
#ifndef __linuxsampler_atomic_h__
#define __linuxsampler_atomic_h__

/* Pull in config.h indirectly (defines HAVE_SMP among others). */
#include "global.h"

/* Map our build-system macro onto the one the kernel-derived code below
   expects: HAVE_SMP is ours, CONFIG_SMP is the kernel's. */
#ifdef HAVE_SMP
#define CONFIG_SMP
#endif

#if defined(__linux__) || defined(WIN32) || defined(__APPLE__)
#ifdef _ARCH_PPC
43 |
schoenebeck |
53 |
|
44 |
|
|
/*
 * BK Id: SCCS/s.atomic.h 1.15 10/28/01 10:37:22 trini
 */
/*
 * PowerPC atomic operations
 */

#ifndef _ASM_PPC_ATOMIC_H_
#define _ASM_PPC_ATOMIC_H_

/* The counter is volatile so the compiler always accesses memory and never
   caches the value in a register across atomic operations. */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))

extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
extern void atomic_set_mask(unsigned long mask, unsigned long *addr);

/* On SMP, append an isync after a successful stwcx. so the CPU does not
   speculate loads past the atomic operation (acquire semantics). */
#ifdef CONFIG_SMP
#define SMP_ISYNC	"\n\tisync"
#else
#define SMP_ISYNC
#endif

/* Atomically add a to *v via a lwarx/stwcx. retry loop. */
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3\n\
	add	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

/* Atomically add a to *v and return the new value. */
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n\
	add	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/* Atomically subtract a from *v. */
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3\n\
	subf	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

/* Atomically subtract a from *v and return the new value. */
static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n\
	subf	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/* Atomically increment *v by 1. */
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

/* Atomically increment *v by 1 and return the new value. */
static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/* Atomically decrement *v by 1. */
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

/* Atomically decrement *v by 1 and return the new value. */
static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ASM_PPC_ATOMIC_H_ */
227 |
|
|
|
228 |
|
|
/***********************************************************************/

#else /* !PPC */

#if defined(__i386__) || defined(__x86_64__)
233 |
|
|
|
234 |
|
|
#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/* On SMP the lock prefix makes the read-modify-write bus-atomic;
   on UP it is unnecessary and omitted. */
#ifdef CONFIG_SMP
#define SMP_LOCK	"lock ; "
#else
#define SMP_LOCK	""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		SMP_LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		SMP_LOCK "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		SMP_LOCK "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		SMP_LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		SMP_LOCK "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		SMP_LOCK "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		SMP_LOCK "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		SMP_LOCK "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
	__asm__ __volatile__(SMP_LOCK "andl %0,%1" \
	: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
	__asm__ __volatile__(SMP_LOCK "orl %0,%1" \
	: : "r" (mask),"m" (*addr) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ARCH_I386_ATOMIC__ */
436 |
|
|
|
437 |
|
|
/***********************************************************************/

#else /* !PPC && !i386 */

#ifdef __sparc__
442 |
|
|
|
443 |
|
|
/* atomic.h: These still suck, but the I-cache hit rate is higher. |
444 |
|
|
* |
445 |
|
|
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) |
446 |
|
|
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au) |
447 |
|
|
*/ |
448 |
|
|
|
449 |
|
|
#ifndef __ARCH_SPARC_ATOMIC__ |
450 |
|
|
#define __ARCH_SPARC_ATOMIC__ |
451 |
|
|
|
452 |
|
|
typedef struct { volatile int counter; } atomic_t; |
453 |
|
|
|
454 |
|
|
#ifndef CONFIG_SMP |
455 |
|
|
|
456 |
|
|
#define ATOMIC_INIT(i) { (i) } |
457 |
|
|
#define atomic_read(v) ((v)->counter) |
458 |
|
|
#define atomic_set(v, i) (((v)->counter) = i) |
459 |
|
|
|
460 |
|
|
#else |
461 |
|
|
/* We do the bulk of the actual work out of line in two common |
462 |
|
|
* routines in assembler, see arch/sparc/lib/atomic.S for the |
463 |
|
|
* "fun" details. |
464 |
|
|
* |
465 |
|
|
* For SMP the trick is you embed the spin lock byte within |
466 |
|
|
* the word, use the low byte so signedness is easily retained |
467 |
|
|
* via a quick arithmetic shift. It looks like this: |
468 |
|
|
* |
469 |
|
|
* ---------------------------------------- |
470 |
|
|
* | signed 24-bit counter value | lock | atomic_t |
471 |
|
|
* ---------------------------------------- |
472 |
|
|
* 31 8 7 0 |
473 |
|
|
*/ |
474 |
|
|
|
475 |
|
|
#define ATOMIC_INIT(i) { (i << 8) } |
476 |
|
|
|
477 |
|
|
static __inline__ int atomic_read(atomic_t *v) |
478 |
|
|
{ |
479 |
|
|
int ret = v->counter; |
480 |
|
|
|
481 |
|
|
while(ret & 0xff) |
482 |
|
|
ret = v->counter; |
483 |
|
|
|
484 |
|
|
return ret >> 8; |
485 |
|
|
} |
486 |
|
|
|
487 |
|
|
#define atomic_set(v, i) (((v)->counter) = ((i) << 8)) |
488 |
|
|
#endif |
489 |
|
|
|
490 |
|
|
static __inline__ int __atomic_add(int i, atomic_t *v) |
491 |
|
|
{ |
492 |
|
|
register volatile int *ptr asm("g1"); |
493 |
|
|
register int increment asm("g2"); |
494 |
|
|
|
495 |
|
|
ptr = &v->counter; |
496 |
|
|
increment = i; |
497 |
|
|
|
498 |
|
|
__asm__ __volatile__( |
499 |
|
|
"mov %%o7, %%g4\n\t" |
500 |
|
|
"call ___atomic_add\n\t" |
501 |
|
|
" add %%o7, 8, %%o7\n" |
502 |
|
|
: "=&r" (increment) |
503 |
|
|
: "0" (increment), "r" (ptr) |
504 |
|
|
: "g3", "g4", "g7", "memory", "cc"); |
505 |
|
|
|
506 |
|
|
return increment; |
507 |
|
|
} |
508 |
|
|
|
509 |
|
|
static __inline__ int __atomic_sub(int i, atomic_t *v) |
510 |
|
|
{ |
511 |
|
|
register volatile int *ptr asm("g1"); |
512 |
|
|
register int increment asm("g2"); |
513 |
|
|
|
514 |
|
|
ptr = &v->counter; |
515 |
|
|
increment = i; |
516 |
|
|
|
517 |
|
|
__asm__ __volatile__( |
518 |
|
|
"mov %%o7, %%g4\n\t" |
519 |
|
|
"call ___atomic_sub\n\t" |
520 |
|
|
" add %%o7, 8, %%o7\n" |
521 |
|
|
: "=&r" (increment) |
522 |
|
|
: "0" (increment), "r" (ptr) |
523 |
|
|
: "g3", "g4", "g7", "memory", "cc"); |
524 |
|
|
|
525 |
|
|
return increment; |
526 |
|
|
} |
527 |
|
|
|
528 |
|
|
#define atomic_add(i, v) ((void)__atomic_add((i), (v))) |
529 |
|
|
#define atomic_sub(i, v) ((void)__atomic_sub((i), (v))) |
530 |
|
|
|
531 |
|
|
#define atomic_dec_return(v) __atomic_sub(1, (v)) |
532 |
|
|
#define atomic_inc_return(v) __atomic_add(1, (v)) |
533 |
|
|
|
534 |
|
|
#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0) |
535 |
|
|
#define atomic_dec_and_test(v) (__atomic_sub(1, (v)) == 0) |
536 |
|
|
|
537 |
|
|
#define atomic_inc(v) ((void)__atomic_add(1, (v))) |
538 |
|
|
#define atomic_dec(v) ((void)__atomic_sub(1, (v))) |
539 |
|
|
|
540 |
|
|
#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0) |
541 |
|
|
|
542 |
|
|
/* Atomic operations are already serializing */ |
543 |
|
|
#define smp_mb__before_atomic_dec() barrier() |
544 |
|
|
#define smp_mb__after_atomic_dec() barrier() |
545 |
|
|
#define smp_mb__before_atomic_inc() barrier() |
546 |
|
|
#define smp_mb__after_atomic_inc() barrier() |
547 |
|
|
|
548 |
|
|
|
549 |
|
|
#endif /* !(__ARCH_SPARC_ATOMIC__) */ |
550 |
|
|
|
551 |
|
|
/***********************************************************************/

#else

#ifdef __ia64__
556 |
|
|
|
557 |
|
|
#ifndef __ARCH_IA64_ATOMIC__
#define __ARCH_IA64_ATOMIC__

/* On ia64 a plain volatile int suffices; updates go through cmpxchg4. */
typedef volatile int atomic_t;

/* NOTE(review): these were declared bare `inline` in a header, which under
   C99 inline semantics leaves no definition for out-of-line calls; made
   `static __inline__` like every other port in this file. */
static __inline__ int
atomic_read (const atomic_t * a)
{
	return *a;
}

static __inline__ void
atomic_set(atomic_t *a, int v)
{
	*a = v;
}

/* Atomically increment *v via a cmpxchg4.acq retry loop. */
static __inline__ void
atomic_inc (atomic_t *v)
{
	int old, r;

	do {
		old = atomic_read(v);
		__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
		__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
				      : "=r"(r) : "r"(v), "r"(old + 1)
				      : "memory");
	} while (r != old);
}

/* Atomically decrement *v via a cmpxchg4.acq retry loop. */
static __inline__ void
atomic_dec (atomic_t *v)
{
	int old, r;

	do {
		old = atomic_read(v);
		__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
		__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
				      : "=r"(r) : "r"(v), "r"(old - 1)
				      : "memory");
	} while (r != old);
}

/* Atomically decrement *v and return true iff the result is zero.
 * BUGFIX: this previously returned `old != 1`, i.e. true when the result
 * was NOT zero — inverted relative to the PPC/i386/sparc/alpha/s390
 * implementations in this same file.  The result is zero exactly when the
 * old value was 1. */
static __inline__ int
atomic_dec_and_test (atomic_t *v)
{
	int old, r;

	do {
		old = atomic_read(v);
		__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
		__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
				      : "=r"(r) : "r"(v), "r"(old - 1)
				      : "memory");
	} while (r != old);
	return old == 1;
}

#endif /* !(__ARCH_IA64_ATOMIC__) */
623 |
|
|
|
624 |
|
|
#else

#ifdef __alpha__
627 |
|
|
|
628 |
|
|
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

/* Atomically add i to *v using an ldl_l/stl_c retry loop. */
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

/* Atomically subtract i from *v. */
static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ALPHA_ATOMIC_H */
742 |
|
|
|
743 |
|
|
#else

#ifdef __s390__
746 |
|
|
|
747 |
|
|
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 * S390 uses 'Compare And Swap' for atomicity in SMP environment
 */

typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)  { (i) }

#define atomic_eieio()          __asm__ __volatile__ ("BCR 15,0")

/* Load the counter, apply op_string to a copy, and retry the compare-and-
 * swap until no other CPU has modified the counter in between. */
#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string)		\
        __asm__ __volatile__("   l     %0,0(%2)\n"			\
                             "0: lr    %1,%0\n"				\
                             op_string "    %1,%3\n"			\
                             "   cs    %0,%1,0(%2)\n"			\
                             "   jl    0b"				\
                             : "=&d" (old_val), "=&d" (new_val)		\
                             : "a" (ptr), "d" (op_val) : "cc" );

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         (((v)->counter) = (i))

static __inline__ void atomic_add(int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "ar");
}

static __inline__ int atomic_add_return (int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "ar");
        return new_val;
}

static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "ar");
        return new_val < 0;
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "sr");
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "ar");
}

static __inline__ int atomic_inc_return(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "ar");
        return new_val;
}

static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "ar");
        return new_val != 0;
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "sr");
}

static __inline__ int atomic_dec_return(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "sr");
        return new_val;
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "sr");
        return new_val == 0;
}

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, mask, "or");
}

/*
  returns 0  if expected_oldval==value in *v ( swap was successful )
  returns 1  if unsuccessful.
*/
static __inline__ int
atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
{
        int retval;

        __asm__ __volatile__(
                "  lr   0,%2\n"
                "  cs   0,%3,0(%1)\n"
                "  ipm  %0\n"
                "  srl  %0,28\n"
                "0:"
                : "=&d" (retval)
                : "a" (v), "d" (expected_oldval) , "d" (new_val)
                : "0", "cc");
        return retval;
}

/*
  Spin till *v = expected_oldval then swap with newval.
 */
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval,int new_val,atomic_t *v)
{
        __asm__ __volatile__(
                "0: lr  0,%1\n"
                "   cs  0,%2,0(%0)\n"
                "   jl  0b\n"
                : : "a" (v), "d" (expected_oldval) , "d" (new_val)
                : "cc", "0" );
}

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ARCH_S390_ATOMIC __ */
905 |
|
|
|
906 |
|
|
#else

#ifdef __mips__
909 |
|
|
|
910 |
|
|
/* |
911 |
|
|
* Atomic operations that C can't guarantee us. Useful for |
912 |
|
|
* resource counting etc.. |
913 |
|
|
* |
914 |
|
|
* But use these as seldom as possible since they are much more slower |
915 |
|
|
* than regular operations. |
916 |
|
|
* |
917 |
|
|
* This file is subject to the terms and conditions of the GNU General Public |
918 |
|
|
* License. See the file "COPYING" in the main directory of this archive |
919 |
|
|
* for more details. |
920 |
|
|
* |
921 |
|
|
* Copyright (C) 1996, 1997, 2000 by Ralf Baechle |
922 |
|
|
*/ |
923 |
|
|
#ifndef __ASM_ATOMIC_H |
924 |
|
|
#define __ASM_ATOMIC_H |
925 |
|
|
|
926 |
|
|
typedef struct { volatile int counter; } atomic_t; |
927 |
|
|
|
928 |
|
|
#define ATOMIC_INIT(i) { (i) } |
929 |
|
|
|
930 |
|
|
/* |
931 |
|
|
* atomic_read - read atomic variable |
932 |
|
|
* @v: pointer of type atomic_t |
933 |
|
|
* |
934 |
|
|
* Atomically reads the value of @v. Note that the guaranteed |
935 |
|
|
* useful range of an atomic_t is only 24 bits. |
936 |
|
|
*/ |
937 |
|
|
#define atomic_read(v) ((v)->counter) |
938 |
|
|
|
939 |
|
|
/* |
940 |
|
|
* atomic_set - set atomic variable |
941 |
|
|
* @v: pointer of type atomic_t |
942 |
|
|
* @i: required value |
943 |
|
|
* |
944 |
|
|
* Atomically sets the value of @v to @i. Note that the guaranteed |
945 |
|
|
* useful range of an atomic_t is only 24 bits. |
946 |
|
|
*/ |
947 |
|
|
#define atomic_set(v,i) ((v)->counter = (i)) |
948 |
|
|
|
949 |
|
|
/* |
950 |
|
|
* ... while for MIPS II and better we can use ll/sc instruction. This |
951 |
|
|
* implementation is SMP safe ... |
952 |
|
|
*/ |
953 |
|
|
|
954 |
|
|
/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v. Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		".set push # atomic_add\n"
		".set mips2 \n"		/* ll/sc require MIPS II or better */
		"1: ll %0, %1 \n"	/* load-linked the counter */
		" addu %0, %2 \n"	/* add the increment */
		" sc %0, %1 \n"		/* store-conditional; %0 = 0 on failure */
		" beqz %0, 1b \n"	/* lost the reservation -> retry */
		".set pop \n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
}
977 |
|
|
|
978 |
|
|
/*
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		".set push # atomic_sub\n"
		".set mips2 \n"		/* ll/sc require MIPS II or better */
		"1: ll %0, %1 \n"	/* load-linked the counter */
		" subu %0, %2 \n"	/* subtract the decrement */
		" sc %0, %1 \n"		/* store-conditional; %0 = 0 on failure */
		" beqz %0, 1b \n"	/* lost the reservation -> retry */
		".set pop \n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
}
1001 |
|
|
|
1002 |
|
|
/*
 * Same as above, but return the result value
 */
/* atomic_add_return - atomically add @i to @v and return the new value.
 * The body runs with ".set noreorder", so the second addu sits in the
 * beqz delay slot: sc overwrote %0 with its success flag, and the
 * delay-slot addu recomputes result = loaded value + i on the successful
 * (fall-through) path.  The trailing sync acts as a memory barrier. */
extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set push # atomic_add_return\n"
		".set mips2 \n"
		".set noreorder \n"
		"1: ll %1, %2 \n"	/* temp = load-linked counter */
		" addu %0, %1, %3 \n"	/* result = temp + i */
		" sc %0, %2 \n"		/* try to store result; %0 = success */
		" beqz %0, 1b \n"	/* retry if the store failed */
		" addu %0, %1, %3 \n"	/* delay slot: recompute result */
		" sync \n"		/* order the update vs later accesses */
		".set pop \n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");

	return result;
}
1026 |
|
|
|
1027 |
|
|
/* atomic_sub_return - atomically subtract @i from @v and return the new
 * value.  Mirror image of atomic_add_return(): noreorder block, subu in
 * the beqz delay slot recomputes the result after sc clobbered %0, and
 * sync provides a memory barrier. */
extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set push # atomic_sub_return\n"
		".set mips2 \n"
		".set noreorder \n"
		"1: ll %1, %2 \n"	/* temp = load-linked counter */
		" subu %0, %1, %3 \n"	/* result = temp - i */
		" sc %0, %2 \n"		/* try to store result; %0 = success */
		" beqz %0, 1b \n"	/* retry if the store failed */
		" subu %0, %1, %3 \n"	/* delay slot: recompute result */
		" sync \n"		/* order the update vs later accesses */
		".set pop \n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");

	return result;
}
1048 |
|
|
|
1049 |
|
|
#define atomic_dec_return(v) atomic_sub_return(1,(v)) |
1050 |
|
|
#define atomic_inc_return(v) atomic_add_return(1,(v)) |
1051 |
|
|
|
1052 |
|
|
/* |
1053 |
|
|
* atomic_sub_and_test - subtract value from variable and test result |
1054 |
|
|
* @i: integer value to subtract |
1055 |
|
|
* @v: pointer of type atomic_t |
1056 |
|
|
* |
1057 |
|
|
* Atomically subtracts @i from @v and returns |
1058 |
|
|
* true if the result is zero, or false for all |
1059 |
|
|
* other cases. Note that the guaranteed |
1060 |
|
|
* useful range of an atomic_t is only 24 bits. |
1061 |
|
|
*/ |
1062 |
|
|
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) |
1063 |
|
|
|
1064 |
|
|
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
/* Fix: atomic_inc_return() is a one-argument macro (it already supplies
 * the increment of 1 via atomic_add_return(1,(v))).  The previous
 * definition passed two arguments -- atomic_inc_return(1, (v)) -- which
 * cannot expand and broke any use of this macro. */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
1074 |
|
|
|
1075 |
|
|
/* |
1076 |
|
|
* atomic_dec_and_test - decrement by 1 and test |
1077 |
|
|
* @v: pointer of type atomic_t |
1078 |
|
|
* |
1079 |
|
|
* Atomically decrements @v by 1 and |
1080 |
|
|
* returns true if the result is 0, or false for all other |
1081 |
|
|
* cases. Note that the guaranteed |
1082 |
|
|
* useful range of an atomic_t is only 24 bits. |
1083 |
|
|
*/ |
1084 |
|
|
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) |
1085 |
|
|
|
1086 |
|
|
/* |
1087 |
|
|
* atomic_inc - increment atomic variable |
1088 |
|
|
* @v: pointer of type atomic_t |
1089 |
|
|
* |
1090 |
|
|
* Atomically increments @v by 1. Note that the guaranteed |
1091 |
|
|
* useful range of an atomic_t is only 24 bits. |
1092 |
|
|
*/ |
1093 |
|
|
#define atomic_inc(v) atomic_add(1,(v)) |
1094 |
|
|
|
1095 |
|
|
/* |
1096 |
|
|
* atomic_dec - decrement and test |
1097 |
|
|
* @v: pointer of type atomic_t |
1098 |
|
|
* |
1099 |
|
|
* Atomically decrements @v by 1. Note that the guaranteed |
1100 |
|
|
* useful range of an atomic_t is only 24 bits. |
1101 |
|
|
*/ |
1102 |
|
|
#define atomic_dec(v) atomic_sub(1,(v)) |
1103 |
|
|
|
1104 |
|
|
/* |
1105 |
|
|
* atomic_add_negative - add and test if negative |
1106 |
|
|
* @v: pointer of type atomic_t |
1107 |
|
|
* @i: integer value to add |
1108 |
|
|
* |
1109 |
|
|
* Atomically adds @i to @v and returns true |
1110 |
|
|
* if the result is negative, or false when |
1111 |
|
|
* result is greater than or equal to zero. Note that the guaranteed |
1112 |
|
|
* useful range of an atomic_t is only 24 bits. |
1113 |
|
|
* |
1114 |
|
|
* Currently not implemented for MIPS. |
1115 |
|
|
*/ |
1116 |
|
|
|
1117 |
|
|
/* Atomic operations are already serializing */ |
1118 |
|
|
#define smp_mb__before_atomic_dec() smp_mb() |
1119 |
|
|
#define smp_mb__after_atomic_dec() smp_mb() |
1120 |
|
|
#define smp_mb__before_atomic_inc() smp_mb() |
1121 |
|
|
#define smp_mb__after_atomic_inc() smp_mb() |
1122 |
|
|
|
1123 |
|
|
#endif /* __ASM_ATOMIC_H */ |
1124 |
|
|
|
1125 |
|
|
#else |
1126 |
|
|
|
1127 |
|
|
#if defined(__m68k__) |
1128 |
|
|
|
1129 |
|
|
#ifndef __ARCH_M68K_ATOMIC__ |
1130 |
|
|
#define __ARCH_M68K_ATOMIC__ |
1131 |
|
|
|
1132 |
|
|
/* |
1133 |
|
|
* Atomic operations that C can't guarantee us. Useful for |
1134 |
|
|
* resource counting etc.. |
1135 |
|
|
*/ |
1136 |
|
|
|
1137 |
|
|
/* |
1138 |
|
|
* We do not have SMP m68k systems, so we don't have to deal with that. |
1139 |
|
|
*/ |
1140 |
|
|
|
1141 |
|
|
typedef struct { int counter; } atomic_t; |
1142 |
|
|
#define ATOMIC_INIT(i) { (i) } |
1143 |
|
|
|
1144 |
|
|
#define atomic_read(v) ((v)->counter) |
1145 |
|
|
#define atomic_set(v, i) (((v)->counter) = i) |
1146 |
|
|
|
1147 |
|
|
/* Add @i to *v.  A single memory-destination addl suffices here: the
 * surrounding section assumes uniprocessor m68k (no SMP), so no lock
 * prefix or ll/sc loop is needed. */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}
1151 |
|
|
|
1152 |
|
|
/* Subtract @i from *v with a single memory-destination subl
 * (uniprocessor m68k -- see the note above this section). */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}
1156 |
|
|
|
1157 |
|
|
/* Increment *v by 1 using addq (add-quick immediate) on memory. */
static __inline__ void atomic_inc(volatile atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "=m" (*v): "0" (*v));
}
1161 |
|
|
|
1162 |
|
|
/* Decrement *v by 1 using subq (subtract-quick immediate) on memory. */
static __inline__ void atomic_dec(volatile atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "=m" (*v): "0" (*v));
}
1166 |
|
|
|
1167 |
|
|
/* Decrement *v and report whether it reached zero: subql updates the
 * counter, then seq sets @c to all-ones if the result was zero (Z flag
 * set) and to zero otherwise. */
static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "=m" (*v): "1" (*v));
	return c != 0;
}
1173 |
|
|
|
1174 |
|
|
#define atomic_clear_mask(mask, v) \ |
1175 |
|
|
__asm__ __volatile__("andl %1,%0" : "=m" (*v) : "id" (~(mask)),"0"(*v)) |
1176 |
|
|
|
1177 |
|
|
#define atomic_set_mask(mask, v) \ |
1178 |
|
|
__asm__ __volatile__("orl %1,%0" : "=m" (*v) : "id" (mask),"0"(*v)) |
1179 |
|
|
|
1180 |
|
|
/* Atomic operations are already serializing */ |
1181 |
|
|
#define smp_mb__before_atomic_dec() barrier() |
1182 |
|
|
#define smp_mb__after_atomic_dec() barrier() |
1183 |
|
|
#define smp_mb__before_atomic_inc() barrier() |
1184 |
|
|
#define smp_mb__after_atomic_inc() barrier() |
1185 |
|
|
|
1186 |
|
|
#endif /* __ARCH_M68K_ATOMIC __ */ |
1187 |
|
|
|
1188 |
|
|
#else |
1189 |
|
|
|
1190 |
|
|
#warning libs/pbd has no implementation of strictly atomic operations for your hardware. |
1191 |
|
|
|
1192 |
|
|
#define __NO_STRICT_ATOMIC |
1193 |
|
|
#ifdef __NO_STRICT_ATOMIC |
1194 |
schoenebeck |
1879 |
|
1195 |
|
|
/* |
1196 |
schoenebeck |
53 |
* Because the implementations from the kernel (where all these come |
1197 |
|
|
* from) use cli and spinlocks for hppa and arm... |
1198 |
|
|
*/ |
1199 |
|
|
|
1200 |
|
|
/* Plain-C fallback counter type.  NOTE: these operations are NOT truly
 * atomic -- they are ordinary volatile reads and writes (see the
 * #warning emitted above for unsupported architectures). */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)	((v)->counter)
#define atomic_set(v,i)	((v)->counter = (i))

/* Bump the counter by one (plain increment, no hardware atomicity). */
static __inline__ void atomic_inc(atomic_t *v)
{
	++v->counter;
}

/* Drop the counter by one (plain decrement, no hardware atomicity). */
static __inline__ void atomic_dec(atomic_t *v)
{
	--v->counter;
}

/* Decrement the counter and report whether it reached zero. */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	int remaining;

	v->counter--;
	remaining = v->counter;
	return remaining == 0;
}

/* Increment the counter and report whether it wrapped to zero. */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	int remaining;

	v->counter++;
	remaining = v->counter;
	return remaining == 0;
}
1232 |
|
|
|
1233 |
|
|
# endif /* __NO_STRICT_ATOMIC */ |
1234 |
|
|
# endif /* m68k */ |
1235 |
|
|
# endif /* mips */ |
1236 |
|
|
# endif /* s390 */ |
1237 |
|
|
# endif /* alpha */ |
1238 |
|
|
# endif /* ia64 */ |
1239 |
|
|
# endif /* sparc */ |
1240 |
|
|
# endif /* i386 */ |
1241 |
|
|
# endif /* ppc */ |
1242 |
|
|
|
1243 |
|
|
/***********************************************************************/ |
1244 |
|
|
|
1245 |
|
|
#else /* !linux */ |
1246 |
|
|
|
1247 |
|
|
/* Non-Linux fallback: the counter is just an unsigned long. */
typedef unsigned long atomic_t;

/* IRIX's headers may define atomic_set as a macro; drop it so our
 * function below takes effect. */
#if defined(__sgi)
#undef atomic_set
#endif

/* Store @v into *a.  On IRIX with the MIPSpro compiler this goes through
 * the __lock_test_and_set intrinsic (presumably an atomic exchange --
 * verify against the MIPSpro docs); elsewhere it is a plain store. */
inline
void
atomic_set (atomic_t * a, int v) {
#if defined(__sgi) && !defined(__GNUC__)
	__lock_test_and_set(a, v);
#else
	*a=v;
#endif
}
1262 |
|
|
|
1263 |
|
|
/* Return the current counter value as an int.
 * NOTE(review): atomic_t is unsigned long here, so this narrows on
 * LP64 platforms -- confirm callers only use the low 31 bits. */
inline
int
atomic_read (const atomic_t * a) {
	return *a;
}
1268 |
|
|
|
1269 |
|
|
/* Increment *a by one.  On IRIX/MIPSpro this uses the __add_and_fetch
 * intrinsic (presumably atomic -- verify against the MIPSpro docs);
 * elsewhere it is a plain, non-atomic increment. */
inline
void
atomic_inc (atomic_t * a) {
#if defined(__sgi) && !defined(__GNUC__)
	__add_and_fetch(a, 1);
#else
	++(*a);
#endif
}
1278 |
|
|
|
1279 |
|
|
/* Decrement *a by one.  On IRIX/MIPSpro this uses the __sub_and_fetch
 * intrinsic (presumably atomic -- verify against the MIPSpro docs);
 * elsewhere it is a plain, non-atomic decrement. */
inline
void
atomic_dec (atomic_t * a) {
#if defined(__sgi) && !defined(__GNUC__)
	__sub_and_fetch(a, 1);
#else
	--(*a);
#endif
}
1288 |
|
|
|
1289 |
|
|
#endif /* linux */ |
1290 |
|
|
#endif /* __linuxsampler_atomic_h__ */ |