/*
    Copyright (C) 2001 Paul Davis and others (see below)
    Code derived from various headers from the Linux kernel.
    Copyright attributions maintained where present.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    $Id: atomic.h,v 1.6 2008-09-06 16:44:42 persson Exp $
*/

// TODO: should we put this into a namespace? It might clash with a system-installed atomic.h, since we need to install atomic.h for the LS API.

#ifndef __linuxsampler_atomic_h__
#define __linuxsampler_atomic_h__

// needed to automatically include config.h
#include "global.h"

#ifdef HAVE_SMP      /* a macro we control, to manage ... */
#define CONFIG_SMP   /* ... the macro the kernel headers use */
#endif

#if defined(linux) || defined(WIN32)
#ifdef __powerpc__

/*
 * BK Id: SCCS/s.atomic.h 1.15 10/28/01 10:37:22 trini
 */
/*
 * PowerPC atomic operations
 */

#ifndef _ASM_PPC_ATOMIC_H_
#define _ASM_PPC_ATOMIC_H_

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)   ((v)->counter)
#define atomic_set(v,i)  (((v)->counter) = (i))

extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
extern void atomic_set_mask(unsigned long mask, unsigned long *addr);

#ifdef CONFIG_SMP
#define SMP_ISYNC       "\n\tisync"
#else
#define SMP_ISYNC
#endif
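
/* SMP_ISYNC appends an "isync" to the *_return operations below so that, on
 * SMP builds, they act as acquire barriers after the successful stwcx.;
 * on uniprocessor builds it expands to nothing. */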

static __inline__ void atomic_add(int a, atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_add\n\
        add     %0,%2,%0\n\
        stwcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (a), "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_add_return\n\
        add     %0,%1,%0\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        SMP_ISYNC
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

    return t;
}

static __inline__ void atomic_sub(int a, atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_sub\n\
        subf    %0,%2,%0\n\
        stwcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (a), "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_sub_return\n\
        subf    %0,%1,%0\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        SMP_ISYNC
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

    return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_inc\n\
        addic   %0,%0,1\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # atomic_inc_return\n\
        addic   %0,%0,1\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        SMP_ISYNC
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

    return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_dec\n\
        addic   %0,%0,-1\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # atomic_dec_return\n\
        addic   %0,%0,-1\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        SMP_ISYNC
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

    return t;
}

#define atomic_sub_and_test(a, v)  (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)     (atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # atomic_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        SMP_ISYNC
        "\n\
2:"     : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

    return t;
}
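
/* Illustrative use (a sketch, not part of the original header): since
 * atomic_dec_if_positive() only stores when the counter is positive and
 * returns the old value minus 1, it can implement a lock-free try-acquire
 * on a counting resource.  The names below are hypothetical:
 *
 *     static atomic_t free_slots = ATOMIC_INIT(4);
 *
 *     static int try_acquire_slot(void)
 *     {
 *         // a negative result means the counter was 0 and nothing was taken
 *         return atomic_dec_if_positive(&free_slots) >= 0;
 *     }
 */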

#define smp_mb__before_atomic_dec()  smp_mb()
#define smp_mb__after_atomic_dec()   smp_mb()
#define smp_mb__before_atomic_inc()  smp_mb()
#define smp_mb__after_atomic_inc()   smp_mb()

#endif /* _ASM_PPC_ATOMIC_H_ */

/***********************************************************************/

# else /* !PPC */

#if defined(__i386__) || defined(__x86_64__)

#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#ifdef CONFIG_SMP
#define SMP_LOCK "lock ; "
#else
#define SMP_LOCK ""
#endif
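
/* SMP_LOCK prefixes each read-modify-write instruction below with the x86
 * "lock" prefix on SMP builds, making it atomic across processors; on
 * uniprocessor builds the bare instruction is already atomic with respect
 * to anything running on the same CPU. */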

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)  ((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)  (((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
    __asm__ __volatile__(
        SMP_LOCK "addl %1,%0"
        : "=m" (v->counter)
        : "ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
    __asm__ __volatile__(
        SMP_LOCK "subl %1,%0"
        : "=m" (v->counter)
        : "ir" (i), "m" (v->counter));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
    unsigned char c;

    __asm__ __volatile__(
        SMP_LOCK "subl %2,%0; sete %1"
        : "=m" (v->counter), "=qm" (c)
        : "ir" (i), "m" (v->counter) : "memory");
    return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
    __asm__ __volatile__(
        SMP_LOCK "incl %0"
        : "=m" (v->counter)
        : "m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
    __asm__ __volatile__(
        SMP_LOCK "decl %0"
        : "=m" (v->counter)
        : "m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
    unsigned char c;

    __asm__ __volatile__(
        SMP_LOCK "decl %0; sete %1"
        : "=m" (v->counter), "=qm" (c)
        : "m" (v->counter) : "memory");
    return c != 0;
}
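
/* Illustrative use (a sketch, not part of the original header): the classic
 * consumer of atomic_dec_and_test() is reference counting, where only the
 * caller that drops the count to zero releases the object.  The names below
 * are hypothetical:
 *
 *     struct object { atomic_t refcnt; };
 *
 *     void object_put(struct object *obj)
 *     {
 *         if (atomic_dec_and_test(&obj->refcnt))
 *             free(obj);  // we dropped the last reference
 *     }
 */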

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
    unsigned char c;

    __asm__ __volatile__(
        SMP_LOCK "incl %0; sete %1"
        : "=m" (v->counter), "=qm" (c)
        : "m" (v->counter) : "memory");
    return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
    unsigned char c;

    __asm__ __volatile__(
        SMP_LOCK "addl %2,%0; sets %1"
        : "=m" (v->counter), "=qm" (c)
        : "ir" (i), "m" (v->counter) : "memory");
    return c;
}

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
    __asm__ __volatile__(SMP_LOCK "andl %0,%1" \
        : : "r" (~(mask)), "m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
    __asm__ __volatile__(SMP_LOCK "orl %0,%1" \
        : : "r" (mask), "m" (*addr) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()  barrier()
#define smp_mb__after_atomic_dec()   barrier()
#define smp_mb__before_atomic_inc()  barrier()
#define smp_mb__after_atomic_inc()   barrier()

#endif /* __ARCH_I386_ATOMIC__ */

/***********************************************************************/

#else /* !PPC && !i386 */

#ifdef __sparc__

/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

typedef struct { volatile int counter; } atomic_t;

#ifndef CONFIG_SMP

#define ATOMIC_INIT(i)   { (i) }
#define atomic_read(v)   ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *     ----------------------------------------
 *     | signed 24-bit counter value |  lock  |  atomic_t
 *     ----------------------------------------
 *      31                          8 7      0
 */
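
/* Worked example of the layout above (for illustration): a counter value of 5
 * is stored as 5 << 8 == 0x500.  While another CPU holds the embedded lock the
 * low byte is non-zero, so atomic_read() below spins until (counter & 0xff)
 * clears, then recovers the value with an arithmetic shift: 0x500 >> 8 == 5.
 * The arithmetic shift preserves the sign, so negative counters survive the
 * round trip as well. */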

#define ATOMIC_INIT(i)  { (i << 8) }

static __inline__ int atomic_read(atomic_t *v)
{
    int ret = v->counter;

    while (ret & 0xff)
        ret = v->counter;

    return ret >> 8;
}

#define atomic_set(v, i) (((v)->counter) = ((i) << 8))
#endif

static __inline__ int __atomic_add(int i, atomic_t *v)
{
    register volatile int *ptr asm("g1");
    register int increment asm("g2");

    ptr = &v->counter;
    increment = i;

    __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"
        "call ___atomic_add\n\t"
        " add %%o7, 8, %%o7\n"
        : "=&r" (increment)
        : "0" (increment), "r" (ptr)
        : "g3", "g4", "g7", "memory", "cc");

    return increment;
}

static __inline__ int __atomic_sub(int i, atomic_t *v)
{
    register volatile int *ptr asm("g1");
    register int increment asm("g2");

    ptr = &v->counter;
    increment = i;

    __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"
        "call ___atomic_sub\n\t"
        " add %%o7, 8, %%o7\n"
        : "=&r" (increment)
        : "0" (increment), "r" (ptr)
        : "g3", "g4", "g7", "memory", "cc");

    return increment;
}

#define atomic_add(i, v) ((void)__atomic_add((i), (v)))
#define atomic_sub(i, v) ((void)__atomic_sub((i), (v)))

#define atomic_dec_return(v) __atomic_sub(1, (v))
#define atomic_inc_return(v) __atomic_add(1, (v))

#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0)
#define atomic_dec_and_test(v)    (__atomic_sub(1, (v)) == 0)

#define atomic_inc(v) ((void)__atomic_add(1, (v)))
#define atomic_dec(v) ((void)__atomic_sub(1, (v)))

#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()  barrier()
#define smp_mb__after_atomic_dec()   barrier()
#define smp_mb__before_atomic_inc()  barrier()
#define smp_mb__after_atomic_inc()   barrier()

#endif /* !(__ARCH_SPARC_ATOMIC__) */

/***********************************************************************/

#else

#ifdef __ia64__

#ifndef __ARCH_IA64_ATOMIC__
#define __ARCH_IA64_ATOMIC__

typedef volatile int atomic_t;

inline
int
atomic_read (const atomic_t * a)
{
    return *a;
}

inline
void
atomic_set(atomic_t *a, int v)
{
    *a = v;
}

inline
void
atomic_inc (atomic_t *v)
{
    int old, r;

    do {
        old = atomic_read(v);
        __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
        __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
                              : "=r"(r) : "r"(v), "r"(old + 1)
                              : "memory");
    } while (r != old);
}

inline
void
atomic_dec (atomic_t *v)
{
    int old, r;

    do {
        old = atomic_read(v);
        __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
        __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
                              : "=r"(r) : "r"(v), "r"(old - 1)
                              : "memory");
    } while (r != old);
}

inline
int
atomic_dec_and_test (atomic_t *v)
{
    int old, r;

    do {
        old = atomic_read(v);
        __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
        __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
                              : "=r"(r) : "r"(v), "r"(old - 1)
                              : "memory");
    } while (r != old);
    return old == 1;  /* the decremented result is zero iff the old value was 1 */
}

#endif /* !(__ARCH_IA64_ATOMIC__) */

#else

#ifdef __alpha__

#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us.  We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  ( (atomic_t) { (i) } )

#define atomic_read(v)   ((v)->counter)
#define atomic_set(v,i)  ((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
    unsigned long temp;
    __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (temp), "=m" (v->counter)
        : "Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
    unsigned long temp;
    __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (temp), "=m" (v->counter)
        : "Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
    long temp, result;
    __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%3,%2\n"
        "       addl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (temp), "=m" (v->counter), "=&r" (result)
        : "Ir" (i), "m" (v->counter) : "memory");
    return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
    long temp, result;
    __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%3,%2\n"
        "       subl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (temp), "=m" (v->counter), "=&r" (result)
        : "Ir" (i), "m" (v->counter) : "memory");
    return result;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)   (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

#define smp_mb__before_atomic_dec()  smp_mb()
#define smp_mb__after_atomic_dec()   smp_mb()
#define smp_mb__before_atomic_inc()  smp_mb()
#define smp_mb__after_atomic_inc()   smp_mb()

#endif /* _ALPHA_ATOMIC_H */

#else

#ifdef __s390__

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * include/asm-s390/atomic.h
 *
 * S390 version
 *   Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *              Denis Joseph Barrow
 *
 * Derived from "include/asm-i386/bitops.h"
 *   Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */

typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)  { (i) }

#define atomic_eieio()  __asm__ __volatile__ ("BCR 15,0")

#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string) \
    __asm__ __volatile__("   l     %0,0(%2)\n" \
                         "0: lr    %1,%0\n" \
                         op_string "    %1,%3\n" \
                         "   cs    %0,%1,0(%2)\n" \
                         "   jl    0b" \
                         : "=&d" (old_val), "=&d" (new_val) \
                         : "a" (ptr), "d" (op_val) : "cc" );

#define atomic_read(v)   ((v)->counter)
#define atomic_set(v,i)  (((v)->counter) = (i))

static __inline__ void atomic_add(int i, atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, i, "ar");
}

static __inline__ int atomic_add_return (int i, atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, i, "ar");
    return new_val;
}

static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, i, "ar");
    return new_val < 0;
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, i, "sr");
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, 1, "ar");
}

static __inline__ int atomic_inc_return(volatile atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, 1, "ar");
    return new_val;
}

static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, 1, "ar");
    return new_val == 0;  /* true if the incremented counter reached zero */
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, 1, "sr");
}

static __inline__ int atomic_dec_return(volatile atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, 1, "sr");
    return new_val;
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, 1, "sr");
    return new_val == 0;
}

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
{
    int old_val, new_val;
    __CS_LOOP(old_val, new_val, v, mask, "or");
}

/*
 * returns 0 if expected_oldval==value in *v ( swap was successful )
 * returns 1 if unsuccessful.
 */
static __inline__ int
atomic_compare_and_swap(int expected_oldval, int new_val, atomic_t *v)
{
    int retval;

    __asm__ __volatile__(
        "  lr   0,%2\n"
        "  cs   0,%3,0(%1)\n"
        "  ipm  %0\n"
        "  srl  %0,28\n"
        "0:"
        : "=&d" (retval)
        : "a" (v), "d" (expected_oldval), "d" (new_val)
        : "0", "cc");
    return retval;
}
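
/* Illustrative use (a sketch, not part of the original header): a lock-free
 * read-modify-write loop built on atomic_compare_and_swap(), which returns 0
 * on success.  transform() is a hypothetical pure function:
 *
 *     int old_val, new_val;
 *     do {
 *         old_val = atomic_read(v);
 *         new_val = transform(old_val);
 *     } while (atomic_compare_and_swap(old_val, new_val, v) != 0);
 */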

/*
 * Spin till *v = expected_oldval then swap with newval.
 */
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval, int new_val, atomic_t *v)
{
    __asm__ __volatile__(
        "0: lr   0,%1\n"
        "   cs   0,%2,0(%0)\n"
        "   jl   0b\n"
        : : "a" (v), "d" (expected_oldval), "d" (new_val)
        : "cc", "0" );
}

#define smp_mb__before_atomic_dec()  smp_mb()
#define smp_mb__after_atomic_dec()   smp_mb()
#define smp_mb__before_atomic_inc()  smp_mb()
#define smp_mb__after_atomic_inc()   smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */

#else

#ifdef __mips__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 2000 by Ralf Baechle
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)  ((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i) ((v)->counter = (i))

/*
 * ... while for MIPS II and better we can use ll/sc instruction.  This
 * implementation is SMP safe ...
 */

/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_add(int i, atomic_t * v)
{
    unsigned long temp;

    __asm__ __volatile__(
        ".set push               # atomic_add\n"
        ".set mips2              \n"
        "1:  ll   %0, %1         \n"
        "    addu %0, %2         \n"
        "    sc   %0, %1         \n"
        "    beqz %0, 1b         \n"
        ".set pop                \n"
        : "=&r" (temp), "=m" (v->counter)
        : "Ir" (i), "m" (v->counter));
}

/*
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_sub(int i, atomic_t * v)
{
    unsigned long temp;

    __asm__ __volatile__(
        ".set push               # atomic_sub\n"
        ".set mips2              \n"
        "1:  ll   %0, %1         \n"
        "    subu %0, %2         \n"
        "    sc   %0, %1         \n"
        "    beqz %0, 1b         \n"
        ".set pop                \n"
        : "=&r" (temp), "=m" (v->counter)
        : "Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
    unsigned long temp, result;

    __asm__ __volatile__(
        ".set push               # atomic_add_return\n"
        ".set mips2              \n"
        ".set noreorder          \n"
        "1:  ll   %1, %2         \n"
        "    addu %0, %1, %3     \n"
        "    sc   %0, %2         \n"
        "    beqz %0, 1b         \n"
        "    addu %0, %1, %3     \n"
        "    sync                \n"
        ".set pop                \n"
        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
        : "Ir" (i), "m" (v->counter)
        : "memory");

    return result;
}

extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
    unsigned long temp, result;

    __asm__ __volatile__(
        ".set push               # atomic_sub_return\n"
        ".set mips2              \n"
        ".set noreorder          \n"
        "1:  ll   %1, %2         \n"
        "    subu %0, %1, %3     \n"
        "    sc   %0, %2         \n"
        "    beqz %0, 1b         \n"
        "    subu %0, %1, %3     \n"
        "    sync                \n"
        ".set pop                \n"
        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
        : "Ir" (i), "m" (v->counter)
        : "memory");

    return result;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_inc(v) atomic_add(1,(v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * Currently not implemented for MIPS.
 */

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()  smp_mb()
#define smp_mb__after_atomic_dec()   smp_mb()
#define smp_mb__before_atomic_inc()  smp_mb()
#define smp_mb__after_atomic_inc()   smp_mb()

#endif /* __ASM_ATOMIC_H */

#else

#if defined(__m68k__)

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)   ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)

static __inline__ void atomic_add(int i, atomic_t *v)
{
    __asm__ __volatile__("addl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
    __asm__ __volatile__("subl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
    __asm__ __volatile__("addql #1,%0" : "=m" (*v) : "0" (*v));
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
    __asm__ __volatile__("subql #1,%0" : "=m" (*v) : "0" (*v));
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
    char c;
    __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "=m" (*v) : "1" (*v));
    return c != 0;
}

#define atomic_clear_mask(mask, v) \
    __asm__ __volatile__("andl %1,%0" : "=m" (*v) : "id" (~(mask)), "0" (*v))

#define atomic_set_mask(mask, v) \
    __asm__ __volatile__("orl %1,%0" : "=m" (*v) : "id" (mask), "0" (*v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()  barrier()
#define smp_mb__after_atomic_dec()   barrier()
#define smp_mb__before_atomic_inc()  barrier()
#define smp_mb__after_atomic_inc()   barrier()

#endif /* __ARCH_M68K_ATOMIC__ */

#else

#warning libs/pbd has no implementation of strictly atomic operations for your hardware.

#define __NO_STRICT_ATOMIC
#ifdef __NO_STRICT_ATOMIC

/*
 * Because the implementations from the kernel (where all these come
 * from) use cli and spinlocks for hppa and arm...
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  ( (atomic_t) { (i) } )

#define atomic_read(v)   ((v)->counter)
#define atomic_set(v,i)  ((v)->counter = (i))

static __inline__ void atomic_inc(atomic_t *v)
{
    v->counter++;
}

static __inline__ void atomic_dec(atomic_t *v)
{
    v->counter--;
}

static __inline__ int atomic_dec_and_test(atomic_t *v)
{
    int res;
    v->counter--;
    res = v->counter;
    return res == 0;
}

static __inline__ int atomic_inc_and_test(atomic_t *v)
{
    int res;
    v->counter++;
    res = v->counter;
    return res == 0;
}
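
/* A strictly atomic fallback would be possible on compilers that provide the
 * GCC __sync builtins (a sketch, assuming GCC >= 4.1; not what this code
 * currently does):
 *
 *     static __inline__ int atomic_dec_and_test(atomic_t *v)
 *     {
 *         return __sync_sub_and_fetch(&v->counter, 1) == 0;
 *     }
 */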

# endif /* __NO_STRICT_ATOMIC */
# endif /* m68k */
# endif /* mips */
# endif /* s390 */
# endif /* alpha */
# endif /* ia64 */
# endif /* sparc */
# endif /* i386 */
# endif /* ppc */

/***********************************************************************/

#else /* !linux */

#if defined(__APPLE__)

typedef unsigned long atomic_t;

#define ATOMIC_INIT(i)   { (i) }
#define atomic_set(a, v) (*(a) = (v))
#define atomic_read(a)   (*(a))

/* TODO: should use atomic routines in CoreServices.framework */
#define atomic_inc(a) (++(*a))
#define atomic_dec(a) (--(*a))
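
/* A sketch of what the TODO above might look like; the TODO mentions
 * CoreServices.framework, and equivalent calls also live in
 * <libkern/OSAtomic.h> on Mac OS X 10.4+ (an assumption -- this is not what
 * the code currently does):
 *
 *     #include <libkern/OSAtomic.h>
 *     typedef volatile int32_t atomic_t;
 *     #define atomic_inc(a) OSAtomicIncrement32Barrier(a)
 *     #define atomic_dec(a) OSAtomicDecrement32Barrier(a)
 */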
1252 |
|
1253 |
#else |
1254 |
|
1255 |
typedef unsigned long atomic_t; |
1256 |
|
1257 |
#if defined(__sgi) |
1258 |
#undef atomic_set |
1259 |
#endif |
1260 |
|
1261 |
inline |
1262 |
void |
1263 |
atomic_set (atomic_t * a, int v) { |
1264 |
#if defined(__sgi) && !defined(__GNUC__) |
1265 |
__lock_test_and_set(a, v); |
1266 |
#else |
1267 |
*a=v; |
1268 |
#endif |
1269 |
} |
1270 |
|
1271 |
inline |
1272 |
int |
1273 |
atomic_read (const atomic_t * a) { |
1274 |
return *a; |
1275 |
} |
1276 |
|
1277 |
inline |
1278 |
void |
1279 |
atomic_inc (atomic_t * a) { |
1280 |
#if defined(__sgi) && !defined(__GNUC__) |
1281 |
__add_and_fetch(a, 1); |
1282 |
#else |
1283 |
++(*a); |
1284 |
#endif |
1285 |
} |
1286 |
|
1287 |
inline |
1288 |
void |
1289 |
atomic_dec (atomic_t * a) { |
1290 |
#if defined(__sgi) && !defined(__GNUC__) |
1291 |
__sub_and_fetch(a, 1); |
1292 |
#else |
1293 |
--(*a); |
1294 |
#endif |
1295 |
} |
1296 |
|
1297 |
#endif /* __APPLE__ */ |
1298 |
#endif /* linux */ |
1299 |
#endif /* __linuxsampler_atomic_h__ */ |