/*
    Copyright (C) 2001 Paul Davis and others (see below)
    Code derived from various headers from the Linux kernel.
       Copyright attributions maintained where present.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    $Id: atomic.h,v 1.2 2006-06-27 22:57:36 schoenebeck Exp $
*/

//TODO: should we put this into a namespace? It might clash with a system-installed atomic.h, because we need to install atomic.h for the LS API.

#ifndef __linuxsampler_atomic_h__
#define __linuxsampler_atomic_h__

#ifdef HAVE_CONFIG_H
#include <config.h>  /* config.h c/o auto* tools, wherever it may be */
#endif

#ifdef HAVE_SMP      /* a macro we control, to manage ... */
#define CONFIG_SMP   /* ... the macro the kernel headers use */
#endif
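
/*
 * Typical usage sketch (illustrative only; the voice-counter names below are
 * made up, and it assumes an architecture branch of this header that provides
 * ATOMIC_INIT() and atomic_dec_and_test() -- the generic non-linux fallback
 * at the end of the file does not define them):
 *
 *     static atomic_t active_voices = ATOMIC_INIT(0);
 *
 *     void voice_start(void) {
 *         atomic_inc(&active_voices);             // lock-free increment
 *     }
 *
 *     void voice_stop(void) {
 *         if (atomic_dec_and_test(&active_voices)) {
 *             // counter just reached zero: the last voice has finished
 *         }
 *     }
 *
 *     int voices_running(void) {
 *         return atomic_read(&active_voices);     // plain, unsynchronized read
 *     }
 */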

#ifdef linux
#ifdef __powerpc__

/*
 * BK Id: SCCS/s.atomic.h 1.15 10/28/01 10:37:22 trini
 */
/*
 * PowerPC atomic operations
 */

#ifndef _ASM_PPC_ATOMIC_H_
#define _ASM_PPC_ATOMIC_H_

typedef struct { volatile int counter; } atomic_t;


#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         (((v)->counter) = (i))

extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
extern void atomic_set_mask(unsigned long mask, unsigned long *addr);

#ifdef CONFIG_SMP
#define SMP_ISYNC       "\n\tisync"
#else
#define SMP_ISYNC
#endif

static __inline__ void atomic_add(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_add\n\
        add     %0,%2,%0\n\
        stwcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (a), "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_add_return\n\
        add     %0,%1,%0\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        SMP_ISYNC
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

        return t;
}

static __inline__ void atomic_sub(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_sub\n\
        subf    %0,%2,%0\n\
        stwcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (a), "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_sub_return\n\
        subf    %0,%1,%0\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        SMP_ISYNC
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

        return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_inc\n\
        addic   %0,%0,1\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # atomic_inc_return\n\
        addic   %0,%0,1\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        SMP_ISYNC
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_dec\n\
        addic   %0,%0,-1\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # atomic_dec_return\n\
        addic   %0,%0,-1\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        SMP_ISYNC
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}

#define atomic_sub_and_test(a, v)       (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # atomic_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        SMP_ISYNC
        "\n\
2:"     : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#endif /* _ASM_PPC_ATOMIC_H_ */

/***********************************************************************/

# else  /* !PPC */

#if defined(__i386__) || defined(__x86_64__)

#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#ifdef CONFIG_SMP
#define SMP_LOCK "lock ; "
#else
#define SMP_LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)          ((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)         (((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__(
                SMP_LOCK "addl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__(
                SMP_LOCK "subl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                SMP_LOCK "subl %2,%0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                SMP_LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__(
                SMP_LOCK "decl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                SMP_LOCK "decl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                SMP_LOCK "incl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                SMP_LOCK "addl %2,%0; sets %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
        __asm__ __volatile__(SMP_LOCK "andl %0,%1" \
        : : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
        __asm__ __volatile__(SMP_LOCK "orl %0,%1" \
        : : "r" (mask),"m" (*addr) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif /* __ARCH_I386_ATOMIC__ */

/***********************************************************************/

#else /* !PPC && !i386 */

#ifdef __sparc__

/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

typedef struct { volatile int counter; } atomic_t;

#ifndef CONFIG_SMP

#define ATOMIC_INIT(i)    { (i) }
#define atomic_read(v)    ((v)->counter)
#define atomic_set(v, i)  (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *      ----------------------------------------
 *      | signed 24-bit counter value |  lock  |  atomic_t
 *      ----------------------------------------
 *       31                          8 7      0
 */

#define ATOMIC_INIT(i)    { (i << 8) }

static __inline__ int atomic_read(atomic_t *v)
{
        int ret = v->counter;

        while(ret & 0xff)
                ret = v->counter;

        return ret >> 8;
}

#define atomic_set(v, i)  (((v)->counter) = ((i) << 8))
#endif
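
/*
 * Illustrative example of the SMP encoding above (numbers are made up):
 * atomic_set(v, 5) stores 5 << 8 = 0x500, i.e. the counter lives in the
 * upper 24 bits and the low "lock" byte is 0.  While the out-of-line
 * routines hold the lock the low byte is non-zero, so atomic_read()
 * spins until it sees the byte clear and then returns 0x500 >> 8 = 5.
 * The arithmetic right shift also restores negative counters, e.g. -1
 * is stored as 0xffffff00.
 */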

static __inline__ int __atomic_add(int i, atomic_t *v)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");

        ptr = &v->counter;
        increment = i;

        __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"
        "call ___atomic_add\n\t"
        " add %%o7, 8, %%o7\n"
        : "=&r" (increment)
        : "0" (increment), "r" (ptr)
        : "g3", "g4", "g7", "memory", "cc");

        return increment;
}

static __inline__ int __atomic_sub(int i, atomic_t *v)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");

        ptr = &v->counter;
        increment = i;

        __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"
        "call ___atomic_sub\n\t"
        " add %%o7, 8, %%o7\n"
        : "=&r" (increment)
        : "0" (increment), "r" (ptr)
        : "g3", "g4", "g7", "memory", "cc");

        return increment;
}

#define atomic_add(i, v) ((void)__atomic_add((i), (v)))
#define atomic_sub(i, v) ((void)__atomic_sub((i), (v)))

#define atomic_dec_return(v) __atomic_sub(1, (v))
#define atomic_inc_return(v) __atomic_add(1, (v))

#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0)
#define atomic_dec_and_test(v) (__atomic_sub(1, (v)) == 0)

#define atomic_inc(v) ((void)__atomic_add(1, (v)))
#define atomic_dec(v) ((void)__atomic_sub(1, (v)))

#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()


#endif /* !(__ARCH_SPARC_ATOMIC__) */

/***********************************************************************/

#else

#ifdef __ia64__

#ifndef __ARCH_IA64_ATOMIC__
#define __ARCH_IA64_ATOMIC__

typedef volatile int atomic_t;

inline
int
atomic_read (const atomic_t * a)
{
        return *a;
}

inline
void
atomic_set(atomic_t *a, int v)
{
        *a = v;
}

inline
void
atomic_inc (atomic_t *v)
{
        int old, r;

        do {
                old = atomic_read(v);
                __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
                __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
                                      : "=r"(r) : "r"(v), "r"(old + 1)
                                      : "memory");
        } while (r != old);
}

inline
void
atomic_dec (atomic_t *v)
{
        int old, r;

        do {
                old = atomic_read(v);
                __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
                __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
                                      : "=r"(r) : "r"(v), "r"(old - 1)
                                      : "memory");
        } while (r != old);
}

inline
int
atomic_dec_and_test (atomic_t *v)
{
        int old, r;

        do {
                old = atomic_read(v);
                __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old));
                __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv"
                                      : "=r"(r) : "r"(v), "r"(old - 1)
                                      : "memory");
        } while (r != old);
        return old != 1;
}

#endif /* !(__ARCH_IA64_ATOMIC__) */

#else

#ifdef __alpha__

#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  ( (atomic_t) { (i) } )

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         ((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
        long temp, result;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%3,%2\n"
        "       addl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
        long temp, result;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%3,%2\n"
        "       subl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        return result;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#endif /* _ALPHA_ATOMIC_H */

#else

#ifdef __s390__

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */

typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)  { (i) }

#define atomic_eieio()  __asm__ __volatile__ ("BCR 15,0")

#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string)             \
        __asm__ __volatile__("   l     %0,0(%2)\n"                      \
                             "0: lr    %1,%0\n"                         \
                             op_string "    %1,%3\n"                    \
                             "   cs    %0,%1,0(%2)\n"                   \
                             "   jl    0b"                              \
                             : "=&d" (old_val), "=&d" (new_val)         \
                             : "a" (ptr), "d" (op_val) : "cc" );
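
/*
 * Rough C-level sketch of what __CS_LOOP does (illustrative only;
 * "compare_and_swap" below stands for the 'cs' instruction and is not
 * a real function in this header):
 *
 *     old_val = ptr->counter;
 *     do {
 *         new_val = old_val OP op_val;   // OP is op_string: ar/sr/nr/or
 *     } while (!compare_and_swap(&ptr->counter, &old_val, new_val));
 *     // on a failed 'cs' the current memory value is reloaded into
 *     // old_val and the loop retries with a freshly computed new_val
 */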

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         (((v)->counter) = (i))

static __inline__ void atomic_add(int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "ar");
}

static __inline__ int atomic_add_return (int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "ar");
        return new_val;
}

static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "ar");
        return new_val < 0;
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, i, "sr");
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "ar");
}

static __inline__ int atomic_inc_return(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "ar");
        return new_val;
}

static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "ar");
        return new_val != 0;
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "sr");
}

static __inline__ int atomic_dec_return(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "sr");
        return new_val;
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, 1, "sr");
        return new_val == 0;
}

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
{
        int old_val, new_val;
        __CS_LOOP(old_val, new_val, v, mask, "or");
}

/*
 * Returns 0 if expected_oldval == *v (the swap was successful),
 * 1 if it was unsuccessful.
 */
static __inline__ int
atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
{
        int retval;

        __asm__ __volatile__(
                "  lr   0,%2\n"
                "  cs   0,%3,0(%1)\n"
                "  ipm  %0\n"
                "  srl  %0,28\n"
                "0:"
                : "=&d" (retval)
                : "a" (v), "d" (expected_oldval) , "d" (new_val)
                : "0", "cc");
        return retval;
}

/*
 * Spin until *v == expected_oldval, then swap in new_val.
 */
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval,int new_val,atomic_t *v)
{
        __asm__ __volatile__(
                "0: lr  0,%1\n"
                "   cs  0,%2,0(%0)\n"
                "   jl  0b\n"
                : : "a" (v), "d" (expected_oldval) , "d" (new_val)
                : "cc", "0" );
}

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */

#else

#ifdef __mips__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 2000 by Ralf Baechle
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)    { (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)  ((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i) ((v)->counter = (i))

/*
 * ... while for MIPS II and better we can use ll/sc instruction.  This
 * implementation is SMP safe ...
 */

/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_add(int i, atomic_t * v)
{
        unsigned long temp;

        __asm__ __volatile__(
                ".set push               # atomic_add\n"
                ".set mips2              \n"
                "1:   ll      %0, %1     \n"
                "     addu    %0, %2     \n"
                "     sc      %0, %1     \n"
                "     beqz    %0, 1b     \n"
                ".set pop                \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
}

/*
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_sub(int i, atomic_t * v)
{
        unsigned long temp;

        __asm__ __volatile__(
                ".set push               # atomic_sub\n"
                ".set mips2              \n"
                "1:   ll      %0, %1     \n"
                "     subu    %0, %2     \n"
                "     sc      %0, %1     \n"
                "     beqz    %0, 1b     \n"
                ".set pop                \n"
                : "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
        unsigned long temp, result;

        __asm__ __volatile__(
                ".set push               # atomic_add_return\n"
                ".set mips2              \n"
                ".set noreorder          \n"
                "1:   ll      %1, %2     \n"
                "     addu    %0, %1, %3 \n"
                "     sc      %0, %2     \n"
                "     beqz    %0, 1b     \n"
                "     addu    %0, %1, %3 \n"
                "     sync               \n"
                ".set pop                \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");

        return result;
}

extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
        unsigned long temp, result;

        __asm__ __volatile__(
                ".set push               # atomic_sub_return\n"
                ".set mips2              \n"
                ".set noreorder          \n"
                "1:   ll      %1, %2     \n"
                "     subu    %0, %1, %3 \n"
                "     sc      %0, %2     \n"
                "     beqz    %0, 1b     \n"
                "     subu    %0, %1, %3 \n"
                "     sync               \n"
                ".set pop                \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
                : "memory");

        return result;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_inc(v) atomic_add(1,(v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * Currently not implemented for MIPS.
 */

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#endif /* __ASM_ATOMIC_H */

#else

#if defined(__m68k__)

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v, i)        (((v)->counter) = i)

static __inline__ void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__("addl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__("subl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
        __asm__ __volatile__("addql #1,%0" : "=m" (*v): "0" (*v));
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
        __asm__ __volatile__("subql #1,%0" : "=m" (*v): "0" (*v));
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
        char c;
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "=m" (*v): "1" (*v));
        return c != 0;
}

#define atomic_clear_mask(mask, v) \
        __asm__ __volatile__("andl %1,%0" : "=m" (*v) : "id" (~(mask)),"0"(*v))

#define atomic_set_mask(mask, v) \
        __asm__ __volatile__("orl %1,%0" : "=m" (*v) : "id" (mask),"0"(*v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif /* __ARCH_M68K_ATOMIC__ */

#else

#warning libs/pbd has no implementation of strictly atomic operations for your hardware.

#define __NO_STRICT_ATOMIC
#ifdef __NO_STRICT_ATOMIC

/*
 * Because the implementations from the kernel (where all these come
 * from) use cli and spinlocks for hppa and arm, we fall back to plain,
 * non-atomic operations here.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  ( (atomic_t) { (i) } )

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         ((v)->counter = (i))

static __inline__ void atomic_inc(atomic_t *v)
{
        v->counter++;
}

static __inline__ void atomic_dec(atomic_t *v)
{
        v->counter--;
}

static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        int res;
        v->counter--;
        res = v->counter;
        return res == 0;
}

static __inline__ int atomic_inc_and_test(atomic_t *v)
{
        int res;
        v->counter++;
        res = v->counter;
        return res == 0;
}

# endif /* __NO_STRICT_ATOMIC */
# endif /* m68k */
# endif /* mips */
# endif /* s390 */
# endif /* alpha */
# endif /* ia64 */
# endif /* sparc */
# endif /* i386 */
# endif /* ppc */

/***********************************************************************/

#else /* !linux */

typedef unsigned long atomic_t;

#if defined(__sgi)
#undef atomic_set
#endif

inline
void
atomic_set (atomic_t * a, int v) {
#if defined(__sgi) && !defined(__GNUC__)
        __lock_test_and_set(a, v);
#else
        *a = v;
#endif
}

inline
int
atomic_read (const atomic_t * a) {
        return *a;
}

inline
void
atomic_inc (atomic_t * a) {
#if defined(__sgi) && !defined(__GNUC__)
        __add_and_fetch(a, 1);
#else
        ++(*a);
#endif
}

inline
void
atomic_dec (atomic_t * a) {
#if defined(__sgi) && !defined(__GNUC__)
        __sub_and_fetch(a, 1);
#else
        --(*a);
#endif
}

#endif /* linux */
#endif /* __linuxsampler_atomic_h__ */