1 |
schoenebeck |
28 |
/* |
2 |
|
|
Copyright (C) 2001 Paul Davis and others (see below) |
3 |
|
|
Code derived from various headers from the Linux kernel. |
4 |
|
|
Copyright attributions maintained where present. |
5 |
|
|
|
6 |
|
|
This program is free software; you can redistribute it and/or modify |
7 |
|
|
it under the terms of the GNU General Public License as published by |
8 |
|
|
the Free Software Foundation; either version 2 of the License, or |
9 |
|
|
(at your option) any later version. |
10 |
|
|
|
11 |
|
|
This program is distributed in the hope that it will be useful, |
12 |
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 |
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 |
|
|
GNU General Public License for more details. |
15 |
|
|
|
16 |
|
|
You should have received a copy of the GNU General Public License |
17 |
|
|
along with this program; if not, write to the Free Software |
18 |
|
|
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 |
|
|
|
20 |
|
|
$Id: atomic.h,v 1.1 2004-01-02 00:02:56 schoenebeck Exp $ |
21 |
|
|
*/ |
22 |
|
|
|
23 |
|
|
#ifndef __linuxsampler_atomic_h__ |
24 |
|
|
#define __linuxsampler_atomic_h__ |
25 |
|
|
|
26 |
|
|
#ifdef HAVE_CONFIG_H |
27 |
|
|
#include <config.h> /* config.h c/o auto* tools, wherever it may be */ |
28 |
|
|
#endif |
29 |
|
|
|
30 |
|
|
#ifdef HAVE_SMP /* a macro we control, to manage ... */ |
31 |
|
|
#define CONFIG_SMP /* ... the macro the kernel headers use */ |
32 |
|
|
#endif |
33 |
|
|
|
34 |
|
|
#ifdef linux |
35 |
|
|
#ifdef __powerpc__ |
36 |
|
|
|
37 |
|
|
/* |
38 |
|
|
* BK Id: SCCS/s.atomic.h 1.15 10/28/01 10:37:22 trini |
39 |
|
|
*/ |
40 |
|
|
/* |
41 |
|
|
* PowerPC atomic operations |
42 |
|
|
*/ |
43 |
|
|
|
44 |
|
|
#ifndef _ASM_PPC_ATOMIC_H_ |
45 |
|
|
#define _ASM_PPC_ATOMIC_H_ |
46 |
|
|
|
47 |
|
|
typedef struct { volatile int counter; } atomic_t; |
48 |
|
|
|
49 |
|
|
|
50 |
|
|
#define ATOMIC_INIT(i) { (i) } |
51 |
|
|
|
52 |
|
|
#define atomic_read(v) ((v)->counter) |
53 |
|
|
#define atomic_set(v,i) (((v)->counter) = (i)) |
54 |
|
|
|
55 |
|
|
extern void atomic_clear_mask(unsigned long mask, unsigned long *addr); |
56 |
|
|
extern void atomic_set_mask(unsigned long mask, unsigned long *addr); |
57 |
|
|
|
58 |
|
|
#ifdef CONFIG_SMP |
59 |
|
|
#define SMP_ISYNC "\n\tisync" |
60 |
|
|
#else |
61 |
|
|
#define SMP_ISYNC |
62 |
|
|
#endif |
63 |
|
|
|
64 |
|
|
/* v->counter += a.  No return value and no SMP barrier; use
   atomic_add_return() when the caller needs either. */
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	/* lwarx/stwcx. load-reserve / store-conditional loop: the store
	   fails (and we branch back to 1:) if another CPU touched the
	   reservation granule between the load and the store. */
	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}
77 |
|
|
|
78 |
|
|
/* v->counter += a; returns the new value.  SMP_ISYNC appends an isync
   on CONFIG_SMP builds so the update acts as an acquire barrier. */
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
94 |
|
|
|
95 |
|
|
/* v->counter -= a.  No return value, no SMP barrier. */
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	/* subf computes %0 = %0 - %2 (subtract-from); otherwise identical
	   to the atomic_add reservation loop above. */
	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}
108 |
|
|
|
109 |
|
|
/* v->counter -= a; returns the new value, with an isync acquire
   barrier on SMP builds (SMP_ISYNC). */
static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
125 |
|
|
|
126 |
|
|
/* v->counter += 1.  No return value, no SMP barrier. */
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	/* addic adds an immediate; the carry side effect is irrelevant
	   here, it is just the immediate-form add used by the kernel. */
	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}
139 |
|
|
|
140 |
|
|
/* v->counter += 1; returns the new value (isync barrier on SMP). */
static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
156 |
|
|
|
157 |
|
|
/* v->counter -= 1.  No return value, no SMP barrier. */
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	/* Decrement by adding the immediate -1. */
	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}
170 |
|
|
|
171 |
|
|
/* v->counter -= 1; returns the new value (isync barrier on SMP). */
static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
187 |
|
|
|
188 |
|
|
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) |
189 |
|
|
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) |
190 |
|
|
|
191 |
|
|
/* |
192 |
|
|
* Atomically test *v and decrement if it is greater than 0. |
193 |
|
|
* The function returns the old value of *v minus 1. |
194 |
|
|
*/ |
195 |
|
|
/* Decrement v->counter only if the decremented value would be >= 0
   (see the comment block above: returns old value minus 1, so a
   negative return means no store was performed). */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	/* addic. sets CR0 from the decremented value; blt- skips the
	   store-conditional entirely when the result went negative. */
	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
213 |
|
|
|
214 |
|
|
#define smp_mb__before_atomic_dec() smp_mb() |
215 |
|
|
#define smp_mb__after_atomic_dec() smp_mb() |
216 |
|
|
#define smp_mb__before_atomic_inc() smp_mb() |
217 |
|
|
#define smp_mb__after_atomic_inc() smp_mb() |
218 |
|
|
|
219 |
|
|
#endif /* _ASM_PPC_ATOMIC_H_ */ |
220 |
|
|
|
221 |
|
|
/***********************************************************************/ |
222 |
|
|
|
223 |
|
|
# else /* !PPC */ |
224 |
|
|
|
225 |
|
|
#if defined(__i386__) || defined(__x86_64__) |
226 |
|
|
|
227 |
|
|
#ifndef __ARCH_I386_ATOMIC__ |
228 |
|
|
#define __ARCH_I386_ATOMIC__ |
229 |
|
|
|
230 |
|
|
/* |
231 |
|
|
* Atomic operations that C can't guarantee us. Useful for |
232 |
|
|
* resource counting etc.. |
233 |
|
|
*/ |
234 |
|
|
|
235 |
|
|
#ifdef CONFIG_SMP |
236 |
|
|
#define SMP_LOCK "lock ; " |
237 |
|
|
#else |
238 |
|
|
#define SMP_LOCK "" |
239 |
|
|
#endif |
240 |
|
|
|
241 |
|
|
/* |
242 |
|
|
* Make sure gcc doesn't try to be clever and move things around |
243 |
|
|
* on us. We need to use _exactly_ the address the user gave us, |
244 |
|
|
* not some alias that contains the same information. |
245 |
|
|
*/ |
246 |
|
|
typedef struct { volatile int counter; } atomic_t; |
247 |
|
|
|
248 |
|
|
#define ATOMIC_INIT(i) { (i) } |
249 |
|
|
|
250 |
|
|
/** |
251 |
|
|
* atomic_read - read atomic variable |
252 |
|
|
* @v: pointer of type atomic_t |
253 |
|
|
* |
254 |
|
|
* Atomically reads the value of @v. Note that the guaranteed |
255 |
|
|
* useful range of an atomic_t is only 24 bits. |
256 |
|
|
*/ |
257 |
|
|
#define atomic_read(v) ((v)->counter) |
258 |
|
|
|
259 |
|
|
/** |
260 |
|
|
* atomic_set - set atomic variable |
261 |
|
|
* @v: pointer of type atomic_t |
262 |
|
|
* @i: required value |
263 |
|
|
* |
264 |
|
|
* Atomically sets the value of @v to @i. Note that the guaranteed |
265 |
|
|
* useful range of an atomic_t is only 24 bits. |
266 |
|
|
*/ |
267 |
|
|
#define atomic_set(v,i) (((v)->counter) = (i)) |
268 |
|
|
|
269 |
|
|
/** |
270 |
|
|
* atomic_add - add integer to atomic variable |
271 |
|
|
* @i: integer value to add |
272 |
|
|
* @v: pointer of type atomic_t |
273 |
|
|
* |
274 |
|
|
* Atomically adds @i to @v. Note that the guaranteed useful range |
275 |
|
|
* of an atomic_t is only 24 bits. |
276 |
|
|
*/ |
277 |
|
|
static __inline__ void atomic_add(int i, atomic_t *v)
{
	/* SMP_LOCK expands to "lock ; " on CONFIG_SMP builds (empty
	   otherwise), making the read-modify-write addl bus-atomic. */
	__asm__ __volatile__(
		SMP_LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
284 |
|
|
|
285 |
|
|
/** |
286 |
|
|
* atomic_sub - subtract the atomic variable |
287 |
|
|
* @i: integer value to subtract |
288 |
|
|
* @v: pointer of type atomic_t |
289 |
|
|
* |
290 |
|
|
* Atomically subtracts @i from @v. Note that the guaranteed |
291 |
|
|
* useful range of an atomic_t is only 24 bits. |
292 |
|
|
*/ |
293 |
|
|
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	/* Locked subl; see SMP_LOCK note on atomic_add. */
	__asm__ __volatile__(
		SMP_LOCK "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
300 |
|
|
|
301 |
|
|
/** |
302 |
|
|
* atomic_sub_and_test - subtract value from variable and test result |
303 |
|
|
* @i: integer value to subtract |
304 |
|
|
* @v: pointer of type atomic_t |
305 |
|
|
* |
306 |
|
|
* Atomically subtracts @i from @v and returns |
307 |
|
|
* true if the result is zero, or false for all |
308 |
|
|
* other cases. Note that the guaranteed |
309 |
|
|
* useful range of an atomic_t is only 24 bits. |
310 |
|
|
*/ |
311 |
|
|
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	/* sete captures ZF right after the locked subl: c == 1 iff the
	   subtraction left the counter at exactly zero. */
	__asm__ __volatile__(
		SMP_LOCK "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
321 |
|
|
|
322 |
|
|
/** |
323 |
|
|
* atomic_inc - increment atomic variable |
324 |
|
|
* @v: pointer of type atomic_t |
325 |
|
|
* |
326 |
|
|
* Atomically increments @v by 1. Note that the guaranteed |
327 |
|
|
* useful range of an atomic_t is only 24 bits. |
328 |
|
|
*/ |
329 |
|
|
static __inline__ void atomic_inc(atomic_t *v)
{
	/* Locked incl; see SMP_LOCK note on atomic_add. */
	__asm__ __volatile__(
		SMP_LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
336 |
|
|
|
337 |
|
|
/** |
338 |
|
|
* atomic_dec - decrement atomic variable |
339 |
|
|
* @v: pointer of type atomic_t |
340 |
|
|
* |
341 |
|
|
* Atomically decrements @v by 1. Note that the guaranteed |
342 |
|
|
* useful range of an atomic_t is only 24 bits. |
343 |
|
|
*/ |
344 |
|
|
static __inline__ void atomic_dec(atomic_t *v)
{
	/* Locked decl; see SMP_LOCK note on atomic_add. */
	__asm__ __volatile__(
		SMP_LOCK "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
351 |
|
|
|
352 |
|
|
/** |
353 |
|
|
* atomic_dec_and_test - decrement and test |
354 |
|
|
* @v: pointer of type atomic_t |
355 |
|
|
* |
356 |
|
|
* Atomically decrements @v by 1 and |
357 |
|
|
* returns true if the result is 0, or false for all other |
358 |
|
|
* cases. Note that the guaranteed |
359 |
|
|
* useful range of an atomic_t is only 24 bits. |
360 |
|
|
*/ |
361 |
|
|
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	/* sete after the locked decl: c == 1 iff the new value is zero. */
	__asm__ __volatile__(
		SMP_LOCK "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
371 |
|
|
|
372 |
|
|
/** |
373 |
|
|
* atomic_inc_and_test - increment and test |
374 |
|
|
* @v: pointer of type atomic_t |
375 |
|
|
* |
376 |
|
|
* Atomically increments @v by 1 |
377 |
|
|
* and returns true if the result is zero, or false for all |
378 |
|
|
* other cases. Note that the guaranteed |
379 |
|
|
* useful range of an atomic_t is only 24 bits. |
380 |
|
|
*/ |
381 |
|
|
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	/* sete after the locked incl: c == 1 iff the new value is zero
	   (i.e. the counter wrapped from -1 to 0). */
	__asm__ __volatile__(
		SMP_LOCK "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
391 |
|
|
|
392 |
|
|
/** |
393 |
|
|
* atomic_add_negative - add and test if negative |
394 |
|
|
* @v: pointer of type atomic_t |
395 |
|
|
* @i: integer value to add |
396 |
|
|
* |
397 |
|
|
* Atomically adds @i to @v and returns true |
398 |
|
|
* if the result is negative, or false when |
399 |
|
|
* result is greater than or equal to zero. Note that the guaranteed |
400 |
|
|
* useful range of an atomic_t is only 24 bits. |
401 |
|
|
*/ |
402 |
|
|
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	/* sets captures SF: c == 1 iff the addition produced a negative
	   result. */
	__asm__ __volatile__(
		SMP_LOCK "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
412 |
|
|
|
413 |
|
|
/* These are x86-specific, used by some header files */ |
414 |
|
|
#define atomic_clear_mask(mask, addr) \ |
415 |
|
|
__asm__ __volatile__(SMP_LOCK "andl %0,%1" \ |
416 |
|
|
: : "r" (~(mask)),"m" (*addr) : "memory") |
417 |
|
|
|
418 |
|
|
#define atomic_set_mask(mask, addr) \ |
419 |
|
|
__asm__ __volatile__(SMP_LOCK "orl %0,%1" \ |
420 |
|
|
: : "r" (mask),"m" (*addr) : "memory") |
421 |
|
|
|
422 |
|
|
/* Atomic operations are already serializing on x86 */ |
423 |
|
|
#define smp_mb__before_atomic_dec() barrier() |
424 |
|
|
#define smp_mb__after_atomic_dec() barrier() |
425 |
|
|
#define smp_mb__before_atomic_inc() barrier() |
426 |
|
|
#define smp_mb__after_atomic_inc() barrier() |
427 |
|
|
|
428 |
|
|
#endif /* __ARCH_I386_ATOMIC__ */ |
429 |
|
|
|
430 |
|
|
/***********************************************************************/ |
431 |
|
|
|
432 |
|
|
#else /* !PPC && !i386 */ |
433 |
|
|
|
434 |
|
|
#ifdef __sparc__ |
435 |
|
|
|
436 |
|
|
/* atomic.h: These still suck, but the I-cache hit rate is higher. |
437 |
|
|
* |
438 |
|
|
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) |
439 |
|
|
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au) |
440 |
|
|
*/ |
441 |
|
|
|
442 |
|
|
#ifndef __ARCH_SPARC_ATOMIC__ |
443 |
|
|
#define __ARCH_SPARC_ATOMIC__ |
444 |
|
|
|
445 |
|
|
typedef struct { volatile int counter; } atomic_t; |
446 |
|
|
|
447 |
|
|
#ifndef CONFIG_SMP |
448 |
|
|
|
449 |
|
|
#define ATOMIC_INIT(i) { (i) } |
450 |
|
|
#define atomic_read(v) ((v)->counter) |
451 |
|
|
#define atomic_set(v, i) (((v)->counter) = i) |
452 |
|
|
|
453 |
|
|
#else |
454 |
|
|
/* We do the bulk of the actual work out of line in two common |
455 |
|
|
* routines in assembler, see arch/sparc/lib/atomic.S for the |
456 |
|
|
* "fun" details. |
457 |
|
|
* |
458 |
|
|
* For SMP the trick is you embed the spin lock byte within |
459 |
|
|
* the word, use the low byte so signedness is easily retained |
460 |
|
|
* via a quick arithmetic shift. It looks like this: |
461 |
|
|
* |
462 |
|
|
* ---------------------------------------- |
463 |
|
|
* | signed 24-bit counter value | lock | atomic_t |
464 |
|
|
* ---------------------------------------- |
465 |
|
|
* 31 8 7 0 |
466 |
|
|
*/ |
467 |
|
|
|
468 |
|
|
#define ATOMIC_INIT(i) { (i << 8) } |
469 |
|
|
|
470 |
|
|
/* SMP read of a sparc atomic_t: the low byte is the embedded spin-lock
   (see the layout comment above), so spin until it reads as clear and
   then return the signed 24-bit counter held in the upper bits. */
static __inline__ int atomic_read(atomic_t *v)
{
	int snapshot;

	do {
		snapshot = v->counter;
	} while (snapshot & 0xff);

	return snapshot >> 8;
}
479 |
|
|
|
480 |
|
|
#define atomic_set(v, i) (((v)->counter) = ((i) << 8)) |
481 |
|
|
#endif |
482 |
|
|
|
483 |
|
|
/* SMP add helper: marshals the counter address into %g1 and the
   increment into %g2, then calls the out-of-line assembler routine
   ___atomic_add (arch/sparc/lib/atomic.S per the comment above).
   Returns the updated counter value (used by the *_and_test /
   *_return macros below). */
static __inline__ int __atomic_add(int i, atomic_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");

	ptr = &v->counter;
	increment = i;

	/* Save %o7 in %g4 and advance the return address past the call
	   sequence; the helper hands its result back in %g2. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return increment;
}
501 |
|
|
|
502 |
|
|
/* SMP subtract helper; identical calling convention to __atomic_add
   but dispatches to ___atomic_sub.  Returns the updated counter. */
static __inline__ int __atomic_sub(int i, atomic_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return increment;
}
520 |
|
|
|
521 |
|
|
#define atomic_add(i, v) ((void)__atomic_add((i), (v))) |
522 |
|
|
#define atomic_sub(i, v) ((void)__atomic_sub((i), (v))) |
523 |
|
|
|
524 |
|
|
#define atomic_dec_return(v) __atomic_sub(1, (v)) |
525 |
|
|
#define atomic_inc_return(v) __atomic_add(1, (v)) |
526 |
|
|
|
527 |
|
|
#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0) |
528 |
|
|
#define atomic_dec_and_test(v) (__atomic_sub(1, (v)) == 0) |
529 |
|
|
|
530 |
|
|
#define atomic_inc(v) ((void)__atomic_add(1, (v))) |
531 |
|
|
#define atomic_dec(v) ((void)__atomic_sub(1, (v))) |
532 |
|
|
|
533 |
|
|
#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0) |
534 |
|
|
|
535 |
|
|
/* Atomic operations are already serializing */ |
536 |
|
|
#define smp_mb__before_atomic_dec() barrier() |
537 |
|
|
#define smp_mb__after_atomic_dec() barrier() |
538 |
|
|
#define smp_mb__before_atomic_inc() barrier() |
539 |
|
|
#define smp_mb__after_atomic_inc() barrier() |
540 |
|
|
|
541 |
|
|
|
542 |
|
|
#endif /* !(__ARCH_SPARC_ATOMIC__) */ |
543 |
|
|
|
544 |
|
|
/***********************************************************************/ |
545 |
|
|
|
546 |
|
|
#else |
547 |
|
|
|
548 |
|
|
#ifdef __ia64__ |
549 |
|
|
|
550 |
|
|
#ifndef __ARCH_IA64_ATOMIC__ |
551 |
|
|
#define __ARCH_IA64_ATOMIC__ |
552 |
|
|
|
553 |
|
|
/* On ia64 an atomic_t is simply a volatile int; the read-modify-write
   primitives below are built on cmpxchg4.acq. */
typedef volatile int atomic_t;

/*
 * atomic_read - return the current value of *a.
 *
 * An aligned 4-byte load is atomic on ia64, so a plain dereference
 * suffices.  Declared `static __inline__` (like every other
 * architecture branch in this header) instead of bare `inline`: a bare
 * `inline` definition in a header emits no external definition under
 * C99/C11 inline semantics, so any non-inlined call fails to link.
 */
static __inline__ int atomic_read (const atomic_t * a)
{
	return *a;
}
561 |
|
|
|
562 |
|
|
inline |
563 |
|
|
void |
564 |
|
|
atomic_set(atomic_t *a, int v) |
565 |
|
|
{ |
566 |
|
|
*a = v; |
567 |
|
|
} |
568 |
|
|
|
569 |
|
|
inline |
570 |
|
|
void |
571 |
|
|
atomic_inc (atomic_t *v) |
572 |
|
|
{ |
573 |
|
|
int old, r; |
574 |
|
|
|
575 |
|
|
do { |
576 |
|
|
old = atomic_read(v); |
577 |
|
|
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old)); |
578 |
|
|
__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv" |
579 |
|
|
: "=r"(r) : "r"(v), "r"(old + 1) |
580 |
|
|
: "memory"); |
581 |
|
|
} while (r != old); |
582 |
|
|
} |
583 |
|
|
|
584 |
|
|
inline |
585 |
|
|
void |
586 |
|
|
atomic_dec (atomic_t *v) |
587 |
|
|
{ |
588 |
|
|
int old, r; |
589 |
|
|
|
590 |
|
|
do { |
591 |
|
|
old = atomic_read(v); |
592 |
|
|
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old)); |
593 |
|
|
__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv" |
594 |
|
|
: "=r"(r) : "r"(v), "r"(old - 1) |
595 |
|
|
: "memory"); |
596 |
|
|
} while (r != old); |
597 |
|
|
} |
598 |
|
|
|
599 |
|
|
inline |
600 |
|
|
int |
601 |
|
|
atomic_dec_and_test (atomic_t *v) |
602 |
|
|
{ |
603 |
|
|
int old, r; |
604 |
|
|
|
605 |
|
|
do { |
606 |
|
|
old = atomic_read(v); |
607 |
|
|
__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO" (old)); |
608 |
|
|
__asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv" |
609 |
|
|
: "=r"(r) : "r"(v), "r"(old - 1) |
610 |
|
|
: "memory"); |
611 |
|
|
} while (r != old); |
612 |
|
|
return old != 1; |
613 |
|
|
} |
614 |
|
|
|
615 |
|
|
#endif /* !(__ARCH_IA64_ATOMIC__) */ |
616 |
|
|
|
617 |
|
|
#else |
618 |
|
|
|
619 |
|
|
#ifdef __alpha__ |
620 |
|
|
|
621 |
|
|
#ifndef _ALPHA_ATOMIC_H |
622 |
|
|
#define _ALPHA_ATOMIC_H |
623 |
|
|
|
624 |
|
|
/* |
625 |
|
|
* Atomic operations that C can't guarantee us. Useful for |
626 |
|
|
* resource counting etc... |
627 |
|
|
* |
628 |
|
|
* But use these as seldom as possible since they are much slower |
629 |
|
|
* than regular operations. |
630 |
|
|
*/ |
631 |
|
|
|
632 |
|
|
|
633 |
|
|
/* |
634 |
|
|
* Counter is volatile to make sure gcc doesn't try to be clever |
635 |
|
|
* and move things around on us. We need to use _exactly_ the address |
636 |
|
|
* the user gave us, not some alias that contains the same information. |
637 |
|
|
*/ |
638 |
|
|
typedef struct { volatile int counter; } atomic_t; |
639 |
|
|
|
640 |
|
|
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) |
641 |
|
|
|
642 |
|
|
#define atomic_read(v) ((v)->counter) |
643 |
|
|
#define atomic_set(v,i) ((v)->counter = (i)) |
644 |
|
|
|
645 |
|
|
/* |
646 |
|
|
* To get proper branch prediction for the main line, we must branch |
647 |
|
|
* forward to code at the end of this object's .text section, then |
648 |
|
|
* branch back to restart the operation. |
649 |
|
|
*/ |
650 |
|
|
|
651 |
|
|
/* v->counter += i.  ldl_l/stl_c load-locked / store-conditional loop;
   the retry branch lives in .subsection 2 so the common (successful)
   path predicts as a straight fall-through (see comment above). */
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
665 |
|
|
|
666 |
|
|
/* v->counter -= i; same ldl_l/stl_c retry structure as atomic_add. */
static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
680 |
|
|
|
681 |
|
|
/* |
682 |
|
|
* Same as above, but return the result value |
683 |
|
|
*/ |
684 |
|
|
/* v->counter += i and return the new value.  The sum is computed twice:
   once into %2 (result, preserved) and once into %0, because stl_c
   overwrites %0 with the success flag.  The trailing mb is a full
   memory barrier. */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}
701 |
|
|
|
702 |
|
|
/* v->counter -= i and return the new value; see atomic_add_return for
   the duplicated-arithmetic and barrier notes. */
static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}
719 |
|
|
|
720 |
|
|
#define atomic_dec_return(v) atomic_sub_return(1,(v)) |
721 |
|
|
#define atomic_inc_return(v) atomic_add_return(1,(v)) |
722 |
|
|
|
723 |
|
|
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) |
724 |
|
|
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) |
725 |
|
|
|
726 |
|
|
#define atomic_inc(v) atomic_add(1,(v)) |
727 |
|
|
#define atomic_dec(v) atomic_sub(1,(v)) |
728 |
|
|
|
729 |
|
|
#define smp_mb__before_atomic_dec() smp_mb() |
730 |
|
|
#define smp_mb__after_atomic_dec() smp_mb() |
731 |
|
|
#define smp_mb__before_atomic_inc() smp_mb() |
732 |
|
|
#define smp_mb__after_atomic_inc() smp_mb() |
733 |
|
|
|
734 |
|
|
#endif /* _ALPHA_ATOMIC_H */ |
735 |
|
|
|
736 |
|
|
#else |
737 |
|
|
|
738 |
|
|
#ifdef __s390__ |
739 |
|
|
|
740 |
|
|
#ifndef __ARCH_S390_ATOMIC__ |
741 |
|
|
#define __ARCH_S390_ATOMIC__ |
742 |
|
|
|
743 |
|
|
/* |
744 |
|
|
* include/asm-s390/atomic.h |
745 |
|
|
* |
746 |
|
|
* S390 version |
747 |
|
|
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation |
748 |
|
|
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
749 |
|
|
* Denis Joseph Barrow |
750 |
|
|
* |
751 |
|
|
* Derived from "include/asm-i386/bitops.h" |
752 |
|
|
* Copyright (C) 1992, Linus Torvalds |
753 |
|
|
* |
754 |
|
|
*/ |
755 |
|
|
|
756 |
|
|
/* |
757 |
|
|
* Atomic operations that C can't guarantee us. Useful for |
758 |
|
|
* resource counting etc.. |
759 |
|
|
 * S390 uses 'Compare And Swap' for atomicity in SMP environment
760 |
|
|
*/ |
761 |
|
|
|
762 |
|
|
typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t; |
763 |
|
|
#define ATOMIC_INIT(i) { (i) } |
764 |
|
|
|
765 |
|
|
#define atomic_eieio() __asm__ __volatile__ ("BCR 15,0") |
766 |
|
|
|
767 |
|
|
/* Compare-and-swap retry loop shared by all s390 primitives below:
   old_val = *ptr; new_val = old_val <op_string> op_val; `cs` stores
   new_val only if *ptr still equals old_val, and `jl 0b` retries while
   the swap loses a race.  op_string is an RR arithmetic mnemonic
   ("ar" add, "sr" subtract, "nr" and, "or" or). */
#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string)		\
        __asm__ __volatile__("   l     %0,0(%2)\n"			\
                             "0: lr    %1,%0\n"				\
                             op_string "    %1,%3\n"			\
                             "   cs    %0,%1,0(%2)\n"			\
                             "   jl    0b"				\
                             : "=&d" (old_val), "=&d" (new_val)		\
                             : "a" (ptr), "d" (op_val) : "cc" );
775 |
|
|
|
776 |
|
|
#define atomic_read(v) ((v)->counter) |
777 |
|
|
#define atomic_set(v,i) (((v)->counter) = (i)) |
778 |
|
|
|
779 |
|
|
/* v->counter += i (cs retry loop); result discarded. */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, i, "ar");
}
784 |
|
|
|
785 |
|
|
/* v->counter += i; returns the new value. */
static __inline__ int atomic_add_return (int i, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, i, "ar");
	return new_val;
}
791 |
|
|
|
792 |
|
|
/* v->counter += i; returns true iff the new value is negative. */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, i, "ar");
	return new_val < 0;
}
798 |
|
|
|
799 |
|
|
/* v->counter -= i; result discarded. */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, i, "sr");
}
804 |
|
|
|
805 |
|
|
/* v->counter += 1; result discarded. */
static __inline__ void atomic_inc(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "ar");
}
810 |
|
|
|
811 |
|
|
/* v->counter += 1; returns the new value. */
static __inline__ int atomic_inc_return(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "ar");
	return new_val;
}
817 |
|
|
|
818 |
|
|
/*
 * atomic_inc_and_test - atomically increment v->counter and return
 * true iff the new value is zero (the contract documented for the
 * i386 branch of this header: "returns true if the result is zero").
 */
static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "ar");
	/* Was `return new_val != 0;`, the inverse of the documented
	   contract and of the i386 implementation above. */
	return new_val == 0;
}
824 |
|
|
|
825 |
|
|
/* v->counter -= 1; result discarded. */
static __inline__ void atomic_dec(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "sr");
}
830 |
|
|
|
831 |
|
|
/* v->counter -= 1; returns the new value. */
static __inline__ int atomic_dec_return(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "sr");
	return new_val;
}
837 |
|
|
|
838 |
|
|
/* v->counter -= 1; returns true iff the new value is zero. */
static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, 1, "sr");
	return new_val == 0;
}
844 |
|
|
|
845 |
|
|
/* v->counter &= ~mask (atomic AND with the complement, "nr"). */
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, ~mask, "nr");
}
850 |
|
|
|
851 |
|
|
/* v->counter |= mask (atomic OR, "or"). */
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	int old_val, new_val;
	__CS_LOOP(old_val, new_val, v, mask, "or");
}
856 |
|
|
|
857 |
|
|
/* |
858 |
|
|
returns 0 if expected_oldval==value in *v ( swap was successful ) |
859 |
|
|
returns 1 if unsuccessful. |
860 |
|
|
*/ |
861 |
|
|
/*
 * Single compare-and-swap attempt (no retry): swaps new_val into *v iff
 * *v == expected_oldval.  Per the comment above: returns 0 on success,
 * 1 on failure.  ipm/srl extract the condition code set by `cs` into
 * the return value.
 */
static __inline__ int
atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
{
	int retval;

	__asm__ __volatile__(
		"  lr   0,%2\n"
		"  cs   0,%3,0(%1)\n"
		"  ipm  %0\n"
		"  srl  %0,28\n"
		"0:"
		: "=&d" (retval)
		: "a" (v), "d" (expected_oldval) , "d" (new_val)
		: "0", "cc");
	return retval;
}
877 |
|
|
|
878 |
|
|
/* |
879 |
|
|
Spin till *v = expected_oldval then swap with newval. |
880 |
|
|
*/ |
881 |
|
|
/*
 * Spin until *v == expected_oldval, then swap in new_val (per the
 * comment above).  `jl 0b` retries for as long as the `cs` fails.
 */
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval,int new_val,atomic_t *v)
{
	__asm__ __volatile__(
		"0: lr  0,%1\n"
		"   cs  0,%2,0(%0)\n"
		"   jl  0b\n"
		: : "a" (v), "d" (expected_oldval) , "d" (new_val)
		: "cc", "0" );
}
891 |
|
|
|
892 |
|
|
#define smp_mb__before_atomic_dec() smp_mb() |
893 |
|
|
#define smp_mb__after_atomic_dec() smp_mb() |
894 |
|
|
#define smp_mb__before_atomic_inc() smp_mb() |
895 |
|
|
#define smp_mb__after_atomic_inc() smp_mb() |
896 |
|
|
|
897 |
|
|
#endif /* __ARCH_S390_ATOMIC __ */ |
898 |
|
|
|
899 |
|
|
#else |
900 |
|
|
|
901 |
|
|
#ifdef __mips__ |
902 |
|
|
|
903 |
|
|
/* |
904 |
|
|
* Atomic operations that C can't guarantee us. Useful for |
905 |
|
|
* resource counting etc.. |
906 |
|
|
* |
907 |
|
|
 * But use these as seldom as possible since they are much slower
908 |
|
|
* than regular operations. |
909 |
|
|
* |
910 |
|
|
* This file is subject to the terms and conditions of the GNU General Public |
911 |
|
|
* License. See the file "COPYING" in the main directory of this archive |
912 |
|
|
* for more details. |
913 |
|
|
* |
914 |
|
|
* Copyright (C) 1996, 1997, 2000 by Ralf Baechle |
915 |
|
|
*/ |
916 |
|
|
#ifndef __ASM_ATOMIC_H |
917 |
|
|
#define __ASM_ATOMIC_H |
918 |
|
|
|
919 |
|
|
typedef struct { volatile int counter; } atomic_t; |
920 |
|
|
|
921 |
|
|
#define ATOMIC_INIT(i) { (i) } |
922 |
|
|
|
923 |
|
|
/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * NOTE(review): plain load — presumably relies on aligned word
 * accesses being single operations on MIPS; no ll/sc needed here.
 */
#define atomic_read(v) ((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i) ((v)->counter = (i))
941 |
|
|
|
942 |
|
|
/*
 * ... while for MIPS II and better we can use ll/sc instruction. This
 * implementation is SMP safe ...
 */

/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v. Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 *
 * NOTE(review): 'extern __inline__' is the traditional GCC (gnu89)
 * idiom — no out-of-line copy is emitted for this definition.
 */
extern __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;

	/* ll/sc retry loop: load-linked, add, store-conditional; sc
	 * leaves 0 in %0 and we branch back to 1: if another CPU wrote
	 * the word in between. */
	__asm__ __volatile__(
	".set push # atomic_add\n"
	".set mips2 \n"
	"1: ll %0, %1 \n"
	" addu %0, %2 \n"
	" sc %0, %1 \n"
	" beqz %0, 1b \n"
	".set pop \n"
	: "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter));
}
970 |
|
|
|
971 |
|
|
/*
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;

	/* same ll/sc retry loop as atomic_add, with subu instead */
	__asm__ __volatile__(
	".set push # atomic_sub\n"
	".set mips2 \n"
	"1: ll %0, %1 \n"
	" subu %0, %2 \n"
	" sc %0, %1 \n"
	" beqz %0, 1b \n"
	".set pop \n"
	: "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter));
}
994 |
|
|
|
995 |
|
|
/*
 * Same as above, but return the result value
 */
extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	/* With .set noreorder the second addu sits in beqz's branch-delay
	 * slot, recomputing the result into %0 after sc consumed it; sync
	 * orders the update against later accesses. */
	__asm__ __volatile__(
	".set push # atomic_add_return\n"
	".set mips2 \n"
	".set noreorder \n"
	"1: ll %1, %2 \n"
	" addu %0, %1, %3 \n"
	" sc %0, %2 \n"
	" beqz %0, 1b \n"
	" addu %0, %1, %3 \n"
	" sync \n"
	".set pop \n"
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter)
	: "memory");

	return result;
}
1019 |
|
|
|
1020 |
|
|
/* Atomically subtracts @i from @v and returns the new value
 * (counterpart of atomic_add_return; same delay-slot trick). */
extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
	".set push # atomic_sub_return\n"
	".set mips2 \n"
	".set noreorder \n"
	"1: ll %1, %2 \n"
	" subu %0, %1, %3 \n"
	" sc %0, %2 \n"
	" beqz %0, 1b \n"
	" subu %0, %1, %3 \n"
	" sync \n"
	".set pop \n"
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter)
	: "memory");

	return result;
}
1041 |
|
|
|
1042 |
|
|
/* Convenience wrappers: adjust by one and return the new value. */
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
1056 |
|
|
|
1057 |
|
|
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
/* BUGFIX: atomic_inc_return() takes a single argument (see its
 * definition above); the previous expansion atomic_inc_return(1, (v))
 * passed two arguments and broke preprocessing at every use site. */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
1067 |
|
|
|
1068 |
|
|
/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_inc(v) atomic_add(1,(v))

/*
 * atomic_dec - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec(v) atomic_sub(1,(v))
1096 |
|
|
|
1097 |
|
|
/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * Currently not implemented for MIPS.
 */

/* Atomic operations are already serializing */
/* NOTE(review): smp_mb() is expected to be defined elsewhere. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
1115 |
|
|
|
1116 |
|
|
#endif /* __ASM_ATOMIC_H */

#else

#if defined(__m68k__)

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

/* No volatile here: uniprocessor only, and the accessors below are
 * single asm instructions operating directly on memory. */
typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
1139 |
|
|
|
1140 |
|
|
/* Add @i to @v with a single addl on the memory operand. */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}
1144 |
|
|
|
1145 |
|
|
/* Subtract @i from @v with a single subl on the memory operand. */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "=m" (*v) : "id" (i), "0" (*v));
}
1149 |
|
|
|
1150 |
|
|
/* Increment @v by one (addq with immediate #1). */
static __inline__ void atomic_inc(volatile atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "=m" (*v): "0" (*v));
}
1154 |
|
|
|
1155 |
|
|
/* Decrement @v by one (subq with immediate #1). */
static __inline__ void atomic_dec(volatile atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "=m" (*v): "0" (*v));
}
1159 |
|
|
|
1160 |
|
|
/* Decrement @v and return nonzero iff the result is zero:
 * 'seq %0' sets c to all-ones when the subql left the Z flag set. */
static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "=m" (*v): "1" (*v));
	return c != 0;
}
1166 |
|
|
|
1167 |
|
|
/* Clear the bits of @mask in @v (andl with the complement). */
#define atomic_clear_mask(mask, v) \
	__asm__ __volatile__("andl %1,%0" : "=m" (*v) : "id" (~(mask)),"0"(*v))

/* Set the bits of @mask in @v (orl). */
#define atomic_set_mask(mask, v) \
	__asm__ __volatile__("orl %1,%0" : "=m" (*v) : "id" (mask),"0"(*v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

#endif /* __ARCH_M68K_ATOMIC__ */
1180 |
|
|
|
1181 |
|
|
#else

/* No strictly atomic implementation is known for this architecture;
 * fall back to the plain (non-atomic) C versions below. */
#warning libs/pbd has no implementation of strictly atomic operations for your hardware.

#define __NO_STRICT_ATOMIC
#ifdef __NO_STRICT_ATOMIC
1187 |
|
|
|
1188 |
|
|
/*
 * Plain-C fallback. The kernel implementations (which everything above
 * is derived from) rely on cli and spinlocks for hppa and arm, so no
 * strictly atomic version is available here — these are NOT atomic.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )

#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))

/* Bump the counter by one (not interrupt- or SMP-safe). */
static __inline__ void atomic_inc(atomic_t *v)
{
	v->counter += 1;
}

/* Drop the counter by one (not interrupt- or SMP-safe). */
static __inline__ void atomic_dec(atomic_t *v)
{
	v->counter -= 1;
}

/* Decrement, then report whether the counter reached zero. */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	int remaining;
	v->counter -= 1;
	remaining = v->counter;
	return remaining == 0;
}

/* Increment, then report whether the counter landed on zero. */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	int now;
	v->counter += 1;
	now = v->counter;
	return now == 0;
}
1225 |
|
|
|
1226 |
|
|
/* Close the nested per-architecture #ifdef chain, innermost first
 * (the outer levels were opened in the portion of the file above). */
# endif /* __NO_STRICT_ATOMIC */
# endif /* m68k */
# endif /* mips */
# endif /* s390 */
# endif /* alpha */
# endif /* ia64 */
# endif /* sparc */
# endif /* i386 */
# endif /* ppc */

/***********************************************************************/

#else /* !linux */
1239 |
|
|
|
1240 |
|
|
/* Non-Linux fallback: emulate the atomic_t API with a bare integer.
 * On IRIX without GCC the MIPSpro __lock_test_and_set /
 * __add_and_fetch / __sub_and_fetch intrinsics are used; on every
 * other compiler these are plain, NON-atomic operations. */
typedef unsigned long atomic_t;

#if defined(__sgi)
#undef atomic_set
#endif

/* Store @v into *@a.
 * FIX: these were declared bare 'inline'; in a header that yields no
 * external definition under C99 inline semantics, producing
 * undefined-reference (or, with gnu89 semantics, multiple-definition)
 * link errors when included from several translation units.
 * 'static inline' is safe under both models and changes nothing for
 * callers. */
static inline
void
atomic_set (atomic_t * a, int v) {
#if defined(__sgi) && !defined(__GNUC__)
	__lock_test_and_set(a, v);
#else
	*a=v;
#endif
}

/* Read the current value of *@a.
 * NOTE(review): narrows unsigned long to int — kept as-is. */
static inline
int
atomic_read (const atomic_t * a) {
	return *a;
}

/* Increment *@a by one. */
static inline
void
atomic_inc (atomic_t * a) {
#if defined(__sgi) && !defined(__GNUC__)
	__add_and_fetch(a, 1);
#else
	++(*a);
#endif
}

/* Decrement *@a by one. */
static inline
void
atomic_dec (atomic_t * a) {
#if defined(__sgi) && !defined(__GNUC__)
	__sub_and_fetch(a, 1);
#else
	--(*a);
#endif
}
1281 |
|
|
|
1282 |
|
|
#endif /* linux */ |
1283 |
|
|
#endif /* __linuxsampler_atomic_h__ */ |