#ifndef foopulseatomichfoo
#define foopulseatomichfoo

/* $Id$ */

/***
  This file is part of PulseAudio.

  Copyright 2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

/*
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
 * not guaranteed, however, that sizeof(AO_t) == sizeof(size_t), though
 * that is very likely.
 *
 * For now we do only full memory barriers. Eventually we might want
 * to support more elaborate memory barriers, in which case we will add
 * suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions, otherwise we use
 * libatomic_ops.
 */

#ifndef PACKAGE
#error "Please include config.h before including this file!"
#endif

#ifdef HAVE_ATOMIC_BUILTINS

/* __sync based implementation */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    __sync_synchronize();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    __sync_synchronize();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return __sync_fetch_and_add(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return __sync_fetch_and_sub(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    __sync_synchronize();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    __sync_synchronize();
}

static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    return __sync_bool_compare_and_swap(&a->value, (unsigned long) old_p, (unsigned long) new_p);
}
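
/* Illustrative usage sketch (hypothetical names, not part of this header):
   a minimal reference counter built on the API above. pa_atomic_inc() and
   pa_atomic_dec() return the *previous* value, so the caller that sees 1
   from pa_atomic_dec() has dropped the last reference. */
#if 0
typedef struct my_object {
    pa_atomic_t ref;
} my_object;

static void my_object_ref(my_object *o) {
    pa_atomic_inc(&o->ref);
}

/* Returns non-zero when the last reference was dropped */
static int my_object_unref(my_object *o) {
    return pa_atomic_dec(&o->ref) == 1;
}
#endif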

#elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))

#error "The native atomic operations implementation for AMD64 has not been tested. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: make the native atomic operations implementation for AMD64 work, fix libatomic_ops, or upgrade your GCC."

/* Adapted from glibc */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int result;

    __asm__ __volatile__ ("lock; xaddl %0, %1"
                          : "=r" (result), "=m" (a->value)
                          : "0" (i), "m" (a->value));

    return result;
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return pa_atomic_add(a, -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    int result;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_i), "m" (a->value), "0" (old_i));

    return result == old_i;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
}

static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    void *result;

    __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_p), "m" (a->value), "0" (old_p));

    return result == old_p;
}

#elif defined(ATOMIC_ARM_INLINE_ASM)

/*
   These should only be enabled if we have ARMv6 or better.
*/

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    asm volatile ("mcr p15, 0, r0, c7, c10, 5 @ dmb");
#endif
}
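
/* The read-modify-write operations below are built on the ARMv6
   exclusive-monitor instructions: LDREX loads a word and marks its address
   for exclusive access, and STREX stores a new value only if no other
   observer touched that address in between, writing 0 to its status
   register on success and 1 on failure. Each loop retries until the
   exclusive store succeeds. */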

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "add %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%3]\n"
                      "sub %2, %0, %4\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

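/* Compare-and-swap below: SUBS sets the Z flag exactly when the loaded
   value equals the expected one, so STREXEQ attempts the store only in
   that case. The loop retries when the values matched but the exclusive
   store was disturbed. */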
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_i), "r" (new_i)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex %0, [%2]\n"
                      "subs %0, %0, %3\n"
                      "mov %1, %0\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_p), "r" (new_p)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

#elif defined(ATOMIC_ARM_LINUX_HELPERS)

/* See file arch/arm/kernel/entry-armv.S in your kernel sources for more
   information about these functions. The ARM kernel helper functions first
   appeared in 2.6.16.
   Pass the --disable-atomic-arm-linux-helpers flag to configure if you
   prefer the inline asm implementation or have an obsolete Linux kernel.
*/
/* Memory barrier */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    __kernel_dmb();
#endif
}

/* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* This is just to get rid of all warnings */
typedef int (__kernel_cmpxchg_u_t)(unsigned long oldval, unsigned long newval, volatile unsigned long *ptr);
#define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val + i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val - i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

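/* __kernel_cmpxchg can also fail when the operation is interrupted even
   though the values matched, so a failure only counts as a real mismatch
   while the current value no longer equals the expected one; otherwise
   the loop retries. */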
/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    pa_bool_t failed;
    do {
        failed = !!__kernel_cmpxchg(old_i, new_i, &a->value);
    } while (failed && a->value == old_i);
    return !failed;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    pa_bool_t failed;
    do {
        failed = !!__kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
    } while (failed && a->value == (unsigned long) old_p);
    return !failed;
}

#else

/* libatomic_ops based implementation */

#include <atomic_ops.h>

typedef struct pa_atomic {
    volatile AO_t value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    AO_store_full(&a->value, (AO_t) i);
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) i);
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) -i);
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return (int) AO_fetch_and_add1_full(&a->value);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return (int) AO_fetch_and_sub1_full(&a->value);
}

/* Returns non-zero when the operation was successful. */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_i, (AO_t) new_i);
}

typedef struct pa_atomic_ptr {
    volatile AO_t value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    AO_store_full(&a->value, (AO_t) p);
}

static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);
}

#endif
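
/* Illustrative usage sketch (hypothetical names, not part of this header):
   one-time lock-free publication of a shared pointer via
   pa_atomic_ptr_cmpxchg(). Whichever thread wins the compare-and-swap
   installs its object; the losers discard theirs and use the published
   one. */
#if 0
static pa_atomic_ptr_t global_table = PA_ATOMIC_PTR_INIT(NULL);

static my_table* get_table(void) {
    my_table *t = pa_atomic_ptr_load(&global_table);
    if (!t) {
        t = my_table_new();
        if (!pa_atomic_ptr_cmpxchg(&global_table, NULL, t)) {
            /* Another thread won the race; use its table instead. */
            my_table_free(t);
            t = pa_atomic_ptr_load(&global_table);
        }
    }
    return t;
}
#endif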

#endif