]>
code.delx.au - pulseaudio/blob - src/pulsecore/atomic.h
1 #ifndef foopulseatomichfoo
2 #define foopulseatomichfoo
7 This file is part of PulseAudio.
9 Copyright 2006 Lennart Poettering
11 PulseAudio is free software; you can redistribute it and/or modify
12 it under the terms of the GNU Lesser General Public License as
13 published by the Free Software Foundation; either version 2 of the
14 License, or (at your option) any later version.
16 PulseAudio is distributed in the hope that it will be useful, but
17 WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with PulseAudio; if not, write to the Free Software
23 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
28 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
29 * not guaranteed, however, that sizeof(AO_t) == sizeof(size_t). It is,
30 * however, very likely.
32 * For now we do only full memory barriers. Eventually we might want
33 * to support more elaborate memory barriers, in which case we will add
34 * suffixes to the function names.
36 * On gcc >= 4.1 we use the builtin atomic functions. otherwise we use
41 #error "Please include config.h before including this file!"
44 #ifdef HAVE_ATOMIC_BUILTINS
46 /* __sync based implementation */
/* Atomic integer wrapper; access only through the pa_atomic_*() API below. */
typedef struct pa_atomic {
    /* NOTE(review): member reconstructed from `->value` / `.value = (v)` int
     * usage in this branch — confirm against upstream atomic.h. */
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }
54 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
59 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
64 /* Returns the previously set value */
65 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
66 return __sync_fetch_and_add (& a
-> value
, i
);
69 /* Returns the previously set value */
70 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
71 return __sync_fetch_and_sub (& a
-> value
, i
);
74 /* Returns the previously set value */
75 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
76 return pa_atomic_add ( a
, 1 );
79 /* Returns the previously set value */
80 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
81 return pa_atomic_sub ( a
, 1 );
84 /* Returns non-zero when the operation was successful. */
85 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
86 return __sync_bool_compare_and_swap (& a
-> value
, old_i
, new_i
);
/* Atomic pointer wrapper; the pointer is stored as an unsigned long
 * (assumes unsigned long is wide enough to hold a void* — the file-top
 * comment notes the same assumption for AO_t). */
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
95 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
97 return ( void *) a
-> value
;
100 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
101 a
-> value
= ( unsigned long ) p
;
102 __sync_synchronize ();
105 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
106 return __sync_bool_compare_and_swap (& a
-> value
, ( long ) old_p
, ( long ) new_p
);
109 #elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))
111 #error "The native atomic operations implementation for AMD64 has not been tested. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: make the native atomic operations implementation for AMD64 work, fix libatomic_ops, or upgrade your GCC."
113 /* Adapted from glibc
115 typedef struct pa_atomic
{
119 #define PA_ATOMIC_INIT(v) { .value = (v) }
121 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
125 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
129 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
132 __asm
__volatile ( "lock; xaddl %0, %1"
133 : "=r" ( result
), "=m" ( a
-> value
)
134 : "0" ( i
), "m" ( a
-> value
));
139 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
140 return pa_atomic_add ( a
, - i
);
143 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
144 return pa_atomic_add ( a
, 1 );
147 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
148 return pa_atomic_sub ( a
, 1 );
151 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
154 __asm__
__volatile__ ( "lock; cmpxchgl %2, %1"
155 : "=a" ( result
), "=m" ( a
-> value
)
156 : "r" ( new_i
), "m" ( a
-> value
), "0" ( old_i
));
158 return result
== oldval
;
161 typedef struct pa_atomic_ptr
{
162 volatile unsigned long value
;
165 #define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
167 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
168 return ( void *) a
-> value
;
171 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
172 a
-> value
= ( unsigned long ) p
;
175 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
178 __asm__
__volatile__ ( "lock; cmpxchgq %q2, %1"
179 : "=a" ( result
), "=m" ( a
-> value
)
180 : "r" ( new_p
), "m" ( a
-> value
), "0" ( old_p
));
185 #elif defined(ATOMIC_ARM_INLINE_ASM)
188 These should only be enabled if we have ARMv6 or better.
191 typedef struct pa_atomic
{
195 #define PA_ATOMIC_INIT(v) { .value = (v) }
197 static inline void pa_memory_barrier ( void ) {
198 #ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
199 asm volatile ( "mcr p15, 0, r0, c7, c10, 5 @ dmb" );
203 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
208 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
213 /* Returns the previously set value */
214 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
215 unsigned long not_exclusive
;
216 int new_val
, old_val
;
220 asm volatile ( "ldrex %0, [%3] \n "
222 "strex %1, %2, [%3] \n "
223 : "=&r" ( old_val
), "=&r" ( not_exclusive
), "=&r" ( new_val
)
224 : "r" (& a
-> value
), "Ir" ( i
)
226 } while ( not_exclusive
);
232 /* Returns the previously set value */
233 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
234 unsigned long not_exclusive
;
235 int new_val
, old_val
;
239 asm volatile ( "ldrex %0, [%3] \n "
241 "strex %1, %2, [%3] \n "
242 : "=&r" ( old_val
), "=&r" ( not_exclusive
), "=&r" ( new_val
)
243 : "r" (& a
-> value
), "Ir" ( i
)
245 } while ( not_exclusive
);
251 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
252 return pa_atomic_add ( a
, 1 );
255 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
256 return pa_atomic_sub ( a
, 1 );
259 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
260 unsigned long not_equal
, not_exclusive
;
264 asm volatile ( "ldrex %0, [%2] \n "
267 "strexeq %0, %4, [%2] \n "
268 : "=&r" ( not_exclusive
), "=&r" ( not_equal
)
269 : "r" (& a
-> value
), "Ir" ( old_i
), "r" ( new_i
)
271 } while ( not_exclusive
&& ! not_equal
);
277 typedef struct pa_atomic_ptr
{
278 volatile unsigned long value
;
281 #define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
283 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
285 return ( void *) a
-> value
;
288 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
289 a
-> value
= ( unsigned long ) p
;
293 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
294 unsigned long not_equal
, not_exclusive
;
298 asm volatile ( "ldrex %0, [%2] \n "
301 "strexeq %0, %4, [%2] \n "
302 : "=&r" ( not_exclusive
), "=&r" ( not_equal
)
303 : "r" (& a
-> value
), "Ir" ( old_p
), "r" ( new_p
)
305 } while ( not_exclusive
&& ! not_equal
);
311 #elif defined(ATOMIC_ARM_LINUX_HELPERS)
313 /* See file arch/arm/kernel/entry-armv.S in your kernel sources for more
314 information about these functions. The arm kernel helper functions first
316 Apply --disable-atomic-arm-linux-helpers flag to configure if you prefer
317 inline asm implementation or you have an obsolete Linux kernel.
320 typedef void ( __kernel_dmb_t
)( void );
321 #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
323 static inline void pa_memory_barrier ( void ) {
324 #ifndef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
329 /* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
330 typedef int ( __kernel_cmpxchg_t
)( int oldval
, int newval
, volatile int * ptr
);
331 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
333 /* This is just to get rid of all warnings */
334 typedef int ( __kernel_cmpxchg_u_t
)( unsigned long oldval
, unsigned long newval
, volatile unsigned long * ptr
);
335 #define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)
337 typedef struct pa_atomic
{
341 #define PA_ATOMIC_INIT(v) { .value = (v) }
343 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
348 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
353 /* Returns the previously set value */
354 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
358 } while ( __kernel_cmpxchg ( old_val
, old_val
+ i
, & a
-> value
));
362 /* Returns the previously set value */
363 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
367 } while ( __kernel_cmpxchg ( old_val
, old_val
- i
, & a
-> value
));
371 /* Returns the previously set value */
372 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
373 return pa_atomic_add ( a
, 1 );
376 /* Returns the previously set value */
377 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
378 return pa_atomic_sub ( a
, 1 );
381 /* Returns non-zero when the operation was successful. */
382 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
385 failed
= !! __kernel_cmpxchg ( old_i
, new_i
, & a
-> value
);
386 } while ( failed
&& a
-> value
== old_i
);
390 typedef struct pa_atomic_ptr
{
391 volatile unsigned long value
;
394 #define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
396 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
398 return ( void *) a
-> value
;
401 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
402 a
-> value
= ( unsigned long ) p
;
406 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
409 failed
= !! __kernel_cmpxchg_u (( unsigned long ) old_p
, ( unsigned long ) new_p
, & a
-> value
);
410 } while ( failed
&& a
-> value
== ( unsigned long ) old_p
);
416 /* libatomic_ops based implementation */
418 #include <atomic_ops.h>
420 typedef struct pa_atomic
{
424 #define PA_ATOMIC_INIT(v) { .value = (v) }
426 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
427 return ( int ) AO_load_full (( AO_t
*) & a
-> value
);
430 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
431 AO_store_full (& a
-> value
, ( AO_t
) i
);
434 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
435 return AO_fetch_and_add_full (& a
-> value
, ( AO_t
) i
);
438 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
439 return AO_fetch_and_add_full (& a
-> value
, ( AO_t
) - i
);
442 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
443 return AO_fetch_and_add1_full (& a
-> value
);
446 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
447 return AO_fetch_and_sub1_full (& a
-> value
);
450 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
451 return AO_compare_and_swap_full (& a
-> value
, old_i
, new_i
);
454 typedef struct pa_atomic_ptr
{
458 #define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }
460 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
461 return ( void *) AO_load_full (( AO_t
*) & a
-> value
);
464 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
465 AO_store_full (& a
-> value
, ( AO_t
) p
);
468 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
469 return AO_compare_and_swap_full (& a
-> value
, ( AO_t
) old_p
, ( AO_t
) new_p
);