]>
code.delx.au - pulseaudio/blob - src/pulsecore/atomic.h
1 #ifndef foopulseatomichfoo
2 #define foopulseatomichfoo
5 This file is part of PulseAudio.
7 Copyright 2006-2008 Lennart Poettering
8 Copyright 2008 Nokia Corporation
10 PulseAudio is free software; you can redistribute it and/or modify
11 it under the terms of the GNU Lesser General Public License as
12 published by the Free Software Foundation; either version 2 of the
13 License, or (at your option) any later version.
15 PulseAudio is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU Lesser General Public
21 License along with PulseAudio; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
27 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
28 * not guaranteed, however, that sizeof(AO_t) == sizeof(size_t). It is,
29 * however, very likely.
31 * For now we do only full memory barriers. Eventually we might want
32 * to support more elaborate memory barriers, in which case we will add
33 * suffixes to the function names.
35 * On gcc >= 4.1 we use the builtin atomic functions. otherwise we use
40 #error "Please include config.h before including this file!"
43 #ifdef HAVE_ATOMIC_BUILTINS
/* __sync based implementation (gcc >= 4.1 builtin atomics) */

/* Atomic integer. Access only through the pa_atomic_* functions. */
typedef struct pa_atomic {
    volatile int value; /* NOTE(review): field reconstructed from garbled source; all accessors treat it as int — confirm against upstream */
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }
53 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
58 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
63 /* Returns the previously set value */
64 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
65 return __sync_fetch_and_add (& a
-> value
, i
);
68 /* Returns the previously set value */
69 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
70 return __sync_fetch_and_sub (& a
-> value
, i
);
73 /* Returns the previously set value */
74 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
75 return pa_atomic_add ( a
, 1 );
78 /* Returns the previously set value */
79 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
80 return pa_atomic_sub ( a
, 1 );
83 /* Returns non-zero when the operation was successful. */
84 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
85 return __sync_bool_compare_and_swap (& a
-> value
, old_i
, new_i
);
/* Atomic pointer, stored as an unsigned long (same width as void*).
 * Access only through the pa_atomic_ptr_* functions. */
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
94 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
96 return ( void *) a
-> value
;
99 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
100 a
-> value
= ( unsigned long ) p
;
101 __sync_synchronize ();
104 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
105 return __sync_bool_compare_and_swap (& a
-> value
, ( long ) old_p
, ( long ) new_p
);
108 #elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))
110 #error "The native atomic operations implementation for AMD64 has not been tested. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: make the native atomic operations implementation for AMD64 work, fix libatomic_ops, or upgrade your GCC."
/* Adapted from glibc */

/* Atomic integer for the native AMD64 implementation. */
typedef struct pa_atomic {
    volatile int value; /* NOTE(review): field reconstructed from garbled source — confirm */
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }
120 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
124 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
128 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
131 __asm
__volatile ( "lock; xaddl %0, %1"
132 : "=r" ( result
), "=m" ( a
-> value
)
133 : "0" ( i
), "m" ( a
-> value
));
138 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
139 return pa_atomic_add ( a
, - i
);
142 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
143 return pa_atomic_add ( a
, 1 );
146 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
147 return pa_atomic_sub ( a
, 1 );
150 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
153 __asm__
__volatile__ ( "lock; cmpxchgl %2, %1"
154 : "=a" ( result
), "=m" ( a
-> value
)
155 : "r" ( new_i
), "m" ( a
-> value
), "0" ( old_i
));
157 return result
== oldval
;
/* Atomic pointer, stored as an unsigned long (same width as void* on
 * AMD64). Access only through the pa_atomic_ptr_* functions. */
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
166 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
167 return ( void *) a
-> value
;
170 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
171 a
-> value
= ( unsigned long ) p
;
174 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
177 __asm__
__volatile__ ( "lock; cmpxchgq %q2, %1"
178 : "=a" ( result
), "=m" ( a
-> value
)
179 : "r" ( new_p
), "m" ( a
-> value
), "0" ( old_p
));
184 #elif defined(ATOMIC_ARM_INLINE_ASM)
187 These should only be enabled if we have ARMv6 or better.
/* Atomic integer for the ARM (ARMv6+) inline-asm implementation. */
typedef struct pa_atomic {
    volatile int value; /* NOTE(review): field reconstructed from garbled source — confirm */
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }
/* Full data memory barrier via the CP15 "dmb" coprocessor operation.
 * Compiled out when ATOMIC_ARM_MEMORY_BARRIER_ENABLED is undefined
 * (uniprocessor builds). FIX: added a "memory" clobber — without it
 * the compiler is free to reorder memory accesses across the barrier,
 * defeating its purpose. */
static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    __asm__ __volatile__ ("mcr p15, 0, r0, c7, c10, 5 @ dmb" : : : "memory");
#endif
}
202 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
207 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
212 /* Returns the previously set value */
213 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
214 unsigned long not_exclusive
;
215 int new_val
, old_val
;
219 asm volatile ( "ldrex %0, [%3] \n "
221 "strex %1, %2, [%3] \n "
222 : "=&r" ( old_val
), "=&r" ( not_exclusive
), "=&r" ( new_val
)
223 : "r" (& a
-> value
), "Ir" ( i
)
225 } while ( not_exclusive
);
231 /* Returns the previously set value */
232 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
233 unsigned long not_exclusive
;
234 int new_val
, old_val
;
238 asm volatile ( "ldrex %0, [%3] \n "
240 "strex %1, %2, [%3] \n "
241 : "=&r" ( old_val
), "=&r" ( not_exclusive
), "=&r" ( new_val
)
242 : "r" (& a
-> value
), "Ir" ( i
)
244 } while ( not_exclusive
);
250 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
251 return pa_atomic_add ( a
, 1 );
254 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
255 return pa_atomic_sub ( a
, 1 );
258 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
259 unsigned long not_equal
, not_exclusive
;
263 asm volatile ( "ldrex %0, [%2] \n "
266 "strexeq %0, %4, [%2] \n "
267 : "=&r" ( not_exclusive
), "=&r" ( not_equal
)
268 : "r" (& a
-> value
), "Ir" ( old_i
), "r" ( new_i
)
270 } while ( not_exclusive
&& ! not_equal
);
/* Atomic pointer, stored as an unsigned long (same width as void* on
 * 32-bit ARM). Access only through the pa_atomic_ptr_* functions. */
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }
282 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
284 return ( void *) a
-> value
;
287 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
288 a
-> value
= ( unsigned long ) p
;
292 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
293 unsigned long not_equal
, not_exclusive
;
297 asm volatile ( "ldrex %0, [%2] \n "
300 "strexeq %0, %4, [%2] \n "
301 : "=&r" ( not_exclusive
), "=&r" ( not_equal
)
302 : "r" (& a
-> value
), "Ir" ( old_p
), "r" ( new_p
)
304 } while ( not_exclusive
&& ! not_equal
);
310 #elif defined(ATOMIC_ARM_LINUX_HELPERS)
312 /* See file arch/arm/kernel/entry-armv.S in your kernel sources for more
313 information about these functions. The arm kernel helper functions first
315 Pass the --disable-atomic-arm-linux-helpers flag to configure if you prefer
316 the inline asm implementation or you have an obsolete Linux kernel.
/* ARM Linux kernel user helper: data memory barrier, located at the
 * fixed address 0xffff0fa0 in the vector page mapped by the kernel. */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
/* Memory barrier via the kernel helper.
 * NOTE(review): the body and #endif were missing from the garbled
 * source; reconstructed. The visible text reads #ifndef (barrier only
 * when ATOMIC_ARM_MEMORY_BARRIER_ENABLED is NOT defined), which looks
 * inverted — confirm the polarity against upstream. */
static inline void pa_memory_barrier(void) {
#ifndef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    __kernel_dmb();
#endif
}
/* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if
 * needed). Kernel user helper at fixed address 0xffff0fc0: returns 0
 * when *ptr was equal to oldval and was replaced by newval. */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* Same helper, retyped for unsigned long operands — this is just to
 * get rid of all warnings when swapping pointers. */
typedef int (__kernel_cmpxchg_u_t)(unsigned long oldval, unsigned long newval, volatile unsigned long *ptr);
#define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)
/* Atomic integer for the ARM Linux kernel-helper implementation. */
typedef struct pa_atomic {
    volatile int value; /* NOTE(review): field reconstructed from garbled source — confirm */
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }
342 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
347 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
352 /* Returns the previously set value */
353 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
357 } while ( __kernel_cmpxchg ( old_val
, old_val
+ i
, & a
-> value
));
361 /* Returns the previously set value */
362 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
366 } while ( __kernel_cmpxchg ( old_val
, old_val
- i
, & a
-> value
));
370 /* Returns the previously set value */
371 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
372 return pa_atomic_add ( a
, 1 );
375 /* Returns the previously set value */
376 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
377 return pa_atomic_sub ( a
, 1 );
380 /* Returns non-zero when the operation was successful. */
381 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
384 failed
= !! __kernel_cmpxchg ( old_i
, new_i
, & a
-> value
);
385 } while ( failed
&& a
-> value
== old_i
);
/* Atomic pointer, stored as an unsigned long. Access only through
 * the pa_atomic_ptr_* functions. */
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
395 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
397 return ( void *) a
-> value
;
400 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
401 a
-> value
= ( unsigned long ) p
;
405 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
408 failed
= !! __kernel_cmpxchg_u (( unsigned long ) old_p
, ( unsigned long ) new_p
, & a
-> value
);
409 } while ( failed
&& a
-> value
== ( unsigned long ) old_p
);
415 /* libatomic_ops based implementation */
417 #include <atomic_ops.h>
419 typedef struct pa_atomic
{
423 #define PA_ATOMIC_INIT(v) { .value = (v) }
425 static inline int pa_atomic_load ( const pa_atomic_t
* a
) {
426 return ( int ) AO_load_full (( AO_t
*) & a
-> value
);
429 static inline void pa_atomic_store ( pa_atomic_t
* a
, int i
) {
430 AO_store_full (& a
-> value
, ( AO_t
) i
);
433 static inline int pa_atomic_add ( pa_atomic_t
* a
, int i
) {
434 return AO_fetch_and_add_full (& a
-> value
, ( AO_t
) i
);
437 static inline int pa_atomic_sub ( pa_atomic_t
* a
, int i
) {
438 return AO_fetch_and_add_full (& a
-> value
, ( AO_t
) - i
);
441 static inline int pa_atomic_inc ( pa_atomic_t
* a
) {
442 return AO_fetch_and_add1_full (& a
-> value
);
445 static inline int pa_atomic_dec ( pa_atomic_t
* a
) {
446 return AO_fetch_and_sub1_full (& a
-> value
);
449 static inline int pa_atomic_cmpxchg ( pa_atomic_t
* a
, int old_i
, int new_i
) {
450 return AO_compare_and_swap_full (& a
-> value
, old_i
, new_i
);
453 typedef struct pa_atomic_ptr
{
457 #define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }
459 static inline void * pa_atomic_ptr_load ( const pa_atomic_ptr_t
* a
) {
460 return ( void *) AO_load_full (( AO_t
*) & a
-> value
);
463 static inline void pa_atomic_ptr_store ( pa_atomic_ptr_t
* a
, void * p
) {
464 AO_store_full (& a
-> value
, ( AO_t
) p
);
467 static inline int pa_atomic_ptr_cmpxchg ( pa_atomic_ptr_t
* a
, void * old_p
, void * new_p
) {
468 return AO_compare_and_swap_full (& a
-> value
, ( AO_t
) old_p
, ( AO_t
) new_p
);