X-Git-Url: https://code.delx.au/pulseaudio/blobdiff_plain/ff38eaf67773e0039befb95c0f9ad91e7a06fc3f..9362bdc8a1d5bd1ce213c517e1999644728193a2:/src/pulsecore/atomic.h

diff --git a/src/pulsecore/atomic.h b/src/pulsecore/atomic.h
index 119c445b..419783d6 100644
--- a/src/pulsecore/atomic.h
+++ b/src/pulsecore/atomic.h
@@ -82,8 +82,8 @@ static inline int pa_atomic_dec(pa_atomic_t *a) {
     return pa_atomic_sub(a, 1);
 }
 
-/* Returns TRUE when the operation was successful. */
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+/* Returns true when the operation was successful. */
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
     return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
 }
 
@@ -103,7 +103,7 @@ static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
     __sync_synchronize();
 }
 
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
     return __sync_bool_compare_and_swap(&a->value, (long) old_p, (long) new_p);
 }
 
@@ -153,8 +153,8 @@ static inline int pa_atomic_dec(pa_atomic_t *a) {
     return nv + 1;
 }
 
-/* Returns TRUE when the operation was successful. */
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+/* Returns true when the operation was successful. */
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
     unsigned int r = atomic_cas_uint(&a->value, (unsigned int) old_i, (unsigned int) new_i);
     return (int) r == old_i;
 }
@@ -175,16 +175,120 @@ static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
     membar_sync();
 }
 
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
     void *r = atomic_cas_ptr(&a->value, old_p, new_p);
     return r == old_p;
 }
 
+#elif defined(__FreeBSD__)
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <machine/atomic.h>
+
+#if __FreeBSD_version < 600000
+#if defined(__i386__) || defined(__amd64__)
+#if defined(__amd64__)
+#define atomic_load_acq_64 atomic_load_acq_long
+#endif
+static inline u_int atomic_fetchadd_int(volatile u_int *p, u_int v) {
+    __asm __volatile(
+            "   " __XSTRING(MPLOCKED) "         "
+            "   xaddl   %0, %1 ;        "
+            "# atomic_fetchadd_int"
+            : "+r" (v),
+              "=m" (*p)
+            : "m" (*p));
+
+    return (v);
+}
+#elif defined(__sparc64__)
+#define atomic_load_acq_64 atomic_load_acq_long
+#define atomic_fetchadd_int atomic_add_int
+#elif defined(__ia64__)
+#define atomic_load_acq_64 atomic_load_acq_long
+static inline uint32_t
+atomic_fetchadd_int(volatile uint32_t *p, uint32_t v) {
+    uint32_t value;
+
+    do {
+        value = *p;
+    } while (!atomic_cmpset_32(p, value, value + v));
+    return (value);
+}
+#endif
+#endif
+
+typedef struct pa_atomic {
+    volatile unsigned long value;
+} pa_atomic_t;
+
+#define PA_ATOMIC_INIT(v) { .value = (v) }
+
+static inline int pa_atomic_load(const pa_atomic_t *a) {
+    return (int) atomic_load_acq_int((unsigned int *) &a->value);
+}
+
+static inline void pa_atomic_store(pa_atomic_t *a, int i) {
+    atomic_store_rel_int((unsigned int *) &a->value, i);
+}
+
+static inline int pa_atomic_add(pa_atomic_t *a, int i) {
+    return atomic_fetchadd_int((unsigned int *) &a->value, i);
+}
+
+static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
+    return atomic_fetchadd_int((unsigned int *) &a->value, -(i));
+}
+
+static inline int pa_atomic_inc(pa_atomic_t *a) {
+    return atomic_fetchadd_int((unsigned int *) &a->value, 1);
+}
+
+static inline int pa_atomic_dec(pa_atomic_t *a) {
+    return atomic_fetchadd_int((unsigned int *) &a->value, -1);
+}
+
+static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+    return atomic_cmpset_int((unsigned int *) &a->value, old_i, new_i);
+}
+
+typedef struct pa_atomic_ptr {
+    volatile unsigned long value;
+} pa_atomic_ptr_t;
+
+#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
+
+static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
+#ifdef atomic_load_acq_64
+    return (void*) atomic_load_acq_ptr((unsigned long *) &a->value);
+#else
+    return (void*) atomic_load_acq_ptr((unsigned int *) &a->value);
+#endif
+}
+
+static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
+#ifdef atomic_load_acq_64
+    atomic_store_rel_ptr(&a->value, (unsigned long) p);
+#else
+    atomic_store_rel_ptr((unsigned int *) &a->value, (unsigned int) p);
+#endif
+}
+
+static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+#ifdef atomic_load_acq_64
+    return atomic_cmpset_ptr(&a->value, (unsigned long) old_p, (unsigned long) new_p);
+#else
+    return atomic_cmpset_ptr((unsigned int *) &a->value, (unsigned int) old_p, (unsigned int) new_p);
+#endif
+}
+
 #elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))
 
 #warn "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."
 
-/* Addapted from glibc */
+/* Adapted from glibc */
 
 typedef struct pa_atomic {
     volatile int value;
@@ -222,7 +326,7 @@ static inline int pa_atomic_dec(pa_atomic_t *a) {
     return pa_atomic_sub(a, 1);
 }
 
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
     int result;
 
     __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
@@ -246,7 +350,7 @@ static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
     a->value = (unsigned long) p;
 }
 
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
     void *result;
 
     __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
@@ -330,7 +434,7 @@ static inline int pa_atomic_dec(pa_atomic_t *a) {
     return pa_atomic_sub(a, 1);
 }
 
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
     unsigned long not_equal, not_exclusive;
 
     pa_memory_barrier();
@@ -364,7 +468,7 @@ static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
     pa_memory_barrier();
 }
 
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
     unsigned long not_equal, not_exclusive;
 
     pa_memory_barrier();
@@ -387,7 +491,7 @@ static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, v
 /* See file arch/arm/kernel/entry-armv.S in your kernel sources for more information
    about these functions. The arm kernel helper functions first appeared in 2.6.16.
-   Apply --disable-atomic-arm-linux-helpers flag to confugure if you prefere
+   Apply --disable-atomic-arm-linux-helpers flag to configure if you prefer
    inline asm implementation or you have an obsolete Linux kernel.
 */
 
 /* Memory barrier */
@@ -452,9 +556,9 @@ static inline int pa_atomic_dec(pa_atomic_t *a) {
     return pa_atomic_sub(a, 1);
 }
 
-/* Returns TRUE when the operation was successful. */
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
-    pa_bool_t failed;
+/* Returns true when the operation was successful. */
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+    bool failed;
     do {
         failed = !!__kernel_cmpxchg(old_i, new_i, &a->value);
     } while(failed && a->value == old_i);
@@ -477,8 +581,8 @@ static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
     pa_memory_barrier();
 }
 
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
-    pa_bool_t failed;
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+    bool failed;
     do {
         failed = !!__kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
     } while(failed && a->value == (unsigned long) old_p);
@@ -521,7 +625,7 @@ static inline int pa_atomic_dec(pa_atomic_t *a) {
     return (int) AO_fetch_and_sub1_full(&a->value);
 }
 
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
     return AO_compare_and_swap_full(&a->value, (unsigned long) old_i, (unsigned long) new_i);
 }
 
@@ -539,7 +643,7 @@ static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
     AO_store_full(&a->value, (AO_t) p);
 }
 
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
     return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);
 }
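
Usage note (illustration only, not part of the patch): every backend above implements
the same compare-and-swap contract, namely that the swap happens only if the value
still equals old_i, and the boolean result reports whether it did. A minimal sketch of
the standard CAS retry loop built on the header's own API; atomic_store_max() is a
hypothetical helper invented here for illustration:

static inline void atomic_store_max(pa_atomic_t *a, int i) {
    int cur;

    do {
        cur = pa_atomic_load(a);

        /* Another thread already stored a value at least as large. */
        if (cur >= i)
            return;

        /* Retry if the value changed between the load and the CAS:
         * pa_atomic_cmpxchg() returns true only when *a still held
         * 'cur' and was atomically replaced by 'i'. */
    } while (!pa_atomic_cmpxchg(a, cur, i));
}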
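The arithmetic helpers share a fetch-and-add contract: pa_atomic_add(), pa_atomic_sub(),
pa_atomic_inc() and pa_atomic_dec() return the value the counter held before the
operation (__sync_fetch_and_add() returns the old value, FreeBSD's atomic_fetchadd_int()
does too, and the Solaris branch adjusts with "return nv + 1" to match). That contract is
what makes a lock-free reference count possible. A hedged sketch, assuming <stdbool.h>;
my_refcnt_t and both functions are hypothetical:

typedef struct my_refcnt {
    pa_atomic_t count;
} my_refcnt_t;

static inline void my_ref(my_refcnt_t *r) {
    pa_atomic_inc(&r->count);
}

/* Returns true when the caller dropped the last reference and is
 * therefore responsible for freeing the object: pa_atomic_dec()
 * yields the pre-decrement value, so 1 means this was the final
 * reference. */
static inline bool my_unref(my_refcnt_t *r) {
    return pa_atomic_dec(&r->count) == 1;
}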
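The pointer variants support the same retry pattern for lock-free data structures. A
sketch of a Treiber-style stack push using pa_atomic_ptr_load() and
pa_atomic_ptr_cmpxchg(); stack_item_t and stack_push() are hypothetical, not part of
this header:

typedef struct stack_item {
    struct stack_item *next;
} stack_item_t;

static inline void stack_push(pa_atomic_ptr_t *head, stack_item_t *i) {
    void *old_head;

    do {
        old_head = pa_atomic_ptr_load(head);
        i->next = old_head;

        /* Publish the new head only if no other thread replaced it
         * between the load and this compare-and-swap. */
    } while (!pa_atomic_ptr_cmpxchg(head, old_head, i));
}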