PulseAudio is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2 of the
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
PulseAudio is distributed in the hope that it will be useful, but
return pa_atomic_sub(a, 1);
}
-/* Returns TRUE when the operation was successful. */
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+/* Returns true when the operation was successful. */
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
}
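/* A hypothetical caller (not part of this header) would typically wrap
 * pa_atomic_cmpxchg() in a retry loop, re-reading the current value
 * until the swap succeeds, e.g. to double a counter atomically:
 *
 *     pa_atomic_t v;   // hypothetical counter
 *     int old;
 *     do {
 *         old = pa_atomic_load(&v);
 *     } while (!pa_atomic_cmpxchg(&v, old, old * 2));
 */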
__sync_synchronize();
}
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
return __sync_bool_compare_and_swap(&a->value, (long) old_p, (long) new_p);
}
+#elif defined(__NetBSD__) && defined(HAVE_SYS_ATOMIC_H)
+
+/* NetBSD 5.0+ atomic_ops(3) implementation */
+
+#include <sys/atomic.h>
+
+typedef struct pa_atomic {
+ volatile unsigned int value;
+} pa_atomic_t;
+
+#define PA_ATOMIC_INIT(v) { .value = (unsigned int) (v) }
+
+static inline int pa_atomic_load(const pa_atomic_t *a) {
+ membar_sync();
+ return (int) a->value;
+}
+
+static inline void pa_atomic_store(pa_atomic_t *a, int i) {
+ a->value = (unsigned int) i;
+ membar_sync();
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_add(pa_atomic_t *a, int i) {
+ int nv = (int) atomic_add_int_nv(&a->value, i);
+ return nv - i;
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
+ int nv = (int) atomic_add_int_nv(&a->value, -i);
+ return nv + i;
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_inc(pa_atomic_t *a) {
+ int nv = (int) atomic_inc_uint_nv(&a->value);
+ return nv - 1;
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_dec(pa_atomic_t *a) {
+ int nv = (int) atomic_dec_uint_nv(&a->value);
+ return nv + 1;
+}
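+/* A sketch of hypothetical usage (not part of this header): because
+ * inc/dec return the *previous* value, a reference count can detect
+ * the final release without a separate load:
+ *
+ *     static pa_atomic_t refcnt = PA_ATOMIC_INIT(1);
+ *
+ *     pa_atomic_inc(&refcnt);              // take a reference
+ *     if (pa_atomic_dec(&refcnt) == 1)     // previous value 1 => now 0
+ *         free_the_object();               // hypothetical cleanup
+ */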
+
+/* Returns true when the operation was successful. */
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+ unsigned int r = atomic_cas_uint(&a->value, (unsigned int) old_i, (unsigned int) new_i);
+ return (int) r == old_i;
+}
+
+typedef struct pa_atomic_ptr {
+ volatile void *value;
+} pa_atomic_ptr_t;
+
+#define PA_ATOMIC_PTR_INIT(v) { .value = (v) }
+
+static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
+ membar_sync();
+ return (void *) a->value;
+}
+
+static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
+ a->value = p;
+ membar_sync();
+}
+
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+ void *r = atomic_cas_ptr(&a->value, old_p, new_p);
+ return r == old_p;
+}
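+/* A sketch of hypothetical usage (not part of this header):
+ * pa_atomic_ptr_cmpxchg() supports one-time lock-free publication of a
+ * lazily created object; create_object()/destroy_object() are assumed:
+ *
+ *     static pa_atomic_ptr_t cache = PA_ATOMIC_PTR_INIT(NULL);
+ *
+ *     void *obj = pa_atomic_ptr_load(&cache);
+ *     if (!obj) {
+ *         void *fresh = create_object();
+ *         if (pa_atomic_ptr_cmpxchg(&cache, NULL, fresh))
+ *             obj = fresh;                 // we won the race
+ *         else {
+ *             destroy_object(fresh);       // somebody else won
+ *             obj = pa_atomic_ptr_load(&cache);
+ *         }
+ *     }
+ */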
+
+#elif defined(__FreeBSD__)
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <machine/atomic.h>
+
+#if __FreeBSD_version < 600000
+#if defined(__i386__) || defined(__amd64__)
+#if defined(__amd64__)
+#define atomic_load_acq_64 atomic_load_acq_long
+#endif
+static inline u_int atomic_fetchadd_int(volatile u_int *p, u_int v) {
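+    /* xaddl atomically adds v to *p and leaves the old value of *p in
+     * %0 (i.e. in v); MPLOCKED expands to the lock prefix on SMP
+     * kernels, making the exchange-and-add atomic across CPUs. */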
+ __asm __volatile(
+ " " __XSTRING(MPLOCKED) " "
+ " xaddl %0, %1 ; "
+ "# atomic_fetchadd_int"
+ : "+r" (v),
+ "=m" (*p)
+ : "m" (*p));
+
+ return (v);
+}
+#elif defined(__sparc64__)
+#define atomic_load_acq_64 atomic_load_acq_long
+#define atomic_fetchadd_int atomic_add_int
+#elif defined(__ia64__)
+#define atomic_load_acq_64 atomic_load_acq_long
+static inline uint32_t
+atomic_fetchadd_int(volatile uint32_t *p, uint32_t v) {
+ uint32_t value;
+
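+    /* No native fetch-and-add on this target; emulate it with a
+     * compare-and-set retry loop. */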
+ do {
+ value = *p;
+ } while (!atomic_cmpset_32(p, value, value + v));
+ return (value);
+}
+#endif
+#endif
+
+typedef struct pa_atomic {
+ volatile unsigned long value;
+} pa_atomic_t;
+
+#define PA_ATOMIC_INIT(v) { .value = (v) }
+
+static inline int pa_atomic_load(const pa_atomic_t *a) {
+ return (int) atomic_load_acq_int((unsigned int *) &a->value);
+}
+
+static inline void pa_atomic_store(pa_atomic_t *a, int i) {
+ atomic_store_rel_int((unsigned int *) &a->value, i);
+}
+
+static inline int pa_atomic_add(pa_atomic_t *a, int i) {
+ return atomic_fetchadd_int((unsigned int *) &a->value, i);
+}
+
+static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
+ return atomic_fetchadd_int((unsigned int *) &a->value, -(i));
+}
+
+static inline int pa_atomic_inc(pa_atomic_t *a) {
+ return atomic_fetchadd_int((unsigned int *) &a->value, 1);
+}
+
+static inline int pa_atomic_dec(pa_atomic_t *a) {
+ return atomic_fetchadd_int((unsigned int *) &a->value, -1);
+}
+
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+ return atomic_cmpset_int((unsigned int *) &a->value, old_i, new_i);
+}
+
+typedef struct pa_atomic_ptr {
+ volatile unsigned long value;
+} pa_atomic_ptr_t;
+
+#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
+
+static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
+#ifdef atomic_load_acq_64
+ return (void*) atomic_load_acq_ptr((unsigned long *) &a->value);
+#else
+ return (void*) atomic_load_acq_ptr((unsigned int *) &a->value);
+#endif
+}
+
+static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
+#ifdef atomic_load_acq_64
+ atomic_store_rel_ptr(&a->value, (unsigned long) p);
+#else
+ atomic_store_rel_ptr((unsigned int *) &a->value, (unsigned int) p);
+#endif
+}
+
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+#ifdef atomic_load_acq_64
+ return atomic_cmpset_ptr(&a->value, (unsigned long) old_p, (unsigned long) new_p);
+#else
+ return atomic_cmpset_ptr((unsigned int *) &a->value, (unsigned int) old_p, (unsigned int) new_p);
+#endif
+}
+
#elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))
#warning "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."
-/* Addapted from glibc */
+/* Adapted from glibc */
typedef struct pa_atomic {
volatile int value;
return pa_atomic_sub(a, 1);
}
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
int result;
__asm__ __volatile__ ("lock; cmpxchgl %2, %1"
a->value = (unsigned long) p;
}
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
void *result;
__asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
return pa_atomic_sub(a, 1);
}
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
unsigned long not_equal, not_exclusive;
pa_memory_barrier();
pa_memory_barrier();
}
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
unsigned long not_equal, not_exclusive;
pa_memory_barrier();
/* See file arch/arm/kernel/entry-armv.S in your kernel sources for more
information about these functions. The arm kernel helper functions first
appeared in 2.6.16.
- Apply --disable-atomic-arm-linux-helpers flag to confugure if you prefere
+ Pass the --disable-atomic-arm-linux-helpers flag to configure if you prefer the
inline asm implementation or you have an obsolete Linux kernel.
*/
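/* For reference, a sketch of how the kernel helper is usually reached
   (assumed here, not defined in this excerpt; the typedef name is
   hypothetical): the kuser helper page exposes __kernel_cmpxchg at a
   fixed address, and it returns 0 when *ptr was updated:

       typedef int (pa_kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
       #define __kernel_cmpxchg (*(pa_kuser_cmpxchg_t *) 0xffff0fc0)
 */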
/* Memory barrier */
return pa_atomic_sub(a, 1);
}
-/* Returns TRUE when the operation was successful. */
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
- pa_bool_t failed;
+/* Returns true when the operation was successful. */
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+ bool failed;
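+    /* The kernel helper may fail spuriously (e.g. if the thread is
+     * preempted mid-operation), so retry as long as the value still
+     * matches old_i. */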
do {
failed = !!__kernel_cmpxchg(old_i, new_i, &a->value);
} while(failed && a->value == old_i);
pa_memory_barrier();
}
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
- pa_bool_t failed;
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+ bool failed;
do {
failed = !!__kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
} while(failed && a->value == (unsigned long) old_p);
return (int) AO_fetch_and_sub1_full(&a->value);
}
-static inline pa_bool_t pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
return AO_compare_and_swap_full(&a->value, (unsigned long) old_i, (unsigned long) new_i);
}
AO_store_full(&a->value, (AO_t) p);
}
-static inline pa_bool_t pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);
}