/* code.delx.au - pulseaudio/blob - src/pulsecore/atomic.h
 * merge 'lennart' branch back into trunk.
 * [pulseaudio] / src / pulsecore / atomic.h */
1 #ifndef foopulseatomichfoo
2 #define foopulseatomichfoo
3
4 /* $Id$ */
5
6 /***
7 This file is part of PulseAudio.
8
9 Copyright 2006 Lennart Poettering
10
11 PulseAudio is free software; you can redistribute it and/or modify
12 it under the terms of the GNU Lesser General Public License as
13 published by the Free Software Foundation; either version 2 of the
14 License, or (at your option) any later version.
15
16 PulseAudio is distributed in the hope that it will be useful, but
17 WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 General Public License for more details.
20
21 You should have received a copy of the GNU Lesser General Public
22 License along with PulseAudio; if not, write to the Free Software
23 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
24 USA.
25 ***/
26
/*
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
 * not guaranteed, however, that sizeof(AO_t) == sizeof(size_t) --
 * although that is very likely.
 *
 * For now we do only full memory barriers. Eventually we might want
 * to support more elaborate memory barriers, in which case we will add
 * suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions. Otherwise we use
 * libatomic_ops.
 */
39
40 #ifndef PACKAGE
41 #error "Please include config.h before including this file!"
42 #endif
43
44 #ifdef HAVE_ATOMIC_BUILTINS
45
46 /* __sync based implementation */
47
/* Atomic integer type for the gcc __sync builtin implementation.
 * 'volatile' keeps the compiler from caching the value in a register;
 * the __sync builtins supply the actual atomicity and the barriers. */
typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

/* Static initializer, e.g.: pa_atomic_t a = PA_ATOMIC_INIT(0); */
#define PA_ATOMIC_INIT(v) { .value = (v) }
53
/* Atomically read the current value of *a.
 * The full barrier issued before the read ensures that writes made by
 * other threads prior to their own barriers are visible here. */
static inline int pa_atomic_load(const pa_atomic_t *a) {
    __sync_synchronize();
    return a->value;
}
58
/* Atomically set *a to 'i'.
 * The full barrier issued after the write ensures the new value is
 * globally visible before the caller proceeds. */
static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    __sync_synchronize();
}
63
64 /* Returns the previously set value */
65 static inline int pa_atomic_add(pa_atomic_t *a, int i) {
66 return __sync_fetch_and_add(&a->value, i);
67 }
68
69 /* Returns the previously set value */
70 static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
71 return __sync_fetch_and_sub(&a->value, i);
72 }
73
74 /* Returns the previously set value */
75 static inline int pa_atomic_inc(pa_atomic_t *a) {
76 return pa_atomic_add(a, 1);
77 }
78
79 /* Returns the previously set value */
80 static inline int pa_atomic_dec(pa_atomic_t *a) {
81 return pa_atomic_sub(a, 1);
82 }
83
84 /* Returns non-zero when the operation was successful. */
85 static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
86 return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
87 }
88
/* Atomic pointer type: the pointer is stored as an unsigned long
 * (relies on sizeof(unsigned long) >= sizeof(void*) on the supported
 * platforms). */
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

/* Static initializer. Cast to unsigned long so the initializer matches
 * the field's type exactly; the previous (long) cast invoked
 * implementation-defined behavior for pointers whose representation
 * exceeds LONG_MAX. */
#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
94
/* Atomically read the stored pointer.
 * The full barrier before the read ensures prior writes by other
 * threads are visible before the pointer value is loaded. */
static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    __sync_synchronize();
    return (void*) a->value;
}
99
/* Atomically store pointer 'p'.
 * The full barrier after the write ensures the new pointer is globally
 * visible before the caller proceeds. */
static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    __sync_synchronize();
}
104
105 static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
106 return __sync_bool_compare_and_swap(&a->value, (long) old_p, (long) new_p);
107 }
108
109 #elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))
110
111 #error "The native atomic operations implementation for AMD64 has not been tested. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: make the native atomic operations implementation for AMD64 work, fix libatomic_ops, or upgrade your GCC."
112
/* Adapted from glibc */
114
/* Atomic integer type for the hand-written AMD64 assembly
 * implementation. 'volatile' forces real memory accesses in the plain
 * load/store functions below. */
typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

/* Static initializer, e.g.: pa_atomic_t a = PA_ATOMIC_INIT(0); */
#define PA_ATOMIC_INIT(v) { .value = (v) }
120
/* Read a->value with no explicit barrier.
 * NOTE(review): this presumably relies on x86-64's strong load
 * ordering plus the 'volatile' qualifier to force an actual memory
 * read -- confirm callers do not need a full barrier here, since the
 * other implementations of this function in this header issue one. */
static inline int pa_atomic_load(const pa_atomic_t *a) {
    return a->value;
}
124
/* Write a->value with no explicit barrier.
 * NOTE(review): presumably relies on x86-64's strong store ordering;
 * the other implementations of this function in this header issue a
 * full barrier after the store -- confirm this is sufficient. */
static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
}
128
/* Atomically add 'i' to a->value and return the previous value.
 * XADD exchanges the register with the memory operand while adding,
 * so 'result' receives the old value; the LOCK prefix makes the
 * read-modify-write atomic.
 * NOTE(review): there is no "memory" clobber, so the compiler may
 * reorder unrelated memory accesses around this asm -- confirm whether
 * callers rely on it as a compiler barrier. */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int result;

    __asm __volatile ("lock; xaddl %0, %1"
                      : "=r" (result), "=m" (a->value)
                      : "0" (i), "m" (a->value));

    return result;
}
138
139 static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
140 return pa_atomic_add(a, -i);
141 }
142
143 static inline int pa_atomic_inc(pa_atomic_t *a) {
144 return pa_atomic_add(a, 1);
145 }
146
147 static inline int pa_atomic_dec(pa_atomic_t *a) {
148 return pa_atomic_sub(a, 1);
149 }
150
151 static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
152 int result;
153
154 __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
155 : "=a" (result), "=m" (a->value)
156 : "r" (new_i), "m" (a->value), "0" (old_i));
157
158 return result == oldval;
159 }
160
/* Atomic pointer type for the AMD64 assembly implementation; the
 * pointer is stored as an unsigned long (same width as void* on
 * AMD64). */
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

/* Static initializer. Cast to unsigned long so the initializer matches
 * the field's type exactly; the previous (long) cast invoked
 * implementation-defined behavior for pointers whose representation
 * exceeds LONG_MAX. */
#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
166
/* Read the stored pointer with no explicit barrier.
 * NOTE(review): presumably relies on x86-64 load ordering plus
 * 'volatile'; the other implementations of this function in this
 * header issue a full barrier first -- confirm. */
static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;
}
170
/* Store pointer 'p' with no explicit barrier.
 * NOTE(review): presumably relies on x86-64 store ordering; the other
 * implementations of this function in this header issue a full
 * barrier after the store -- confirm. */
static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
}
174
175 static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
176 void *result;
177
178 __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
179 : "=a" (result), "=m" (a->value)
180 : "r" (new_p), "m" (a->value), "0" (old_p));
181
182 return result;
183 }
184
185 #else
186
187 /* libatomic_ops based implementation */
188
189 #include <atomic_ops.h>
190
/* Atomic integer type backed by libatomic_ops.
 * AO_t is a pointer-sized unsigned integer and may be wider than int
 * (see the comment at the top of this file); the int accessors below
 * convert accordingly. */
typedef struct pa_atomic {
    volatile AO_t value;
} pa_atomic_t;

/* Static initializer, e.g.: pa_atomic_t a = PA_ATOMIC_INIT(0); */
#define PA_ATOMIC_INIT(v) { .value = (v) }
196
/* Atomically load the value with a full barrier; the result is
 * converted (possibly narrowed) to int.
 * NOTE(review): the (AO_t*) cast strips const/volatile from &a->value
 * to fit AO_load_full()'s prototype -- confirm against the installed
 * atomic_ops.h. */
static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) AO_load_full((AO_t*) &a->value);
}
200
/* Atomically store 'i' (converted to AO_t) with a full barrier. */
static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    AO_store_full(&a->value, (AO_t) i);
}
204
/* Atomically add 'i' with a full barrier; returns the previous value,
 * implicitly converted back to int. */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return AO_fetch_and_add_full(&a->value, (AO_t) i);
}
208
/* Atomically subtract 'i' with a full barrier; returns the previous
 * value. Implemented as an add of (AO_t) -i: the int negation is
 * converted to the unsigned AO_t modulo 2^N, which yields the correct
 * wraparound subtraction. */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return AO_fetch_and_add_full(&a->value, (AO_t) -i);
}
212
/* Atomically increment by one with a full barrier; returns the
 * previous value. */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return AO_fetch_and_add1_full(&a->value);
}
216
/* Atomically decrement by one with a full barrier; returns the
 * previous value. */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return AO_fetch_and_sub1_full(&a->value);
}
220
/* Compare-and-swap: if *a currently holds old_i, set it to new_i.
 * Returns non-zero when the swap was performed. old_i and new_i are
 * implicitly converted to AO_t. */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return AO_compare_and_swap_full(&a->value, old_i, new_i);
}
224
/* Atomic pointer type backed by libatomic_ops. atomic_ops guarantees
 * sizeof(AO_t) == sizeof(void*) (see the comment at the top of this
 * file), so a pointer round-trips through AO_t losslessly. */
typedef struct pa_atomic_ptr {
    volatile AO_t value;
} pa_atomic_ptr_t;

/* Static initializer, e.g.: pa_atomic_ptr_t p = PA_ATOMIC_PTR_INIT(NULL); */
#define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }
230
/* Atomically load the stored pointer with a full barrier.
 * NOTE(review): the (AO_t*) cast strips const/volatile from &a->value
 * to fit AO_load_full()'s prototype -- confirm against the installed
 * atomic_ops.h. */
static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) AO_load_full((AO_t*) &a->value);
}
234
/* Atomically store pointer 'p' with a full barrier. */
static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    AO_store_full(&a->value, (AO_t) p);
}
238
/* Compare-and-swap on a pointer: if *a currently holds old_p, set it
 * to new_p. Returns non-zero when the swap was performed. */
static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);
}
242
243 #endif
244
245 #endif