-/* $Id$ */
-
/***
This file is part of PulseAudio.
- Copyright 2006 Lennart Poettering
+ Copyright 2006-2008 Lennart Poettering
PulseAudio is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
#include <pulsecore/thread.h>
#include <pulsecore/macro.h>
#include <pulsecore/core-util.h>
+#include <pulsecore/llist.h>
+#include <pulsecore/flist.h>
#include <pulse/xmalloc.h>
#include "asyncq.h"
+#include "fdsem.h"
-#define ASYNCQ_SIZE 128
+#define ASYNCQ_SIZE 256
-/* For debugging purposes we can define _Y to put and extra thread
+/* For debugging purposes we can define _Y to put an extra thread
* yield between each operation. */
+/* #define PROFILE */
+
#ifdef PROFILE
#define _Y pa_thread_yield()
#else
#define _Y do { } while(0)
#endif
+/* One element parked in the process-local overflow list, used by
+ * pa_asyncq_post() when the lock-free ring is full. Nodes are recycled
+ * through the static flist "localq" below to avoid malloc in the fast
+ * path. */
+struct localq {
+ void *data;
+ PA_LLIST_FIELDS(struct localq);
+};
+
struct pa_asyncq {
unsigned size;
unsigned read_idx;
unsigned write_idx;
- pa_atomic_t read_waiting; /* a bool */
- pa_atomic_t write_waiting; /* a bool */
- int read_fds[2], write_fds[2];
- pa_atomic_t in_read_fifo, in_write_fifo;
+ /* fd-backed semaphores (see fdsem.h): the reader posts read_fdsem
+ * after freeing a cell (wakes a blocked writer); the writer posts
+ * write_fdsem after filling a cell (wakes a blocked reader). */
+ pa_fdsem *read_fdsem, *write_fdsem;
+
+ /* Writer-local overflow list: pa_asyncq_post() PREPENDs here, and
+ * flush_postq() drains from last_localq backwards via ->prev, so
+ * items still reach the ring in FIFO order. */
+ PA_LLIST_HEAD(struct localq, localq);
+ struct localq *last_localq;
+ /* TRUE while write_before_poll has armed read_fdsem for poll();
+ * cleared by write_after_poll. */
+ pa_bool_t waiting_for_post;
};
-#define PA_ASYNCQ_CELLS(x) ((pa_atomic_ptr_t*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_asyncq))))
+PA_STATIC_FLIST_DECLARE(localq, 0, pa_xfree);
-static int is_power_of_two(unsigned size) {
- return !(size & (size - 1));
-}
+#define PA_ASYNCQ_CELLS(x) ((pa_atomic_ptr_t*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_asyncq))))
static int reduce(pa_asyncq *l, int value) {
return value & (unsigned) (l->size - 1);
if (!size)
size = ASYNCQ_SIZE;
- pa_assert(is_power_of_two(size));
+ pa_assert(pa_is_power_of_two(size));
l = pa_xmalloc0(PA_ALIGN(sizeof(pa_asyncq)) + (sizeof(pa_atomic_ptr_t) * size));
l->size = size;
- pa_atomic_store(&l->read_waiting, 0);
- pa_atomic_store(&l->write_waiting, 0);
- pa_atomic_store(&l->in_read_fifo, 0);
- pa_atomic_store(&l->in_write_fifo, 0);
- if (pipe(l->read_fds) < 0) {
+ PA_LLIST_HEAD_INIT(struct localq, l->localq);
+ l->last_localq = NULL;
+ l->waiting_for_post = FALSE;
+
+ if (!(l->read_fdsem = pa_fdsem_new())) {
pa_xfree(l);
return NULL;
}
- if (pipe(l->write_fds) < 0) {
- pa_close(l->read_fds[0]);
- pa_close(l->read_fds[1]);
+ if (!(l->write_fdsem = pa_fdsem_new())) {
+ pa_fdsem_free(l->read_fdsem);
pa_xfree(l);
return NULL;
}
- pa_make_nonblock_fd(l->read_fds[1]);
- pa_make_nonblock_fd(l->write_fds[1]);
-
return l;
}
void pa_asyncq_free(pa_asyncq *l, pa_free_cb_t free_cb) {
+ struct localq *q;
pa_assert(l);
if (free_cb) {
free_cb(p);
}
- pa_close(l->read_fds[0]);
- pa_close(l->read_fds[1]);
- pa_close(l->write_fds[0]);
- pa_close(l->write_fds[1]);
+ while ((q = l->localq)) {
+ if (free_cb)
+ free_cb(q->data);
+ PA_LLIST_REMOVE(struct localq, l->localq, q);
+
+ if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
+ pa_xfree(q);
+ }
+
+ pa_fdsem_free(l->read_fdsem);
+ pa_fdsem_free(l->write_fdsem);
pa_xfree(l);
}
-int pa_asyncq_push(pa_asyncq*l, void *p, int wait) {
+static int push(pa_asyncq*l, void *p, pa_bool_t wait) {
int idx;
pa_atomic_ptr_t *cells;
if (!pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {
- /* Let's empty the FIFO from old notifications, before we return */
-
- while (pa_atomic_load(&l->in_write_fifo) > 0) {
- ssize_t r;
- int x[20];
-
- if ((r = read(l->write_fds[0], x, sizeof(x))) < 0) {
-
- if (errno == EINTR)
- continue;
-
- return -1;
- }
+ if (!wait)
+ return -1;
- pa_assert(r > 0);
-
- if (pa_atomic_sub(&l->in_write_fifo, r) <= r)
- break;
+/* pa_log("sleeping on push"); */
- }
-
- /* Now let's make sure that we didn't lose any events */
- if (!pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {
-
- if (!wait)
- return -1;
-
- /* Let's wait for changes. */
-
- _Y;
-
- pa_assert_se(pa_atomic_cmpxchg(&l->write_waiting, 0, 1));
-
- for (;;) {
- char x[20];
- ssize_t r;
-
- _Y;
-
- if (pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p))
- break;
-
- _Y;
-
- if ((r = read(l->write_fds[0], x, sizeof(x))) < 0) {
-
- if (errno == EINTR)
- continue;
-
- pa_assert_se(pa_atomic_cmpxchg(&l->write_waiting, 1, 0));
- return -1;
- }
-
- pa_assert(r > 0);
- pa_atomic_sub(&l->in_write_fifo, r);
- }
-
- _Y;
-
- pa_assert_se(pa_atomic_cmpxchg(&l->write_waiting, 1, 0));
- }
+ do {
+ pa_fdsem_wait(l->read_fdsem);
+ } while (!pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p));
}
_Y;
l->write_idx++;
- if (pa_atomic_load(&l->read_waiting) > 0) {
- char x = 'x';
- _Y;
- if (write(l->read_fds[1], &x, sizeof(x)) > 0) {
- pa_atomic_inc(&l->in_read_fifo);
-/* pa_log("increasing %p by 1", l); */
- }
- }
+ pa_fdsem_post(l->write_fdsem);
return 0;
}
-void* pa_asyncq_pop(pa_asyncq*l, int wait) {
+/* Try to move entries parked in the local overflow list into the
+ * lock-free ring, oldest first (walking from last_localq via ->prev).
+ * Returns TRUE when the overflow list is fully drained, FALSE as soon
+ * as the ring is full again (remaining entries stay parked). Drained
+ * nodes are recycled into the static flist, or freed if it is full. */
+static pa_bool_t flush_postq(pa_asyncq *l) {
+ struct localq *q;
+
+ pa_assert(l);
+
+ while ((q = l->last_localq)) {
+
+ if (push(l, q->data, FALSE) < 0)
+ return FALSE;
+
+ l->last_localq = q->prev;
+
+ PA_LLIST_REMOVE(struct localq, l->localq, q);
+
+ if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
+ pa_xfree(q);
+ }
+
+ return TRUE;
+}
+
+/* Enqueue p for the reader. Any items parked by pa_asyncq_post() are
+ * flushed first so FIFO ordering is preserved. Returns 0 on success,
+ * -1 if the ring is full (and, for the overflow-flush step, even when
+ * wait is TRUE).
+ * NOTE(review): when the overflow list cannot be drained this returns
+ * -1 without blocking, regardless of 'wait' — confirm that callers
+ * passing wait=TRUE expect this. */
+int pa_asyncq_push(pa_asyncq*l, void *p, pa_bool_t wait) {
+ pa_assert(l);
+
+ if (!flush_postq(l))
+ return -1;
+
+ return push(l, p, wait);
+}
+
+/* Never-failing, never-blocking enqueue for the writer thread: if the
+ * ring is full the item is parked on the writer-local overflow list
+ * and pushed later (by pa_asyncq_push() or write_before_poll()).
+ * Note p must be non-NULL — NULL is the "empty cell" sentinel. */
+void pa_asyncq_post(pa_asyncq*l, void *p) {
+ struct localq *q;
+
+ pa_assert(l);
+ pa_assert(p);
+
+ if (pa_asyncq_push(l, p, FALSE) >= 0)
+ return;
+
+ /* OK, we couldn't push anything in the queue. So let's queue it
+ * locally and push it later */
+
+ pa_log("q overrun, queuing locally");
+
+ /* Reuse a node from the static flist if possible, else allocate. */
+ if (!(q = pa_flist_pop(PA_STATIC_FLIST_GET(localq))))
+ q = pa_xnew(struct localq, 1);
+
+ q->data = p;
+ PA_LLIST_PREPEND(struct localq, l->localq, q);
+
+ /* First parked item becomes the tail that flush_postq drains from. */
+ if (!l->last_localq)
+ l->last_localq = q;
+
+ return;
+}
+
+void* pa_asyncq_pop(pa_asyncq*l, pa_bool_t wait) {
int idx;
void *ret;
pa_atomic_ptr_t *cells;
pa_assert(l);
-
+
cells = PA_ASYNCQ_CELLS(l);
_Y;
if (!(ret = pa_atomic_ptr_load(&cells[idx]))) {
-/* pa_log("pop failed wait=%i", wait); */
-
- /* Hmm, nothing, here, so let's drop all queued events. */
- while (pa_atomic_load(&l->in_read_fifo) > 0) {
- ssize_t r;
- int x[20];
-
- if ((r = read(l->read_fds[0], x, sizeof(x))) < 0) {
-
- if (errno == EINTR)
- continue;
-
- return NULL;
- }
-
- pa_assert(r > 0);
-
-/* pa_log("decreasing %p by %i", l, r); */
-
- if (pa_atomic_sub(&l->in_read_fifo, r) <= r)
- break;
- }
+ if (!wait)
+ return NULL;
- /* Now let's make sure that we didn't lose any events */
- if (!(ret = pa_atomic_ptr_load(&cells[idx]))) {
-
- if (!wait)
- return NULL;
-
- /* Let's wait for changes. */
-
- _Y;
-
- pa_assert_se(pa_atomic_cmpxchg(&l->read_waiting, 0, 1));
-
- for (;;) {
- char x[20];
- ssize_t r;
-
- _Y;
-
- if ((ret = pa_atomic_ptr_load(&cells[idx])))
- break;
-
- _Y;
-
- if ((r = read(l->read_fds[0], x, sizeof(x))) < 0) {
-
- if (errno == EINTR)
- continue;
-
- pa_assert_se(pa_atomic_cmpxchg(&l->read_waiting, 1, 0));
- return NULL;
- }
-
-/* pa_log("decreasing %p by %i", l, r); */
-
- pa_assert(r > 0);
- pa_atomic_sub(&l->in_read_fifo, r);
- }
-
- _Y;
-
- pa_assert_se(pa_atomic_cmpxchg(&l->read_waiting, 1, 0));
- }
+/* pa_log("sleeping on pop"); */
+
+ do {
+ pa_fdsem_wait(l->write_fdsem);
+ } while (!(ret = pa_atomic_ptr_load(&cells[idx])));
}
pa_assert(ret);
- /* Guaranteed if we only have a single reader */
+ /* Guaranteed to succeed if we only have a single reader */
pa_assert_se(pa_atomic_ptr_cmpxchg(&cells[idx], ret, NULL));
_Y;
l->read_idx++;
- if (pa_atomic_load(&l->write_waiting) > 0) {
- char x = 'x';
- _Y;
- if (write(l->write_fds[1], &x, sizeof(x)) >= 0)
- pa_atomic_inc(&l->in_write_fifo);
- }
+ pa_fdsem_post(l->read_fdsem);
return ret;
}
+/* fd for the READER to poll(): becomes readable when the writer posts
+ * write_fdsem (i.e. new data was pushed). Renamed from
+ * pa_asyncq_get_fd to make the read/write direction explicit. */
-int pa_asyncq_get_fd(pa_asyncq *q) {
+int pa_asyncq_read_fd(pa_asyncq *q) {
pa_assert(q);
- return q->read_fds[0];
+ return pa_fdsem_get(q->write_fdsem);
}
+/* Arm the reader side for poll(): returns -1 if data is already
+ * available (caller should pop instead of polling), 0 once
+ * write_fdsem is armed. The loop re-checks the head cell after each
+ * failed arm attempt so a concurrent push cannot be missed. */
-int pa_asyncq_before_poll(pa_asyncq *l) {
+int pa_asyncq_read_before_poll(pa_asyncq *l) {
int idx;
pa_atomic_ptr_t *cells;
_Y;
idx = reduce(l, l->read_idx);
- if (pa_atomic_ptr_load(&cells[idx]) || pa_atomic_load(&l->in_read_fifo) > 0)
- return -1;
-
- pa_assert_se(pa_atomic_cmpxchg(&l->read_waiting, 0, 1));
+ for (;;) {
+ if (pa_atomic_ptr_load(&cells[idx]))
+ return -1;
- if (pa_atomic_ptr_load(&cells[idx]) || pa_atomic_load(&l->in_read_fifo) > 0) {
- pa_assert_se(pa_atomic_cmpxchg(&l->read_waiting, 1, 0));
- return -1;
+ if (pa_fdsem_before_poll(l->write_fdsem) >= 0)
+ return 0;
}
+ /* NOTE(review): after this patch the statement below is unreachable —
+ * the loop above only exits via return. */
return 0;
}
+/* Disarm the reader-side semaphore after poll() returned. Counterpart
+ * of pa_asyncq_read_before_poll(). */
-void pa_asyncq_after_poll(pa_asyncq *l) {
+void pa_asyncq_read_after_poll(pa_asyncq *l) {
+ pa_assert(l);
+
+ pa_fdsem_after_poll(l->write_fdsem);
+}
+
+/* fd for the WRITER to poll(): becomes readable when the reader posts
+ * read_fdsem (i.e. a ring cell was freed), so parked overflow items
+ * can be flushed. */
+int pa_asyncq_write_fd(pa_asyncq *q) {
+ pa_assert(q);
+
+ return pa_fdsem_get(q->read_fdsem);
+}
+
+/* Arm the writer side for poll(): first try to drain the overflow
+ * list; if the ring is still full, arm read_fdsem so poll() wakes up
+ * when the reader frees a cell, and remember that via
+ * waiting_for_post. If arming fails (semaphore already signalled),
+ * retry the flush. */
+void pa_asyncq_write_before_poll(pa_asyncq *l) {
pa_assert(l);
- pa_assert_se(pa_atomic_cmpxchg(&l->read_waiting, 1, 0));
+ for (;;) {
+
+ if (flush_postq(l))
+ break;
+
+ if (pa_fdsem_before_poll(l->read_fdsem) >= 0) {
+ l->waiting_for_post = TRUE;
+ break;
+ }
+ }
+}
+
+/* Disarm the writer-side semaphore after poll(), but only if
+ * write_before_poll actually armed it (waiting_for_post). */
+void pa_asyncq_write_after_poll(pa_asyncq *l) {
+ pa_assert(l);
+
+ if (l->waiting_for_post) {
+ pa_fdsem_after_poll(l->read_fdsem);
+ l->waiting_for_post = FALSE;
+ }
+}