X-Git-Url: https://code.delx.au/pulseaudio/blobdiff_plain/1a3984cb4c517a0b9e04c82c002c993be9483d93..30a32d35c8b91cafc004961828a7d0acbce74f1f:/src/pulsecore/flist.c

diff --git a/src/pulsecore/flist.c b/src/pulsecore/flist.c
index f166ee33..b110e1e4 100644
--- a/src/pulsecore/flist.c
+++ b/src/pulsecore/flist.c
@@ -1,7 +1,11 @@
 /***
   This file is part of PulseAudio.
 
-  Copyright 2006 Lennart Poettering
+  Copyright 2006-2008 Lennart Poettering
+  Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
+  Copyright (C) 2012 Canonical Ltd.
+
+  Contact: Jyri Sarha
 
   PulseAudio is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as
@@ -27,206 +31,149 @@
 #include 
 #include 
-#include 
 #include 
 #include 
 
 #include "flist.h"
 
-/* Algorithm is not perfect, in a few corner cases it will fail to pop
- * from the flist although it isn't empty, and fail to push into the
- * flist, although it isn't full.
- *
- * We keep a fixed size array of entries, each item is either marked
- * UNUSED, USED or BUSY and contains a user data pointer. When pushing
- * into the queue we look for an UNUSED cell and mark it BUSY with a
- * CAS operation. If successful we use it and mark it USED, otherwise
- * we go on and look for the next UNUSED cell. The algorithm for
- * popping an item from the queue is practically inverse: look for a
- * USED cell and and mark it BUSY with a CAS operation, after reading
- * from it mark it UNUSED again.
- *
- * To accelerate finding of used/unused cells we maintain a read and a
- * write index which is used like a ring buffer. After each push we
- * increase the write index and after each pop we increase the read
- * index.
- *
- * The indexes are incremented atomically and are never truncated to
- * the buffer size. Instead we assume that the buffer size is a power
- * of two and that the truncation can thus be done by applying a
- * simple AND on read.
- *
- * To make sure that we do not look for empty cells indefinitely we
- * maintain a length value which stores the number of used cells. From
- * this value the number of unused cells is easily calculated. Please
- * note that the length value is not updated atomically with the read
- * and write index and might thus be a few cells off the real
- * value. To deal with this we always look for N_EXTRA_SCAN extra
- * cells when pushing/popping entries.
- *
- * It might make sense to replace this implementation with a link list
- * stack or queue, which however requires DCAS to be simple. Patches
- * welcome.
- *
- * Please note that this algorithm is home grown.*/
-
-#define FLIST_SIZE 128
-#define N_EXTRA_SCAN 2
-
-/* For debugging purposes we can define _Y to put and extra thread
- * yield between each operation. */
-
-#ifdef PROFILE
-#define _Y pa_thread_yield()
-#else
-#define _Y do { } while(0)
-#endif
+#define FLIST_SIZE 256
 
-enum {
-    STATE_UNUSED,
-    STATE_USED,
-    STATE_BUSY
-};
+/* Atomic table indices contain
+   sign bit = if set, indicates empty/NULL value
+   tag bits (to avoid the ABA problem)
+   actual index bits
+*/
 
-struct cell {
-    pa_atomic_t state;
-    void *data;
+/* Lock free single linked list element. */
+struct pa_flist_elem {
+    pa_atomic_t next;
+    pa_atomic_ptr_t ptr;
 };
 
+typedef struct pa_flist_elem pa_flist_elem;
+
 struct pa_flist {
+    char *name;
     unsigned size;
-    pa_atomic_t length;
-    pa_atomic_t read_idx;
-    pa_atomic_t write_idx;
+
+    pa_atomic_t current_tag;
+    int index_mask;
+    int tag_shift;
+    int tag_mask;
+
+    /* Stack that contains pointers stored into free list */
+    pa_atomic_t stored;
+    /* Stack that contains empty list elements */
+    pa_atomic_t empty;
+    pa_flist_elem table[];
 };
 
-#define PA_FLIST_CELLS(x) ((struct cell*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_flist))))
+/* Lock free pop from linked list stack */
+static pa_flist_elem *stack_pop(pa_flist *flist, pa_atomic_t *list) {
+    pa_flist_elem *popped;
+    int idx;
+    pa_assert(list);
 
-pa_flist *pa_flist_new(unsigned size) {
+    do {
+        idx = pa_atomic_load(list);
+        if (idx < 0)
+            return NULL;
+        popped = &flist->table[idx & flist->index_mask];
+    } while (!pa_atomic_cmpxchg(list, idx, pa_atomic_load(&popped->next)));
+
+    return popped;
+}
+
+/* Lock free push to linked list stack */
+static void stack_push(pa_flist *flist, pa_atomic_t *list, pa_flist_elem *new_elem) {
+    int tag, newindex, next;
+    pa_assert(list);
+
+    tag = pa_atomic_inc(&flist->current_tag);
+    newindex = new_elem - flist->table;
+    pa_assert(newindex >= 0 && newindex < (int) flist->size);
+    newindex |= (tag << flist->tag_shift) & flist->tag_mask;
+
+    do {
+        next = pa_atomic_load(list);
+        pa_atomic_store(&new_elem->next, next);
+    } while (!pa_atomic_cmpxchg(list, next, newindex));
+}
+
+pa_flist *pa_flist_new_with_name(unsigned size, const char *name) {
     pa_flist *l;
+    unsigned i;
+    pa_assert(name);
 
     if (!size)
         size = FLIST_SIZE;
 
-    pa_assert(pa_is_power_of_two(size));
-
-    l = pa_xmalloc0(PA_ALIGN(sizeof(pa_flist)) + (sizeof(struct cell) * size));
+    l = pa_xmalloc0(sizeof(pa_flist) + sizeof(pa_flist_elem) * size);
 
+    l->name = pa_xstrdup(name);
     l->size = size;
 
-    pa_atomic_store(&l->read_idx, 0);
-    pa_atomic_store(&l->write_idx, 0);
-    pa_atomic_store(&l->length, 0);
+    while (1 << l->tag_shift < (int) size)
+        l->tag_shift++;
+    l->index_mask = (1 << l->tag_shift) - 1;
+    l->tag_mask = INT_MAX - l->index_mask;
+
+    pa_atomic_store(&l->stored, -1);
+    pa_atomic_store(&l->empty, -1);
+    for (i=0; i < size; i++) {
+        stack_push(l, &l->empty, &l->table[i]);
+    }
 
     return l;
 }
 
-static int reduce(pa_flist *l, int value) {
-    return value & (unsigned) (l->size - 1);
+pa_flist *pa_flist_new(unsigned size) {
+    return pa_flist_new_with_name(size, "unknown");
 }
 
 void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
     pa_assert(l);
+    pa_assert(l->name);
 
     if (free_cb) {
-        struct cell *cells;
-        int len, idx;
-
-        cells = PA_FLIST_CELLS(l);
-
-        idx = reduce(l, pa_atomic_load(&l->read_idx));
-        len = pa_atomic_load(&l->length);
-
-        for (; len > 0; len--) {
-
-            if (pa_atomic_load(&cells[idx].state) == STATE_USED)
-                free_cb(cells[idx].data);
-
-            idx = reduce(l, idx + 1);
-        }
+        pa_flist_elem *elem;
+        while((elem = stack_pop(l, &l->stored)))
+            free_cb(pa_atomic_ptr_load(&elem->ptr));
     }
 
+    pa_xfree(l->name);
    pa_xfree(l);
 }
 
-int pa_flist_push(pa_flist*l, void *p) {
-    int idx, len, n;
-    struct cell *cells;
-
+int pa_flist_push(pa_flist *l, void *p) {
+    pa_flist_elem *elem;
     pa_assert(l);
     pa_assert(p);
 
-    cells = PA_FLIST_CELLS(l);
-
-    n = len = (int) l->size - pa_atomic_load(&l->length) + N_EXTRA_SCAN;
-    _Y;
-    idx = reduce(l, pa_atomic_load(&l->write_idx));
-
-    for (; n > 0 ; n--) {
-        _Y;
-
-        if (pa_atomic_cmpxchg(&cells[idx].state, STATE_UNUSED, STATE_BUSY)) {
-            _Y;
-            pa_atomic_inc(&l->write_idx);
-            _Y;
-            cells[idx].data = p;
-            _Y;
-            pa_atomic_store(&cells[idx].state, STATE_USED);
-            _Y;
-            pa_atomic_inc(&l->length);
-            return 0;
-        }
-
-        _Y;
-        idx = reduce(l, idx + 1);
+    elem = stack_pop(l, &l->empty);
+    if (elem == NULL) {
+        if (pa_log_ratelimit(PA_LOG_DEBUG))
+            pa_log_debug("%s flist is full (don't worry)", l->name);
+        return -1;
     }
+    pa_atomic_ptr_store(&elem->ptr, p);
+    stack_push(l, &l->stored, elem);
 
-#ifdef PROFILE
-    if (len > N_EXTRA_SCAN)
-        pa_log_warn("Didn't find free cell after %u iterations.", len);
-#endif
-
-    return -1;
+    return 0;
 }
 
-void* pa_flist_pop(pa_flist*l) {
-    int idx, len, n;
-    struct cell *cells;
-
+void* pa_flist_pop(pa_flist *l) {
+    pa_flist_elem *elem;
+    void *ptr;
     pa_assert(l);
 
-    cells = PA_FLIST_CELLS(l);
-
-    n = len = pa_atomic_load(&l->length) + N_EXTRA_SCAN;
-    _Y;
-    idx = reduce(l, pa_atomic_load(&l->read_idx));
-
-    for (; n > 0 ; n--) {
-        _Y;
+    elem = stack_pop(l, &l->stored);
+    if (elem == NULL)
+        return NULL;
 
-        if (pa_atomic_cmpxchg(&cells[idx].state, STATE_USED, STATE_BUSY)) {
-            void *p;
-            _Y;
-            pa_atomic_inc(&l->read_idx);
-            _Y;
-            p = cells[idx].data;
-            _Y;
-            pa_atomic_store(&cells[idx].state, STATE_UNUSED);
-            _Y;
+    ptr = pa_atomic_ptr_load(&elem->ptr);
 
-            pa_atomic_dec(&l->length);
-            return p;
-        }
-
-        _Y;
-        idx = reduce(l, idx+1);
-    }
-
-#ifdef PROFILE
-    if (len > N_EXTRA_SCAN)
-        pa_log_warn("Didn't find used cell after %u iterations.", len);
-#endif
+    stack_push(l, &l->empty, elem);
 
-    return NULL;
+    return ptr;
 }
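
The rewrite replaces the scanning ring buffer with two Treiber-style linked stacks ("stored" and "empty") whose heads are single atomic ints, and it sidesteps the ABA problem by packing a monotonically increasing tag next to the table index inside each head value, as described in the new "Atomic table indices" comment. The following is a standalone sketch of that bit layout only; it uses plain ints instead of pa_atomic_t, and main(), the sample index and the sample tag are illustrative values, not PulseAudio code.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

int main(void) {
    unsigned size = 256;                    /* FLIST_SIZE after this change */

    /* Same mask computation as pa_flist_new_with_name(). */
    int tag_shift = 0;
    while (1 << tag_shift < (int) size)
        tag_shift++;                        /* width of the index field */
    int index_mask = (1 << tag_shift) - 1;  /* low bits: table index */
    int tag_mask = INT_MAX - index_mask;    /* remaining non-sign bits: tag */

    /* Packing, as stack_push() does: the element's position in table[] in
     * the low bits, a freshly incremented tag in the high bits. */
    int index = 42;                         /* element position in table[] */
    int tag = 7;                            /* value taken from current_tag */
    int packed = index | ((tag << tag_shift) & tag_mask);

    /* Unpacking, as stack_pop() does before dereferencing table[]. */
    assert((packed & index_mask) == index);

    /* A negative head value (-1, sign bit set) is the "list is empty"
     * sentinel that stack_pop() checks for. */
    printf("packed=%#x index=%d tag=%d\n",
           packed, packed & index_mask, (packed & tag_mask) >> tag_shift);
    return 0;
}

Because stack_push() bumps current_tag on every push, a head value that carries the same index as an earlier one still differs in its tag bits, so a stale pa_atomic_cmpxchg() in stack_pop() fails instead of silently corrupting the list; since the tag field is finite and eventually wraps, this is an ABA mitigation rather than a hard guarantee.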
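
Externally the behaviour is unchanged by the rewrite: pa_flist remains a bounded, lock-free cache of pointers, pa_flist_push() refuses entries once the table is full, and pa_flist_pop() returns NULL when nothing is stored. A minimal usage sketch of the recycling pattern built on the API in this diff follows, assuming the PulseAudio tree is on the include path; "recycled", get_buffer(), put_buffer(), setup() and teardown() are hypothetical names invented for this example, not existing PulseAudio symbols.

#include <pulse/xmalloc.h>
#include <pulsecore/flist.h>

static pa_flist *recycled;

void setup(void) {
    /* Passing 0 picks the default capacity (FLIST_SIZE, now 256). */
    recycled = pa_flist_new_with_name(0, "buffer cache");
}

void *get_buffer(void) {
    void *p;

    /* Reuse a previously released buffer if one is stored... */
    if ((p = pa_flist_pop(recycled)))
        return p;

    /* ...otherwise fall back to a fresh allocation. */
    return pa_xmalloc(1024);
}

void put_buffer(void *p) {
    /* pa_flist_push() returns a negative value when the list is full;
     * in that case the buffer is simply freed instead of cached. */
    if (pa_flist_push(recycled, p) < 0)
        pa_xfree(p);
}

void teardown(void) {
    /* Entries still stored in the list are released via the callback. */
    pa_flist_free(recycled, pa_xfree);
}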