-/* Algorithm is not perfect, in a few corner cases it will fail to pop
- * from the flist although it isn't empty, and fail to push into the
- * flist, although it isn't full.
- *
- * We keep a fixed size array of entries, each item is an atomic
- * pointer.
- *
- * To accelerate finding of used/unused cells we maintain a read and a
- * write index which is used like a ring buffer. After each push we
- * increase the write index and after each pop we increase the read
- * index.
- *
- * The indexes are incremented atomically and are never truncated to
- * the buffer size. Instead we assume that the buffer size is a power
- * of two and that the truncation can thus be done by applying a
- * simple AND on read.
- *
- * To make sure that we do not look for empty cells indefinitely we
- * maintain a length value which stores the number of used cells. From
- * this value the number of unused cells is easily calculated. Please
- * note that the length value is not updated atomically with the read
- * and write index and might thus be a few cells off the real
- * value. To deal with this we always look for N_EXTRA_SCAN extra
- * cells when pushing/popping entries.
- *
- * It might make sense to replace this implementation with a link list
- * stack or queue, which however requires DCAS to be simple. Patches
- * welcome.
- *
- * Please note that this algorithm is home grown.*/
-
-#define FLIST_SIZE 128
-#define N_EXTRA_SCAN 3
-
-/* For debugging purposes we can define _Y to put and extra thread
- * yield between each operation. */
-
-#ifdef PROFILE
-#define _Y pa_thread_yield()
-#else
-#define _Y do { } while(0)
-#endif
+/* Number of cells in the free list table.
+ *
+ * NOTE(review): the removed implementation above required the size to
+ * be a power of two (indices were truncated with a simple AND); 256
+ * keeps that property — confirm whether the new tag/index encoding
+ * below still depends on it. */
+#define FLIST_SIZE 256
+
+/* Atomic table indices contain
+   sign bit = if set, indicates empty/NULL value
+   tag bits (to avoid the ABA problem)
+   actual index bits
+*/
+
+/* Lock free single linked list element.
+ *
+ * 'next' is a pa_atomic_t (an atomic integer, not a pointer), so it
+ * presumably holds a table index encoded in the sign/tag/index scheme
+ * described above — linking by tagged index rather than by raw pointer
+ * is the usual way to sidestep the ABA problem with single-word CAS.
+ * NOTE(review): pa_atomic_t / pa_atomic_ptr_t are project types
+ * declared elsewhere; confirm their exact semantics against
+ * pulsecore/atomic.h. */
+struct pa_flist_elem {
+    pa_atomic_t next;     /* encoded link to the next element (see index format above) */
+    pa_atomic_ptr_t ptr;  /* the user-supplied pointer stored in this cell */
+};
+
+typedef struct pa_flist_elem pa_flist_elem;