#include <signal.h>
#include <errno.h>
+#include <stdlib.h>
+#ifdef HAVE_VALGRIND_MEMCHECK_H
+#include <valgrind/memcheck.h>
+#endif
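+/* memcheck.h provides VALGRIND_MALLOCLIKE_BLOCK/VALGRIND_FREELIKE_BLOCK,
+ * which the (currently commented-out) hooks further down would use. */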
+
#include <pulse/xmalloc.h>
#include <pulse/def.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
+#include <pulsecore/memtrap.h>
#include "memblock.h"
#define PA_MEMEXPORT_SLOTS_MAX 128
-#define PA_MEMIMPORT_SLOTS_MAX 128
+#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16
struct pa_memblock {
struct pa_memimport_segment {
pa_memimport *import;
pa_shm memory;
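+ /* Trap for the read-only mapping, armed with pa_memtrap_add() when the
+  * segment is attached, NULL until then; it guards against the backing
+  * SHM segment disappearing while we still reference it. */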
+ pa_memtrap *trap;
unsigned n_blocks;
};
+/* A collection of multiple segments */
struct pa_memimport {
pa_mutex *mutex;
pa_assert(b->pool);
pa_atomic_inc(&b->pool->stat.n_allocated);
- pa_atomic_add(&b->pool->stat.allocated_size, b->length);
+ pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
pa_atomic_inc(&b->pool->stat.n_accumulated);
- pa_atomic_add(&b->pool->stat.accumulated_size, b->length);
+ pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
if (b->type == PA_MEMBLOCK_IMPORTED) {
pa_atomic_inc(&b->pool->stat.n_imported);
- pa_atomic_add(&b->pool->stat.imported_size, b->length);
+ pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
}
pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
pa_atomic_dec(&b->pool->stat.n_allocated);
- pa_atomic_sub(&b->pool->stat.allocated_size, b->length);
+ pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
if (b->type == PA_MEMBLOCK_IMPORTED) {
pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
pa_atomic_dec(&b->pool->stat.n_imported);
- pa_atomic_sub(&b->pool->stat.imported_size, b->length);
+ pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
}
pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
pa_atomic_dec(&p->n_init);
else
- slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));
+ slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
if (!slot) {
- pa_log_info("Pool full");
+ if (pa_log_ratelimit())
+ pa_log_debug("Pool full");
pa_atomic_inc(&p->stat.n_pool_full);
return NULL;
}
}
+/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
+/* if (PA_UNLIKELY(pa_in_valgrind())) { */
+/* VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
+/* } */
+/* #endif */
+
return slot;
}
pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
- return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
+ return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
pa_memblock *b = NULL;
struct mempool_slot *slot;
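+ /* 0 = not yet checked, 1 = disabled via $PULSE_MEMPOOL_DISABLE, -1 = enabled */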
+ static int mempool_disable = 0;
pa_assert(p);
pa_assert(length);
+ if (mempool_disable == 0)
+ mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
+
+ if (mempool_disable > 0)
+ return NULL;
+
/* If -1 is passed as length we choose the size for the caller: we
* take the largest size that fits in one of our slots. */
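+ /* e.g. pa_memblock_new_pool(p, (size_t) -1) hands back the largest block a
+  * single slot can carry (cf. pa_mempool_block_size_max()). */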
if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
b = pa_xnew(pa_memblock, 1);
+
PA_REFCNT_INIT(b);
b->pool = p;
b->type = PA_MEMBLOCK_FIXED;
if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
b = pa_xnew(pa_memblock, 1);
+
PA_REFCNT_INIT(b);
b->pool = p;
b->type = PA_MEMBLOCK_USER;
/* Fall through */
case PA_MEMBLOCK_FIXED:
- case PA_MEMBLOCK_APPENDED :
if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
pa_xfree(b);
break;
- case PA_MEMBLOCK_IMPORTED : {
+ case PA_MEMBLOCK_APPENDED:
+
+ /* We could attach it to unused_memblocks, but that would
+ * probably waste a considerable amount of memory */
+ pa_xfree(b);
+ break;
+
+ case PA_MEMBLOCK_IMPORTED: {
pa_memimport_segment *segment;
pa_memimport *import;
/* FIXME! This should be implemented lock-free */
- segment = b->per_type.imported.segment;
- pa_assert(segment);
- import = segment->import;
- pa_assert(import);
+ pa_assert_se(segment = b->per_type.imported.segment);
+ pa_assert_se(import = segment->import);
pa_mutex_lock(import->mutex);
- pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
+
+ pa_assert_se(pa_hashmap_remove(
+ import->blocks,
+ PA_UINT32_TO_PTR(b->per_type.imported.id)));
+
+ pa_assert(segment->n_blocks >= 1);
if (-- segment->n_blocks <= 0)
segment_detach(segment);
if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
pa_xfree(b);
+
break;
}
case PA_MEMBLOCK_POOL_EXTERNAL:
case PA_MEMBLOCK_POOL: {
struct mempool_slot *slot;
- int call_free;
+ pa_bool_t call_free;
- slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
- pa_assert(slot);
+ pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));
call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
+/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
+/* if (PA_UNLIKELY(pa_in_valgrind())) { */
+/* VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
+/* } */
+/* #endif */
+
/* The free list dimensions should easily allow all slots
* to fit in, hence try harder if pushing this slot into
* the free list fails */
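+ /* (The retry is the usual while (pa_flist_push(...) < 0) ; spin used for
+  * the free-slot lists elsewhere in this file.) */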
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
- pa_memimport_segment *seg;
+ pa_memimport_segment *segment;
+ pa_memimport *import;
pa_assert(b);
pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
pa_atomic_dec(&b->pool->stat.n_imported);
- pa_atomic_sub(&b->pool->stat.imported_size, b->length);
+ pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
- seg = b->per_type.imported.segment;
- pa_assert(seg);
- pa_assert(seg->import);
+ pa_assert_se(segment = b->per_type.imported.segment);
+ pa_assert_se(import = segment->import);
- pa_mutex_lock(seg->import->mutex);
+ pa_mutex_lock(import->mutex);
- pa_hashmap_remove(
- seg->import->blocks,
- PA_UINT32_TO_PTR(b->per_type.imported.id));
+ pa_assert_se(pa_hashmap_remove(
+ import->blocks,
+ PA_UINT32_TO_PTR(b->per_type.imported.id)));
memblock_make_local(b);
- if (-- seg->n_blocks <= 0) {
- pa_mutex_unlock(seg->import->mutex);
- segment_detach(seg);
- } else
- pa_mutex_unlock(seg->import->mutex);
+ pa_assert(segment->n_blocks >= 1);
+ if (-- segment->n_blocks <= 0)
+ segment_detach(segment);
+
+ pa_mutex_unlock(import->mutex);
}
-pa_mempool* pa_mempool_new(pa_bool_t shared) {
+pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
pa_mempool *p;
+ char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
p = pa_xnew(pa_mempool, 1);
if (p->block_size < PA_PAGE_SIZE)
p->block_size = PA_PAGE_SIZE;
- p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
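+ /* size == 0 keeps the default geometry; otherwise the pool is sized to
+  * roughly size bytes, rounded down to whole blocks, never below two slots. */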
+ if (size == 0)
+ p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
+ else {
+ p->n_blocks = (unsigned) (size / p->block_size);
+
+ if (p->n_blocks < 2)
+ p->n_blocks = 2;
+ }
if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
pa_xfree(p);
return NULL;
}
+ pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
+ p->memory.shared ? "shared" : "private",
+ p->n_blocks,
+ pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
+ pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
+ (unsigned long) pa_mempool_block_size_max(p));
+
memset(&p->stat, 0, sizeof(p->stat));
pa_atomic_store(&p->n_init, 0);
PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
- p->free_slots = pa_flist_new(p->n_blocks*2);
+ p->free_slots = pa_flist_new(p->n_blocks);
return p;
}
pa_flist_free(p->free_slots, NULL);
if (pa_atomic_load(&p->stat.n_allocated) > 0) {
-/* raise(SIGTRAP); */
- pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
+
+ /* Ouch, somebody is retaining a memory block reference! */
+
+#ifdef DEBUG_REF
+ unsigned i;
+ pa_flist *list;
+
+ /* Let's try to find at least one of those leaked memory blocks */
+
+ list = pa_flist_new(p->n_blocks);
+
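+ /* Drain free_slots into a scratch list so we can scan it without losing
+  * entries: any initialized slot whose block never shows up on the free
+  * list is still referenced by somebody. */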
+ for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
+ struct mempool_slot *slot;
+ pa_memblock *b, *k;
+
+ slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
+ b = mempool_slot_data(slot);
+
+ while ((k = pa_flist_pop(p->free_slots))) {
+ while (pa_flist_push(list, k) < 0)
+ ;
+
+ if (b == k)
+ break;
+ }
+
+ if (!k)
+ pa_log("REF: Leaked memory block %p", b);
+
+ while ((k = pa_flist_pop(list)))
+ while (pa_flist_push(p->free_slots, k) < 0)
+ ;
+ }
+
+ pa_flist_free(list, NULL);
+
+#endif
+
+ pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
+
+/* PA_DEBUG_TRAP; */
}
pa_shm_free(&p->memory);
pa_assert(p);
- list = pa_flist_new(p->n_blocks*2);
+ list = pa_flist_new(p->n_blocks);
while ((slot = pa_flist_pop(p->free_slots)))
while (pa_flist_push(list, slot) < 0)
;
while ((slot = pa_flist_pop(list))) {
- pa_shm_punch(&p->memory, (uint8_t*) slot - (uint8_t*) p->memory.ptr, p->block_size);
+ pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
while (pa_flist_push(p->free_slots, slot))
;
if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
return NULL;
- seg = pa_xnew(pa_memimport_segment, 1);
+ seg = pa_xnew0(pa_memimport_segment, 1);
if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
pa_xfree(seg);
return NULL;
}
seg->import = i;
- seg->n_blocks = 0;
+ seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
- pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
+ pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
return seg;
}
pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
pa_shm_free(&seg->memory);
+
+ if (seg->trap)
+ pa_memtrap_remove(seg->trap);
+
pa_xfree(seg);
}
pa_mutex_lock(i->mutex);
- while ((b = pa_hashmap_get_first(i->blocks)))
+ while ((b = pa_hashmap_first(i->blocks)))
memblock_replace_import(b);
pa_assert(pa_hashmap_size(i->segments) == 0);
pa_mutex_lock(i->mutex);
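+ /* If this block id was imported before, just bump the reference of the
+  * existing pa_memblock instead of importing it a second time. */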
+ if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
+ pa_memblock_ref(b);
+ goto finish;
+ }
+
if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
goto finish;
seg->n_blocks++;
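+ /* Do the accounting while we still hold the import lock; blocks found in
+  * the hashmap above took the early exit and are not counted twice. */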
+ stat_add(b);
+
finish:
pa_mutex_unlock(i->mutex);
- if (b)
- stat_add(b);
-
return b;
}
pa_mutex_lock(e->mutex);
while (e->used_slots)
- pa_memexport_process_release(e, e->used_slots - e->slots);
+ pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
pa_mutex_unlock(e->mutex);
pa_mutex_lock(e->pool->mutex);
pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
pa_atomic_dec(&e->pool->stat.n_exported);
- pa_atomic_sub(&e->pool->stat.exported_size, b->length);
+ pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
pa_memblock_unref(b);
slot->block->per_type.imported.segment->import != i)
continue;
- idx = slot - e->slots;
+ idx = (uint32_t) (slot - e->slots);
e->revoke_cb(e, idx, e->userdata);
pa_memexport_process_release(e, idx);
}
PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
slot->block = b;
- *block_id = slot - e->slots;
+ *block_id = (uint32_t) (slot - e->slots);
pa_mutex_unlock(e->mutex);
/* pa_log("Got block id %u", *block_id); */
pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
*shm_id = memory->id;
- *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
+ *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
*size = b->length;
pa_memblock_release(b);
pa_atomic_inc(&e->pool->stat.n_exported);
- pa_atomic_add(&e->pool->stat.exported_size, b->length);
+ pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
return 0;
}