X-Git-Url: https://code.delx.au/pulseaudio/blobdiff_plain/3d2d6ca958719c03fa541dda48037501919a55de..6825df8cecb050a42804ad861bf67e8e42f634ea:/src/pulsecore/memblock.c

diff --git a/src/pulsecore/memblock.c b/src/pulsecore/memblock.c
index 47909cdc..acfb481e 100644
--- a/src/pulsecore/memblock.c
+++ b/src/pulsecore/memblock.c
@@ -42,9 +42,13 @@
 
 #include 
 #include 
 #include 
+#include 
 #include 
+#include 
+#include 
 #include 
 #include 
+#include 
 
 #include "memblock.h"
@@ -57,7 +61,7 @@
 
 #define PA_MEMEXPORT_SLOTS_MAX 128
 
-#define PA_MEMIMPORT_SLOTS_MAX 128
+#define PA_MEMIMPORT_SLOTS_MAX 160
 #define PA_MEMIMPORT_SEGMENTS_MAX 16
 
 struct pa_memblock {
@@ -66,8 +70,8 @@ struct pa_memblock {
 
     pa_memblock_type_t type;
 
-    pa_bool_t read_only:1;
-    pa_bool_t is_silence:1;
+    bool read_only:1;
+    bool is_silence:1;
 
     pa_atomic_ptr_t data;
     size_t length;
@@ -81,7 +85,7 @@ struct pa_memblock {
             pa_free_cb_t free_cb;
         } user;
 
-        struct {
+        struct {
             uint32_t id;
             pa_memimport_segment *segment;
         } imported;
@@ -91,9 +95,11 @@ struct pa_memblock {
 struct pa_memimport_segment {
     pa_memimport *import;
     pa_shm memory;
+    pa_memtrap *trap;
     unsigned n_blocks;
 };
 
+/* A collection of multiple segments */
 struct pa_memimport {
     pa_mutex *mutex;
 
@@ -162,14 +168,14 @@ static void stat_add(pa_memblock*b) {
     pa_assert(b->pool);
 
     pa_atomic_inc(&b->pool->stat.n_allocated);
-    pa_atomic_add(&b->pool->stat.allocated_size, b->length);
+    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
 
     pa_atomic_inc(&b->pool->stat.n_accumulated);
-    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);
+    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
 
     if (b->type == PA_MEMBLOCK_IMPORTED) {
         pa_atomic_inc(&b->pool->stat.n_imported);
-        pa_atomic_add(&b->pool->stat.imported_size, b->length);
+        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
     }
 
     pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
@@ -185,14 +191,14 @@ static void stat_remove(pa_memblock *b) {
     pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
 
     pa_atomic_dec(&b->pool->stat.n_allocated);
-    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);
+    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
 
     if (b->type == PA_MEMBLOCK_IMPORTED) {
         pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
         pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
 
         pa_atomic_dec(&b->pool->stat.n_imported);
-        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
+        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
     }
 
     pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
@@ -223,13 +229,13 @@ static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
 
     /* If -1 is passed as length we choose the size for the caller.
      */
 
     if (length == (size_t) -1)
-        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));
+        length = pa_mempool_block_size_max(p);
 
     b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
     PA_REFCNT_INIT(b);
     b->pool = p;
     b->type = PA_MEMBLOCK_APPENDED;
-    b->read_only = b->is_silence = FALSE;
+    b->read_only = b->is_silence = false;
     pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
     b->length = length;
     pa_atomic_store(&b->n_acquired, 0);
@@ -252,18 +258,21 @@ static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
         if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
             pa_atomic_dec(&p->n_init);
         else
-            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));
+            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
 
         if (!slot) {
-            pa_log_info("Pool full");
+            if (pa_log_ratelimit(PA_LOG_DEBUG))
+                pa_log_debug("Pool full");
             pa_atomic_inc(&p->stat.n_pool_full);
             return NULL;
         }
     }
 
-#ifdef HAVE_VALGRIND_MEMCHECK_H
-    VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0);
-#endif
+/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
+/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
+/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
+/*     } */
+/* #endif */
 
     return slot;
 }
@@ -280,7 +289,7 @@ static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
     pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
     pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
 
-    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
+    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
 }
 
 /* No lock necessary */
@@ -297,10 +306,17 @@ pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
     pa_memblock *b = NULL;
     struct mempool_slot *slot;
+    static int mempool_disable = 0;
 
     pa_assert(p);
     pa_assert(length);
 
+    if (mempool_disable == 0)
+        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
+
+    if (mempool_disable > 0)
+        return NULL;
+
     /* If -1 is passed as length we choose the size for the caller: we
      * take the largest size that fits in one of our slots.
      */
 
@@ -335,7 +351,7 @@ pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
 
     PA_REFCNT_INIT(b);
     b->pool = p;
-    b->read_only = b->is_silence = FALSE;
+    b->read_only = b->is_silence = false;
     b->length = length;
     pa_atomic_store(&b->n_acquired, 0);
     pa_atomic_store(&b->please_signal, 0);
@@ -345,7 +361,7 @@ pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
 }
 
 /* No lock necessary */
-pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
+pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool read_only) {
     pa_memblock *b;
 
     pa_assert(p);
@@ -355,11 +371,12 @@ pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_boo
 
     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
         b = pa_xnew(pa_memblock, 1);
+
     PA_REFCNT_INIT(b);
     b->pool = p;
     b->type = PA_MEMBLOCK_FIXED;
     b->read_only = read_only;
-    b->is_silence = FALSE;
+    b->is_silence = false;
     pa_atomic_ptr_store(&b->data, d);
     b->length = length;
     pa_atomic_store(&b->n_acquired, 0);
@@ -370,7 +387,7 @@ pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_boo
 }
 
 /* No lock necessary */
-pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
+pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, bool read_only) {
     pa_memblock *b;
 
     pa_assert(p);
@@ -381,11 +398,12 @@ pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free
 
     if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
         b = pa_xnew(pa_memblock, 1);
+
     PA_REFCNT_INIT(b);
     b->pool = p;
     b->type = PA_MEMBLOCK_USER;
     b->read_only = read_only;
-    b->is_silence = FALSE;
+    b->is_silence = false;
     pa_atomic_ptr_store(&b->data, d);
     b->length = length;
     pa_atomic_store(&b->n_acquired, 0);
@@ -398,7 +416,7 @@ pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free
 }
 
 /* No lock necessary */
-pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
+bool pa_memblock_is_read_only(pa_memblock *b) {
     pa_assert(b);
     pa_assert(PA_REFCNT_VALUE(b) > 0);
 
@@ -406,7 +424,7 @@ pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
 }
 
 /* No lock necessary */
-pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
+bool pa_memblock_is_silence(pa_memblock *b) {
     pa_assert(b);
     pa_assert(PA_REFCNT_VALUE(b) > 0);
 
@@ -414,7 +432,7 @@ pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
 }
 
 /* No lock necessary */
-void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
+void pa_memblock_set_is_silence(pa_memblock *b, bool v) {
     pa_assert(b);
     pa_assert(PA_REFCNT_VALUE(b) > 0);
 
@@ -422,7 +440,7 @@ void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
 }
 
 /* No lock necessary */
-pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
+bool pa_memblock_ref_is_one(pa_memblock *b) {
     int r;
     pa_assert(b);
 
@@ -441,6 +459,13 @@ void* pa_memblock_acquire(pa_memblock *b) {
     return pa_atomic_ptr_load(&b->data);
 }
 
+/* No lock necessary */
+void *pa_memblock_acquire_chunk(const pa_memchunk *c) {
+    pa_assert(c);
+
+    return (uint8_t *) pa_memblock_acquire(c->memblock) + c->index;
+}
+
 /* No lock necessary, in corner cases locks by its own */
 void pa_memblock_release(pa_memblock *b) {
     int r;
@@ -493,25 +518,32 @@ static void memblock_free(pa_memblock *b) {
             /* Fall through */
 
         case PA_MEMBLOCK_FIXED:
-        case PA_MEMBLOCK_APPENDED :
             if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                 pa_xfree(b);
 
             break;
 
-        case PA_MEMBLOCK_IMPORTED : {
+        case PA_MEMBLOCK_APPENDED:
+
+            /* We could attach it to unused_memblocks, but that would
+             * probably waste some considerable amount of memory */
+            pa_xfree(b);
+            break;
+
+        case PA_MEMBLOCK_IMPORTED: {
             pa_memimport_segment *segment;
             pa_memimport *import;
 
             /* FIXME! This should be implemented lock-free */
 
-            segment = b->per_type.imported.segment;
-            pa_assert(segment);
-            import = segment->import;
-            pa_assert(import);
+            pa_assert_se(segment = b->per_type.imported.segment);
+            pa_assert_se(import = segment->import);
 
             pa_mutex_lock(import->mutex);
-            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
+
+            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
+
+            pa_assert(segment->n_blocks >= 1);
             if (-- segment->n_blocks <= 0)
                 segment_detach(segment);
@@ -521,29 +553,31 @@ static void memblock_free(pa_memblock *b) {
 
             if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                 pa_xfree(b);
+
             break;
         }
 
        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
-            pa_bool_t call_free;
+            bool call_free;
 
-            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
-            pa_assert(slot);
+            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));
 
            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
 
+/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
+/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
+/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
+/*             } */
+/* #endif */
+
            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;
 
-#ifdef HAVE_VALGRIND_MEMCHECK_H
-            VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size);
-#endif
-
            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);
@@ -604,7 +638,7 @@ static void memblock_make_local(pa_memblock *b) {
 
             pa_atomic_ptr_store(&b->data, new_data);
             b->type = PA_MEMBLOCK_POOL_EXTERNAL;
-            b->read_only = FALSE;
+            b->read_only = false;
 
             goto finish;
         }
@@ -615,7 +649,7 @@ static void memblock_make_local(pa_memblock *b) {
 
     pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
     b->type = PA_MEMBLOCK_USER;
-    b->read_only = FALSE;
+    b->read_only = false;
 
 finish:
     pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
@@ -651,7 +685,8 @@ pa_memblock *pa_memblock_will_need(pa_memblock *b) {
 
 /* Self-locked. This function is not multiple-caller safe */
 static void memblock_replace_import(pa_memblock *b) {
-    pa_memimport_segment *seg;
+    pa_memimport_segment *segment;
+    pa_memimport *import;
 
     pa_assert(b);
     pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
@@ -659,52 +694,64 @@ static void memblock_replace_import(pa_memblock *b) {
     pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
     pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
     pa_atomic_dec(&b->pool->stat.n_imported);
-    pa_atomic_sub(&b->pool->stat.imported_size, b->length);
+    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
 
-    seg = b->per_type.imported.segment;
-    pa_assert(seg);
-    pa_assert(seg->import);
+    pa_assert_se(segment = b->per_type.imported.segment);
+    pa_assert_se(import = segment->import);
 
-    pa_mutex_lock(seg->import->mutex);
+    pa_mutex_lock(import->mutex);
 
-    pa_hashmap_remove(
-            seg->import->blocks,
-            PA_UINT32_TO_PTR(b->per_type.imported.id));
+    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
 
     memblock_make_local(b);
 
-    if (-- seg->n_blocks <= 0) {
-        pa_mutex_unlock(seg->import->mutex);
-        segment_detach(seg);
-    } else
-        pa_mutex_unlock(seg->import->mutex);
+    pa_assert(segment->n_blocks >= 1);
+    if (-- segment->n_blocks <= 0)
+        segment_detach(segment);
+
+    pa_mutex_unlock(import->mutex);
 }
 
-pa_mempool* pa_mempool_new(pa_bool_t shared) {
+pa_mempool* pa_mempool_new(bool shared, size_t size) {
     pa_mempool *p;
+    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
 
     p = pa_xnew(pa_mempool, 1);
 
-    p->mutex = pa_mutex_new(TRUE, TRUE);
-    p->semaphore = pa_semaphore_new(0);
-
     p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
     if (p->block_size < PA_PAGE_SIZE)
         p->block_size = PA_PAGE_SIZE;
 
-    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
+    if (size <= 0)
+        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
+    else {
+        p->n_blocks = (unsigned) (size / p->block_size);
+
+        if (p->n_blocks < 2)
+            p->n_blocks = 2;
+    }
 
     if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
         pa_xfree(p);
         return NULL;
     }
 
+    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
+                 p->memory.shared ? "shared" : "private",
+                 p->n_blocks,
+                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
+                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
+                 (unsigned long) pa_mempool_block_size_max(p));
+
     memset(&p->stat, 0, sizeof(p->stat));
     pa_atomic_store(&p->n_init, 0);
 
     PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
     PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
 
+    p->mutex = pa_mutex_new(true, true);
+    p->semaphore = pa_semaphore_new(0);
+
     p->free_slots = pa_flist_new(p->n_blocks);
 
     return p;
@@ -726,8 +773,47 @@ void pa_mempool_free(pa_mempool *p) {
     pa_flist_free(p->free_slots, NULL);
 
     if (pa_atomic_load(&p->stat.n_allocated) > 0) {
-/* raise(SIGTRAP); */
-        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
+
+        /* Ouch, somebody is retaining a memory block reference! */
+
+#ifdef DEBUG_REF
+        unsigned i;
+        pa_flist *list;
+
+        /* Let's try to find at least one of those leaked memory blocks */
+
+        list = pa_flist_new(p->n_blocks);
+
+        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
+            struct mempool_slot *slot;
+            pa_memblock *b, *k;
+
+            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
+            b = mempool_slot_data(slot);
+
+            while ((k = pa_flist_pop(p->free_slots))) {
+                while (pa_flist_push(list, k) < 0)
+                    ;
+
+                if (b == k)
+                    break;
+            }
+
+            if (!k)
+                pa_log("REF: Leaked memory block %p", b);
+
+            while ((k = pa_flist_pop(list)))
+                while (pa_flist_push(p->free_slots, k) < 0)
+                    ;
+        }
+
+        pa_flist_free(list, NULL);
+
+#endif
+
+        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
+
+/* PA_DEBUG_TRAP; */
     }
 
     pa_shm_free(&p->memory);
@@ -766,7 +852,7 @@ void pa_mempool_vacuum(pa_mempool *p) {
             ;
 
     while ((slot = pa_flist_pop(list))) {
-        pa_shm_punch(&p->memory, (uint8_t*) slot - (uint8_t*) p->memory.ptr, p->block_size);
+        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
 
         while (pa_flist_push(p->free_slots, slot))
            ;
@@ -788,13 +874,13 @@ int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
 }
 
 /* No lock necessary */
-pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
+bool pa_mempool_is_shared(pa_mempool *p) {
     pa_assert(p);
 
     return !!p->memory.shared;
 }
 
-/* For recieving blocks from other nodes */
+/* For receiving blocks from other nodes */
 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
     pa_memimport *i;
 
@@ -802,7 +888,7 @@ pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void
     pa_assert(cb);
 
     i = pa_xnew(pa_memimport, 1);
-    i->mutex = pa_mutex_new(TRUE, TRUE);
+    i->mutex = pa_mutex_new(true, true);
     i->pool = p;
     i->segments = pa_hashmap_new(NULL, NULL);
     i->blocks = pa_hashmap_new(NULL, NULL);
@@ -825,7 +911,7 @@ static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
     if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
         return NULL;
 
-    seg = pa_xnew(pa_memimport_segment, 1);
+    seg = pa_xnew0(pa_memimport_segment, 1);
 
     if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
         pa_xfree(seg);
@@ -833,9 +919,9 @@ static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
     }
 
     seg->import = i;
-    seg->n_blocks = 0;
+    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
 
-    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
+    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
     return seg;
 }
@@ -845,6 +931,10 @@ static void segment_detach(pa_memimport_segment *seg) {
 
     pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
     pa_shm_free(&seg->memory);
+
+    if (seg->trap)
+        pa_memtrap_remove(seg->trap);
+
     pa_xfree(seg);
 }
@@ -874,8 +964,8 @@ void pa_memimport_free(pa_memimport *i) {
 
     pa_mutex_unlock(i->pool->mutex);
 
-    pa_hashmap_free(i->blocks, NULL, NULL);
-    pa_hashmap_free(i->segments, NULL, NULL);
+    pa_hashmap_free(i->blocks);
+    pa_hashmap_free(i->segments);
 
     pa_mutex_free(i->mutex);
 
@@ -891,6 +981,11 @@ pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_i
 
     pa_mutex_lock(i->mutex);
 
+    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
+        pa_memblock_ref(b);
+        goto finish;
+    }
+
     if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
         goto finish;
@@ -907,8 +1002,8 @@ pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_i
     PA_REFCNT_INIT(b);
     b->pool = i->pool;
     b->type = PA_MEMBLOCK_IMPORTED;
-    b->read_only = TRUE;
-    b->is_silence = FALSE;
+    b->read_only = true;
+    b->is_silence = false;
     pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
     b->length = size;
     pa_atomic_store(&b->n_acquired, 0);
@@ -920,12 +1015,11 @@ pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_i
 
     seg->n_blocks++;
 
+    stat_add(b);
+
 finish:
     pa_mutex_unlock(i->mutex);
 
-    if (b)
-        stat_add(b);
-
     return b;
 }
@@ -960,7 +1054,7 @@ pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void
         return NULL;
 
     e = pa_xnew(pa_memexport, 1);
-    e->mutex = pa_mutex_new(TRUE, TRUE);
+    e->mutex = pa_mutex_new(true, true);
     e->pool = p;
     PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
     PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
@@ -979,7 +1073,7 @@ void pa_memexport_free(pa_memexport *e) {
 
     pa_mutex_lock(e->mutex);
     while (e->used_slots)
-        pa_memexport_process_release(e, e->used_slots - e->slots);
+        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
     pa_mutex_unlock(e->mutex);
 
     pa_mutex_lock(e->pool->mutex);
@@ -1018,7 +1112,7 @@ int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
     pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
 
     pa_atomic_dec(&e->pool->stat.n_exported);
-    pa_atomic_sub(&e->pool->stat.exported_size, b->length);
+    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
 
     pa_memblock_unref(b);
 
@@ -1046,7 +1140,7 @@ static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
             slot->block->per_type.imported.segment->import != i)
             continue;
 
-        idx = slot - e->slots;
+        idx = (uint32_t) (slot - e->slots);
         e->revoke_cb(e, idx, e->userdata);
         pa_memexport_process_release(e, idx);
     }
@@ -1107,7 +1201,7 @@ int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32
 
     PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
     slot->block = b;
-    *block_id = slot - e->slots;
+    *block_id = (uint32_t) (slot - e->slots);
     pa_mutex_unlock(e->mutex);
 
 /* pa_log("Got block id %u", *block_id); */
@@ -1127,13 +1221,13 @@ int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32
     pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
 
     *shm_id = memory->id;
-    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
+    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
     *size = b->length;
 
     pa_memblock_release(b);
 
     pa_atomic_inc(&e->pool->stat.n_exported);
-    pa_atomic_add(&e->pool->stat.exported_size, b->length);
+    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
 
     return 0;
 }
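
Note on pa_memblock_acquire_chunk(), added in the hunk at +459 above: it acquires the chunk's memblock and returns a pointer already offset by the chunk's index. The following sketch is illustrative only and is not part of the diff; the caller function and its setup are assumed, and error handling is omitted:

    /* Illustrative caller (hypothetical): read a memchunk's payload via the
     * new helper. Assumes "chunk" was filled in elsewhere. */
    #include <pulsecore/memblock.h>
    #include <pulsecore/memchunk.h>

    static void inspect_chunk(const pa_memchunk *chunk) {
        /* Same as (uint8_t *) pa_memblock_acquire(chunk->memblock) + chunk->index */
        void *p = pa_memblock_acquire_chunk(chunk);

        /* ... read up to chunk->length bytes starting at p ... */

        /* Every acquire must be balanced by a release on the same memblock. */
        pa_memblock_release(chunk->memblock);
    }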
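A second illustrative note, likewise not part of the diff: pa_mempool_new() now takes an explicit pool size in bytes next to the shared flag (a non-positive size keeps the PA_MEMPOOL_SLOTS_MAX default), and pa_memblock_new_pool() returns NULL when PULSE_MEMPOOL_DISABLE is set in the environment. A hedged sketch of a caller follows; the 64 MiB figure and the function name are arbitrary examples, and the fallback behaviour relies on pa_memblock_new() switching to a plain heap ("appended") block when the pool cannot be used:

    #include <stdbool.h>
    #include <pulsecore/memblock.h>

    static int mempool_example(void) {
        pa_mempool *pool;
        pa_memblock *b;

        /* Shared pool of roughly 64 MiB; the size is an arbitrary example. */
        if (!(pool = pa_mempool_new(true, 64 * 1024 * 1024)))
            return -1;

        /* pa_memblock_new() falls back to a non-pool allocation when the pool
         * is full or when PULSE_MEMPOOL_DISABLE makes pa_memblock_new_pool()
         * return NULL. */
        b = pa_memblock_new(pool, 4096);

        /* ... use the block ... */

        pa_memblock_unref(b);
        pa_mempool_free(pool);
        return 0;
    }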