/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <string.h>

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/mutex.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>

#include "memblock.h"

#define PA_MEMPOOL_SLOTS_MAX 512
#define PA_MEMPOOL_SLOT_SIZE (32*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
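/* A rough sketch of how a pool slot is laid out (see
 * mempool_slot_data() and pa_memblock_new_pool() below): each slot
 * occupies block_size bytes and begins with an aligned struct
 * mempool_slot header; for PA_MEMBLOCK_POOL blocks the pa_memblock
 * itself sits directly after the header, followed by the data:
 *
 *     [ struct mempool_slot | pa_memblock | data ... ]
 *
 * For PA_MEMBLOCK_POOL_EXTERNAL blocks only the data lives in the
 * slot and the pa_memblock is allocated separately. */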
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;
    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
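/* The unused_memblocks flist above caches pa_memblock structs whose
 * backing data lives outside the struct itself (fixed, imported and
 * pool-external blocks), so freeing and re-allocating blocks on the
 * fast path does not have to go through malloc()/free() every time:
 * memblock_free() pushes retired structs onto it and the allocators
 * below pop from it first. */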
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
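/* A minimal sketch of the intended calling pattern (not part of the
 * original file): allocate, map for access, then drop both the
 * mapping and the reference.
 *
 *     pa_memblock *b = pa_memblock_new(pool, 1024);
 *     void *d = pa_memblock_acquire(b);
 *     memset(d, 0, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 *     pa_memblock_unref(b);
 */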
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_info("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

    return slot;
}
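/* Slot allocation is lock-free: n_init only ever grows towards
 * n_blocks, so a slot index handed out by pa_atomic_inc() is never
 * reused until the block that owned it pushes it back onto
 * free_slots. The only failure mode is the pool genuinely running
 * out of slots, which is counted in stat.n_pool_full. */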
/* No lock necessary */
static void* mempool_slot_data(struct mempool_slot *slot) {
    pa_assert(slot);

    return (uint8_t*) slot + PA_ALIGN(sizeof(struct mempool_slot));
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) (p->block_size - PA_ALIGN(sizeof(struct mempool_slot))));
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length > 0);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length > 0);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}

/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}

/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}

/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
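/* The n_acquired/please_signal pair implements a lightweight
 * reader-tracking protocol: pa_memblock_acquire() bumps n_acquired
 * while the data pointer is in use, and a thread that wants to wait
 * for all users to let go (see memblock_wait() below) sets
 * please_signal so that the last pa_memblock_release() posts the
 * pool's semaphore. */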
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
/* Self locked */
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - PA_ALIGN(sizeof(struct mempool_slot))) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
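/* This matters for pa_memblock_new_fixed(): the caller retains
 * ownership of the backing memory, so before it may reuse or free
 * that memory, any references still held elsewhere must be migrated
 * off it. A sketch of the intended pattern (not part of the original
 * file):
 *
 *     uint8_t buf[4711];
 *     pa_memblock *b = pa_memblock_new_fixed(pool, buf, sizeof(buf), TRUE);
 *     ...pass b around, other parts may take references...
 *     pa_memblock_unref_fixed(b);
 *     ...buf may now be reused by the caller...
 */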
/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(
            seg->import->blocks,
            PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}
pa_mempool* pa_mempool_new(int shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    pa_assert(p->block_size > PA_ALIGN(sizeof(struct mempool_slot)));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}
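/* Creating and tearing down a pool (illustrative only): a shared pool
 * is backed by a shared memory segment, so that memexport/memimport
 * peers can map the same pages.
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE);
 *     if (!pool)
 *         return -1;
 *     ...allocate blocks with pa_memblock_new(pool, ...)...
 *     pa_mempool_free(pool);
 */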
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
/*         raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));
}
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks*2);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory,
                     (uint8_t*) slot - (uint8_t*) p->memory.ptr + PA_ALIGN(sizeof(struct mempool_slot)),
                     p->block_size - PA_ALIGN(sizeof(struct mempool_slot)));

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
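/* The intent here, as far as the code shows: pa_shm_punch() lets the
 * kernel drop the pages backing the data area of each free slot
 * (keeping the slot header), so a mostly idle pool can return memory
 * to the OS without invalidating any slot pointers. */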
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}
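/* The import path in short: the sender transmits (block_id, shm_id,
 * offset, size); on first sight of an shm_id the receiver maps that
 * segment read-only via segment_attach(), and every imported block is
 * then just a (segment base + offset, size) window registered under
 * block_id so that a later revoke or release can find it again. */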
/* Self-locked */
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    struct memexport_slot *slot;
    void *data;
    pa_shm *memory;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
/*     pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
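/* Putting it together, a rough sketch of a zero-copy hand-off between
 * two processes sharing a pool (illustrative only, error handling
 * omitted):
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *
 *     pa_memexport_put(ex, b, &block_id, &shm_id, &offset, &size);
 *     ...transmit the four values over the wire...
 *     pa_memblock *copy = pa_memimport_get(im, block_id, shm_id, offset, size);
 *
 * Only the four integers travel between the processes; the audio data
 * itself stays in the shared memory segment. */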