/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/mutex.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>

#include "memblock.h"
#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
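/* With the defaults above, a pool carves PA_MEMPOOL_SLOTS_MAX slots of
 * PA_MEMPOOL_SLOT_SIZE bytes each out of one shm segment (the slot size
 * is page-aligned in pa_mempool_new()), i.e. roughly 128 * 16 KiB = 2 MiB
 * of shared memory per pool, assuming 4 KiB pages. */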
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;
    pa_bool_t read_only, is_silence;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
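/* A pa_memblock carries two independent counters: the PA_REFCNT_DECLARE
 * reference count controls the lifetime of the block itself, while
 * n_acquired counts callers currently holding the data pointer via
 * pa_memblock_acquire(). please_signal is raised while a thread sits in
 * memblock_wait() waiting for n_acquired to drop to zero. */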
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed. */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};
struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client dies from which we imported a memory
       block that we in turn exported to another client, so that we
       can revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};
struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
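/* unused_memblocks recycles pa_memblock headers: instead of calling
 * malloc()/free() for every block, freed headers are pushed onto this
 * lock-free free list and popped again by the allocation paths below. */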
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);
    pa_assert(b->pool);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = FALSE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
        }
    }

    return slot;
}
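/* Slot addresses are pure arithmetic: slot idx lives at
 * p->memory.ptr + idx * p->block_size. Slots are carved lazily out of the
 * single shm mapping; n_init only ever grows, and freed slots come back
 * through p->free_slots instead of by rewinding n_init. */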
/* No lock necessary */
static void* mempool_slot_data(struct mempool_slot *slot) {
    pa_assert(slot);

    return (uint8_t*) slot + PA_ALIGN(sizeof(struct mempool_slot));
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length > 0);

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) (p->block_size - PA_ALIGN(sizeof(struct mempool_slot))));
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = FALSE;
    b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);
    pa_assert(length > 0);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length > 0);
    pa_assert(length != (size_t) -1);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only && PA_REFCNT_VALUE(b) == 1;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}
/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}
/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary, though in corner cases it takes a lock of its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
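/* n_acquired and please_signal form a simple handshake with
 * memblock_wait() below: a thread that needs exclusive access raises
 * please_signal and sleeps on the pool semaphore, and the last releaser
 * posts it. */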
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - PA_ALIGN(sizeof(struct mempool_slot))) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Humm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
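/* memblock_make_local() is what turns a block backed by memory we do not
 * own (fixed or imported data) into one backed by our own pool, or
 * failing that the heap: copy the payload, then retag the type. Callers
 * use this before the original backing store goes away. */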
/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(
        seg->import->blocks,
        PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}
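/* Note that the data is copied out via memblock_make_local() while the
 * import mutex is still held, so the block stays valid even though the
 * remote segment may be detached immediately afterwards. */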
pa_mempool* pa_mempool_new(int shared) {
    pa_mempool *p;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    pa_assert(p->block_size > PA_ALIGN(sizeof(struct mempool_slot)));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        /* raise(SIGTRAP); */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}
/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(struct mempool_slot)) - PA_ALIGN(sizeof(pa_memblock));
}
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks*2);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory,
                     (uint8_t*) slot - (uint8_t*) p->memory.ptr + PA_ALIGN(sizeof(struct mempool_slot)),
                     p->block_size - PA_ALIGN(sizeof(struct mempool_slot)));

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}
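/* pa_shm_punch() punches a hole in the mapping, returning the backing
 * pages to the kernel while keeping the mapping itself intact, so
 * vacuuming shrinks the pool's resident size without invalidating any
 * slot pointers. The free list is drained into a temporary list and
 * refilled so that every free slot is punched exactly once. */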
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}
/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
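/* This is one half of the revocation chain: when a pa_memimport dies,
 * pa_memimport_free() walks every pa_memexport of the pool and calls
 * this function, which notifies the peer via revoke_cb and releases the
 * slot, so no export outlives the import it was forwarded from. */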
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
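/* A rough sketch of the round trip the export/import pair implements
 * (illustrative only; e, i and b stand for an existing export, import
 * and memblock):
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *
 *     if (pa_memexport_put(e, b, &block_id, &shm_id, &offset, &size) >= 0) {
 *         // ship (block_id, shm_id, offset, size) to the peer, which calls:
 *         pa_memblock *n = pa_memimport_get(i, block_id, shm_id, offset, size);
 *         // ...use n, then pa_memblock_unref(n); that fires release_cb,
 *         // after which the exporter calls pa_memexport_process_release().
 *     }
 */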
);