/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#include <string.h>
#include <unistd.h>

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/mutex.h>
#include <pulsecore/flist.h>

#include "memblock.h"

#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16

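/* With the default values above a pool spans PA_MEMPOOL_SLOTS_MAX *
 * PA_MEMPOOL_SLOT_SIZE = 128 * 16 KiB = 2 MiB of (optionally shared)
 * memory. The effective per-slot size is PA_MEMPOOL_SLOT_SIZE rounded
 * down to a whole multiple of the page size; see pa_mempool_new()
 * below. */
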
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;
    int read_only; /* boolean */

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            void (*free_cb)(void *p);
        } user;
        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};

static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0);

/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}

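/* Editor's illustration (not part of the original file): the typical
 * lifecycle of a memblock. All accesses to the payload are bracketed by
 * acquire/release so that memblock_wait() below can detect when no
 * thread is using the data any more.
 *
 *     pa_mempool *pool = pa_mempool_new(0);          // 0 = private, non-shared pool
 *     pa_memblock *blk = pa_memblock_new(pool, 512); // falls back to pa_xmalloc(), never NULL
 *
 *     void *d = pa_memblock_acquire(blk);            // pin the payload
 *     memset(d, 0, pa_memblock_get_length(blk));
 *     pa_memblock_release(blk);                      // unpin it again
 *
 *     pa_memblock_unref(blk);                        // the last unref frees the block
 *     pa_mempool_free(pool);
 */
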
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(length > 0);

    /* The memblock header and its payload are allocated in one step */
    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = 0;
    b->length = length;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;

    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
        }
    }

    return slot;
}

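/* Note on the allocation scheme above: slots are handed out lock-free.
 * First the shared free list is tried; if that is empty, a fresh slot is
 * carved out of the mmap()ed pool area by atomically bumping p->n_init.
 * Only when the pool area is exhausted as well does allocation fail and
 * the n_pool_full statistic get bumped. */
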
/* No lock necessary */
static void* mempool_slot_data(struct mempool_slot *slot) {
    return (uint8_t*) slot + sizeof(struct mempool_slot);
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}

/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    pa_assert(p);
    pa_assert(length > 0);

    if (p->block_size - sizeof(struct mempool_slot) >= sizeof(pa_memblock) + length) {
        /* Both the memblock header and the data fit into one slot */

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + sizeof(pa_memblock));

    } else if (p->block_size - sizeof(struct mempool_slot) >= length) {
        /* Only the data fits into a slot; allocate the header separately */

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu",
                     (unsigned long) length,
                     (unsigned long) (p->block_size - sizeof(struct mempool_slot)));
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = 0;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length > 0);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->length = length;
    pa_atomic_ptr_store(&b->data, d);
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length > 0);
    pa_assert(free_cb);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->length = length;
    pa_atomic_ptr_store(&b->data, d);
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}

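/* Editor's illustration (not part of the original file): wrapping
 * caller-allocated memory in a memblock with a matching destructor. The
 * buffer below comes from pa_xmalloc(), hence pa_xfree is the right
 * free_cb; fill_buffer() is a hypothetical helper.
 *
 *     void *buf = pa_xmalloc(1024);
 *     fill_buffer(buf, 1024);
 *     pa_memblock *blk = pa_memblock_new_user(pool, buf, 1024, pa_xfree, 1);
 *     ...
 *     pa_memblock_unref(blk);   // the final unref calls pa_xfree(buf)
 */
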
/* No lock necessary */
int pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    /* A block must be treated as read-only if it was flagged as such,
     * or if it is shared with someone else (more than one reference) */
    return b->read_only || PA_REFCNT_VALUE(b) > 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary; in corner cases it synchronizes on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;

    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}

size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    pa_assert(b);
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);
            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

static void memblock_wait(pa_memblock *b) {
    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}

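/* The acquire/release/wait handshake works as follows: readers bump
 * n_acquired in pa_memblock_acquire() and drop it in
 * pa_memblock_release(). A thread that needs exclusive access sets
 * please_signal and sleeps on the pool semaphore; the final release
 * sees please_signal set and posts the semaphore to wake it up. */
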
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - sizeof(struct mempool_slot)) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = 0;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = 0;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary. This function is not multiple caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}

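/* Editor's illustration (not part of the original file): a FIXED block
 * wraps memory the caller intends to reuse, e.g. a stack buffer. If the
 * block is still referenced elsewhere, pa_memblock_unref_fixed() first
 * copies the data away via memblock_make_local(), so the caller may
 * safely reuse the buffer as soon as the call returns.
 * hand_off_to_consumer() is a hypothetical function that may keep a
 * reference to the block.
 *
 *     uint8_t buf[256];
 *     pa_memblock *blk = pa_memblock_new_fixed(pool, buf, sizeof(buf), 0);
 *     hand_off_to_consumer(blk);
 *     pa_memblock_unref_fixed(blk);   // buf may be reused from here on
 */
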
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(seg->import->blocks,
                      PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (-- seg->n_blocks <= 0)
        segment_detach(seg);

    pa_mutex_unlock(seg->import->mutex);
}

pa_mempool* pa_mempool_new(int shared) {
    pa_mempool *p;
    size_t ps;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(1); /* 1 = recursive */
    p->semaphore = pa_semaphore_new(0);

#ifdef HAVE_SYSCONF
    ps = (size_t) sysconf(_SC_PAGESIZE);
#elif defined(PAGE_SIZE)
    ps = (size_t) PAGE_SIZE;
#else
    ps = 4096; /* Let's hope it's like x86. */
#endif

    /* Round the slot size down to a whole multiple of the page size */
    p->block_size = (PA_MEMPOOL_SLOT_SIZE/ps)*ps;

    if (p->block_size < ps)
        p->block_size = ps;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    pa_assert(p->block_size > sizeof(struct mempool_slot));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}

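/* Editor's illustration (not part of the original file): creating a
 * pool backed by POSIX shared memory, as needed for zero-copy transfer
 * to other processes via pa_memexport/pa_memimport.
 *
 *     uint32_t shm_id;
 *     pa_mempool *pool = pa_mempool_new(1);        // 1 = shared
 *     if (!pool)
 *         return -1;                               // shm segment could not be created
 *     pa_assert(pa_mempool_is_shared(pool));
 *     pa_mempool_get_shm_id(pool, &shm_id);        // id to announce to peers
 */
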
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed!");
    }

    pa_flist_free(p->free_slots, NULL);
    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}

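/* Editor's illustration (not part of the original file): since all
 * counters in pa_mempool_stat are pa_atomic_t, a monitoring thread may
 * poll them without taking any lock. The cast merely strips the const
 * added by this accessor.
 *
 *     const pa_mempool_stat *st = pa_mempool_get_stat(pool);
 *     pa_log_debug("allocated blocks: %i, allocated bytes: %i",
 *                  pa_atomic_load((pa_atomic_t*) &st->n_allocated),
 *                  pa_atomic_load((pa_atomic_t*) &st->allocated_size));
 */
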
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks*2);

    /* pa_flist offers no iteration, so pop every free slot into a
     * temporary list first */
    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        /* Return the slot's payload pages to the kernel */
        pa_shm_punch(&p->memory,
                     (uint8_t*) slot - (uint8_t*) p->memory.ptr + sizeof(struct mempool_slot),
                     p->block_size - sizeof(struct mempool_slot));

        while (pa_flist_push(p->free_slots, slot))
            ;
    }

    pa_flist_free(list, NULL);
}

/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);
    pa_assert(id);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
int pa_mempool_is_shared(pa_mempool *p) {
    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(0);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported these blocks further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = 1;
    b->length = size;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}

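/* Editor's illustration (not part of the original file): the importing
 * side maps a block previously announced by a peer. block_id, shm_id,
 * offset and size would normally arrive in a protocol message;
 * my_release_cb is a hypothetical callback that tells the peer the
 * block is no longer needed.
 *
 *     pa_memimport *imp = pa_memimport_new(pool, my_release_cb, NULL);
 *     pa_memblock *blk = pa_memimport_get(imp, block_id, shm_id, offset, size);
 *     if (blk) {
 *         ... use the (read-only) block ...
 *         pa_memblock_unref(blk);
 *     }
 */
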
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(1); /* 1 = recursive, see memexport_revoke_blocks() */
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}

void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;

    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);

        /* e->mutex is recursive (created with pa_mutex_new(1)), hence
         * calling pa_memexport_process_release() under the lock is safe */
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        /* The block already lives in shared memory, a reference suffices */
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, b->length);

    return 0;
}
