1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <signal.h>
32 #include <errno.h>
33
34 #ifdef HAVE_VALGRIND_MEMCHECK_H
35 #include <valgrind/memcheck.h>
36 #endif
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/def.h>
40
41 #include <pulsecore/shm.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/hashmap.h>
44 #include <pulsecore/semaphore.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/flist.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/memtrap.h>
49
50 #include "memblock.h"
51
52 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
53 * note that the footprint is usually much smaller, since the data is
54 * stored in SHM and our OS does not commit the memory before we use
55 * it for the first time. */
56 #define PA_MEMPOOL_SLOTS_MAX 1024
57 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
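/* Illustrative note (not part of the original file): with the defaults above,
 * a default-sized pool spans PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE =
 * 1024 * 64 KiB = 64 MiB of lazily committed SHM, which is where the
 * "64*1024*1024 bytes at maximum" figure in the comment above comes from
 * (assuming the page size divides 64 KiB; see pa_mempool_new() below). */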
58
59 #define PA_MEMEXPORT_SLOTS_MAX 128
60
61 #define PA_MEMIMPORT_SLOTS_MAX 160
62 #define PA_MEMIMPORT_SEGMENTS_MAX 16
63
64 struct pa_memblock {
65 PA_REFCNT_DECLARE; /* the reference counter */
66 pa_mempool *pool;
67
68 pa_memblock_type_t type;
69
70 pa_bool_t read_only:1;
71 pa_bool_t is_silence:1;
72
73 pa_atomic_ptr_t data;
74 size_t length;
75
76 pa_atomic_t n_acquired;
77 pa_atomic_t please_signal;
78
79 union {
80 struct {
81 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
82 pa_free_cb_t free_cb;
83 } user;
84
85 struct {
86 uint32_t id;
87 pa_memimport_segment *segment;
88 } imported;
89 } per_type;
90 };
91
92 struct pa_memimport_segment {
93 pa_memimport *import;
94 pa_shm memory;
95 pa_memtrap *trap;
96 unsigned n_blocks;
97 };
98
99 struct pa_memimport {
100 pa_mutex *mutex;
101
102 pa_mempool *pool;
103 pa_hashmap *segments;
104 pa_hashmap *blocks;
105
106 /* Called whenever an imported memory block is no longer
107 * needed. */
108 pa_memimport_release_cb_t release_cb;
109 void *userdata;
110
111 PA_LLIST_FIELDS(pa_memimport);
112 };
113
114 struct memexport_slot {
115 PA_LLIST_FIELDS(struct memexport_slot);
116 pa_memblock *block;
117 };
118
119 struct pa_memexport {
120 pa_mutex *mutex;
121 pa_mempool *pool;
122
123 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
124
125 PA_LLIST_HEAD(struct memexport_slot, free_slots);
126 PA_LLIST_HEAD(struct memexport_slot, used_slots);
127 unsigned n_init;
128
129 /* Called whenever a client from which we imported a memory block
130 (and which we in turn exported to another client) dies, so that we can
131 revoke the memory block accordingly. */
132 pa_memexport_revoke_cb_t revoke_cb;
133 void *userdata;
134
135 PA_LLIST_FIELDS(pa_memexport);
136 };
137
138 struct pa_mempool {
139 pa_semaphore *semaphore;
140 pa_mutex *mutex;
141
142 pa_shm memory;
143 size_t block_size;
144 unsigned n_blocks;
145
146 pa_atomic_t n_init;
147
148 PA_LLIST_HEAD(pa_memimport, imports);
149 PA_LLIST_HEAD(pa_memexport, exports);
150
151 /* A list of free slots that may be reused */
152 pa_flist *free_slots;
153
154 pa_mempool_stat stat;
155 };
156
157 static void segment_detach(pa_memimport_segment *seg);
158
159 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
160
161 /* No lock necessary */
162 static void stat_add(pa_memblock*b) {
163 pa_assert(b);
164 pa_assert(b->pool);
165
166 pa_atomic_inc(&b->pool->stat.n_allocated);
167 pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
168
169 pa_atomic_inc(&b->pool->stat.n_accumulated);
170 pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
171
172 if (b->type == PA_MEMBLOCK_IMPORTED) {
173 pa_atomic_inc(&b->pool->stat.n_imported);
174 pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
175 }
176
177 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
178 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
179 }
180
181 /* No lock necessary */
182 static void stat_remove(pa_memblock *b) {
183 pa_assert(b);
184 pa_assert(b->pool);
185
186 pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
187 pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
188
189 pa_atomic_dec(&b->pool->stat.n_allocated);
190 pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
191
192 if (b->type == PA_MEMBLOCK_IMPORTED) {
193 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
194 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
195
196 pa_atomic_dec(&b->pool->stat.n_imported);
197 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
198 }
199
200 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
201 }
202
203 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
204
205 /* No lock necessary */
206 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
207 pa_memblock *b;
208
209 pa_assert(p);
210 pa_assert(length);
211
212 if (!(b = pa_memblock_new_pool(p, length)))
213 b = memblock_new_appended(p, length);
214
215 return b;
216 }
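/* Usage sketch (illustrative, not part of the original file): the typical
 * life cycle of a block, assuming a previously created pa_mempool *pool.
 * The caller pins the data pointer with acquire(), writes to it, unpins it
 * with release() and finally drops its reference:
 *
 *     pa_memblock *b;
 *     void *d;
 *
 *     if ((b = pa_memblock_new(pool, 1024))) {
 *         d = pa_memblock_acquire(b);
 *         memset(d, 0, pa_memblock_get_length(b));
 *         pa_memblock_release(b);
 *         pa_memblock_unref(b);
 *     }
 */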
217
218 /* No lock necessary */
219 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
220 pa_memblock *b;
221
222 pa_assert(p);
223 pa_assert(length);
224
225 /* If -1 is passed as length we choose the size for the caller. */
226
227 if (length == (size_t) -1)
228 length = p->block_size - PA_ALIGN(sizeof(pa_memblock));
229
230 b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
231 PA_REFCNT_INIT(b);
232 b->pool = p;
233 b->type = PA_MEMBLOCK_APPENDED;
234 b->read_only = b->is_silence = FALSE;
235 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
236 b->length = length;
237 pa_atomic_store(&b->n_acquired, 0);
238 pa_atomic_store(&b->please_signal, 0);
239
240 stat_add(b);
241 return b;
242 }
243
244 /* No lock necessary */
245 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
246 struct mempool_slot *slot;
247 pa_assert(p);
248
249 if (!(slot = pa_flist_pop(p->free_slots))) {
250 int idx;
251
252 /* The free list was empty, we have to allocate a new entry */
253
254 if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
255 pa_atomic_dec(&p->n_init);
256 else
257 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
258
259 if (!slot) {
260 pa_log_info("Pool full");
261 pa_atomic_inc(&p->stat.n_pool_full);
262 return NULL;
263 }
264 }
265
266 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
267 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
268 /* VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
269 /* } */
270 /* #endif */
271
272 return slot;
273 }
274
275 /* No lock necessary, totally redundant anyway */
276 static inline void* mempool_slot_data(struct mempool_slot *slot) {
277 return slot;
278 }
279
280 /* No lock necessary */
281 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
282 pa_assert(p);
283
284 pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
285 pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
286
287 return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
288 }
289
290 /* No lock necessary */
291 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
292 unsigned idx;
293
294 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
295 return NULL;
296
297 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
298 }
299
300 /* No lock necessary */
301 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
302 pa_memblock *b = NULL;
303 struct mempool_slot *slot;
304
305 pa_assert(p);
306 pa_assert(length);
307
308 /* If -1 is passed as length we choose the size for the caller: we
309 * take the largest size that fits in one of our slots. */
310
311 if (length == (size_t) -1)
312 length = pa_mempool_block_size_max(p);
313
314 if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
315
316 if (!(slot = mempool_allocate_slot(p)))
317 return NULL;
318
319 b = mempool_slot_data(slot);
320 b->type = PA_MEMBLOCK_POOL;
321 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
322
323 } else if (p->block_size >= length) {
324
325 if (!(slot = mempool_allocate_slot(p)))
326 return NULL;
327
328 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
329 b = pa_xnew(pa_memblock, 1);
330
331 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
332 pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
333
334 } else {
335 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
336 pa_atomic_inc(&p->stat.n_too_large_for_pool);
337 return NULL;
338 }
339
340 PA_REFCNT_INIT(b);
341 b->pool = p;
342 b->read_only = b->is_silence = FALSE;
343 b->length = length;
344 pa_atomic_store(&b->n_acquired, 0);
345 pa_atomic_store(&b->please_signal, 0);
346
347 stat_add(b);
348 return b;
349 }
350
351 /* No lock necessary */
352 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
353 pa_memblock *b;
354
355 pa_assert(p);
356 pa_assert(d);
357 pa_assert(length != (size_t) -1);
358 pa_assert(length);
359
360 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
361 b = pa_xnew(pa_memblock, 1);
362 PA_REFCNT_INIT(b);
363 b->pool = p;
364 b->type = PA_MEMBLOCK_FIXED;
365 b->read_only = read_only;
366 b->is_silence = FALSE;
367 pa_atomic_ptr_store(&b->data, d);
368 b->length = length;
369 pa_atomic_store(&b->n_acquired, 0);
370 pa_atomic_store(&b->please_signal, 0);
371
372 stat_add(b);
373 return b;
374 }
375
376 /* No lock necessary */
377 pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
378 pa_memblock *b;
379
380 pa_assert(p);
381 pa_assert(d);
382 pa_assert(length);
383 pa_assert(length != (size_t) -1);
384 pa_assert(free_cb);
385
386 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
387 b = pa_xnew(pa_memblock, 1);
388 PA_REFCNT_INIT(b);
389 b->pool = p;
390 b->type = PA_MEMBLOCK_USER;
391 b->read_only = read_only;
392 b->is_silence = FALSE;
393 pa_atomic_ptr_store(&b->data, d);
394 b->length = length;
395 pa_atomic_store(&b->n_acquired, 0);
396 pa_atomic_store(&b->please_signal, 0);
397
398 b->per_type.user.free_cb = free_cb;
399
400 stat_add(b);
401 return b;
402 }
403
404 /* No lock necessary */
405 pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
406 pa_assert(b);
407 pa_assert(PA_REFCNT_VALUE(b) > 0);
408
409 return b->read_only || PA_REFCNT_VALUE(b) > 1;
410 }
411
412 /* No lock necessary */
413 pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
414 pa_assert(b);
415 pa_assert(PA_REFCNT_VALUE(b) > 0);
416
417 return b->is_silence;
418 }
419
420 /* No lock necessary */
421 void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
422 pa_assert(b);
423 pa_assert(PA_REFCNT_VALUE(b) > 0);
424
425 b->is_silence = v;
426 }
427
428 /* No lock necessary */
429 pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
430 int r;
431 pa_assert(b);
432
433 pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
434
435 return r == 1;
436 }
437
438 /* No lock necessary */
439 void* pa_memblock_acquire(pa_memblock *b) {
440 pa_assert(b);
441 pa_assert(PA_REFCNT_VALUE(b) > 0);
442
443 pa_atomic_inc(&b->n_acquired);
444
445 return pa_atomic_ptr_load(&b->data);
446 }
447
448 /* No lock necessary, in corner cases it locks on its own */
449 void pa_memblock_release(pa_memblock *b) {
450 int r;
451 pa_assert(b);
452 pa_assert(PA_REFCNT_VALUE(b) > 0);
453
454 r = pa_atomic_dec(&b->n_acquired);
455 pa_assert(r >= 1);
456
457 /* Signal a waiting thread that this memblock is no longer used */
458 if (r == 1 && pa_atomic_load(&b->please_signal))
459 pa_semaphore_post(b->pool->semaphore);
460 }
461
462 size_t pa_memblock_get_length(pa_memblock *b) {
463 pa_assert(b);
464 pa_assert(PA_REFCNT_VALUE(b) > 0);
465
466 return b->length;
467 }
468
469 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
470 pa_assert(b);
471 pa_assert(PA_REFCNT_VALUE(b) > 0);
472
473 return b->pool;
474 }
475
476 /* No lock necessary */
477 pa_memblock* pa_memblock_ref(pa_memblock*b) {
478 pa_assert(b);
479 pa_assert(PA_REFCNT_VALUE(b) > 0);
480
481 PA_REFCNT_INC(b);
482 return b;
483 }
484
485 static void memblock_free(pa_memblock *b) {
486 pa_assert(b);
487
488 pa_assert(pa_atomic_load(&b->n_acquired) == 0);
489
490 stat_remove(b);
491
492 switch (b->type) {
493 case PA_MEMBLOCK_USER :
494 pa_assert(b->per_type.user.free_cb);
495 b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));
496
497 /* Fall through */
498
499 case PA_MEMBLOCK_FIXED:
500 case PA_MEMBLOCK_APPENDED :
501 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
502 pa_xfree(b);
503
504 break;
505
506 case PA_MEMBLOCK_IMPORTED : {
507 pa_memimport_segment *segment;
508 pa_memimport *import;
509
510 /* FIXME! This should be implemented lock-free */
511
512 segment = b->per_type.imported.segment;
513 pa_assert(segment);
514 import = segment->import;
515 pa_assert(import);
516
517 pa_mutex_lock(import->mutex);
518 pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
519 if (-- segment->n_blocks <= 0)
520 segment_detach(segment);
521
522 pa_mutex_unlock(import->mutex);
523
524 import->release_cb(import, b->per_type.imported.id, import->userdata);
525
526 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
527 pa_xfree(b);
528 break;
529 }
530
531 case PA_MEMBLOCK_POOL_EXTERNAL:
532 case PA_MEMBLOCK_POOL: {
533 struct mempool_slot *slot;
534 pa_bool_t call_free;
535
536 slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
537 pa_assert(slot);
538
539 call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
540
541 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
542 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
543 /* VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
544 /* } */
545 /* #endif */
546
547 /* The free list dimensions should easily allow all slots
548 * to fit in, hence try harder if pushing this slot into
549 * the free list fails */
550 while (pa_flist_push(b->pool->free_slots, slot) < 0)
551 ;
552
553 if (call_free)
554 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
555 pa_xfree(b);
556
557 break;
558 }
559
560 case PA_MEMBLOCK_TYPE_MAX:
561 default:
562 pa_assert_not_reached();
563 }
564 }
565
566 /* No lock necessary */
567 void pa_memblock_unref(pa_memblock*b) {
568 pa_assert(b);
569 pa_assert(PA_REFCNT_VALUE(b) > 0);
570
571 if (PA_REFCNT_DEC(b) > 0)
572 return;
573
574 memblock_free(b);
575 }
576
577 /* Self locked */
578 static void memblock_wait(pa_memblock *b) {
579 pa_assert(b);
580
581 if (pa_atomic_load(&b->n_acquired) > 0) {
582 /* We need to wait until all threads have given up access to the
583 * memory block before we can go on. Unfortunately this means
584 * that we have to lock and wait here. Sniff! */
585
586 pa_atomic_inc(&b->please_signal);
587
588 while (pa_atomic_load(&b->n_acquired) > 0)
589 pa_semaphore_wait(b->pool->semaphore);
590
591 pa_atomic_dec(&b->please_signal);
592 }
593 }
594
595 /* No lock necessary. This function is not multiple caller safe! */
596 static void memblock_make_local(pa_memblock *b) {
597 pa_assert(b);
598
599 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
600
601 if (b->length <= b->pool->block_size) {
602 struct mempool_slot *slot;
603
604 if ((slot = mempool_allocate_slot(b->pool))) {
605 void *new_data;
606 /* We can move it into a local pool, perfect! */
607
608 new_data = mempool_slot_data(slot);
609 memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
610 pa_atomic_ptr_store(&b->data, new_data);
611
612 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
613 b->read_only = FALSE;
614
615 goto finish;
616 }
617 }
618
619 /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
620 b->per_type.user.free_cb = pa_xfree;
621 pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
622
623 b->type = PA_MEMBLOCK_USER;
624 b->read_only = FALSE;
625
626 finish:
627 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
628 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
629 memblock_wait(b);
630 }
631
632 /* No lock necessary. This function is not multiple caller safe */
633 void pa_memblock_unref_fixed(pa_memblock *b) {
634 pa_assert(b);
635 pa_assert(PA_REFCNT_VALUE(b) > 0);
636 pa_assert(b->type == PA_MEMBLOCK_FIXED);
637
638 if (PA_REFCNT_VALUE(b) > 1)
639 memblock_make_local(b);
640
641 pa_memblock_unref(b);
642 }
643
644 /* No lock necessary. */
645 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
646 void *p;
647
648 pa_assert(b);
649 pa_assert(PA_REFCNT_VALUE(b) > 0);
650
651 p = pa_memblock_acquire(b);
652 pa_will_need(p, b->length);
653 pa_memblock_release(b);
654
655 return b;
656 }
657
658 /* Self-locked. This function is not multiple-caller safe */
659 static void memblock_replace_import(pa_memblock *b) {
660 pa_memimport_segment *seg;
661
662 pa_assert(b);
663 pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
664
665 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
666 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
667 pa_atomic_dec(&b->pool->stat.n_imported);
668 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
669
670 seg = b->per_type.imported.segment;
671 pa_assert(seg);
672 pa_assert(seg->import);
673
674 pa_mutex_lock(seg->import->mutex);
675
676 pa_hashmap_remove(
677 seg->import->blocks,
678 PA_UINT32_TO_PTR(b->per_type.imported.id));
679
680 memblock_make_local(b);
681
682 if (-- seg->n_blocks <= 0) {
683 pa_mutex_unlock(seg->import->mutex);
684 segment_detach(seg);
685 } else
686 pa_mutex_unlock(seg->import->mutex);
687 }
688
689 pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
690 pa_mempool *p;
691 char t1[64], t2[64];
692
693 p = pa_xnew(pa_mempool, 1);
694
695 p->mutex = pa_mutex_new(TRUE, TRUE);
696 p->semaphore = pa_semaphore_new(0);
697
698 p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
699 if (p->block_size < PA_PAGE_SIZE)
700 p->block_size = PA_PAGE_SIZE;
701
702 if (size <= 0)
703 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
704 else {
705 p->n_blocks = (unsigned) (size / p->block_size);
706
707 if (p->n_blocks < 2)
708 p->n_blocks = 2;
709 }
710
711 if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
712 pa_xfree(p);
713 return NULL;
714 }
715
716 pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
717 p->memory.shared ? "shared" : "private",
718 p->n_blocks,
719 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
720 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
721 (unsigned long) pa_mempool_block_size_max(p));
722
723 memset(&p->stat, 0, sizeof(p->stat));
724 pa_atomic_store(&p->n_init, 0);
725
726 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
727 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
728
729 p->free_slots = pa_flist_new(p->n_blocks);
730
731 return p;
732 }
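/* Usage sketch (illustrative, not part of the original file): create a
 * shared pool of the default size (passing 0 as size selects
 * PA_MEMPOOL_SLOTS_MAX slots) and tear it down again once all blocks have
 * been freed:
 *
 *     pa_mempool *pool;
 *
 *     if ((pool = pa_mempool_new(TRUE, 0))) {
 *         ... allocate memblocks, import/export ...
 *         pa_mempool_free(pool);
 *     }
 */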
733
734 void pa_mempool_free(pa_mempool *p) {
735 pa_assert(p);
736
737 pa_mutex_lock(p->mutex);
738
739 while (p->imports)
740 pa_memimport_free(p->imports);
741
742 while (p->exports)
743 pa_memexport_free(p->exports);
744
745 pa_mutex_unlock(p->mutex);
746
747 pa_flist_free(p->free_slots, NULL);
748
749 if (pa_atomic_load(&p->stat.n_allocated) > 0) {
750
751 /* Ouch, somebody is retaining a memory block reference! */
752
753 #ifdef DEBUG_REF
754 unsigned i;
755 pa_flist *list;
756
757 /* Let's try to find at least one of those leaked memory blocks */
758
759 list = pa_flist_new(p->n_blocks);
760
761 for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
762 struct mempool_slot *slot;
763 pa_memblock *b, *k;
764
765 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
766 b = mempool_slot_data(slot);
767
768 while ((k = pa_flist_pop(p->free_slots))) {
769 while (pa_flist_push(list, k) < 0)
770 ;
771
772 if (b == k)
773 break;
774 }
775
776 if (!k)
777 pa_log("REF: Leaked memory block %p", b);
778
779 while ((k = pa_flist_pop(list)))
780 while (pa_flist_push(p->free_slots, k) < 0)
781 ;
782 }
783
784 pa_flist_free(list, NULL);
785
786 #endif
787
788 pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
789
790 /* PA_DEBUG_TRAP; */
791 }
792
793 pa_shm_free(&p->memory);
794
795 pa_mutex_free(p->mutex);
796 pa_semaphore_free(p->semaphore);
797
798 pa_xfree(p);
799 }
800
801 /* No lock necessary */
802 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
803 pa_assert(p);
804
805 return &p->stat;
806 }
807
808 /* No lock necessary */
809 size_t pa_mempool_block_size_max(pa_mempool *p) {
810 pa_assert(p);
811
812 return p->block_size - PA_ALIGN(sizeof(pa_memblock));
813 }
814
815 /* No lock necessary */
816 void pa_mempool_vacuum(pa_mempool *p) {
817 struct mempool_slot *slot;
818 pa_flist *list;
819
820 pa_assert(p);
821
822 list = pa_flist_new(p->n_blocks);
823
824 while ((slot = pa_flist_pop(p->free_slots)))
825 while (pa_flist_push(list, slot) < 0)
826 ;
827
828 while ((slot = pa_flist_pop(list))) {
829 pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
830
831 while (pa_flist_push(p->free_slots, slot))
832 ;
833 }
834
835 pa_flist_free(list, NULL);
836 }
837
838 /* No lock necessary */
839 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
840 pa_assert(p);
841
842 if (!p->memory.shared)
843 return -1;
844
845 *id = p->memory.id;
846
847 return 0;
848 }
849
850 /* No lock necessary */
851 pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
852 pa_assert(p);
853
854 return !!p->memory.shared;
855 }
856
857 /* For receiving blocks from other nodes */
858 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
859 pa_memimport *i;
860
861 pa_assert(p);
862 pa_assert(cb);
863
864 i = pa_xnew(pa_memimport, 1);
865 i->mutex = pa_mutex_new(TRUE, TRUE);
866 i->pool = p;
867 i->segments = pa_hashmap_new(NULL, NULL);
868 i->blocks = pa_hashmap_new(NULL, NULL);
869 i->release_cb = cb;
870 i->userdata = userdata;
871
872 pa_mutex_lock(p->mutex);
873 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
874 pa_mutex_unlock(p->mutex);
875
876 return i;
877 }
878
879 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
880
881 /* Should be called locked */
882 static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
883 pa_memimport_segment* seg;
884
885 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
886 return NULL;
887
888 seg = pa_xnew(pa_memimport_segment, 1);
889
890 if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
891 pa_xfree(seg);
892 return NULL;
893 }
894
895 seg->import = i;
896 seg->n_blocks = 0;
897 seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
898
899 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
900 return seg;
901 }
902
903 /* Should be called locked */
904 static void segment_detach(pa_memimport_segment *seg) {
905 pa_assert(seg);
906
907 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
908 pa_shm_free(&seg->memory);
909
910 if (seg->trap)
911 pa_memtrap_remove(seg->trap);
912
913 pa_xfree(seg);
914 }
915
916 /* Self-locked. Not multiple-caller safe */
917 void pa_memimport_free(pa_memimport *i) {
918 pa_memexport *e;
919 pa_memblock *b;
920
921 pa_assert(i);
922
923 pa_mutex_lock(i->mutex);
924
925 while ((b = pa_hashmap_first(i->blocks)))
926 memblock_replace_import(b);
927
928 pa_assert(pa_hashmap_size(i->segments) == 0);
929
930 pa_mutex_unlock(i->mutex);
931
932 pa_mutex_lock(i->pool->mutex);
933
934 /* If we've exported this block further we need to revoke that export */
935 for (e = i->pool->exports; e; e = e->next)
936 memexport_revoke_blocks(e, i);
937
938 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
939
940 pa_mutex_unlock(i->pool->mutex);
941
942 pa_hashmap_free(i->blocks, NULL, NULL);
943 pa_hashmap_free(i->segments, NULL, NULL);
944
945 pa_mutex_free(i->mutex);
946
947 pa_xfree(i);
948 }
949
950 /* Self-locked */
951 pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
952 pa_memblock *b = NULL;
953 pa_memimport_segment *seg;
954
955 pa_assert(i);
956
957 pa_mutex_lock(i->mutex);
958
959 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
960 goto finish;
961
962 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
963 if (!(seg = segment_attach(i, shm_id)))
964 goto finish;
965
966 if (offset+size > seg->memory.size)
967 goto finish;
968
969 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
970 b = pa_xnew(pa_memblock, 1);
971
972 PA_REFCNT_INIT(b);
973 b->pool = i->pool;
974 b->type = PA_MEMBLOCK_IMPORTED;
975 b->read_only = TRUE;
976 b->is_silence = FALSE;
977 pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
978 b->length = size;
979 pa_atomic_store(&b->n_acquired, 0);
980 pa_atomic_store(&b->please_signal, 0);
981 b->per_type.imported.id = block_id;
982 b->per_type.imported.segment = seg;
983
984 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
985
986 seg->n_blocks++;
987
988 finish:
989 pa_mutex_unlock(i->mutex);
990
991 if (b)
992 stat_add(b);
993
994 return b;
995 }
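/* Import-side sketch (illustrative, not part of the original file; the
 * callback and variable names are placeholders): a receiving node creates
 * one pa_memimport per connection and resolves the (block_id, shm_id,
 * offset, size) tuples sent by its peer into local memblocks. release_cb
 * is invoked once such a block is freed locally, so the peer can be told
 * to release the corresponding export slot:
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_cb, userdata);
 *     pa_memblock *b = pa_memimport_get(imp, block_id, shm_id, offset, size);
 *     ...
 *     pa_memblock_unref(b);   (eventually triggers release_cb for block_id)
 */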
996
997 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
998 pa_memblock *b;
999 int ret = 0;
1000 pa_assert(i);
1001
1002 pa_mutex_lock(i->mutex);
1003
1004 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1005 ret = -1;
1006 goto finish;
1007 }
1008
1009 memblock_replace_import(b);
1010
1011 finish:
1012 pa_mutex_unlock(i->mutex);
1013
1014 return ret;
1015 }
1016
1017 /* For sending blocks to other nodes */
1018 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1019 pa_memexport *e;
1020
1021 pa_assert(p);
1022 pa_assert(cb);
1023
1024 if (!p->memory.shared)
1025 return NULL;
1026
1027 e = pa_xnew(pa_memexport, 1);
1028 e->mutex = pa_mutex_new(TRUE, TRUE);
1029 e->pool = p;
1030 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1031 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1032 e->n_init = 0;
1033 e->revoke_cb = cb;
1034 e->userdata = userdata;
1035
1036 pa_mutex_lock(p->mutex);
1037 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1038 pa_mutex_unlock(p->mutex);
1039 return e;
1040 }
1041
1042 void pa_memexport_free(pa_memexport *e) {
1043 pa_assert(e);
1044
1045 pa_mutex_lock(e->mutex);
1046 while (e->used_slots)
1047 pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
1048 pa_mutex_unlock(e->mutex);
1049
1050 pa_mutex_lock(e->pool->mutex);
1051 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1052 pa_mutex_unlock(e->pool->mutex);
1053
1054 pa_mutex_free(e->mutex);
1055 pa_xfree(e);
1056 }
1057
1058 /* Self-locked */
1059 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1060 pa_memblock *b;
1061
1062 pa_assert(e);
1063
1064 pa_mutex_lock(e->mutex);
1065
1066 if (id >= e->n_init)
1067 goto fail;
1068
1069 if (!e->slots[id].block)
1070 goto fail;
1071
1072 b = e->slots[id].block;
1073 e->slots[id].block = NULL;
1074
1075 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1076 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1077
1078 pa_mutex_unlock(e->mutex);
1079
1080 /* pa_log("Processing release for %u", id); */
1081
1082 pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1083 pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1084
1085 pa_atomic_dec(&e->pool->stat.n_exported);
1086 pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1087
1088 pa_memblock_unref(b);
1089
1090 return 0;
1091
1092 fail:
1093 pa_mutex_unlock(e->mutex);
1094
1095 return -1;
1096 }
1097
1098 /* Self-locked */
1099 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1100 struct memexport_slot *slot, *next;
1101 pa_assert(e);
1102 pa_assert(i);
1103
1104 pa_mutex_lock(e->mutex);
1105
1106 for (slot = e->used_slots; slot; slot = next) {
1107 uint32_t idx;
1108 next = slot->next;
1109
1110 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1111 slot->block->per_type.imported.segment->import != i)
1112 continue;
1113
1114 idx = (uint32_t) (slot - e->slots);
1115 e->revoke_cb(e, idx, e->userdata);
1116 pa_memexport_process_release(e, idx);
1117 }
1118
1119 pa_mutex_unlock(e->mutex);
1120 }
1121
1122 /* No lock necessary */
1123 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1124 pa_memblock *n;
1125
1126 pa_assert(p);
1127 pa_assert(b);
1128
1129 if (b->type == PA_MEMBLOCK_IMPORTED ||
1130 b->type == PA_MEMBLOCK_POOL ||
1131 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1132 pa_assert(b->pool == p);
1133 return pa_memblock_ref(b);
1134 }
1135
1136 if (!(n = pa_memblock_new_pool(p, b->length)))
1137 return NULL;
1138
1139 memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1140 return n;
1141 }
1142
1143 /* Self-locked */
1144 int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
1145 pa_shm *memory;
1146 struct memexport_slot *slot;
1147 void *data;
1148
1149 pa_assert(e);
1150 pa_assert(b);
1151 pa_assert(block_id);
1152 pa_assert(shm_id);
1153 pa_assert(offset);
1154 pa_assert(size);
1155 pa_assert(b->pool == e->pool);
1156
1157 if (!(b = memblock_shared_copy(e->pool, b)))
1158 return -1;
1159
1160 pa_mutex_lock(e->mutex);
1161
1162 if (e->free_slots) {
1163 slot = e->free_slots;
1164 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1165 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1166 slot = &e->slots[e->n_init++];
1167 else {
1168 pa_mutex_unlock(e->mutex);
1169 pa_memblock_unref(b);
1170 return -1;
1171 }
1172
1173 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1174 slot->block = b;
1175 *block_id = (uint32_t) (slot - e->slots);
1176
1177 pa_mutex_unlock(e->mutex);
1178 /* pa_log("Got block id %u", *block_id); */
1179
1180 data = pa_memblock_acquire(b);
1181
1182 if (b->type == PA_MEMBLOCK_IMPORTED) {
1183 pa_assert(b->per_type.imported.segment);
1184 memory = &b->per_type.imported.segment->memory;
1185 } else {
1186 pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1187 pa_assert(b->pool);
1188 memory = &b->pool->memory;
1189 }
1190
1191 pa_assert(data >= memory->ptr);
1192 pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1193
1194 *shm_id = memory->id;
1195 *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1196 *size = b->length;
1197
1198 pa_memblock_release(b);
1199
1200 pa_atomic_inc(&e->pool->stat.n_exported);
1201 pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1202
1203 return 0;
1204 }
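/* Export-side sketch (illustrative, not part of the original file; the
 * callback and variable names are placeholders): a sending node wraps a
 * block for transfer and ships the returned identifiers over its
 * connection; once the peer reports that it is done with the block, the
 * matching slot is released again:
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *     pa_memexport *exp = pa_memexport_new(pool, revoke_cb, userdata);
 *
 *     if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) >= 0) {
 *         ... send block_id, shm_id, offset, size to the peer ...
 *         pa_memexport_process_release(exp, block_id);   (when the peer is done)
 *     }
 */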