/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <pulse/xmalloc.h>

#include <pulsecore/log.h>
#include <pulsecore/mcalign.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>

#include "memblockq.h"

struct list_item {
    struct list_item *next, *prev;
    int64_t index;
    pa_memchunk chunk;
};

PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);

struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;
    struct list_item *current_read, *current_write;
    unsigned n_blocks;
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
    int64_t read_index, write_index;
    pa_bool_t in_prebuf;
    pa_memchunk silence;
    pa_mcalign *mcalign;
    int64_t missing, requested;
};

pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu, maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->missing = bq->requested = 0;
    bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu, maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
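
/*
 * Illustrative sketch (not from this file): creating a queue for 16-bit
 * stereo frames. The numbers are assumptions chosen only to show how the
 * attributes relate; real callers derive them from a pa_buffer_attr and
 * the stream's sample spec.
 *
 *     pa_memblockq *q = pa_memblockq_new(
 *         0,           // idx: initial read and write index
 *         1024 * 1024, // maxlength: hard cap on queued bytes
 *         64 * 1024,   // tlength: target fill level
 *         4,           // base: frame size; all lengths are rounded to it
 *         16 * 1024,   // prebuf: bytes needed before playback may start
 *         4 * 1024,    // minreq: never request refills smaller than this
 *         0,           // maxrewind: keep no rewind history
 *         NULL);       // silence: NULL makes peek return memblock-less chunks
 *
 * The two debug lines above exist because each pa_memblockq_set_*() call
 * rounds its value to a multiple of base and clamps it against the values
 * applied before it, so the "sanitized" attributes can differ from the
 * requested ones.
 */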

void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    pa_memblockq_silence(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq);
}

static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}

static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}

static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}

static void drop_backlog(pa_memblockq *bq) {
    int64_t boundary;
    pa_assert(bq);

    boundary = bq->read_index - (int64_t) bq->maxrewind;

    while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
        drop_block(bq, bq->blocks);
}

static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        int64_t d = bq->read_index - bq->write_index;

        if ((int64_t) l > d)
            l -= (size_t) d;
        else
            return TRUE;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
            return FALSE;

    return TRUE;
}

static void write_index_changed(pa_memblockq *bq, int64_t old_write_index, pa_bool_t account) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->write_index - old_write_index;

    if (account)
        bq->requested -= delta;
    else
        bq->missing -= delta;

    /* pa_log("pushed/seeked %lli: requested counter at %lli, account=%i", (long long) delta, (long long) bq->requested, account); */
}

static void read_index_changed(pa_memblockq *bq, int64_t old_read_index) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->read_index - old_read_index;
    bq->missing += delta;

    /* pa_log("popped %lli: missing counter at %lli", (long long) delta, (long long) bq->missing); */
}

int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of the memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t) chunk.length > q->index &&
                      bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t) q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t) chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t) chunk.length <= bq->blocks->index));

    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    write_index_changed(bq, old, TRUE);
    return 0;
}
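
/*
 * Usage sketch (assumption, not from this file): pushing one chunk and
 * handling rejection. pa_memblockq_push() fails if the chunk length is not
 * a multiple of base or if the queue would exceed maxlength, and it takes
 * its own reference rather than consuming the caller's.
 *
 *     if (pa_memblockq_push(q, &chunk) < 0) {
 *         // queue full or misaligned; keep the chunk and retry after the
 *         // reader has dropped some data
 *     }
 *     pa_memblock_unref(chunk.memblock); // release the caller's reference
 */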

pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf)
        return pa_memblockq_get_length(bq) < bq->prebuf;
    else
        return bq->prebuf > 0 && bq->read_index >= bq->write_index;
}

static pa_bool_t update_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf) {

        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return TRUE;

        bq->in_prebuf = FALSE;
        return FALSE;
    } else {

        if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
            bq->in_prebuf = TRUE;
            return TRUE;
        }

        return FALSE;
    }
}
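
/*
 * Prebuf in brief: the queue starts with in_prebuf set, so peeks fail until
 * at least prebuf bytes are queued. Once playback has started, prebuffering
 * is re-entered only when the reader fully catches up with the writer
 * (read_index >= write_index), i.e. on a real underrun; merely dipping
 * below the prebuf watermark does not pause playback again. A caller can
 * test the state without side effects:
 *
 *     if (pa_memblockq_prebuf_active(q))
 *         ; // still buffering: feed more data before expecting peeks to work
 */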

int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {
        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
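
/*
 * Consumption sketch (assumption): the canonical read loop pairs peek with
 * drop. A peeked chunk with a NULL memblock means "this many bytes of
 * silence"; the reader must still drop that length to advance. The
 * is_readable() guard matters when a silence memchunk is configured, since
 * peek can then keep producing silence indefinitely.
 *
 *     pa_memchunk c;
 *     while (pa_memblockq_is_readable(q) && pa_memblockq_peek(q, &c) >= 0) {
 *         if (c.memblock) {
 *             // render c.length bytes starting at offset c.index
 *             pa_memblock_unref(c.memblock);
 *         } else {
 *             // render c.length bytes of silence
 *         }
 *         pa_memblockq_drop(q, c.length);
 *     }
 */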

int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchunk *chunk) {
    pa_memchunk tchunk, rchunk;
    int64_t ri;
    struct list_item *item;

    pa_assert(bq);
    pa_assert(block_size > 0);
    pa_assert(chunk);
    pa_assert(bq->silence.memblock);

    if (pa_memblockq_peek(bq, &tchunk) < 0)
        return -1;

    if (tchunk.length >= block_size) {
        *chunk = tchunk;
        chunk->length = block_size;
        return 0;
    }

    rchunk.memblock = pa_memblock_new(pa_memblock_get_pool(tchunk.memblock), block_size);
    rchunk.index = 0;
    rchunk.length = tchunk.length;

    pa_memchunk_memcpy(&rchunk, &tchunk);
    pa_memblock_unref(tchunk.memblock);

    rchunk.index += tchunk.length;

    /* We don't need to call fix_current_read() here, since
     * pa_memblockq_peek() already did that */
    item = bq->current_read;
    ri = bq->read_index + tchunk.length;

    while (rchunk.index < block_size) {

        if (!item || item->index > ri) {
            /* Do we need to append silence? */
            tchunk = bq->silence;

            if (item)
                tchunk.length = PA_MIN(tchunk.length, (size_t) (item->index - ri));

        } else {
            int64_t d;

            /* We can append real data! */
            tchunk = item->chunk;

            d = ri - item->index;
            tchunk.index += (size_t) d;
            tchunk.length -= (size_t) d;

            /* Go to next item for the next iteration */
            item = item->next;
        }

        rchunk.length = tchunk.length = PA_MIN(tchunk.length, block_size - rchunk.index);
        pa_memchunk_memcpy(&rchunk, &tchunk);

        rchunk.index += rchunk.length;
        ri += rchunk.length;
    }

    rchunk.index = 0;
    rchunk.length = block_size;

    *chunk = rchunk;
    return 0;
}
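
/*
 * Sketch (assumption): fixed-block consumers, e.g. a sink that always wants
 * one period of audio, use the fixed-size variant. It requires a configured
 * silence memchunk, because short reads are padded with silence into a
 * freshly allocated block. "period_bytes" is a hypothetical variable.
 *
 *     pa_memchunk c;
 *     if (pa_memblockq_peek_fixed_size(q, period_bytes, &c) >= 0) {
 *         // c.length == period_bytes; c.index is 0 unless the fast path
 *         // returned a slice of an existing block
 *         pa_memblock_unref(c.memblock);
 *         pa_memblockq_drop(q, period_bytes);
 *     }
 */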

void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);
    read_index_changed(bq, old);
}

void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    /* This is kind of the inverse of pa_memblockq_drop() */

    bq->read_index -= (int64_t) length;

    read_index_changed(bq, old);
}
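
/*
 * Rewind sketch (assumption): when a consumer rewrites already-read audio
 * (e.g. after a volume change), it moves the read pointer back by a
 * base-aligned amount, bounded by the backlog that drop_backlog() has kept.
 * "bytes_to_rewrite" is a hypothetical variable.
 *
 *     size_t r = PA_MIN(bytes_to_rewrite, pa_memblockq_get_maxrewind(q));
 *     r -= r % pa_memblockq_get_base(q); // the API asserts base alignment
 *     pa_memblockq_rewind(q, r);
 */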

pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
    pa_assert(bq);

    if (pa_memblockq_prebuf_active(bq))
        return FALSE;

    if (pa_memblockq_get_length(bq) <= 0)
        return FALSE;

    return TRUE;
}

size_t pa_memblockq_get_length(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->write_index <= bq->read_index)
        return 0;

    return (size_t) (bq->write_index - bq->read_index);
}

size_t pa_memblockq_missing(pa_memblockq *bq) {
    size_t l;
    pa_assert(bq);

    if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
        return 0;

    l = bq->tlength - l;

    return l >= bq->minreq ? l : 0;
}
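
/*
 * Worked example (illustrative numbers): with tlength = 64 KiB and
 * minreq = 4 KiB, a queue holding 62 KiB has a 2 KiB shortfall, which is
 * below minreq, so pa_memblockq_missing() reports 0 and no refill is
 * requested; at 59 KiB the 5 KiB shortfall is reported in full. This
 * hysteresis keeps clients from being asked for uselessly small writes.
 */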

void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, pa_bool_t account) {
    int64_t old;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);
    write_index_changed(bq, old, account);
}
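
/*
 * Seek sketch (assumption): moving the write pointer, e.g. when a client
 * stream requests it. Only the write index moves; data already behind the
 * read index is not replayed, and drop_backlog() trims anything older than
 * maxrewind. "target_index" is a hypothetical variable.
 *
 *     // continue appending at the very end of the queued data:
 *     pa_memblockq_seek(q, 0, PA_SEEK_RELATIVE_END, TRUE);
 *     // jump the write pointer to an absolute byte index:
 *     pa_memblockq_seek(q, target_index, PA_SEEK_ABSOLUTE, TRUE);
 */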

void pa_memblockq_flush_write(pa_memblockq *bq, pa_bool_t account) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);
    write_index_changed(bq, old, account);
}

void pa_memblockq_flush_read(pa_memblockq *bq) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->read_index;
    bq->read_index = bq->write_index;

    pa_memblockq_prebuf_force(bq);
    read_index_changed(bq, old);
}

size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}

size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}

size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}

int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}

int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}

int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}
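
/*
 * Alignment sketch (assumption): network code may receive chunks that are
 * not multiples of the frame size. pa_memblockq_push_align() parks the
 * unaligned remainder in the queue's pa_mcalign until enough bytes arrive
 * to form full frames, so plain push()'s "length % base" rejection never
 * fires for such input.
 *
 *     // chunk.length may be arbitrary here:
 *     if (pa_memblockq_push_align(q, &chunk) < 0)
 *         ; // the queue is genuinely full, not merely misaligned
 *     pa_memblock_unref(chunk.memblock);
 */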

void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = FALSE;
}

void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = TRUE;
}

size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}

size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}

size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

    /* pa_log("pop: %lli", bq->missing); */

    if (bq->missing <= 0)
        return 0;

    l = (size_t) bq->missing;

    bq->requested += bq->missing;
    bq->missing = 0;

    /* pa_log("sent %lli: request counter is at %lli", (long long) l, (long long) bq->requested); */

    return l;
}
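
/*
 * Accounting sketch (assumption): the missing/requested counters implement
 * request-based flow control. A server loop periodically asks the queue how
 * much to request from the client; pushes made in response are then booked
 * against "requested" (the account = TRUE path in write_index_changed()),
 * while reads grow "missing" again.
 *
 *     size_t n = pa_memblockq_pop_missing(q);
 *     if (n > 0)
 *         ; // ask the client for n more bytes
 */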

void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength + bq->base - 1) / bq->base) * bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);
}

void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0 || tlength == (size_t) -1)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength + bq->base - 1) / bq->base) * bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    if (bq->prebuf > bq->tlength + bq->base - bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength + bq->base - bq->minreq);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}

void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq / bq->base) * bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;

    if (bq->prebuf > bq->tlength + bq->base - bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength + bq->base - bq->minreq);
}

void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength + bq->base - bq->minreq;

    bq->prebuf = ((prebuf + bq->base - 1) / bq->base) * bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength + bq->base - bq->minreq)
        bq->prebuf = bq->tlength + bq->base - bq->minreq;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;
}

void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind / bq->base) * bq->base;
}

void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_prebuf(bq, a->prebuf);
    pa_memblockq_set_minreq(bq, a->minreq);
}
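
/*
 * Sketch (assumption): server code typically maps a client-supplied
 * pa_buffer_attr straight onto the queue. Order matters: maxlength is
 * applied first and every later setter is clamped against it, mirroring
 * the constructor.
 *
 *     pa_buffer_attr a;
 *     pa_memblockq_get_attr(q, &a);   // start from the current values
 *     a.tlength = a.maxlength / 2;    // illustrative new target length
 *     pa_memblockq_apply_attr(q, &a);
 *     pa_memblockq_get_attr(q, &a);   // read back the sanitized values
 */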

void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
    a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
    a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
    a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
}

int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, TRUE);
        pa_memblockq_drop(source, chunk.length);
    }
}
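
/*
 * Splice sketch (assumption): move everything readable from "src" into
 * "dst", e.g. when handing buffered data from a staging queue to a live
 * one. Silence stretches in the source are forwarded as relative seeks
 * rather than copied, and the source is drained as the loop proceeds.
 *
 *     if (pa_memblockq_splice(dst, src) < 0)
 *         ; // dst ran out of space mid-transfer
 */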

void pa_memblockq_willneed(pa_memblockq *bq) {
    struct list_item *q;

    pa_assert(bq);

    fix_current_read(bq);

    for (q = bq->current_read; q; q = q->next)
        pa_memchunk_will_need(&q->chunk);
}

void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
    pa_assert(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);
}

pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}

void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}

unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}

size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}