]> code.delx.au - pulseaudio/blob - src/pulsecore/memblockq.c
native: fix request counter miscalculations
[pulseaudio] / src / pulsecore / memblockq.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published
8 by the Free Software Foundation; either version 2.1 of the License,
9 or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with PulseAudio; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA.
20 ***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <sys/time.h>
27 #include <time.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31
32 #include <pulse/xmalloc.h>
33
34 #include <pulsecore/log.h>
35 #include <pulsecore/mcalign.h>
36 #include <pulsecore/macro.h>
37 #include <pulsecore/flist.h>
38
39 #include "memblockq.h"
40
/* A queue entry: one memchunk plus the absolute byte index at which it
 * sits in the stream. Entries form a doubly linked list ordered by
 * index; gaps between entries are rendered as silence on read. */
struct list_item {
    struct list_item *next, *prev;
    int64_t index;   /* absolute stream position this chunk starts at */
    pa_memchunk chunk;
};

/* Lock-free free list used to recycle list_item allocations. */
PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
48
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;         /* head/tail of the chunk list */
    struct list_item *current_read, *current_write; /* cached cursors to avoid full list scans */
    unsigned n_blocks;                              /* number of entries in the list */
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind; /* buffer metrics; kept multiples of base by the setters */
    int64_t read_index, write_index;                /* absolute stream positions in bytes */
    pa_bool_t in_prebuf;                            /* TRUE while we are pre-buffering */
    pa_memchunk silence;                            /* template chunk used to fill holes; may be unset */
    pa_mcalign *mcalign;                            /* aligner used by pa_memblockq_push_align() */
    int64_t missing, requested;                     /* request accounting towards the client */
};
60
/* Create a new memblockq whose read and write pointers start at the
 * absolute index 'idx'. The limit parameters are sanitized through the
 * pa_memblockq_set_*() calls below, which clamp them against each
 * other; the fields are zeroed first so the setters start from a known
 * state, and both requested and sanitized values are logged. If
 * 'silence' is given, a reference is taken and it is used to fill
 * holes in the queue. */
pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->missing = bq->requested = 0;
    bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    /* The setters clamp against each other's current values; keep this order */
    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
109
110 void pa_memblockq_free(pa_memblockq* bq) {
111 pa_assert(bq);
112
113 pa_memblockq_silence(bq);
114
115 if (bq->silence.memblock)
116 pa_memblock_unref(bq->silence.memblock);
117
118 if (bq->mcalign)
119 pa_mcalign_free(bq->mcalign);
120
121 pa_xfree(bq);
122 }
123
/* Re-seat the cached read cursor so it points at the entry containing
 * bq->read_index, or at the first entry to its right. Since the cursor
 * is usually already close, this is cheap in the common case. */
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}
151
/* Re-seat the cached write cursor so it points at the entry containing
 * bq->write_index, or at the first entry to its left. Mirror image of
 * fix_current_read(), starting from the tail. */
static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
179
/* Unlink 'q' from the list, keep the cached cursors valid, release the
 * memblock reference and recycle the list_item via the free list. */
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    /* Keep the cursors pointing at valid entries: the write cursor
     * retreats, the read cursor advances, matching their scan bias */
    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    /* Recycle the item, falling back to freeing it if the flist is full */
    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}
213
214 static void drop_backlog(pa_memblockq *bq) {
215 int64_t boundary;
216 pa_assert(bq);
217
218 boundary = bq->read_index - (int64_t) bq->maxrewind;
219
220 while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
221 drop_block(bq, bq->blocks);
222 }
223
224 static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
225 int64_t end;
226
227 pa_assert(bq);
228
229 if (bq->read_index > bq->write_index) {
230 int64_t d = bq->read_index - bq->write_index;
231
232 if ((int64_t) l > d)
233 l -= (size_t) d;
234 else
235 return TRUE;
236 }
237
238 end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;
239
240 /* Make sure that the list doesn't get too long */
241 if (bq->write_index + (int64_t) l > end)
242 if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
243 return FALSE;
244
245 return TRUE;
246 }
247
248 static void write_index_changed(pa_memblockq *bq, int64_t old_write_index, pa_bool_t account) {
249 int64_t delta;
250
251 pa_assert(bq);
252
253 delta = bq->write_index - old_write_index;
254
255 if (account)
256 bq->requested -= delta;
257
258 /* pa_log("pushed/seeked %lli: requested counter at %lli, account=%i", (long long) delta, (long long) bq->requested, account); */
259 }
260
261 static void read_index_changed(pa_memblockq *bq, int64_t old_read_index) {
262 int64_t delta;
263
264 pa_assert(bq);
265
266 delta = bq->read_index - old_read_index;
267 bq->missing += delta;
268
269 /* pa_log("popped %lli: missing counter at %lli", (long long) delta, (long long) bq->missing); */
270 }
271
/* Insert 'uchunk' at the current write index. Existing entries that the
 * new data overlaps are dropped, truncated or split as needed, so a
 * push may overwrite previously queued (not yet read) data. The chunk
 * length must be a multiple of 'base' and fit per can_push(); a
 * reference to the memblock is taken. Returns 0 on success, -1 on
 * misalignment or overflow. */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk; if nothing of it remains, drop it entirely */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    /* Allocate (or recycle) a new entry and link it in after q */
    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    write_index_changed(bq, old, TRUE);
    return 0;
}
438
439 pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
440 pa_assert(bq);
441
442 if (bq->in_prebuf)
443 return pa_memblockq_get_length(bq) < bq->prebuf;
444 else
445 return bq->prebuf > 0 && bq->read_index >= bq->write_index;
446 }
447
448 static pa_bool_t update_prebuf(pa_memblockq *bq) {
449 pa_assert(bq);
450
451 if (bq->in_prebuf) {
452
453 if (pa_memblockq_get_length(bq) < bq->prebuf)
454 return TRUE;
455
456 bq->in_prebuf = FALSE;
457 return FALSE;
458 } else {
459
460 if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
461 bq->in_prebuf = TRUE;
462 return TRUE;
463 }
464
465 return FALSE;
466 }
467 }
468
/* Return (a reference to) the next chunk to be read, without consuming
 * it. Returns -1 while pre-buffering, or when the queue is empty and
 * no silence template is set. If the read position lies in a hole, the
 * silence chunk (memblock set) or a NULL-memblock chunk carrying only
 * a length is returned instead of real data. The caller owns the
 * returned memblock reference, if any. */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            /* Clamp to the hole size; length == 0 means "unbounded" */
            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    /* Skip the part of the chunk that lies before the read index */
    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
527
/* Consume 'length' bytes: advance the read index chunk by chunk so
 * that pre-buffering can kick in part-way through an underrun. After
 * advancing, entries older than the rewind window are released and the
 * missing counter is updated. 'length' must be a multiple of 'base'. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);
    read_index_changed(bq, old);
}
570
571 void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
572 int64_t old;
573 pa_assert(bq);
574 pa_assert(length % bq->base == 0);
575
576 old = bq->read_index;
577
578 /* This is kind of the inverse of pa_memblockq_drop() */
579
580 bq->read_index -= (int64_t) length;
581
582 read_index_changed(bq, old);
583 }
584
585 pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
586 pa_assert(bq);
587
588 if (pa_memblockq_prebuf_active(bq))
589 return FALSE;
590
591 if (pa_memblockq_get_length(bq) <= 0)
592 return FALSE;
593
594 return TRUE;
595 }
596
597 size_t pa_memblockq_get_length(pa_memblockq *bq) {
598 pa_assert(bq);
599
600 if (bq->write_index <= bq->read_index)
601 return 0;
602
603 return (size_t) (bq->write_index - bq->read_index);
604 }
605
606 size_t pa_memblockq_missing(pa_memblockq *bq) {
607 size_t l;
608 pa_assert(bq);
609
610 if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
611 return 0;
612
613 l = bq->tlength - l;
614
615 return l >= bq->minreq ? l : 0;
616 }
617
618 void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, pa_bool_t account) {
619 int64_t old;
620 pa_assert(bq);
621
622 old = bq->write_index;
623
624 switch (seek) {
625 case PA_SEEK_RELATIVE:
626 bq->write_index += offset;
627 break;
628 case PA_SEEK_ABSOLUTE:
629 bq->write_index = offset;
630 break;
631 case PA_SEEK_RELATIVE_ON_READ:
632 bq->write_index = bq->read_index + offset;
633 break;
634 case PA_SEEK_RELATIVE_END:
635 bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
636 break;
637 default:
638 pa_assert_not_reached();
639 }
640
641 drop_backlog(bq);
642 write_index_changed(bq, old, account);
643 }
644
645 void pa_memblockq_flush_write(pa_memblockq *bq) {
646 int64_t old;
647 pa_assert(bq);
648
649 pa_memblockq_silence(bq);
650
651 old = bq->write_index;
652 bq->write_index = bq->read_index;
653
654 pa_memblockq_prebuf_force(bq);
655 write_index_changed(bq, old, TRUE);
656 }
657
658 void pa_memblockq_flush_read(pa_memblockq *bq) {
659 int64_t old;
660 pa_assert(bq);
661
662 pa_memblockq_silence(bq);
663
664 old = bq->read_index;
665 bq->read_index = bq->write_index;
666
667 pa_memblockq_prebuf_force(bq);
668 read_index_changed(bq, old);
669 }
670
/* Return the (sanitized) target length in bytes. */
size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}

/* Return the (sanitized) minimum request size in bytes. */
size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}

/* Return the (sanitized) maximum rewind distance in bytes. */
size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}

/* Return the absolute read index. */
int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}

/* Return the absolute write index. */
int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}
700
/* Like pa_memblockq_push(), but routes the data through the mcalign
 * object so only multiples of 'base' are ever pushed; unaligned
 * remainders stay buffered in the aligner across calls. */
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    /* base == 1: every length is already aligned, push directly */
    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    /* Check against the aligned size the aligner will actually emit */
    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            /* NOTE(review): data still buffered in the aligner is
             * discarded here on failure — confirm this matches caller
             * expectations */
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}
728
/* Leave pre-buffering mode unconditionally: reading may proceed right away. */
void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = FALSE;
}

/* Re-enter pre-buffering mode; a no-op when no prebuf level is configured. */
void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = TRUE;
}
741
/* Return the (sanitized) absolute length limit in bytes. */
size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}

/* Return the (sanitized) pre-buffer fill level in bytes. */
size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}
753
754 size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
755 size_t l;
756
757 pa_assert(bq);
758
759 /* pa_log("pop: %lli", bq->missing); */
760
761 if (bq->missing <= 0)
762 return 0;
763
764 l = (size_t) bq->missing;
765
766 bq->requested += bq->missing;
767 bq->missing = 0;
768
769 /* pa_log("sent %lli: request counter is at %lli", (long long) l, (long long) bq->requested); */
770
771 return l;
772 }
773
/* Set the absolute length limit, rounded up to a multiple of 'base'
 * (but at least one frame). Shrinks tlength when it would now exceed
 * the new maximum, which in turn re-clamps minreq and prebuf. */
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);
}
785
/* Set the target length. 0 or (size_t) -1 selects maxlength. The value
 * is rounded up to a multiple of 'base' and clamped to maxlength;
 * minreq and prebuf are re-clamped to stay mutually consistent. The
 * 'missing' counter absorbs the tlength delta so request accounting
 * towards the client stays correct across the change. */
void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0 || tlength == (size_t) -1)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);

    /* Adjust the deficit by how much the target grew or shrank */
    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}
807
/* Set the minimum request size, rounded down to a multiple of 'base'
 * and clamped into [base, tlength]. prebuf is re-clamped so that
 * prebuf <= tlength + base - minreq keeps holding. */
void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
}
822
/* Set the pre-buffer fill level. (size_t) -1 selects the maximum
 * sensible value, tlength + base - minreq. The value is rounded up to
 * a multiple of 'base' (at least one frame when non-zero) and clamped.
 * Pre-buffering is left immediately when the new level is already
 * satisfied or zero. */
void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength+bq->base-bq->minreq;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        bq->prebuf = bq->tlength+bq->base-bq->minreq;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;
}
840
841 void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
842 pa_assert(bq);
843
844 bq->maxrewind = (maxrewind/bq->base)*bq->base;
845 }
846
/* Apply a pa_buffer_attr in one go. The setter order matters, since
 * each setter clamps against the values the earlier ones established. */
void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_prebuf(bq, a->prebuf);
    pa_memblockq_set_minreq(bq, a->minreq);
}
856
857 void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
858 pa_assert(bq);
859 pa_assert(a);
860
861 a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
862 a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
863 a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
864 a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
865 }
866
/* Transfer data peeked from 'source' into 'bq' until the source has
 * nothing left to offer. Holes in the source (NULL memblock) are
 * translated into relative seeks on 'bq'. Returns 0 when the source is
 * drained, -1 when a push fails. */
int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, TRUE);

        /* NOTE(review): the drop is applied to 'bq', not 'source',
         * even though the peek was on 'source' — verify against the
         * callers' intended pass-through semantics */
        pa_memblockq_drop(bq, chunk.length);
    }
}
896
897 void pa_memblockq_willneed(pa_memblockq *bq) {
898 struct list_item *q;
899
900 pa_assert(bq);
901
902 fix_current_read(bq);
903
904 for (q = bq->current_read; q; q = q->next)
905 pa_memchunk_will_need(&q->chunk);
906 }
907
908 void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
909 pa_assert(bq);
910
911 if (bq->silence.memblock)
912 pa_memblock_unref(bq->silence.memblock);
913
914 if (silence) {
915 bq->silence = *silence;
916 pa_memblock_ref(bq->silence.memblock);
917 } else
918 pa_memchunk_reset(&bq->silence);
919 }
920
921 pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
922 pa_assert(bq);
923
924 return !bq->blocks;
925 }
926
927 void pa_memblockq_silence(pa_memblockq *bq) {
928 pa_assert(bq);
929
930 while (bq->blocks)
931 drop_block(bq, bq->blocks);
932
933 pa_assert(bq->n_blocks == 0);
934 }
935
/* Return the number of list entries currently queued. */
unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}

/* Return the frame size all metrics are multiples of. */
size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}