/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <pulse/xmalloc.h>

#include <pulsecore/log.h>
#include <pulsecore/mcalign.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>

#include "memblockq.h"
42 struct list_item
*next
, *prev
;
47 PA_STATIC_FLIST_DECLARE(list_items
, 0, pa_xfree
);
50 struct list_item
*blocks
, *blocks_tail
;
51 struct list_item
*current_read
, *current_write
;
53 size_t maxlength
, tlength
, base
, prebuf
, minreq
, maxrewind
;
54 int64_t read_index
, write_index
;
58 int64_t missing
, requested
;
61 pa_memblockq
* pa_memblockq_new(
69 pa_memchunk
*silence
) {
75 bq
= pa_xnew(pa_memblockq
, 1);
76 bq
->blocks
= bq
->blocks_tail
= NULL
;
77 bq
->current_read
= bq
->current_write
= NULL
;
81 bq
->read_index
= bq
->write_index
= idx
;
83 pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
84 (unsigned long) maxlength
, (unsigned long) tlength
, (unsigned long) base
, (unsigned long) prebuf
, (unsigned long) minreq
, (unsigned long) maxrewind
);
86 bq
->missing
= bq
->requested
= 0;
87 bq
->maxlength
= bq
->tlength
= bq
->prebuf
= bq
->minreq
= bq
->maxrewind
= 0;
90 pa_memblockq_set_maxlength(bq
, maxlength
);
91 pa_memblockq_set_tlength(bq
, tlength
);
92 pa_memblockq_set_minreq(bq
, minreq
);
93 pa_memblockq_set_prebuf(bq
, prebuf
);
94 pa_memblockq_set_maxrewind(bq
, maxrewind
);
96 pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
97 (unsigned long) bq
->maxlength
, (unsigned long) bq
->tlength
, (unsigned long) bq
->base
, (unsigned long) bq
->prebuf
, (unsigned long) bq
->minreq
, (unsigned long) bq
->maxrewind
);
100 bq
->silence
= *silence
;
101 pa_memblock_ref(bq
->silence
.memblock
);
103 pa_memchunk_reset(&bq
->silence
);
105 bq
->mcalign
= pa_mcalign_new(bq
->base
);
110 void pa_memblockq_free(pa_memblockq
* bq
) {
113 pa_memblockq_silence(bq
);
115 if (bq
->silence
.memblock
)
116 pa_memblock_unref(bq
->silence
.memblock
);
119 pa_mcalign_free(bq
->mcalign
);
124 static void fix_current_read(pa_memblockq
*bq
) {
127 if (PA_UNLIKELY(!bq
->blocks
)) {
128 bq
->current_read
= NULL
;
132 if (PA_UNLIKELY(!bq
->current_read
))
133 bq
->current_read
= bq
->blocks
;
136 while (PA_UNLIKELY(bq
->current_read
->index
> bq
->read_index
))
138 if (bq
->current_read
->prev
)
139 bq
->current_read
= bq
->current_read
->prev
;
144 while (PA_LIKELY(bq
->current_read
!= NULL
) && PA_UNLIKELY(bq
->current_read
->index
+ (int64_t) bq
->current_read
->chunk
.length
<= bq
->read_index
))
145 bq
->current_read
= bq
->current_read
->next
;
147 /* At this point current_read will either point at or left of the
148 next block to play. It may be NULL in case everything in
149 the queue was already played */
152 static void fix_current_write(pa_memblockq
*bq
) {
155 if (PA_UNLIKELY(!bq
->blocks
)) {
156 bq
->current_write
= NULL
;
160 if (PA_UNLIKELY(!bq
->current_write
))
161 bq
->current_write
= bq
->blocks_tail
;
164 while (PA_UNLIKELY(bq
->current_write
->index
+ (int64_t) bq
->current_write
->chunk
.length
<= bq
->write_index
))
166 if (bq
->current_write
->next
)
167 bq
->current_write
= bq
->current_write
->next
;
172 while (PA_LIKELY(bq
->current_write
!= NULL
) && PA_UNLIKELY(bq
->current_write
->index
> bq
->write_index
))
173 bq
->current_write
= bq
->current_write
->prev
;
175 /* At this point current_write will either point at or right of
176 the next block to write data to. It may be NULL in case
177 everything in the queue is still to be played */
180 static void drop_block(pa_memblockq
*bq
, struct list_item
*q
) {
184 pa_assert(bq
->n_blocks
>= 1);
187 q
->prev
->next
= q
->next
;
189 pa_assert(bq
->blocks
== q
);
190 bq
->blocks
= q
->next
;
194 q
->next
->prev
= q
->prev
;
196 pa_assert(bq
->blocks_tail
== q
);
197 bq
->blocks_tail
= q
->prev
;
200 if (bq
->current_write
== q
)
201 bq
->current_write
= q
->prev
;
203 if (bq
->current_read
== q
)
204 bq
->current_read
= q
->next
;
206 pa_memblock_unref(q
->chunk
.memblock
);
208 if (pa_flist_push(PA_STATIC_FLIST_GET(list_items
), q
) < 0)
214 static void drop_backlog(pa_memblockq
*bq
) {
218 boundary
= bq
->read_index
- (int64_t) bq
->maxrewind
;
220 while (bq
->blocks
&& (bq
->blocks
->index
+ (int64_t) bq
->blocks
->chunk
.length
<= boundary
))
221 drop_block(bq
, bq
->blocks
);
224 static pa_bool_t
can_push(pa_memblockq
*bq
, size_t l
) {
229 if (bq
->read_index
> bq
->write_index
) {
230 int64_t d
= bq
->read_index
- bq
->write_index
;
238 end
= bq
->blocks_tail
? bq
->blocks_tail
->index
+ (int64_t) bq
->blocks_tail
->chunk
.length
: bq
->write_index
;
240 /* Make sure that the list doesn't get too long */
241 if (bq
->write_index
+ (int64_t) l
> end
)
242 if (bq
->write_index
+ (int64_t) l
- bq
->read_index
> (int64_t) bq
->maxlength
)
248 static void write_index_changed(pa_memblockq
*bq
, int64_t old_write_index
, pa_bool_t account
) {
253 delta
= bq
->write_index
- old_write_index
;
256 bq
->requested
-= delta
;
258 bq
->missing
-= delta
;
260 /* pa_log("pushed/seeked %lli: requested counter at %lli, account=%i", (long long) delta, (long long) bq->requested, account); */
263 static void read_index_changed(pa_memblockq
*bq
, int64_t old_read_index
) {
268 delta
= bq
->read_index
- old_read_index
;
269 bq
->missing
+= delta
;
271 /* pa_log("popped %lli: missing counter at %lli", (long long) delta, (long long) bq->missing); */
274 int pa_memblockq_push(pa_memblockq
* bq
, const pa_memchunk
*uchunk
) {
275 struct list_item
*q
, *n
;
281 pa_assert(uchunk
->memblock
);
282 pa_assert(uchunk
->length
> 0);
283 pa_assert(uchunk
->index
+ uchunk
->length
<= pa_memblock_get_length(uchunk
->memblock
));
285 if (uchunk
->length
% bq
->base
)
288 if (!can_push(bq
, uchunk
->length
))
291 old
= bq
->write_index
;
294 fix_current_write(bq
);
295 q
= bq
->current_write
;
297 /* First we advance the q pointer right of where we want to
301 while (bq
->write_index
+ (int64_t) chunk
.length
> q
->index
)
311 /* We go from back to front to look for the right place to add
312 * this new entry. Drop data we will overwrite on the way */
316 if (bq
->write_index
>= q
->index
+ (int64_t) q
->chunk
.length
)
317 /* We found the entry where we need to place the new entry immediately after */
319 else if (bq
->write_index
+ (int64_t) chunk
.length
<= q
->index
) {
320 /* This entry isn't touched at all, let's skip it */
322 } else if (bq
->write_index
<= q
->index
&&
323 bq
->write_index
+ (int64_t) chunk
.length
>= q
->index
+ (int64_t) q
->chunk
.length
) {
325 /* This entry is fully replaced by the new entry, so let's drop it */
331 } else if (bq
->write_index
>= q
->index
) {
332 /* The write index points into this memblock, so let's
333 * truncate or split it */
335 if (bq
->write_index
+ (int64_t) chunk
.length
< q
->index
+ (int64_t) q
->chunk
.length
) {
337 /* We need to save the end of this memchunk */
341 /* Create a new list entry for the end of thie memchunk */
342 if (!(p
= pa_flist_pop(PA_STATIC_FLIST_GET(list_items
))))
343 p
= pa_xnew(struct list_item
, 1);
346 pa_memblock_ref(p
->chunk
.memblock
);
348 /* Calculate offset */
349 d
= (size_t) (bq
->write_index
+ (int64_t) chunk
.length
- q
->index
);
352 /* Drop it from the new entry */
353 p
->index
= q
->index
+ (int64_t) d
;
354 p
->chunk
.length
-= d
;
356 /* Add it to the list */
358 if ((p
->next
= q
->next
))
367 /* Truncate the chunk */
368 if (!(q
->chunk
.length
= (size_t) (bq
->write_index
- q
->index
))) {
375 /* We had to truncate this block, hence we're now at the right position */
380 pa_assert(bq
->write_index
+ (int64_t)chunk
.length
> q
->index
&&
381 bq
->write_index
+ (int64_t)chunk
.length
< q
->index
+ (int64_t)q
->chunk
.length
&&
382 bq
->write_index
< q
->index
);
384 /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */
386 d
= (size_t) (bq
->write_index
+ (int64_t) chunk
.length
- q
->index
);
387 q
->index
+= (int64_t) d
;
389 q
->chunk
.length
-= d
;
396 pa_assert(bq
->write_index
>= q
->index
+ (int64_t)q
->chunk
.length
);
397 pa_assert(!q
->next
|| (bq
->write_index
+ (int64_t)chunk
.length
<= q
->next
->index
));
399 /* Try to merge memory blocks */
401 if (q
->chunk
.memblock
== chunk
.memblock
&&
402 q
->chunk
.index
+ q
->chunk
.length
== chunk
.index
&&
403 bq
->write_index
== q
->index
+ (int64_t) q
->chunk
.length
) {
405 q
->chunk
.length
+= chunk
.length
;
406 bq
->write_index
+= (int64_t) chunk
.length
;
410 pa_assert(!bq
->blocks
|| (bq
->write_index
+ (int64_t)chunk
.length
<= bq
->blocks
->index
));
412 if (!(n
= pa_flist_pop(PA_STATIC_FLIST_GET(list_items
))))
413 n
= pa_xnew(struct list_item
, 1);
416 pa_memblock_ref(n
->chunk
.memblock
);
417 n
->index
= bq
->write_index
;
418 bq
->write_index
+= (int64_t) n
->chunk
.length
;
420 n
->next
= q
? q
->next
: bq
->blocks
;
437 write_index_changed(bq
, old
, TRUE
);
441 pa_bool_t
pa_memblockq_prebuf_active(pa_memblockq
*bq
) {
445 return pa_memblockq_get_length(bq
) < bq
->prebuf
;
447 return bq
->prebuf
> 0 && bq
->read_index
>= bq
->write_index
;
450 static pa_bool_t
update_prebuf(pa_memblockq
*bq
) {
455 if (pa_memblockq_get_length(bq
) < bq
->prebuf
)
458 bq
->in_prebuf
= FALSE
;
462 if (bq
->prebuf
> 0 && bq
->read_index
>= bq
->write_index
) {
463 bq
->in_prebuf
= TRUE
;
471 int pa_memblockq_peek(pa_memblockq
* bq
, pa_memchunk
*chunk
) {
476 /* We need to pre-buffer */
477 if (update_prebuf(bq
))
480 fix_current_read(bq
);
482 /* Do we need to spit out silence? */
483 if (!bq
->current_read
|| bq
->current_read
->index
> bq
->read_index
) {
486 /* How much silence shall we return? */
487 if (bq
->current_read
)
488 length
= (size_t) (bq
->current_read
->index
- bq
->read_index
);
489 else if (bq
->write_index
> bq
->read_index
)
490 length
= (size_t) (bq
->write_index
- bq
->read_index
);
494 /* We need to return silence, since no data is yet available */
495 if (bq
->silence
.memblock
) {
496 *chunk
= bq
->silence
;
497 pa_memblock_ref(chunk
->memblock
);
499 if (length
> 0 && length
< chunk
->length
)
500 chunk
->length
= length
;
504 /* If the memblockq is empty, return -1, otherwise return
505 * the time to sleep */
509 chunk
->memblock
= NULL
;
510 chunk
->length
= length
;
517 /* Ok, let's pass real data to the caller */
518 *chunk
= bq
->current_read
->chunk
;
519 pa_memblock_ref(chunk
->memblock
);
521 pa_assert(bq
->read_index
>= bq
->current_read
->index
);
522 d
= bq
->read_index
- bq
->current_read
->index
;
523 chunk
->index
+= (size_t) d
;
524 chunk
->length
-= (size_t) d
;
529 int pa_memblockq_peek_fixed_size(pa_memblockq
*bq
, size_t block_size
, pa_memchunk
*chunk
) {
530 pa_memchunk tchunk
, rchunk
;
532 struct list_item
*item
;
535 pa_assert(block_size
> 0);
537 pa_assert(bq
->silence
.memblock
);
539 if (pa_memblockq_peek(bq
, &tchunk
) < 0)
542 if (tchunk
.length
>= block_size
) {
544 chunk
->length
= block_size
;
548 rchunk
.memblock
= pa_memblock_new(pa_memblock_get_pool(tchunk
.memblock
), block_size
);
550 rchunk
.length
= tchunk
.length
;
552 pa_memchunk_memcpy(&rchunk
, &tchunk
);
553 pa_memblock_unref(tchunk
.memblock
);
555 rchunk
.index
+= tchunk
.length
;
557 /* We don't need to call fix_current_read() here, since
558 * pa_memblock_peek() already did that */
559 item
= bq
->current_read
;
560 ri
= bq
->read_index
+ tchunk
.length
;
562 while (rchunk
.index
< block_size
) {
564 if (!item
|| item
->index
> ri
) {
565 /* Do we need to append silence? */
566 tchunk
= bq
->silence
;
569 tchunk
.length
= PA_MIN(tchunk
.length
, (size_t) (item
->index
- ri
));
574 /* We can append real data! */
575 tchunk
= item
->chunk
;
577 d
= ri
- item
->index
;
578 tchunk
.index
+= (size_t) d
;
579 tchunk
.length
-= (size_t) d
;
581 /* Go to next item for the next iteration */
585 rchunk
.length
= tchunk
.length
= PA_MIN(tchunk
.length
, block_size
- rchunk
.index
);
586 pa_memchunk_memcpy(&rchunk
, &tchunk
);
588 rchunk
.index
+= rchunk
.length
;
593 rchunk
.length
= block_size
;
599 void pa_memblockq_drop(pa_memblockq
*bq
, size_t length
) {
602 pa_assert(length
% bq
->base
== 0);
604 old
= bq
->read_index
;
608 /* Do not drop any data when we are in prebuffering mode */
609 if (update_prebuf(bq
))
612 fix_current_read(bq
);
614 if (bq
->current_read
) {
617 /* We go through this piece by piece to make sure we don't
618 * drop more than allowed by prebuf */
620 p
= bq
->current_read
->index
+ (int64_t) bq
->current_read
->chunk
.length
;
621 pa_assert(p
>= bq
->read_index
);
622 d
= p
- bq
->read_index
;
624 if (d
> (int64_t) length
)
625 d
= (int64_t) length
;
628 length
-= (size_t) d
;
632 /* The list is empty, there's nothing we could drop */
633 bq
->read_index
+= (int64_t) length
;
639 read_index_changed(bq
, old
);
642 void pa_memblockq_rewind(pa_memblockq
*bq
, size_t length
) {
645 pa_assert(length
% bq
->base
== 0);
647 old
= bq
->read_index
;
649 /* This is kind of the inverse of pa_memblockq_drop() */
651 bq
->read_index
-= (int64_t) length
;
653 read_index_changed(bq
, old
);
656 pa_bool_t
pa_memblockq_is_readable(pa_memblockq
*bq
) {
659 if (pa_memblockq_prebuf_active(bq
))
662 if (pa_memblockq_get_length(bq
) <= 0)
668 size_t pa_memblockq_get_length(pa_memblockq
*bq
) {
671 if (bq
->write_index
<= bq
->read_index
)
674 return (size_t) (bq
->write_index
- bq
->read_index
);
677 size_t pa_memblockq_missing(pa_memblockq
*bq
) {
681 if ((l
= pa_memblockq_get_length(bq
)) >= bq
->tlength
)
686 return l
>= bq
->minreq
? l
: 0;
689 void pa_memblockq_seek(pa_memblockq
*bq
, int64_t offset
, pa_seek_mode_t seek
, pa_bool_t account
) {
693 old
= bq
->write_index
;
696 case PA_SEEK_RELATIVE
:
697 bq
->write_index
+= offset
;
699 case PA_SEEK_ABSOLUTE
:
700 bq
->write_index
= offset
;
702 case PA_SEEK_RELATIVE_ON_READ
:
703 bq
->write_index
= bq
->read_index
+ offset
;
705 case PA_SEEK_RELATIVE_END
:
706 bq
->write_index
= (bq
->blocks_tail
? bq
->blocks_tail
->index
+ (int64_t) bq
->blocks_tail
->chunk
.length
: bq
->read_index
) + offset
;
709 pa_assert_not_reached();
713 write_index_changed(bq
, old
, account
);
716 void pa_memblockq_flush_write(pa_memblockq
*bq
, pa_bool_t account
) {
720 pa_memblockq_silence(bq
);
722 old
= bq
->write_index
;
723 bq
->write_index
= bq
->read_index
;
725 pa_memblockq_prebuf_force(bq
);
726 write_index_changed(bq
, old
, account
);
729 void pa_memblockq_flush_read(pa_memblockq
*bq
) {
733 pa_memblockq_silence(bq
);
735 old
= bq
->read_index
;
736 bq
->read_index
= bq
->write_index
;
738 pa_memblockq_prebuf_force(bq
);
739 read_index_changed(bq
, old
);
742 size_t pa_memblockq_get_tlength(pa_memblockq
*bq
) {
748 size_t pa_memblockq_get_minreq(pa_memblockq
*bq
) {
754 size_t pa_memblockq_get_maxrewind(pa_memblockq
*bq
) {
757 return bq
->maxrewind
;
760 int64_t pa_memblockq_get_read_index(pa_memblockq
*bq
) {
763 return bq
->read_index
;
766 int64_t pa_memblockq_get_write_index(pa_memblockq
*bq
) {
769 return bq
->write_index
;
772 int pa_memblockq_push_align(pa_memblockq
* bq
, const pa_memchunk
*chunk
) {
779 return pa_memblockq_push(bq
, chunk
);
781 if (!can_push(bq
, pa_mcalign_csize(bq
->mcalign
, chunk
->length
)))
784 pa_mcalign_push(bq
->mcalign
, chunk
);
786 while (pa_mcalign_pop(bq
->mcalign
, &rchunk
) >= 0) {
788 r
= pa_memblockq_push(bq
, &rchunk
);
789 pa_memblock_unref(rchunk
.memblock
);
792 pa_mcalign_flush(bq
->mcalign
);
800 void pa_memblockq_prebuf_disable(pa_memblockq
*bq
) {
803 bq
->in_prebuf
= FALSE
;
806 void pa_memblockq_prebuf_force(pa_memblockq
*bq
) {
810 bq
->in_prebuf
= TRUE
;
813 size_t pa_memblockq_get_maxlength(pa_memblockq
*bq
) {
816 return bq
->maxlength
;
819 size_t pa_memblockq_get_prebuf(pa_memblockq
*bq
) {
825 size_t pa_memblockq_pop_missing(pa_memblockq
*bq
) {
830 /* pa_log("pop: %lli", bq->missing); */
832 if (bq
->missing
<= 0)
835 l
= (size_t) bq
->missing
;
837 bq
->requested
+= bq
->missing
;
840 /* pa_log("sent %lli: request counter is at %lli", (long long) l, (long long) bq->requested); */
845 void pa_memblockq_set_maxlength(pa_memblockq
*bq
, size_t maxlength
) {
848 bq
->maxlength
= ((maxlength
+bq
->base
-1)/bq
->base
)*bq
->base
;
850 if (bq
->maxlength
< bq
->base
)
851 bq
->maxlength
= bq
->base
;
853 if (bq
->tlength
> bq
->maxlength
)
854 pa_memblockq_set_tlength(bq
, bq
->maxlength
);
857 void pa_memblockq_set_tlength(pa_memblockq
*bq
, size_t tlength
) {
861 if (tlength
<= 0 || tlength
== (size_t) -1)
862 tlength
= bq
->maxlength
;
864 old_tlength
= bq
->tlength
;
865 bq
->tlength
= ((tlength
+bq
->base
-1)/bq
->base
)*bq
->base
;
867 if (bq
->tlength
> bq
->maxlength
)
868 bq
->tlength
= bq
->maxlength
;
870 if (bq
->minreq
> bq
->tlength
)
871 pa_memblockq_set_minreq(bq
, bq
->tlength
);
873 if (bq
->prebuf
> bq
->tlength
+bq
->base
-bq
->minreq
)
874 pa_memblockq_set_prebuf(bq
, bq
->tlength
+bq
->base
-bq
->minreq
);
876 bq
->missing
+= (int64_t) bq
->tlength
- (int64_t) old_tlength
;
879 void pa_memblockq_set_minreq(pa_memblockq
*bq
, size_t minreq
) {
882 bq
->minreq
= (minreq
/bq
->base
)*bq
->base
;
884 if (bq
->minreq
> bq
->tlength
)
885 bq
->minreq
= bq
->tlength
;
887 if (bq
->minreq
< bq
->base
)
888 bq
->minreq
= bq
->base
;
890 if (bq
->prebuf
> bq
->tlength
+bq
->base
-bq
->minreq
)
891 pa_memblockq_set_prebuf(bq
, bq
->tlength
+bq
->base
-bq
->minreq
);
894 void pa_memblockq_set_prebuf(pa_memblockq
*bq
, size_t prebuf
) {
897 if (prebuf
== (size_t) -1)
898 prebuf
= bq
->tlength
+bq
->base
-bq
->minreq
;
900 bq
->prebuf
= ((prebuf
+bq
->base
-1)/bq
->base
)*bq
->base
;
902 if (prebuf
> 0 && bq
->prebuf
< bq
->base
)
903 bq
->prebuf
= bq
->base
;
905 if (bq
->prebuf
> bq
->tlength
+bq
->base
-bq
->minreq
)
906 bq
->prebuf
= bq
->tlength
+bq
->base
-bq
->minreq
;
908 if (bq
->prebuf
<= 0 || pa_memblockq_get_length(bq
) >= bq
->prebuf
)
909 bq
->in_prebuf
= FALSE
;
912 void pa_memblockq_set_maxrewind(pa_memblockq
*bq
, size_t maxrewind
) {
915 bq
->maxrewind
= (maxrewind
/bq
->base
)*bq
->base
;
918 void pa_memblockq_apply_attr(pa_memblockq
*bq
, const pa_buffer_attr
*a
) {
922 pa_memblockq_set_maxlength(bq
, a
->maxlength
);
923 pa_memblockq_set_tlength(bq
, a
->tlength
);
924 pa_memblockq_set_prebuf(bq
, a
->prebuf
);
925 pa_memblockq_set_minreq(bq
, a
->minreq
);
928 void pa_memblockq_get_attr(pa_memblockq
*bq
, pa_buffer_attr
*a
) {
932 a
->maxlength
= (uint32_t) pa_memblockq_get_maxlength(bq
);
933 a
->tlength
= (uint32_t) pa_memblockq_get_tlength(bq
);
934 a
->prebuf
= (uint32_t) pa_memblockq_get_prebuf(bq
);
935 a
->minreq
= (uint32_t) pa_memblockq_get_minreq(bq
);
938 int pa_memblockq_splice(pa_memblockq
*bq
, pa_memblockq
*source
) {
943 pa_memblockq_prebuf_disable(bq
);
948 if (pa_memblockq_peek(source
, &chunk
) < 0)
951 pa_assert(chunk
.length
> 0);
953 if (chunk
.memblock
) {
955 if (pa_memblockq_push_align(bq
, &chunk
) < 0) {
956 pa_memblock_unref(chunk
.memblock
);
960 pa_memblock_unref(chunk
.memblock
);
962 pa_memblockq_seek(bq
, (int64_t) chunk
.length
, PA_SEEK_RELATIVE
, TRUE
);
964 pa_memblockq_drop(bq
, chunk
.length
);
968 void pa_memblockq_willneed(pa_memblockq
*bq
) {
973 fix_current_read(bq
);
975 for (q
= bq
->current_read
; q
; q
= q
->next
)
976 pa_memchunk_will_need(&q
->chunk
);
979 void pa_memblockq_set_silence(pa_memblockq
*bq
, pa_memchunk
*silence
) {
982 if (bq
->silence
.memblock
)
983 pa_memblock_unref(bq
->silence
.memblock
);
986 bq
->silence
= *silence
;
987 pa_memblock_ref(bq
->silence
.memblock
);
989 pa_memchunk_reset(&bq
->silence
);
992 pa_bool_t
pa_memblockq_is_empty(pa_memblockq
*bq
) {
998 void pa_memblockq_silence(pa_memblockq
*bq
) {
1002 drop_block(bq
, bq
->blocks
);
1004 pa_assert(bq
->n_blocks
== 0);
1007 unsigned pa_memblockq_get_nblocks(pa_memblockq
*bq
) {
1010 return bq
->n_blocks
;
1013 size_t pa_memblockq_get_base(pa_memblockq
*bq
) {