2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this theshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this theshold in the verification time, decrease the watermark */
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
81 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
82 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
84 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
86 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256) /* 1.33ms @48kHz, should work for most hardware */
94 pa_thread_mq thread_mq
;
97 snd_pcm_t
*pcm_handle
;
99 pa_alsa_fdlist
*mixer_fdl
;
100 snd_mixer_t
*mixer_handle
;
101 pa_alsa_path_set
*mixer_path_set
;
102 pa_alsa_path
*mixer_path
;
104 pa_cvolume hardware_volume
;
116 watermark_inc_threshold
,
117 watermark_dec_threshold
,
120 pa_usec_t watermark_dec_not_before
;
122 pa_memchunk memchunk
;
124 char *device_name
; /* name of the PCM device */
125 char *control_device
; /* name of the control device */
127 pa_bool_t use_mmap
:1, use_tsched
:1;
129 pa_bool_t first
, after_rewind
;
131 pa_rtpoll_item
*alsa_rtpoll_item
;
133 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
135 pa_smoother
*smoother
;
136 uint64_t write_count
;
137 uint64_t since_start
;
138 pa_usec_t smoother_interval
;
139 pa_usec_t last_smoother_update
;
141 pa_reserve_wrapper
*reserve
;
142 pa_hook_slot
*reserve_slot
;
143 pa_reserve_monitor_wrapper
*monitor
;
144 pa_hook_slot
*monitor_slot
;
147 static void userdata_free(struct userdata
*u
);
149 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
153 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
154 return PA_HOOK_CANCEL
;
/* Tear down the device-reservation state: free the hook slot connected to
 * the reserve wrapper (resetting it to NULL so it is not freed twice) and
 * drop our reference on the wrapper itself.
 * NOTE(review): the extraction dropped some original lines here (closing
 * braces and, presumably, a NULL check guarding the unref) — comments only,
 * code left byte-identical. */
159 static void reserve_done(struct userdata
*u
) {
162 if (u
->reserve_slot
) {
163 pa_hook_slot_free(u
->reserve_slot
);
164 u
->reserve_slot
= NULL
;
168 pa_reserve_wrapper_unref(u
->reserve
);
/* Push the sink's human-readable description to the device-reservation
 * service so other applications can show who is holding the device.
 * Bails out early when there is no sink or no reservation in place;
 * otherwise reads PA_PROP_DEVICE_DESCRIPTION from the sink's proplist and
 * forwards it to the reserve wrapper. */
173 static void reserve_update(struct userdata
*u
) {
174 const char *description
;
177 if (!u
->sink
|| !u
->reserve
)
180 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
181 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
184 static int reserve_init(struct userdata
*u
, const char *dname
) {
193 if (pa_in_system_mode())
196 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
199 /* We are resuming, try to lock the device */
200 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
208 pa_assert(!u
->reserve_slot
);
209 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
214 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
220 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
222 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
/* Tear down the reservation-monitor state: free the hook slot connected to
 * the monitor wrapper (resetting it to NULL so it is not freed twice) and
 * drop our reference on the monitor wrapper itself.
 * NOTE(review): mirrors reserve_done(); some original lines (braces, likely
 * a NULL check before the unref) were dropped by the extraction — comments
 * only, code left byte-identical. */
226 static void monitor_done(struct userdata
*u
) {
229 if (u
->monitor_slot
) {
230 pa_hook_slot_free(u
->monitor_slot
);
231 u
->monitor_slot
= NULL
;
235 pa_reserve_monitor_wrapper_unref(u
->monitor
);
240 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
246 if (pa_in_system_mode())
249 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
252 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
258 pa_assert(!u
->monitor_slot
);
259 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
264 static void fix_min_sleep_wakeup(struct userdata
*u
) {
265 size_t max_use
, max_use_2
;
268 pa_assert(u
->use_tsched
);
270 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
271 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
273 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
274 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
276 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
277 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
/* Clamp the timer-scheduling wakeup watermark into its valid range:
 * at most (usable buffer - min_sleep), where the usable buffer is
 * hwbuf_size minus the hwbuf_unused slack, and at least min_wakeup.
 * Only meaningful when timer-based scheduling is in use (asserted). */
280 static void fix_tsched_watermark(struct userdata
*u
) {
283 pa_assert(u
->use_tsched
);
/* Usable portion of the hardware buffer. */
285 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
/* Upper bound: leave room for at least min_sleep below a full buffer. */
287 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
288 u
->tsched_watermark
= max_use
- u
->min_sleep
;
/* Lower bound: never wake up later than min_wakeup before empty. */
290 if (u
->tsched_watermark
< u
->min_wakeup
)
291 u
->tsched_watermark
= u
->min_wakeup
;
/* React to an underrun (or near-underrun) by making the sink wake up
 * earlier. First try to grow the tsched watermark (doubling it, capped at
 * +watermark_inc_step, then re-clamped by fix_tsched_watermark()). If the
 * watermark was already at its ceiling, fall back to raising the sink's
 * minimum latency instead, capped at the configured maximum latency. */
294 static void increase_watermark(struct userdata
*u
) {
295 size_t old_watermark
;
296 pa_usec_t old_min_latency
, new_min_latency
;
299 pa_assert(u
->use_tsched
);
301 /* First, just try to increase the watermark */
302 old_watermark
= u
->tsched_watermark
;
303 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
304 fix_tsched_watermark(u
);
/* If the clamp left the watermark changed, the increase took effect. */
306 if (old_watermark
!= u
->tsched_watermark
) {
307 pa_log_info("Increasing wakeup watermark to %0.2f ms",
308 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
312 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
313 old_min_latency
= u
->sink
->thread_info
.min_latency
;
314 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
315 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
317 if (old_min_latency
!= new_min_latency
) {
318 pa_log_info("Increasing minimal latency to %0.2f ms",
319 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
321 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
324 /* When we reach this we're officially out of options: both the watermark
 * and the latency range are already maxed out, so there is nothing more we
 * can do to avoid further underruns. */
327 static void decrease_watermark(struct userdata
*u
) {
328 size_t old_watermark
;
332 pa_assert(u
->use_tsched
);
334 now
= pa_rtclock_now();
336 if (u
->watermark_dec_not_before
<= 0)
339 if (u
->watermark_dec_not_before
> now
)
342 old_watermark
= u
->tsched_watermark
;
344 if (u
->tsched_watermark
< u
->watermark_dec_step
)
345 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
347 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
349 fix_tsched_watermark(u
);
351 if (old_watermark
!= u
->tsched_watermark
)
352 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
353 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
355 /* We don't change the latency range*/
358 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
361 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
364 pa_assert(sleep_usec
);
365 pa_assert(process_usec
);
368 pa_assert(u
->use_tsched
);
370 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
372 if (usec
== (pa_usec_t
) -1)
373 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
375 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
380 *sleep_usec
= usec
- wm
;
384 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
385 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
386 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
387 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
391 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
396 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
398 pa_assert(err
!= -EAGAIN
);
401 pa_log_debug("%s: Buffer underrun!", call
);
403 if (err
== -ESTRPIPE
)
404 pa_log_debug("%s: System suspended!", call
);
406 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
407 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
416 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
418 pa_bool_t underrun
= FALSE
;
420 /* We use <= instead of < for this check here because an underrun
421 * only happens after the last sample was processed, not already when
422 * it is removed from the buffer. This is particularly important
423 * when block transfer is used. */
425 if (n_bytes
<= u
->hwbuf_size
)
426 left_to_play
= u
->hwbuf_size
- n_bytes
;
429 /* We got a dropout. What a mess! */
437 if (!u
->first
&& !u
->after_rewind
)
438 if (pa_log_ratelimit())
439 pa_log_info("Underrun!");
443 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
444 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
445 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
446 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
450 pa_bool_t reset_not_before
= TRUE
;
452 if (!u
->first
&& !u
->after_rewind
) {
453 if (underrun
|| left_to_play
< u
->watermark_inc_threshold
)
454 increase_watermark(u
);
455 else if (left_to_play
> u
->watermark_dec_threshold
) {
456 reset_not_before
= FALSE
;
458 /* We decrease the watermark only if have actually
459 * been woken up by a timeout. If something else woke
460 * us up it's too easy to fulfill the deadlines... */
463 decrease_watermark(u
);
467 if (reset_not_before
)
468 u
->watermark_dec_not_before
= 0;
474 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
475 pa_bool_t work_done
= TRUE
;
476 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
481 pa_sink_assert_ref(u
->sink
);
484 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
490 pa_bool_t after_avail
= TRUE
;
492 /* First we determine how many samples are missing to fill the
493 * buffer up to 100% */
495 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
497 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
503 n_bytes
= (size_t) n
* u
->frame_size
;
506 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
509 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
514 /* We won't fill up the playback buffer before at least
515 * half the sleep time is over because otherwise we might
516 * ask for more data from the clients than they expect. We
517 * need to guarantee that clients only have to keep around
518 * a single hw buffer length. */
521 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
523 pa_log_debug("Not filling up, because too early.");
528 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
532 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
533 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
534 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
535 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
541 pa_log_debug("Not filling up, because not necessary.");
549 pa_log_debug("Not filling up, because already too many iterations.");
555 n_bytes
-= u
->hwbuf_unused
;
559 pa_log_debug("Filling up");
566 const snd_pcm_channel_area_t
*areas
;
567 snd_pcm_uframes_t offset
, frames
;
568 snd_pcm_sframes_t sframes
;
570 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
571 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
573 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
575 if (!after_avail
&& err
== -EAGAIN
)
578 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
584 /* Make sure that if these memblocks need to be copied they will fit into one slot */
585 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
586 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
588 if (!after_avail
&& frames
== 0)
591 pa_assert(frames
> 0);
594 /* Check these are multiples of 8 bit */
595 pa_assert((areas
[0].first
& 7) == 0);
596 pa_assert((areas
[0].step
& 7)== 0);
598 /* We assume a single interleaved memory buffer */
599 pa_assert((areas
[0].first
>> 3) == 0);
600 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
602 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
604 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
605 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
608 pa_sink_render_into_full(u
->sink
, &chunk
);
609 pa_memblock_unref_fixed(chunk
.memblock
);
611 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
613 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
621 u
->write_count
+= frames
* u
->frame_size
;
622 u
->since_start
+= frames
* u
->frame_size
;
625 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
628 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
631 n_bytes
-= (size_t) frames
* u
->frame_size
;
636 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
638 if (*sleep_usec
> process_usec
)
639 *sleep_usec
-= process_usec
;
645 return work_done
? 1 : 0;
648 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
649 pa_bool_t work_done
= FALSE
;
650 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
655 pa_sink_assert_ref(u
->sink
);
658 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
664 pa_bool_t after_avail
= TRUE
;
666 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
668 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
674 n_bytes
= (size_t) n
* u
->frame_size
;
675 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
680 /* We won't fill up the playback buffer before at least
681 * half the sleep time is over because otherwise we might
682 * ask for more data from the clients than they expect. We
683 * need to guarantee that clients only have to keep around
684 * a single hw buffer length. */
687 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
690 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
694 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
695 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
696 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
697 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
707 pa_log_debug("Not filling up, because already too many iterations.");
713 n_bytes
-= u
->hwbuf_unused
;
717 snd_pcm_sframes_t frames
;
720 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
722 if (u
->memchunk
.length
<= 0)
723 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
725 pa_assert(u
->memchunk
.length
> 0);
727 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
729 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
730 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
732 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
733 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
734 pa_memblock_release(u
->memchunk
.memblock
);
736 if (PA_UNLIKELY(frames
< 0)) {
738 if (!after_avail
&& (int) frames
== -EAGAIN
)
741 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
747 if (!after_avail
&& frames
== 0)
750 pa_assert(frames
> 0);
753 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
754 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
756 if (u
->memchunk
.length
<= 0) {
757 pa_memblock_unref(u
->memchunk
.memblock
);
758 pa_memchunk_reset(&u
->memchunk
);
763 u
->write_count
+= frames
* u
->frame_size
;
764 u
->since_start
+= frames
* u
->frame_size
;
766 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
768 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
771 n_bytes
-= (size_t) frames
* u
->frame_size
;
776 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
778 if (*sleep_usec
> process_usec
)
779 *sleep_usec
-= process_usec
;
785 return work_done
? 1 : 0;
788 static void update_smoother(struct userdata
*u
) {
789 snd_pcm_sframes_t delay
= 0;
792 pa_usec_t now1
= 0, now2
;
793 snd_pcm_status_t
*status
;
795 snd_pcm_status_alloca(&status
);
798 pa_assert(u
->pcm_handle
);
800 /* Let's update the time smoother */
802 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
803 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
807 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
808 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
810 snd_htimestamp_t htstamp
= { 0, 0 };
811 snd_pcm_status_get_htstamp(status
, &htstamp
);
812 now1
= pa_timespec_load(&htstamp
);
815 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
817 now1
= pa_rtclock_now();
819 /* check if the time since the last update is bigger than the interval */
820 if (u
->last_smoother_update
> 0)
821 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
824 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
826 if (PA_UNLIKELY(position
< 0))
829 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
831 pa_smoother_put(u
->smoother
, now1
, now2
);
833 u
->last_smoother_update
= now1
;
834 /* exponentially increase the update interval up to the MAX limit */
835 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
838 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
841 pa_usec_t now1
, now2
;
845 now1
= pa_rtclock_now();
846 now2
= pa_smoother_get(u
->smoother
, now1
);
848 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
850 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
852 if (u
->memchunk
.memblock
)
853 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
858 static int build_pollfd(struct userdata
*u
) {
860 pa_assert(u
->pcm_handle
);
862 if (u
->alsa_rtpoll_item
)
863 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
865 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
871 /* Called from IO context */
872 static int suspend(struct userdata
*u
) {
874 pa_assert(u
->pcm_handle
);
876 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
878 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
879 * take awfully long with our long buffer sizes today. */
880 snd_pcm_close(u
->pcm_handle
);
881 u
->pcm_handle
= NULL
;
883 if (u
->alsa_rtpoll_item
) {
884 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
885 u
->alsa_rtpoll_item
= NULL
;
888 /* We reset max_rewind/max_request here to make sure that while we
889 * are suspended the old max_request/max_rewind values set before
890 * the suspend can influence the per-stream buffer of newly
891 * created streams, without their requirements having any
892 * influence on them. */
893 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
894 pa_sink_set_max_request_within_thread(u
->sink
, 0);
896 pa_log_info("Device suspended...");
901 /* Called from IO context */
902 static int update_sw_params(struct userdata
*u
) {
903 snd_pcm_uframes_t avail_min
;
908 /* Use the full buffer if no one asked us for anything specific */
914 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
917 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
919 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
921 /* We need at least one sample in our buffer */
923 if (PA_UNLIKELY(b
< u
->frame_size
))
926 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
929 fix_min_sleep_wakeup(u
);
930 fix_tsched_watermark(u
);
933 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
935 /* We need at least one frame in the used part of the buffer */
936 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
939 pa_usec_t sleep_usec
, process_usec
;
941 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
942 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
945 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
947 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
, !u
->use_tsched
)) < 0) {
948 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
952 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
953 pa_sink_set_max_rewind_within_thread(u
->sink
, u
->hwbuf_size
);
958 /* Called from IO context */
959 static int unsuspend(struct userdata
*u
) {
963 snd_pcm_uframes_t period_size
, buffer_size
;
966 pa_assert(!u
->pcm_handle
);
968 pa_log_info("Trying resume...");
970 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
972 SND_PCM_NO_AUTO_RESAMPLE
|
973 SND_PCM_NO_AUTO_CHANNELS
|
974 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
975 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
979 ss
= u
->sink
->sample_spec
;
980 period_size
= u
->fragment_size
/ u
->frame_size
;
981 buffer_size
= u
->hwbuf_size
/ u
->frame_size
;
985 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &period_size
, &buffer_size
, 0, &b
, &d
, TRUE
)) < 0) {
986 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
990 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
991 pa_log_warn("Resume failed, couldn't get original access mode.");
995 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
996 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1000 if (period_size
*u
->frame_size
!= u
->fragment_size
||
1001 buffer_size
*u
->frame_size
!= u
->hwbuf_size
) {
1002 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1003 (unsigned long) u
->hwbuf_size
, (unsigned long) u
->fragment_size
,
1004 (unsigned long) (buffer_size
*u
->frame_size
), (unsigned long) (period_size
*u
->frame_size
));
1008 if (update_sw_params(u
) < 0)
1011 if (build_pollfd(u
) < 0)
1015 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
1016 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1017 u
->last_smoother_update
= 0;
1022 pa_log_info("Resumed successfully...");
1027 if (u
->pcm_handle
) {
1028 snd_pcm_close(u
->pcm_handle
);
1029 u
->pcm_handle
= NULL
;
1035 /* Called from IO context */
1036 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1037 struct userdata
*u
= PA_SINK(o
)->userdata
;
1041 case PA_SINK_MESSAGE_GET_LATENCY
: {
1045 r
= sink_get_latency(u
);
1047 *((pa_usec_t
*) data
) = r
;
1052 case PA_SINK_MESSAGE_SET_STATE
:
1054 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1056 case PA_SINK_SUSPENDED
: {
1059 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1061 if ((r
= suspend(u
)) < 0)
1068 case PA_SINK_RUNNING
: {
1071 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1072 if (build_pollfd(u
) < 0)
1076 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1077 if ((r
= unsuspend(u
)) < 0)
1084 case PA_SINK_UNLINKED
:
1086 case PA_SINK_INVALID_STATE
:
1093 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1096 /* Called from main context */
1097 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1098 pa_sink_state_t old_state
;
1101 pa_sink_assert_ref(s
);
1102 pa_assert_se(u
= s
->userdata
);
1104 old_state
= pa_sink_get_state(u
->sink
);
1106 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1108 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1109 if (reserve_init(u
, u
->device_name
) < 0)
1110 return -PA_ERR_BUSY
;
1115 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1116 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1119 pa_assert(u
->mixer_handle
);
1121 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1124 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
)
1127 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1128 pa_sink_get_volume(u
->sink
, TRUE
);
1129 pa_sink_get_mute(u
->sink
, TRUE
);
1135 static void sink_get_volume_cb(pa_sink
*s
) {
1136 struct userdata
*u
= s
->userdata
;
1138 char t
[PA_CVOLUME_SNPRINT_MAX
];
1141 pa_assert(u
->mixer_path
);
1142 pa_assert(u
->mixer_handle
);
1144 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1147 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1148 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1150 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1152 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1155 s
->real_volume
= u
->hardware_volume
= r
;
1157 /* Hmm, so the hardware volume changed, let's reset our software volume */
1158 if (u
->mixer_path
->has_dB
)
1159 pa_sink_set_soft_volume(s
, NULL
);
1162 static void sink_set_volume_cb(pa_sink
*s
) {
1163 struct userdata
*u
= s
->userdata
;
1165 char t
[PA_CVOLUME_SNPRINT_MAX
];
1168 pa_assert(u
->mixer_path
);
1169 pa_assert(u
->mixer_handle
);
1171 /* Shift up by the base volume */
1172 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1174 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1177 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1178 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1180 u
->hardware_volume
= r
;
1182 if (u
->mixer_path
->has_dB
) {
1183 pa_cvolume new_soft_volume
;
1184 pa_bool_t accurate_enough
;
1186 /* Match exactly what the user requested by software */
1187 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1189 /* If the adjustment to do in software is only minimal we
1190 * can skip it. That saves us CPU at the expense of a bit of
1193 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1194 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1196 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->real_volume
));
1197 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
1198 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t
, sizeof(t
), &new_soft_volume
),
1199 pa_yes_no(accurate_enough
));
1201 if (!accurate_enough
)
1202 s
->soft_volume
= new_soft_volume
;
1205 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1207 /* We can't match exactly what the user requested, hence let's
1208 * at least tell the user about it */
1214 static void sink_get_mute_cb(pa_sink
*s
) {
1215 struct userdata
*u
= s
->userdata
;
1219 pa_assert(u
->mixer_path
);
1220 pa_assert(u
->mixer_handle
);
1222 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1228 static void sink_set_mute_cb(pa_sink
*s
) {
1229 struct userdata
*u
= s
->userdata
;
1232 pa_assert(u
->mixer_path
);
1233 pa_assert(u
->mixer_handle
);
1235 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1238 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1239 struct userdata
*u
= s
->userdata
;
1240 pa_alsa_port_data
*data
;
1244 pa_assert(u
->mixer_handle
);
1246 data
= PA_DEVICE_PORT_DATA(p
);
1248 pa_assert_se(u
->mixer_path
= data
->path
);
1249 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1251 if (u
->mixer_path
->has_volume
&& u
->mixer_path
->has_dB
) {
1252 s
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1253 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
1255 if (u
->mixer_path
->max_dB
> 0.0)
1256 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s
->base_volume
));
1258 pa_log_info("No particular base volume set, fixing to 0 dB");
1260 s
->base_volume
= PA_VOLUME_NORM
;
1261 s
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1265 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1275 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1276 struct userdata
*u
= s
->userdata
;
1279 pa_assert(u
->use_tsched
); /* only when timer scheduling is used
1280 * we can dynamically adjust the
1286 before
= u
->hwbuf_unused
;
1287 update_sw_params(u
);
1289 /* Let's check whether we now use only a smaller part of the
1290 buffer then before. If so, we need to make sure that subsequent
1291 rewinds are relative to the new maximum fill level and not to the
1292 current fill level. Thus, let's do a full rewind once, to clear
1295 if (u
->hwbuf_unused
> before
) {
1296 pa_log_debug("Requesting rewind due to latency change.");
1297 pa_sink_request_rewind(s
, (size_t) -1);
1301 static int process_rewind(struct userdata
*u
) {
1302 snd_pcm_sframes_t unused
;
1303 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1306 /* Figure out how much we shall rewind and reset the counter */
1307 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1309 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1311 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1312 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1316 unused_nbytes
= (size_t) unused
* u
->frame_size
;
1318 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1319 unused_nbytes
+= u
->rewind_safeguard
;
1321 if (u
->hwbuf_size
> unused_nbytes
)
1322 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1326 if (rewind_nbytes
> limit_nbytes
)
1327 rewind_nbytes
= limit_nbytes
;
1329 if (rewind_nbytes
> 0) {
1330 snd_pcm_sframes_t in_frames
, out_frames
;
1332 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1334 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1335 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1336 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1337 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1338 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1343 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1345 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1347 if (rewind_nbytes
<= 0)
1348 pa_log_info("Tried rewind, but was apparently not possible.");
1350 u
->write_count
-= rewind_nbytes
;
1351 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1352 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1354 u
->after_rewind
= TRUE
;
1358 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1360 pa_sink_process_rewind(u
->sink
, 0);
1364 static void thread_func(void *userdata
) {
1365 struct userdata
*u
= userdata
;
1366 unsigned short revents
= 0;
1370 pa_log_debug("Thread starting up");
1372 if (u
->core
->realtime_scheduling
)
1373 pa_make_realtime(u
->core
->realtime_priority
);
1375 pa_thread_mq_install(&u
->thread_mq
);
1381 pa_log_debug("Loop");
1384 /* Render some data and write it to the dsp */
1385 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1387 pa_usec_t sleep_usec
= 0;
1388 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
1390 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1391 if (process_rewind(u
) < 0)
1395 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1397 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1402 /* pa_log_debug("work_done = %i", work_done); */
1407 pa_log_info("Starting playback.");
1408 snd_pcm_start(u
->pcm_handle
);
1410 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1416 if (u
->use_tsched
) {
1419 if (u
->since_start
<= u
->hwbuf_size
) {
1421 /* USB devices on ALSA seem to hit a buffer
1422 * underrun during the first iterations much
1423 * quicker then we calculate here, probably due to
1424 * the transport latency. To accommodate for that
1425 * we artificially decrease the sleep time until
1426 * we have filled the buffer at least once
1429 if (pa_log_ratelimit())
1430 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1434 /* OK, the playback buffer is now full, let's
1435 * calculate when to wake up next */
1436 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1438 /* Convert from the sound card time domain to the
1439 * system time domain */
1440 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1442 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1444 /* We don't trust the conversion, so we wake up whatever comes first */
1445 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1449 u
->after_rewind
= FALSE
;
1451 } else if (u
->use_tsched
)
1453 /* OK, we're in an invalid state, let's disable our timers */
1454 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1456 /* Hmm, nothing to do. Let's sleep */
1457 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1463 /* Tell ALSA about this and process its response */
1464 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1465 struct pollfd
*pollfd
;
1469 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1471 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1472 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
1476 if (revents
& ~POLLOUT
) {
1477 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1482 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit())
1483 pa_log_debug("Wakeup from ALSA!");
1490 /* If this was no regular exit from the loop we have to continue
1491 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1492 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1493 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1496 pa_log_debug("Thread shutting down");
1499 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1505 pa_assert(device_name
);
1507 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1508 pa_sink_new_data_set_name(data
, n
);
1509 data
->namereg_fail
= TRUE
;
1513 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1514 data
->namereg_fail
= TRUE
;
1516 n
= device_id
? device_id
: device_name
;
1517 data
->namereg_fail
= FALSE
;
1521 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1523 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1525 pa_sink_new_data_set_name(data
, t
);
1529 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
1531 if (!mapping
&& !element
)
1534 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1535 pa_log_info("Failed to find a working mixer device.");
1541 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1544 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1547 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1548 pa_alsa_path_dump(u
->mixer_path
);
1551 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
)))
1554 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1556 pa_log_debug("Probed mixer paths:");
1557 pa_alsa_path_set_dump(u
->mixer_path_set
);
1564 if (u
->mixer_path_set
) {
1565 pa_alsa_path_set_free(u
->mixer_path_set
);
1566 u
->mixer_path_set
= NULL
;
1567 } else if (u
->mixer_path
) {
1568 pa_alsa_path_free(u
->mixer_path
);
1569 u
->mixer_path
= NULL
;
1572 if (u
->mixer_handle
) {
1573 snd_mixer_close(u
->mixer_handle
);
1574 u
->mixer_handle
= NULL
;
1578 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1581 if (!u
->mixer_handle
)
1584 if (u
->sink
->active_port
) {
1585 pa_alsa_port_data
*data
;
1587 /* We have a list of supported paths, so let's activate the
1588 * one that has been chosen as active */
1590 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1591 u
->mixer_path
= data
->path
;
1593 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1596 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1600 if (!u
->mixer_path
&& u
->mixer_path_set
)
1601 u
->mixer_path
= u
->mixer_path_set
->paths
;
1603 if (u
->mixer_path
) {
1604 /* Hmm, we have only a single path, then let's activate it */
1606 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1608 if (u
->mixer_path
->settings
)
1609 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1614 if (!u
->mixer_path
->has_volume
)
1615 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1618 if (u
->mixer_path
->has_dB
) {
1619 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1621 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1622 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1624 if (u
->mixer_path
->max_dB
> 0.0)
1625 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1627 pa_log_info("No particular base volume set, fixing to 0 dB");
1630 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1631 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1632 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1635 u
->sink
->get_volume
= sink_get_volume_cb
;
1636 u
->sink
->set_volume
= sink_set_volume_cb
;
1638 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->mixer_path
->has_dB
? PA_SINK_DECIBEL_VOLUME
: 0);
1639 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1642 if (!u
->mixer_path
->has_mute
) {
1643 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1645 u
->sink
->get_mute
= sink_get_mute_cb
;
1646 u
->sink
->set_mute
= sink_set_mute_cb
;
1647 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1648 pa_log_info("Using hardware mute control.");
1651 u
->mixer_fdl
= pa_alsa_fdlist_new();
1653 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1654 pa_log("Failed to initialize file descriptor monitoring");
1658 if (u
->mixer_path_set
)
1659 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1661 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1666 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1668 struct userdata
*u
= NULL
;
1669 const char *dev_id
= NULL
;
1670 pa_sample_spec ss
, requested_ss
;
1672 uint32_t nfrags
, frag_size
, buffer_size
, tsched_size
, tsched_watermark
;
1673 snd_pcm_uframes_t period_frames
, buffer_frames
, tsched_frames
;
1674 size_t frame_size
, rewind_safeguard
;
1675 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
;
1676 pa_sink_new_data data
;
1677 pa_alsa_profile_set
*profile_set
= NULL
;
1682 ss
= m
->core
->default_sample_spec
;
1683 map
= m
->core
->default_channel_map
;
1684 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1685 pa_log("Failed to parse sample specification and channel map");
1690 frame_size
= pa_frame_size(&ss
);
1692 nfrags
= m
->core
->default_n_fragments
;
1693 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1695 frag_size
= (uint32_t) frame_size
;
1696 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1697 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1699 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1700 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1701 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1702 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1703 pa_log("Failed to parse buffer metrics");
1707 buffer_size
= nfrags
* frag_size
;
1709 period_frames
= frag_size
/frame_size
;
1710 buffer_frames
= buffer_size
/frame_size
;
1711 tsched_frames
= tsched_size
/frame_size
;
1713 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1714 pa_log("Failed to parse mmap argument.");
1718 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1719 pa_log("Failed to parse tsched argument.");
1723 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1724 pa_log("Failed to parse ignore_dB argument.");
1728 rewind_safeguard
= DEFAULT_REWIND_SAFEGUARD_BYTES
;
1729 if (pa_modargs_get_value_u32(ma
, "rewind_safeguard", &rewind_safeguard
) < 0) {
1730 pa_log("Failed to parse rewind_safeguard argument");
1734 use_tsched
= pa_alsa_may_tsched(use_tsched
);
1736 u
= pa_xnew0(struct userdata
, 1);
1739 u
->use_mmap
= use_mmap
;
1740 u
->use_tsched
= use_tsched
;
1742 u
->rewind_safeguard
= rewind_safeguard
;
1743 u
->rtpoll
= pa_rtpoll_new();
1744 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1746 u
->smoother
= pa_smoother_new(
1747 DEFAULT_TSCHED_BUFFER_USEC
*2,
1748 DEFAULT_TSCHED_BUFFER_USEC
*2,
1754 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1756 dev_id
= pa_modargs_get_value(
1758 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
1760 if (reserve_init(u
, dev_id
) < 0)
1763 if (reserve_monitor_init(u
, dev_id
) < 0)
1771 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1772 pa_log("device_id= not set");
1776 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
1780 SND_PCM_STREAM_PLAYBACK
,
1781 &period_frames
, &buffer_frames
, tsched_frames
,
1786 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1788 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
1791 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1795 SND_PCM_STREAM_PLAYBACK
,
1796 &period_frames
, &buffer_frames
, tsched_frames
,
1797 &b
, &d
, profile_set
, &mapping
)))
1803 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1804 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1807 SND_PCM_STREAM_PLAYBACK
,
1808 &period_frames
, &buffer_frames
, tsched_frames
,
1813 pa_assert(u
->device_name
);
1814 pa_log_info("Successfully opened device %s.", u
->device_name
);
1816 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
1817 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
1822 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
1824 if (use_mmap
&& !b
) {
1825 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1826 u
->use_mmap
= use_mmap
= FALSE
;
1829 if (use_tsched
&& (!b
|| !d
)) {
1830 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1831 u
->use_tsched
= use_tsched
= FALSE
;
1835 pa_log_info("Successfully enabled mmap() mode.");
1838 pa_log_info("Successfully enabled timer-based scheduling mode.");
1840 /* ALSA might tweak the sample spec, so recalculate the frame size */
1841 frame_size
= pa_frame_size(&ss
);
1843 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
1845 pa_sink_new_data_init(&data
);
1846 data
.driver
= driver
;
1849 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
1850 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1851 pa_sink_new_data_set_channel_map(&data
, &map
);
1853 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
1854 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1855 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (buffer_frames
* frame_size
));
1856 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1857 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1860 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
1861 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
1864 pa_alsa_init_description(data
.proplist
);
1866 if (u
->control_device
)
1867 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
1869 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
1870 pa_log("Invalid properties");
1871 pa_sink_new_data_done(&data
);
1875 if (u
->mixer_path_set
)
1876 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
1878 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
|(u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0));
1879 pa_sink_new_data_done(&data
);
1882 pa_log("Failed to create sink object");
1886 u
->sink
->parent
.process_msg
= sink_process_msg
;
1888 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1889 u
->sink
->set_state
= sink_set_state_cb
;
1890 u
->sink
->set_port
= sink_set_port_cb
;
1891 u
->sink
->userdata
= u
;
1893 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1894 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
1896 u
->frame_size
= frame_size
;
1897 u
->fragment_size
= frag_size
= (size_t) (period_frames
* frame_size
);
1898 u
->hwbuf_size
= buffer_size
= (size_t) (buffer_frames
* frame_size
);
1899 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1901 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1902 (double) u
->hwbuf_size
/ (double) u
->fragment_size
,
1903 (long unsigned) u
->fragment_size
,
1904 (double) pa_bytes_to_usec(u
->fragment_size
, &ss
) / PA_USEC_PER_MSEC
,
1905 (long unsigned) u
->hwbuf_size
,
1906 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1908 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
1909 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
1911 if (u
->use_tsched
) {
1912 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->sink
->sample_spec
);
1914 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->sink
->sample_spec
);
1915 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->sink
->sample_spec
);
1917 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1918 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1920 fix_min_sleep_wakeup(u
);
1921 fix_tsched_watermark(u
);
1923 pa_sink_set_latency_range(u
->sink
,
1925 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1927 pa_log_info("Time scheduling watermark is %0.2fms",
1928 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1930 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1934 if (update_sw_params(u
) < 0)
1937 if (setup_mixer(u
, ignore_dB
) < 0)
1940 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
1942 if (!(u
->thread
= pa_thread_new("alsa-sink", thread_func
, u
))) {
1943 pa_log("Failed to create thread.");
1947 /* Get initial mixer settings */
1948 if (data
.volume_is_set
) {
1949 if (u
->sink
->set_volume
)
1950 u
->sink
->set_volume(u
->sink
);
1952 if (u
->sink
->get_volume
)
1953 u
->sink
->get_volume(u
->sink
);
1956 if (data
.muted_is_set
) {
1957 if (u
->sink
->set_mute
)
1958 u
->sink
->set_mute(u
->sink
);
1960 if (u
->sink
->get_mute
)
1961 u
->sink
->get_mute(u
->sink
);
1964 pa_sink_put(u
->sink
);
1967 pa_alsa_profile_set_free(profile_set
);
1977 pa_alsa_profile_set_free(profile_set
);
1982 static void userdata_free(struct userdata
*u
) {
1986 pa_sink_unlink(u
->sink
);
1989 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1990 pa_thread_free(u
->thread
);
1993 pa_thread_mq_done(&u
->thread_mq
);
1996 pa_sink_unref(u
->sink
);
1998 if (u
->memchunk
.memblock
)
1999 pa_memblock_unref(u
->memchunk
.memblock
);
2001 if (u
->alsa_rtpoll_item
)
2002 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
2005 pa_rtpoll_free(u
->rtpoll
);
2007 if (u
->pcm_handle
) {
2008 snd_pcm_drop(u
->pcm_handle
);
2009 snd_pcm_close(u
->pcm_handle
);
2013 pa_alsa_fdlist_free(u
->mixer_fdl
);
2015 if (u
->mixer_path_set
)
2016 pa_alsa_path_set_free(u
->mixer_path_set
);
2017 else if (u
->mixer_path
)
2018 pa_alsa_path_free(u
->mixer_path
);
2020 if (u
->mixer_handle
)
2021 snd_mixer_close(u
->mixer_handle
);
2024 pa_smoother_free(u
->smoother
);
2029 pa_xfree(u
->device_name
);
2030 pa_xfree(u
->control_device
);
2034 void pa_alsa_sink_free(pa_sink
*s
) {
2037 pa_sink_assert_ref(s
);
2038 pa_assert_se(u
= s
->userdata
);