/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <errno.h>
#include <string.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"
/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

/* Timer-based scheduling (tsched) tuning constants. */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a drop out recheck if things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level ever below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wakeup at least this long before the buffer runs empty*/

#define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s -- smoother windows size */
#define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
99 pa_thread_mq thread_mq
;
102 snd_pcm_t
*pcm_handle
;
105 pa_alsa_fdlist
*mixer_fdl
;
106 pa_alsa_mixer_pdata
*mixer_pd
;
107 snd_mixer_t
*mixer_handle
;
108 pa_alsa_path_set
*mixer_path_set
;
109 pa_alsa_path
*mixer_path
;
111 pa_cvolume hardware_volume
;
120 tsched_watermark_ref
,
126 watermark_inc_threshold
,
127 watermark_dec_threshold
,
130 pa_usec_t watermark_dec_not_before
;
131 pa_usec_t min_latency_ref
;
133 pa_memchunk memchunk
;
135 char *device_name
; /* name of the PCM device */
136 char *control_device
; /* name of the control device */
138 pa_bool_t use_mmap
:1, use_tsched
:1, deferred_volume
:1, fixed_latency_range
:1;
140 pa_bool_t first
, after_rewind
;
142 pa_rtpoll_item
*alsa_rtpoll_item
;
144 pa_smoother
*smoother
;
145 uint64_t write_count
;
146 uint64_t since_start
;
147 pa_usec_t smoother_interval
;
148 pa_usec_t last_smoother_update
;
152 pa_reserve_wrapper
*reserve
;
153 pa_hook_slot
*reserve_slot
;
154 pa_reserve_monitor_wrapper
*monitor
;
155 pa_hook_slot
*monitor_slot
;
158 pa_alsa_ucm_mapping_context
*ucm_context
;
/* Releases all resources held by the per-sink userdata (defined above). */
static void userdata_free(struct userdata *u);
163 /* FIXME: Is there a better way to do this than device names? */
164 static pa_bool_t
is_iec958(struct userdata
*u
) {
165 return (strncmp("iec958", u
->device_name
, 6) == 0);
168 static pa_bool_t
is_hdmi(struct userdata
*u
) {
169 return (strncmp("hdmi", u
->device_name
, 4) == 0);
172 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
176 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
177 return PA_HOOK_CANCEL
;
182 static void reserve_done(struct userdata
*u
) {
185 if (u
->reserve_slot
) {
186 pa_hook_slot_free(u
->reserve_slot
);
187 u
->reserve_slot
= NULL
;
191 pa_reserve_wrapper_unref(u
->reserve
);
196 static void reserve_update(struct userdata
*u
) {
197 const char *description
;
200 if (!u
->sink
|| !u
->reserve
)
203 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
204 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
207 static int reserve_init(struct userdata
*u
, const char *dname
) {
216 if (pa_in_system_mode())
219 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
222 /* We are resuming, try to lock the device */
223 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
231 pa_assert(!u
->reserve_slot
);
232 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
237 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
243 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
245 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
249 static void monitor_done(struct userdata
*u
) {
252 if (u
->monitor_slot
) {
253 pa_hook_slot_free(u
->monitor_slot
);
254 u
->monitor_slot
= NULL
;
258 pa_reserve_monitor_wrapper_unref(u
->monitor
);
263 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
269 if (pa_in_system_mode())
272 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
275 /* We are resuming, try to lock the device */
276 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
282 pa_assert(!u
->monitor_slot
);
283 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
288 static void fix_min_sleep_wakeup(struct userdata
*u
) {
289 size_t max_use
, max_use_2
;
292 pa_assert(u
->use_tsched
);
294 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
295 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
297 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
298 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
300 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
301 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
304 static void fix_tsched_watermark(struct userdata
*u
) {
307 pa_assert(u
->use_tsched
);
309 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
311 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
312 u
->tsched_watermark
= max_use
- u
->min_sleep
;
314 if (u
->tsched_watermark
< u
->min_wakeup
)
315 u
->tsched_watermark
= u
->min_wakeup
;
318 static void increase_watermark(struct userdata
*u
) {
319 size_t old_watermark
;
320 pa_usec_t old_min_latency
, new_min_latency
;
323 pa_assert(u
->use_tsched
);
325 /* First, just try to increase the watermark */
326 old_watermark
= u
->tsched_watermark
;
327 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
328 fix_tsched_watermark(u
);
330 if (old_watermark
!= u
->tsched_watermark
) {
331 pa_log_info("Increasing wakeup watermark to %0.2f ms",
332 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
336 /* Hmm, we cannot increase the watermark any further, hence let's
337 raise the latency, unless doing so was disabled in
339 if (u
->fixed_latency_range
)
342 old_min_latency
= u
->sink
->thread_info
.min_latency
;
343 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
344 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
346 if (old_min_latency
!= new_min_latency
) {
347 pa_log_info("Increasing minimal latency to %0.2f ms",
348 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
350 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
353 /* When we reach this we're officialy fucked! */
356 static void decrease_watermark(struct userdata
*u
) {
357 size_t old_watermark
;
361 pa_assert(u
->use_tsched
);
363 now
= pa_rtclock_now();
365 if (u
->watermark_dec_not_before
<= 0)
368 if (u
->watermark_dec_not_before
> now
)
371 old_watermark
= u
->tsched_watermark
;
373 if (u
->tsched_watermark
< u
->watermark_dec_step
)
374 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
376 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
378 fix_tsched_watermark(u
);
380 if (old_watermark
!= u
->tsched_watermark
)
381 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
382 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
384 /* We don't change the latency range*/
387 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
390 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
393 pa_assert(sleep_usec
);
394 pa_assert(process_usec
);
397 pa_assert(u
->use_tsched
);
399 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
401 if (usec
== (pa_usec_t
) -1)
402 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
404 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
409 *sleep_usec
= usec
- wm
;
413 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
414 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
415 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
416 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
420 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
425 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
427 pa_assert(err
!= -EAGAIN
);
430 pa_log_debug("%s: Buffer underrun!", call
);
432 if (err
== -ESTRPIPE
)
433 pa_log_debug("%s: System suspended!", call
);
435 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
436 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
445 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
447 pa_bool_t underrun
= FALSE
;
449 /* We use <= instead of < for this check here because an underrun
450 * only happens after the last sample was processed, not already when
451 * it is removed from the buffer. This is particularly important
452 * when block transfer is used. */
454 if (n_bytes
<= u
->hwbuf_size
)
455 left_to_play
= u
->hwbuf_size
- n_bytes
;
458 /* We got a dropout. What a mess! */
466 if (!u
->first
&& !u
->after_rewind
)
467 if (pa_log_ratelimit(PA_LOG_INFO
))
468 pa_log_info("Underrun!");
472 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
473 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
474 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
475 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
479 pa_bool_t reset_not_before
= TRUE
;
481 if (!u
->first
&& !u
->after_rewind
) {
482 if (underrun
|| left_to_play
< u
->watermark_inc_threshold
)
483 increase_watermark(u
);
484 else if (left_to_play
> u
->watermark_dec_threshold
) {
485 reset_not_before
= FALSE
;
487 /* We decrease the watermark only if have actually
488 * been woken up by a timeout. If something else woke
489 * us up it's too easy to fulfill the deadlines... */
492 decrease_watermark(u
);
496 if (reset_not_before
)
497 u
->watermark_dec_not_before
= 0;
503 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
504 pa_bool_t work_done
= FALSE
;
505 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
510 pa_sink_assert_ref(u
->sink
);
513 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
519 pa_bool_t after_avail
= TRUE
;
521 /* First we determine how many samples are missing to fill the
522 * buffer up to 100% */
524 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
526 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
532 n_bytes
= (size_t) n
* u
->frame_size
;
535 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
538 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
543 /* We won't fill up the playback buffer before at least
544 * half the sleep time is over because otherwise we might
545 * ask for more data from the clients then they expect. We
546 * need to guarantee that clients only have to keep around
547 * a single hw buffer length. */
550 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
552 pa_log_debug("Not filling up, because too early.");
557 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
561 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
562 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
563 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
564 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
570 pa_log_debug("Not filling up, because not necessary.");
578 pa_log_debug("Not filling up, because already too many iterations.");
584 n_bytes
-= u
->hwbuf_unused
;
588 pa_log_debug("Filling up");
595 const snd_pcm_channel_area_t
*areas
;
596 snd_pcm_uframes_t offset
, frames
;
597 snd_pcm_sframes_t sframes
;
599 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
600 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
602 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
604 if (!after_avail
&& err
== -EAGAIN
)
607 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
613 /* Make sure that if these memblocks need to be copied they will fit into one slot */
614 if (frames
> pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
)
615 frames
= pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
;
617 if (!after_avail
&& frames
== 0)
620 pa_assert(frames
> 0);
623 /* Check these are multiples of 8 bit */
624 pa_assert((areas
[0].first
& 7) == 0);
625 pa_assert((areas
[0].step
& 7)== 0);
627 /* We assume a single interleaved memory buffer */
628 pa_assert((areas
[0].first
>> 3) == 0);
629 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
631 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
633 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
634 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
637 pa_sink_render_into_full(u
->sink
, &chunk
);
638 pa_memblock_unref_fixed(chunk
.memblock
);
640 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
642 if (!after_avail
&& (int) sframes
== -EAGAIN
)
645 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
653 u
->write_count
+= frames
* u
->frame_size
;
654 u
->since_start
+= frames
* u
->frame_size
;
657 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
660 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
663 n_bytes
-= (size_t) frames
* u
->frame_size
;
668 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
669 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
671 if (*sleep_usec
> process_usec
)
672 *sleep_usec
-= process_usec
;
678 return work_done
? 1 : 0;
681 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
682 pa_bool_t work_done
= FALSE
;
683 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
688 pa_sink_assert_ref(u
->sink
);
691 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
697 pa_bool_t after_avail
= TRUE
;
699 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
701 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
707 n_bytes
= (size_t) n
* u
->frame_size
;
708 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
713 /* We won't fill up the playback buffer before at least
714 * half the sleep time is over because otherwise we might
715 * ask for more data from the clients then they expect. We
716 * need to guarantee that clients only have to keep around
717 * a single hw buffer length. */
720 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
723 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
727 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
728 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
729 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
730 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
740 pa_log_debug("Not filling up, because already too many iterations.");
746 n_bytes
-= u
->hwbuf_unused
;
750 snd_pcm_sframes_t frames
;
753 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
755 if (u
->memchunk
.length
<= 0)
756 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
758 pa_assert(u
->memchunk
.length
> 0);
760 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
762 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
763 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
765 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
766 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
767 pa_memblock_release(u
->memchunk
.memblock
);
769 if (PA_UNLIKELY(frames
< 0)) {
771 if (!after_avail
&& (int) frames
== -EAGAIN
)
774 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
780 if (!after_avail
&& frames
== 0)
783 pa_assert(frames
> 0);
786 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
787 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
789 if (u
->memchunk
.length
<= 0) {
790 pa_memblock_unref(u
->memchunk
.memblock
);
791 pa_memchunk_reset(&u
->memchunk
);
796 u
->write_count
+= frames
* u
->frame_size
;
797 u
->since_start
+= frames
* u
->frame_size
;
799 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
801 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
804 n_bytes
-= (size_t) frames
* u
->frame_size
;
809 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
810 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
812 if (*sleep_usec
> process_usec
)
813 *sleep_usec
-= process_usec
;
819 return work_done
? 1 : 0;
822 static void update_smoother(struct userdata
*u
) {
823 snd_pcm_sframes_t delay
= 0;
826 pa_usec_t now1
= 0, now2
;
827 snd_pcm_status_t
*status
;
828 snd_htimestamp_t htstamp
= { 0, 0 };
830 snd_pcm_status_alloca(&status
);
833 pa_assert(u
->pcm_handle
);
835 /* Let's update the time smoother */
837 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, status
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
, FALSE
)) < 0)) {
838 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
842 snd_pcm_status_get_htstamp(status
, &htstamp
);
843 now1
= pa_timespec_load(&htstamp
);
845 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
847 now1
= pa_rtclock_now();
849 /* check if the time since the last update is bigger than the interval */
850 if (u
->last_smoother_update
> 0)
851 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
854 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
856 if (PA_UNLIKELY(position
< 0))
859 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
861 pa_smoother_put(u
->smoother
, now1
, now2
);
863 u
->last_smoother_update
= now1
;
864 /* exponentially increase the update interval up to the MAX limit */
865 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
868 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
871 pa_usec_t now1
, now2
;
875 now1
= pa_rtclock_now();
876 now2
= pa_smoother_get(u
->smoother
, now1
);
878 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
880 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
882 if (u
->memchunk
.memblock
)
883 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
888 static int build_pollfd(struct userdata
*u
) {
890 pa_assert(u
->pcm_handle
);
892 if (u
->alsa_rtpoll_item
)
893 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
895 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
901 /* Called from IO context */
902 static int suspend(struct userdata
*u
) {
904 pa_assert(u
->pcm_handle
);
906 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
908 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
909 * take awfully long with our long buffer sizes today. */
910 snd_pcm_close(u
->pcm_handle
);
911 u
->pcm_handle
= NULL
;
913 if (u
->alsa_rtpoll_item
) {
914 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
915 u
->alsa_rtpoll_item
= NULL
;
918 /* We reset max_rewind/max_request here to make sure that while we
919 * are suspended the old max_request/max_rewind values set before
920 * the suspend can influence the per-stream buffer of newly
921 * created streams, without their requirements having any
922 * influence on them. */
923 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
924 pa_sink_set_max_request_within_thread(u
->sink
, 0);
926 pa_log_info("Device suspended...");
931 /* Called from IO context */
932 static int update_sw_params(struct userdata
*u
) {
933 snd_pcm_uframes_t avail_min
;
938 /* Use the full buffer if no one asked us for anything specific */
944 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
947 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
949 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
951 /* We need at least one sample in our buffer */
953 if (PA_UNLIKELY(b
< u
->frame_size
))
956 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
959 fix_min_sleep_wakeup(u
);
960 fix_tsched_watermark(u
);
963 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
965 /* We need at last one frame in the used part of the buffer */
966 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
969 pa_usec_t sleep_usec
, process_usec
;
971 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
972 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
975 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
977 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
, !u
->use_tsched
)) < 0) {
978 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
982 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
983 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
984 pa_sink_set_max_rewind_within_thread(u
->sink
, u
->hwbuf_size
);
986 pa_log_info("Disabling rewind_within_thread for device %s", u
->device_name
);
987 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
993 /* Called from IO Context on unsuspend or from main thread when creating sink */
994 static void reset_watermark(struct userdata
*u
, size_t tsched_watermark
, pa_sample_spec
*ss
,
997 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, ss
),
998 &u
->sink
->sample_spec
);
1000 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->sink
->sample_spec
);
1001 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->sink
->sample_spec
);
1003 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1004 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1006 fix_min_sleep_wakeup(u
);
1007 fix_tsched_watermark(u
);
1010 pa_sink_set_latency_range_within_thread(u
->sink
,
1012 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
1014 pa_sink_set_latency_range(u
->sink
,
1016 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
1018 /* work-around assert in pa_sink_set_latency_within_thead,
1019 keep track of min_latency and reuse it when
1020 this routine is called from IO context */
1021 u
->min_latency_ref
= u
->sink
->thread_info
.min_latency
;
1024 pa_log_info("Time scheduling watermark is %0.2fms",
1025 (double) pa_bytes_to_usec(u
->tsched_watermark
, ss
) / PA_USEC_PER_MSEC
);
1028 /* Called from IO context */
1029 static int unsuspend(struct userdata
*u
) {
1033 snd_pcm_uframes_t period_size
, buffer_size
;
1034 char *device_name
= NULL
;
1037 pa_assert(!u
->pcm_handle
);
1039 pa_log_info("Trying resume...");
1041 if ((is_iec958(u
) || is_hdmi(u
)) && pa_sink_is_passthrough(u
->sink
)) {
1042 /* Need to open device in NONAUDIO mode */
1043 int len
= strlen(u
->device_name
) + 8;
1045 device_name
= pa_xmalloc(len
);
1046 pa_snprintf(device_name
, len
, "%s,AES0=6", u
->device_name
);
1049 if ((err
= snd_pcm_open(&u
->pcm_handle
, device_name
? device_name
: u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
1051 SND_PCM_NO_AUTO_RESAMPLE
|
1052 SND_PCM_NO_AUTO_CHANNELS
|
1053 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
1054 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
1058 ss
= u
->sink
->sample_spec
;
1059 period_size
= u
->fragment_size
/ u
->frame_size
;
1060 buffer_size
= u
->hwbuf_size
/ u
->frame_size
;
1064 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &period_size
, &buffer_size
, 0, &b
, &d
, TRUE
)) < 0) {
1065 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
1069 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
1070 pa_log_warn("Resume failed, couldn't get original access mode.");
1074 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
1075 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1079 if (period_size
*u
->frame_size
!= u
->fragment_size
||
1080 buffer_size
*u
->frame_size
!= u
->hwbuf_size
) {
1081 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1082 (unsigned long) u
->hwbuf_size
, (unsigned long) u
->fragment_size
,
1083 (unsigned long) (buffer_size
*u
->frame_size
), (unsigned long) (period_size
*u
->frame_size
));
1087 if (update_sw_params(u
) < 0)
1090 if (build_pollfd(u
) < 0)
1094 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
1095 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1096 u
->last_smoother_update
= 0;
1101 /* reset the watermark to the value defined when sink was created */
1103 reset_watermark(u
, u
->tsched_watermark_ref
, &u
->sink
->sample_spec
, TRUE
);
1105 pa_log_info("Resumed successfully...");
1107 pa_xfree(device_name
);
1111 if (u
->pcm_handle
) {
1112 snd_pcm_close(u
->pcm_handle
);
1113 u
->pcm_handle
= NULL
;
1116 pa_xfree(device_name
);
1121 /* Called from IO context */
1122 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1123 struct userdata
*u
= PA_SINK(o
)->userdata
;
1127 case PA_SINK_MESSAGE_GET_LATENCY
: {
1131 r
= sink_get_latency(u
);
1133 *((pa_usec_t
*) data
) = r
;
1138 case PA_SINK_MESSAGE_SET_STATE
:
1140 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1142 case PA_SINK_SUSPENDED
: {
1145 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1147 if ((r
= suspend(u
)) < 0)
1154 case PA_SINK_RUNNING
: {
1157 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1158 if (build_pollfd(u
) < 0)
1162 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1163 if ((r
= unsuspend(u
)) < 0)
1170 case PA_SINK_UNLINKED
:
1172 case PA_SINK_INVALID_STATE
:
1179 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1182 /* Called from main context */
1183 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1184 pa_sink_state_t old_state
;
1187 pa_sink_assert_ref(s
);
1188 pa_assert_se(u
= s
->userdata
);
1190 old_state
= pa_sink_get_state(u
->sink
);
1192 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1194 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1195 if (reserve_init(u
, u
->device_name
) < 0)
1196 return -PA_ERR_BUSY
;
1201 static int ctl_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1202 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1205 pa_assert(u
->mixer_handle
);
1207 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1210 if (!PA_SINK_IS_LINKED(u
->sink
->state
))
1213 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
) {
1214 pa_sink_set_mixer_dirty(u
->sink
, TRUE
);
1218 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1219 pa_sink_get_volume(u
->sink
, TRUE
);
1220 pa_sink_get_mute(u
->sink
, TRUE
);
1226 static int io_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1227 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1230 pa_assert(u
->mixer_handle
);
1232 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1235 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
) {
1236 pa_sink_set_mixer_dirty(u
->sink
, TRUE
);
1240 if (mask
& SND_CTL_EVENT_MASK_VALUE
)
1241 pa_sink_update_volume_and_mute(u
->sink
);
1246 static void sink_get_volume_cb(pa_sink
*s
) {
1247 struct userdata
*u
= s
->userdata
;
1249 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1252 pa_assert(u
->mixer_path
);
1253 pa_assert(u
->mixer_handle
);
1255 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1258 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1259 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1261 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1263 if (u
->mixer_path
->has_dB
) {
1264 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1266 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &r
));
1269 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1272 s
->real_volume
= u
->hardware_volume
= r
;
1274 /* Hmm, so the hardware volume changed, let's reset our software volume */
1275 if (u
->mixer_path
->has_dB
)
1276 pa_sink_set_soft_volume(s
, NULL
);
1279 static void sink_set_volume_cb(pa_sink
*s
) {
1280 struct userdata
*u
= s
->userdata
;
1282 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1283 pa_bool_t deferred_volume
= !!(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1286 pa_assert(u
->mixer_path
);
1287 pa_assert(u
->mixer_handle
);
1289 /* Shift up by the base volume */
1290 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1292 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
, deferred_volume
, !deferred_volume
) < 0)
1295 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1296 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1298 u
->hardware_volume
= r
;
1300 if (u
->mixer_path
->has_dB
) {
1301 pa_cvolume new_soft_volume
;
1302 pa_bool_t accurate_enough
;
1303 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1305 /* Match exactly what the user requested by software */
1306 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1308 /* If the adjustment to do in software is only minimal we
1309 * can skip it. That saves us CPU at the expense of a bit of
1312 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1313 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1315 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &s
->real_volume
));
1316 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &s
->real_volume
));
1317 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &u
->hardware_volume
));
1318 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &u
->hardware_volume
));
1319 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1320 pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &new_soft_volume
),
1321 pa_yes_no(accurate_enough
));
1322 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &new_soft_volume
));
1324 if (!accurate_enough
)
1325 s
->soft_volume
= new_soft_volume
;
1328 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1330 /* We can't match exactly what the user requested, hence let's
1331 * at least tell the user about it */
1337 static void sink_write_volume_cb(pa_sink
*s
) {
1338 struct userdata
*u
= s
->userdata
;
1339 pa_cvolume hw_vol
= s
->thread_info
.current_hw_volume
;
1342 pa_assert(u
->mixer_path
);
1343 pa_assert(u
->mixer_handle
);
1344 pa_assert(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1346 /* Shift up by the base volume */
1347 pa_sw_cvolume_divide_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1349 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &hw_vol
, TRUE
, TRUE
) < 0)
1350 pa_log_error("Writing HW volume failed");
1353 pa_bool_t accurate_enough
;
1355 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1356 pa_sw_cvolume_multiply_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1358 pa_sw_cvolume_divide(&tmp_vol
, &hw_vol
, &s
->thread_info
.current_hw_volume
);
1360 (pa_cvolume_min(&tmp_vol
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1361 (pa_cvolume_max(&tmp_vol
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1363 if (!accurate_enough
) {
1365 char db
[2][PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1366 char pcnt
[2][PA_CVOLUME_SNPRINT_MAX
];
1369 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1370 pa_cvolume_snprint(vol
.pcnt
[0], sizeof(vol
.pcnt
[0]), &s
->thread_info
.current_hw_volume
),
1371 pa_cvolume_snprint(vol
.pcnt
[1], sizeof(vol
.pcnt
[1]), &hw_vol
));
1372 pa_log_debug(" in dB: %s (request) != %s",
1373 pa_sw_cvolume_snprint_dB(vol
.db
[0], sizeof(vol
.db
[0]), &s
->thread_info
.current_hw_volume
),
1374 pa_sw_cvolume_snprint_dB(vol
.db
[1], sizeof(vol
.db
[1]), &hw_vol
));
1379 static void sink_get_mute_cb(pa_sink
*s
) {
1380 struct userdata
*u
= s
->userdata
;
1384 pa_assert(u
->mixer_path
);
1385 pa_assert(u
->mixer_handle
);
1387 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1393 static void sink_set_mute_cb(pa_sink
*s
) {
1394 struct userdata
*u
= s
->userdata
;
1397 pa_assert(u
->mixer_path
);
1398 pa_assert(u
->mixer_handle
);
1400 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1403 static void mixer_volume_init(struct userdata
*u
) {
1406 if (!u
->mixer_path
->has_volume
) {
1407 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1408 pa_sink_set_get_volume_callback(u
->sink
, NULL
);
1409 pa_sink_set_set_volume_callback(u
->sink
, NULL
);
1411 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1413 pa_sink_set_get_volume_callback(u
->sink
, sink_get_volume_cb
);
1414 pa_sink_set_set_volume_callback(u
->sink
, sink_set_volume_cb
);
1416 if (u
->mixer_path
->has_dB
&& u
->deferred_volume
) {
1417 pa_sink_set_write_volume_callback(u
->sink
, sink_write_volume_cb
);
1418 pa_log_info("Successfully enabled deferred volume.");
1420 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1422 if (u
->mixer_path
->has_dB
) {
1423 pa_sink_enable_decibel_volume(u
->sink
, TRUE
);
1424 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1426 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1427 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1429 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1431 pa_sink_enable_decibel_volume(u
->sink
, FALSE
);
1432 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1434 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1435 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1438 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1441 if (!u
->mixer_path
->has_mute
) {
1442 pa_sink_set_get_mute_callback(u
->sink
, NULL
);
1443 pa_sink_set_set_mute_callback(u
->sink
, NULL
);
1444 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1446 pa_sink_set_get_mute_callback(u
->sink
, sink_get_mute_cb
);
1447 pa_sink_set_set_mute_callback(u
->sink
, sink_set_mute_cb
);
1448 pa_log_info("Using hardware mute control.");
1452 static int sink_set_port_ucm_cb(pa_sink
*s
, pa_device_port
*p
) {
1453 struct userdata
*u
= s
->userdata
;
1457 pa_assert(u
->ucm_context
);
1459 return pa_alsa_ucm_set_port(u
->ucm_context
, p
, TRUE
);
1462 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1463 struct userdata
*u
= s
->userdata
;
1464 pa_alsa_port_data
*data
;
1468 pa_assert(u
->mixer_handle
);
1470 data
= PA_DEVICE_PORT_DATA(p
);
1472 pa_assert_se(u
->mixer_path
= data
->path
);
1473 pa_alsa_path_select(u
->mixer_path
, data
->setting
, u
->mixer_handle
, s
->muted
);
1475 mixer_volume_init(u
);
1479 if (s
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1480 if (s
->write_volume
)
1490 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1491 struct userdata
*u
= s
->userdata
;
1494 pa_assert(u
->use_tsched
); /* only when timer scheduling is used
1495 * we can dynamically adjust the
1501 before
= u
->hwbuf_unused
;
1502 update_sw_params(u
);
1504 /* Let's check whether we now use only a smaller part of the
1505 buffer then before. If so, we need to make sure that subsequent
1506 rewinds are relative to the new maximum fill level and not to the
1507 current fill level. Thus, let's do a full rewind once, to clear
1510 if (u
->hwbuf_unused
> before
) {
1511 pa_log_debug("Requesting rewind due to latency change.");
1512 pa_sink_request_rewind(s
, (size_t) -1);
1516 static pa_idxset
* sink_get_formats(pa_sink
*s
) {
1517 struct userdata
*u
= s
->userdata
;
1518 pa_idxset
*ret
= pa_idxset_new(NULL
, NULL
);
1524 PA_IDXSET_FOREACH(f
, u
->formats
, idx
) {
1525 pa_idxset_put(ret
, pa_format_info_copy(f
), NULL
);
1531 static pa_bool_t
sink_set_formats(pa_sink
*s
, pa_idxset
*formats
) {
1532 struct userdata
*u
= s
->userdata
;
1533 pa_format_info
*f
, *g
;
1538 /* FIXME: also validate sample rates against what the device supports */
1539 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1540 if (is_iec958(u
) && f
->encoding
== PA_ENCODING_EAC3_IEC61937
)
1541 /* EAC3 cannot be sent over over S/PDIF */
1545 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
1546 u
->formats
= pa_idxset_new(NULL
, NULL
);
1548 /* Note: the logic below won't apply if we're using software encoding.
1549 * This is fine for now since we don't support that via the passthrough
1550 * framework, but this must be changed if we do. */
1552 /* Count how many sample rates we support */
1553 for (idx
= 0, n
= 0; u
->rates
[idx
]; idx
++)
1556 /* First insert non-PCM formats since we prefer those. */
1557 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1558 if (!pa_format_info_is_pcm(f
)) {
1559 g
= pa_format_info_copy(f
);
1560 pa_format_info_set_prop_int_array(g
, PA_PROP_FORMAT_RATE
, (int *) u
->rates
, n
);
1561 pa_idxset_put(u
->formats
, g
, NULL
);
1565 /* Now add any PCM formats */
1566 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1567 if (pa_format_info_is_pcm(f
)) {
1568 /* We don't set rates here since we'll just tack on a resampler for
1569 * unsupported rates */
1570 pa_idxset_put(u
->formats
, pa_format_info_copy(f
), NULL
);
1577 static pa_bool_t
sink_update_rate_cb(pa_sink
*s
, uint32_t rate
)
1579 struct userdata
*u
= s
->userdata
;
1581 pa_bool_t supported
= FALSE
;
1585 for (i
= 0; u
->rates
[i
]; i
++) {
1586 if (u
->rates
[i
] == rate
) {
1593 pa_log_info("Sink does not support sample rate of %d Hz", rate
);
1597 if (!PA_SINK_IS_OPENED(s
->state
)) {
1598 pa_log_info("Updating rate for device %s, new rate is %d",u
->device_name
, rate
);
1599 u
->sink
->sample_spec
.rate
= rate
;
1606 static int process_rewind(struct userdata
*u
) {
1607 snd_pcm_sframes_t unused
;
1608 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1611 if (!PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1612 pa_sink_process_rewind(u
->sink
, 0);
1616 /* Figure out how much we shall rewind and reset the counter */
1617 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1619 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1621 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1622 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1626 unused_nbytes
= (size_t) unused
* u
->frame_size
;
1628 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1629 unused_nbytes
+= u
->rewind_safeguard
;
1631 if (u
->hwbuf_size
> unused_nbytes
)
1632 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1636 if (rewind_nbytes
> limit_nbytes
)
1637 rewind_nbytes
= limit_nbytes
;
1639 if (rewind_nbytes
> 0) {
1640 snd_pcm_sframes_t in_frames
, out_frames
;
1642 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1644 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1645 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1646 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1647 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1648 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1653 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1655 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1657 if (rewind_nbytes
<= 0)
1658 pa_log_info("Tried rewind, but was apparently not possible.");
1660 u
->write_count
-= rewind_nbytes
;
1661 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1662 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1664 u
->after_rewind
= TRUE
;
1668 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1670 pa_sink_process_rewind(u
->sink
, 0);
1674 static void thread_func(void *userdata
) {
1675 struct userdata
*u
= userdata
;
1676 unsigned short revents
= 0;
1680 pa_log_debug("Thread starting up");
1682 if (u
->core
->realtime_scheduling
)
1683 pa_make_realtime(u
->core
->realtime_priority
);
1685 pa_thread_mq_install(&u
->thread_mq
);
1689 pa_usec_t rtpoll_sleep
= 0, real_sleep
;
1692 pa_log_debug("Loop");
1695 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
)) {
1696 if (process_rewind(u
) < 0)
1700 /* Render some data and write it to the dsp */
1701 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1703 pa_usec_t sleep_usec
= 0;
1704 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
1707 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1709 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1714 /* pa_log_debug("work_done = %i", work_done); */
1719 pa_log_info("Starting playback.");
1720 snd_pcm_start(u
->pcm_handle
);
1722 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1730 if (u
->use_tsched
) {
1733 if (u
->since_start
<= u
->hwbuf_size
) {
1735 /* USB devices on ALSA seem to hit a buffer
1736 * underrun during the first iterations much
1737 * quicker then we calculate here, probably due to
1738 * the transport latency. To accommodate for that
1739 * we artificially decrease the sleep time until
1740 * we have filled the buffer at least once
1743 if (pa_log_ratelimit(PA_LOG_DEBUG
))
1744 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1748 /* OK, the playback buffer is now full, let's
1749 * calculate when to wake up next */
1751 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec
/ PA_USEC_PER_MSEC
);
1754 /* Convert from the sound card time domain to the
1755 * system time domain */
1756 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1759 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec
/ PA_USEC_PER_MSEC
);
1762 /* We don't trust the conversion, so we wake up whatever comes first */
1763 rtpoll_sleep
= PA_MIN(sleep_usec
, cusec
);
1766 u
->after_rewind
= FALSE
;
1770 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1771 pa_usec_t volume_sleep
;
1772 pa_sink_volume_change_apply(u
->sink
, &volume_sleep
);
1773 if (volume_sleep
> 0) {
1774 if (rtpoll_sleep
> 0)
1775 rtpoll_sleep
= PA_MIN(volume_sleep
, rtpoll_sleep
);
1777 rtpoll_sleep
= volume_sleep
;
1781 if (rtpoll_sleep
> 0) {
1782 pa_rtpoll_set_timer_relative(u
->rtpoll
, rtpoll_sleep
);
1783 real_sleep
= pa_rtclock_now();
1786 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1788 /* Hmm, nothing to do. Let's sleep */
1789 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1792 if (rtpoll_sleep
> 0) {
1793 real_sleep
= pa_rtclock_now() - real_sleep
;
1795 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1796 (double) rtpoll_sleep
/ PA_USEC_PER_MSEC
, (double) real_sleep
/ PA_USEC_PER_MSEC
,
1797 (double) ((int64_t) real_sleep
- (int64_t) rtpoll_sleep
) / PA_USEC_PER_MSEC
);
1799 if (u
->use_tsched
&& real_sleep
> rtpoll_sleep
+ u
->tsched_watermark
)
1800 pa_log_info("Scheduling delay of %0.2fms, you might want to investigate this to improve latency...",
1801 (double) (real_sleep
- rtpoll_sleep
) / PA_USEC_PER_MSEC
);
1804 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
)
1805 pa_sink_volume_change_apply(u
->sink
, NULL
);
1810 /* Tell ALSA about this and process its response */
1811 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1812 struct pollfd
*pollfd
;
1816 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1818 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1819 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
1823 if (revents
& ~POLLOUT
) {
1824 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1830 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit(PA_LOG_DEBUG
))
1831 pa_log_debug("Wakeup from ALSA!");
1838 /* If this was no regular exit from the loop we have to continue
1839 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1840 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1841 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1844 pa_log_debug("Thread shutting down");
1847 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1853 pa_assert(device_name
);
1855 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1856 pa_sink_new_data_set_name(data
, n
);
1857 data
->namereg_fail
= TRUE
;
1861 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1862 data
->namereg_fail
= TRUE
;
1864 n
= device_id
? device_id
: device_name
;
1865 data
->namereg_fail
= FALSE
;
1869 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1871 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1873 pa_sink_new_data_set_name(data
, t
);
1877 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
1880 if (!mapping
&& !element
)
1883 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
, &hctl
))) {
1884 pa_log_info("Failed to find a working mixer device.");
1890 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1893 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, hctl
, ignore_dB
) < 0)
1896 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1897 pa_alsa_path_dump(u
->mixer_path
);
1898 } else if (!(u
->mixer_path_set
= mapping
->output_path_set
))
1905 if (u
->mixer_path
) {
1906 pa_alsa_path_free(u
->mixer_path
);
1907 u
->mixer_path
= NULL
;
1910 if (u
->mixer_handle
) {
1911 snd_mixer_close(u
->mixer_handle
);
1912 u
->mixer_handle
= NULL
;
1916 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1917 pa_bool_t need_mixer_callback
= FALSE
;
1921 if (!u
->mixer_handle
)
1924 if (u
->sink
->active_port
) {
1925 pa_alsa_port_data
*data
;
1927 /* We have a list of supported paths, so let's activate the
1928 * one that has been chosen as active */
1930 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1931 u
->mixer_path
= data
->path
;
1933 pa_alsa_path_select(data
->path
, data
->setting
, u
->mixer_handle
, u
->sink
->muted
);
1937 if (!u
->mixer_path
&& u
->mixer_path_set
)
1938 u
->mixer_path
= pa_hashmap_first(u
->mixer_path_set
->paths
);
1940 if (u
->mixer_path
) {
1941 /* Hmm, we have only a single path, then let's activate it */
1943 pa_alsa_path_select(u
->mixer_path
, u
->mixer_path
->settings
, u
->mixer_handle
, u
->sink
->muted
);
1949 mixer_volume_init(u
);
1951 /* Will we need to register callbacks? */
1952 if (u
->mixer_path_set
&& u
->mixer_path_set
->paths
) {
1956 PA_HASHMAP_FOREACH(p
, u
->mixer_path_set
->paths
, state
) {
1957 if (p
->has_volume
|| p
->has_mute
)
1958 need_mixer_callback
= TRUE
;
1961 else if (u
->mixer_path
)
1962 need_mixer_callback
= u
->mixer_path
->has_volume
|| u
->mixer_path
->has_mute
;
1964 if (need_mixer_callback
) {
1965 int (*mixer_callback
)(snd_mixer_elem_t
*, unsigned int);
1966 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1967 u
->mixer_pd
= pa_alsa_mixer_pdata_new();
1968 mixer_callback
= io_mixer_callback
;
1970 if (pa_alsa_set_mixer_rtpoll(u
->mixer_pd
, u
->mixer_handle
, u
->rtpoll
) < 0) {
1971 pa_log("Failed to initialize file descriptor monitoring");
1975 u
->mixer_fdl
= pa_alsa_fdlist_new();
1976 mixer_callback
= ctl_mixer_callback
;
1978 if (pa_alsa_fdlist_set_handle(u
->mixer_fdl
, u
->mixer_handle
, NULL
, u
->core
->mainloop
) < 0) {
1979 pa_log("Failed to initialize file descriptor monitoring");
1984 if (u
->mixer_path_set
)
1985 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1987 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1993 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1995 struct userdata
*u
= NULL
;
1996 const char *dev_id
= NULL
, *key
, *mod_name
;
1998 uint32_t alternate_sample_rate
;
2000 uint32_t nfrags
, frag_size
, buffer_size
, tsched_size
, tsched_watermark
, rewind_safeguard
;
2001 snd_pcm_uframes_t period_frames
, buffer_frames
, tsched_frames
;
2003 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
, namereg_fail
= FALSE
, deferred_volume
= FALSE
, set_formats
= FALSE
, fixed_latency_range
= FALSE
;
2004 pa_sink_new_data data
;
2005 pa_alsa_profile_set
*profile_set
= NULL
;
2011 ss
= m
->core
->default_sample_spec
;
2012 map
= m
->core
->default_channel_map
;
2013 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
2014 pa_log("Failed to parse sample specification and channel map");
2018 alternate_sample_rate
= m
->core
->alternate_sample_rate
;
2019 if (pa_modargs_get_alternate_sample_rate(ma
, &alternate_sample_rate
) < 0) {
2020 pa_log("Failed to parse alternate sample rate");
2024 frame_size
= pa_frame_size(&ss
);
2026 nfrags
= m
->core
->default_n_fragments
;
2027 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
2029 frag_size
= (uint32_t) frame_size
;
2030 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
2031 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
2033 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
2034 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
2035 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
2036 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
2037 pa_log("Failed to parse buffer metrics");
2041 buffer_size
= nfrags
* frag_size
;
2043 period_frames
= frag_size
/frame_size
;
2044 buffer_frames
= buffer_size
/frame_size
;
2045 tsched_frames
= tsched_size
/frame_size
;
2047 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
2048 pa_log("Failed to parse mmap argument.");
2052 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
2053 pa_log("Failed to parse tsched argument.");
2057 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
2058 pa_log("Failed to parse ignore_dB argument.");
2062 rewind_safeguard
= PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES
, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC
, &ss
));
2063 if (pa_modargs_get_value_u32(ma
, "rewind_safeguard", &rewind_safeguard
) < 0) {
2064 pa_log("Failed to parse rewind_safeguard argument");
2068 deferred_volume
= m
->core
->deferred_volume
;
2069 if (pa_modargs_get_value_boolean(ma
, "deferred_volume", &deferred_volume
) < 0) {
2070 pa_log("Failed to parse deferred_volume argument.");
2074 if (pa_modargs_get_value_boolean(ma
, "fixed_latency_range", &fixed_latency_range
) < 0) {
2075 pa_log("Failed to parse fixed_latency_range argument.");
2079 use_tsched
= pa_alsa_may_tsched(use_tsched
);
2081 u
= pa_xnew0(struct userdata
, 1);
2084 u
->use_mmap
= use_mmap
;
2085 u
->use_tsched
= use_tsched
;
2086 u
->deferred_volume
= deferred_volume
;
2087 u
->fixed_latency_range
= fixed_latency_range
;
2089 u
->rewind_safeguard
= rewind_safeguard
;
2090 u
->rtpoll
= pa_rtpoll_new();
2091 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
2093 u
->smoother
= pa_smoother_new(
2094 SMOOTHER_ADJUST_USEC
,
2095 SMOOTHER_WINDOW_USEC
,
2101 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
2104 if (mapping
&& mapping
->ucm_context
.ucm
)
2105 u
->ucm_context
= &mapping
->ucm_context
;
2107 dev_id
= pa_modargs_get_value(
2109 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
2111 u
->paths_dir
= pa_xstrdup(pa_modargs_get_value(ma
, "paths_dir", NULL
));
2113 if (reserve_init(u
, dev_id
) < 0)
2116 if (reserve_monitor_init(u
, dev_id
) < 0)
2124 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2125 pa_log("device_id= not set");
2129 if ((mod_name
= pa_proplist_gets(mapping
->proplist
, PA_ALSA_PROP_UCM_MODIFIER
))) {
2130 if (snd_use_case_set(u
->ucm_context
->ucm
->ucm_mgr
, "_enamod", mod_name
) < 0)
2131 pa_log("Failed to enable ucm modifier %s", mod_name
);
2133 pa_log_debug("Enabled ucm modifier %s", mod_name
);
2136 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
2140 SND_PCM_STREAM_PLAYBACK
,
2141 &period_frames
, &buffer_frames
, tsched_frames
,
2145 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2147 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
2150 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
2154 SND_PCM_STREAM_PLAYBACK
,
2155 &period_frames
, &buffer_frames
, tsched_frames
,
2156 &b
, &d
, profile_set
, &mapping
)))
2161 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
2162 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
2165 SND_PCM_STREAM_PLAYBACK
,
2166 &period_frames
, &buffer_frames
, tsched_frames
,
2171 pa_assert(u
->device_name
);
2172 pa_log_info("Successfully opened device %s.", u
->device_name
);
2174 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
2175 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
2180 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
2182 if (use_mmap
&& !b
) {
2183 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2184 u
->use_mmap
= use_mmap
= FALSE
;
2187 if (use_tsched
&& (!b
|| !d
)) {
2188 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2189 u
->use_tsched
= use_tsched
= FALSE
;
2193 pa_log_info("Successfully enabled mmap() mode.");
2195 if (u
->use_tsched
) {
2196 pa_log_info("Successfully enabled timer-based scheduling mode.");
2198 if (u
->fixed_latency_range
)
2199 pa_log_info("Disabling latency range changes on underrun");
2202 if (is_iec958(u
) || is_hdmi(u
))
2205 u
->rates
= pa_alsa_get_supported_rates(u
->pcm_handle
);
2207 pa_log_error("Failed to find any supported sample rates.");
2211 /* ALSA might tweak the sample spec, so recalculate the frame size */
2212 frame_size
= pa_frame_size(&ss
);
2214 if (!u
->ucm_context
)
2215 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
2217 pa_sink_new_data_init(&data
);
2218 data
.driver
= driver
;
2221 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
2223 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2224 * variable instead of using &data.namereg_fail directly, because
2225 * data.namereg_fail is a bitfield and taking the address of a bitfield
2226 * variable is impossible. */
2227 namereg_fail
= data
.namereg_fail
;
2228 if (pa_modargs_get_value_boolean(ma
, "namereg_fail", &namereg_fail
) < 0) {
2229 pa_log("Failed to parse namereg_fail argument.");
2230 pa_sink_new_data_done(&data
);
2233 data
.namereg_fail
= namereg_fail
;
2235 pa_sink_new_data_set_sample_spec(&data
, &ss
);
2236 pa_sink_new_data_set_channel_map(&data
, &map
);
2237 pa_sink_new_data_set_alternate_sample_rate(&data
, alternate_sample_rate
);
2239 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
2240 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
2241 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (buffer_frames
* frame_size
));
2242 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
2243 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
2246 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
2247 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
2249 while ((key
= pa_proplist_iterate(mapping
->proplist
, &state
)))
2250 pa_proplist_sets(data
.proplist
, key
, pa_proplist_gets(mapping
->proplist
, key
));
2253 pa_alsa_init_description(data
.proplist
);
2255 if (u
->control_device
)
2256 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
2258 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
2259 pa_log("Invalid properties");
2260 pa_sink_new_data_done(&data
);
2265 pa_alsa_ucm_add_ports(&data
.ports
, data
.proplist
, u
->ucm_context
, TRUE
, card
);
2266 else if (u
->mixer_path_set
)
2267 pa_alsa_add_ports(&data
, u
->mixer_path_set
, card
);
2269 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
| PA_SINK_LATENCY
| (u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0) |
2270 (set_formats
? PA_SINK_SET_FORMATS
: 0));
2271 pa_sink_new_data_done(&data
);
2274 pa_log("Failed to create sink object");
2278 if (pa_modargs_get_value_u32(ma
, "deferred_volume_safety_margin",
2279 &u
->sink
->thread_info
.volume_change_safety_margin
) < 0) {
2280 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2284 if (pa_modargs_get_value_s32(ma
, "deferred_volume_extra_delay",
2285 &u
->sink
->thread_info
.volume_change_extra_delay
) < 0) {
2286 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2290 u
->sink
->parent
.process_msg
= sink_process_msg
;
2292 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
2293 u
->sink
->set_state
= sink_set_state_cb
;
2295 u
->sink
->set_port
= sink_set_port_ucm_cb
;
2297 u
->sink
->set_port
= sink_set_port_cb
;
2298 if (u
->sink
->alternate_sample_rate
)
2299 u
->sink
->update_rate
= sink_update_rate_cb
;
2300 u
->sink
->userdata
= u
;
2302 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
2303 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
2305 u
->frame_size
= frame_size
;
2306 u
->fragment_size
= frag_size
= (size_t) (period_frames
* frame_size
);
2307 u
->hwbuf_size
= buffer_size
= (size_t) (buffer_frames
* frame_size
);
2308 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
2310 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2311 (double) u
->hwbuf_size
/ (double) u
->fragment_size
,
2312 (long unsigned) u
->fragment_size
,
2313 (double) pa_bytes_to_usec(u
->fragment_size
, &ss
) / PA_USEC_PER_MSEC
,
2314 (long unsigned) u
->hwbuf_size
,
2315 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
2317 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
2318 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
2319 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
2321 pa_log_info("Disabling rewind for device %s", u
->device_name
);
2322 pa_sink_set_max_rewind(u
->sink
, 0);
2325 if (u
->use_tsched
) {
2326 u
->tsched_watermark_ref
= tsched_watermark
;
2327 reset_watermark(u
, u
->tsched_watermark_ref
, &ss
, FALSE
);
2329 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
2333 if (update_sw_params(u
) < 0)
2336 if (u
->ucm_context
) {
2337 if (u
->sink
->active_port
&& pa_alsa_ucm_set_port(u
->ucm_context
, u
->sink
->active_port
, TRUE
) < 0)
2339 } else if (setup_mixer(u
, ignore_dB
) < 0)
2342 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
2344 if (!(u
->thread
= pa_thread_new("alsa-sink", thread_func
, u
))) {
2345 pa_log("Failed to create thread.");
2349 /* Get initial mixer settings */
2350 if (data
.volume_is_set
) {
2351 if (u
->sink
->set_volume
)
2352 u
->sink
->set_volume(u
->sink
);
2354 if (u
->sink
->get_volume
)
2355 u
->sink
->get_volume(u
->sink
);
2358 if (data
.muted_is_set
) {
2359 if (u
->sink
->set_mute
)
2360 u
->sink
->set_mute(u
->sink
);
2362 if (u
->sink
->get_mute
)
2363 u
->sink
->get_mute(u
->sink
);
2366 if ((data
.volume_is_set
|| data
.muted_is_set
) && u
->sink
->write_volume
)
2367 u
->sink
->write_volume(u
->sink
);
2370 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2371 pa_format_info
*format
;
2373 /* To start with, we only support PCM formats. Other formats may be added
2374 * with pa_sink_set_formats().*/
2375 format
= pa_format_info_new();
2376 format
->encoding
= PA_ENCODING_PCM
;
2377 u
->formats
= pa_idxset_new(NULL
, NULL
);
2378 pa_idxset_put(u
->formats
, format
, NULL
);
2380 u
->sink
->get_formats
= sink_get_formats
;
2381 u
->sink
->set_formats
= sink_set_formats
;
2384 pa_sink_put(u
->sink
);
2387 pa_alsa_profile_set_free(profile_set
);
2397 pa_alsa_profile_set_free(profile_set
);
2402 static void userdata_free(struct userdata
*u
) {
2406 pa_sink_unlink(u
->sink
);
2409 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
2410 pa_thread_free(u
->thread
);
2413 pa_thread_mq_done(&u
->thread_mq
);
2416 pa_sink_unref(u
->sink
);
2418 if (u
->memchunk
.memblock
)
2419 pa_memblock_unref(u
->memchunk
.memblock
);
2422 pa_alsa_mixer_pdata_free(u
->mixer_pd
);
2424 if (u
->alsa_rtpoll_item
)
2425 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
2428 pa_rtpoll_free(u
->rtpoll
);
2430 if (u
->pcm_handle
) {
2431 snd_pcm_drop(u
->pcm_handle
);
2432 snd_pcm_close(u
->pcm_handle
);
2436 pa_alsa_fdlist_free(u
->mixer_fdl
);
2438 if (u
->mixer_path
&& !u
->mixer_path_set
)
2439 pa_alsa_path_free(u
->mixer_path
);
2441 if (u
->mixer_handle
)
2442 snd_mixer_close(u
->mixer_handle
);
2445 pa_smoother_free(u
->smoother
);
2448 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
2456 pa_xfree(u
->device_name
);
2457 pa_xfree(u
->control_device
);
2458 pa_xfree(u
->paths_dir
);
2462 void pa_alsa_sink_free(pa_sink
*s
) {
2465 pa_sink_assert_ref(s
);
2466 pa_assert_se(u
= s
->userdata
);