/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"
/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

/* Timer-based scheduling (tsched) tuning constants. All values are in
 * microseconds and are converted to bytes per the sink's sample spec at
 * runtime. */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a drop out recheck if things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level ever below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wakeup at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s -- smoother windows size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
/* NOTE(review): this span is the interior of `struct userdata` — the opening
 * `struct userdata {`, the closing `};`, and many members referenced by the
 * functions below (core, module, sink, rtpoll, frame_size, fragment_size,
 * hwbuf_size, hwbuf_unused, min_sleep, min_wakeup, tsched_watermark,
 * watermark_inc_step, watermark_dec_step, ...) were lost in extraction.
 * The stray leading integers are original-file line numbers fused into the
 * text. This fragment must be reconciled against the upstream declaration
 * before it can compile; left byte-identical here, comments only added. */
99 pa_thread_mq thread_mq
;
/* ALSA handles: the PCM device itself plus mixer plumbing. */
102 snd_pcm_t
*pcm_handle
;
105 pa_alsa_fdlist
*mixer_fdl
;
106 pa_alsa_mixer_pdata
*mixer_pd
;
107 snd_mixer_t
*mixer_handle
;
108 pa_alsa_path_set
*mixer_path_set
;
109 pa_alsa_path
*mixer_path
;
/* Last volume written to / read from the hardware mixer. */
111 pa_cvolume hardware_volume
;
/* Watermark bookkeeping for timer-based scheduling (sizes in bytes). */
120 tsched_watermark_ref
,
126 watermark_inc_threshold
,
127 watermark_dec_threshold
,
130 pa_usec_t watermark_dec_not_before
;
131 pa_usec_t min_latency_ref
;
/* Staging chunk used by the non-mmap (snd_pcm_writei) write path. */
133 pa_memchunk memchunk
;
135 char *device_name
; /* name of the PCM device */
136 char *control_device
; /* name of the control device */
138 pa_bool_t use_mmap
:1, use_tsched
:1, deferred_volume
:1, fixed_latency_range
:1;
140 pa_bool_t first
, after_rewind
;
142 pa_rtpoll_item
*alsa_rtpoll_item
;
/* Time smoother state used to estimate playback position/latency. */
144 pa_smoother
*smoother
;
145 uint64_t write_count
;
146 uint64_t since_start
;
147 pa_usec_t smoother_interval
;
148 pa_usec_t last_smoother_update
;
/* Device-reservation (session D-Bus) hooks. */
152 pa_reserve_wrapper
*reserve
;
153 pa_hook_slot
*reserve_slot
;
154 pa_reserve_monitor_wrapper
*monitor
;
155 pa_hook_slot
*monitor_slot
;
/* ALSA UCM (Use Case Manager) context, when the profile set uses UCM. */
158 pa_alsa_ucm_mapping_context
*ucm_context
;
/* Forward declaration: tears down a userdata instance (defined later in file). */
static void userdata_free(struct userdata *u);
163 /* FIXME: Is there a better way to do this than device names? */
164 static pa_bool_t
is_iec958(struct userdata
*u
) {
165 return (strncmp("iec958", u
->device_name
, 6) == 0);
168 static pa_bool_t
is_hdmi(struct userdata
*u
) {
169 return (strncmp("hdmi", u
->device_name
, 4) == 0);
172 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
176 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
177 return PA_HOOK_CANCEL
;
182 static void reserve_done(struct userdata
*u
) {
185 if (u
->reserve_slot
) {
186 pa_hook_slot_free(u
->reserve_slot
);
187 u
->reserve_slot
= NULL
;
191 pa_reserve_wrapper_unref(u
->reserve
);
196 static void reserve_update(struct userdata
*u
) {
197 const char *description
;
200 if (!u
->sink
|| !u
->reserve
)
203 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
204 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
207 static int reserve_init(struct userdata
*u
, const char *dname
) {
216 if (pa_in_system_mode())
219 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
222 /* We are resuming, try to lock the device */
223 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
231 pa_assert(!u
->reserve_slot
);
232 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
237 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
243 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
245 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
249 static void monitor_done(struct userdata
*u
) {
252 if (u
->monitor_slot
) {
253 pa_hook_slot_free(u
->monitor_slot
);
254 u
->monitor_slot
= NULL
;
258 pa_reserve_monitor_wrapper_unref(u
->monitor
);
263 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
269 if (pa_in_system_mode())
272 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
275 /* We are resuming, try to lock the device */
276 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
282 pa_assert(!u
->monitor_slot
);
283 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
288 static void fix_min_sleep_wakeup(struct userdata
*u
) {
289 size_t max_use
, max_use_2
;
292 pa_assert(u
->use_tsched
);
294 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
295 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
297 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
298 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
300 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
301 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
304 static void fix_tsched_watermark(struct userdata
*u
) {
307 pa_assert(u
->use_tsched
);
309 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
311 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
312 u
->tsched_watermark
= max_use
- u
->min_sleep
;
314 if (u
->tsched_watermark
< u
->min_wakeup
)
315 u
->tsched_watermark
= u
->min_wakeup
;
318 static void increase_watermark(struct userdata
*u
) {
319 size_t old_watermark
;
320 pa_usec_t old_min_latency
, new_min_latency
;
323 pa_assert(u
->use_tsched
);
325 /* First, just try to increase the watermark */
326 old_watermark
= u
->tsched_watermark
;
327 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
328 fix_tsched_watermark(u
);
330 if (old_watermark
!= u
->tsched_watermark
) {
331 pa_log_info("Increasing wakeup watermark to %0.2f ms",
332 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
336 /* Hmm, we cannot increase the watermark any further, hence let's
337 raise the latency, unless doing so was disabled in
339 if (u
->fixed_latency_range
)
342 old_min_latency
= u
->sink
->thread_info
.min_latency
;
343 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
344 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
346 if (old_min_latency
!= new_min_latency
) {
347 pa_log_info("Increasing minimal latency to %0.2f ms",
348 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
350 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
353 /* When we reach this we're officialy fucked! */
356 static void decrease_watermark(struct userdata
*u
) {
357 size_t old_watermark
;
361 pa_assert(u
->use_tsched
);
363 now
= pa_rtclock_now();
365 if (u
->watermark_dec_not_before
<= 0)
368 if (u
->watermark_dec_not_before
> now
)
371 old_watermark
= u
->tsched_watermark
;
373 if (u
->tsched_watermark
< u
->watermark_dec_step
)
374 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
376 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
378 fix_tsched_watermark(u
);
380 if (old_watermark
!= u
->tsched_watermark
)
381 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
382 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
384 /* We don't change the latency range*/
387 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
390 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
393 pa_assert(sleep_usec
);
394 pa_assert(process_usec
);
397 pa_assert(u
->use_tsched
);
399 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
401 if (usec
== (pa_usec_t
) -1)
402 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
404 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
409 *sleep_usec
= usec
- wm
;
413 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
414 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
415 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
416 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
420 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
425 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
427 pa_assert(err
!= -EAGAIN
);
430 pa_log_debug("%s: Buffer underrun!", call
);
432 if (err
== -ESTRPIPE
)
433 pa_log_debug("%s: System suspended!", call
);
435 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
436 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
445 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
447 pa_bool_t underrun
= FALSE
;
449 /* We use <= instead of < for this check here because an underrun
450 * only happens after the last sample was processed, not already when
451 * it is removed from the buffer. This is particularly important
452 * when block transfer is used. */
454 if (n_bytes
<= u
->hwbuf_size
)
455 left_to_play
= u
->hwbuf_size
- n_bytes
;
458 /* We got a dropout. What a mess! */
466 if (!u
->first
&& !u
->after_rewind
)
467 if (pa_log_ratelimit(PA_LOG_INFO
))
468 pa_log_info("Underrun!");
472 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
473 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
474 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
475 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
479 pa_bool_t reset_not_before
= TRUE
;
481 if (!u
->first
&& !u
->after_rewind
) {
482 if (underrun
|| left_to_play
< u
->watermark_inc_threshold
)
483 increase_watermark(u
);
484 else if (left_to_play
> u
->watermark_dec_threshold
) {
485 reset_not_before
= FALSE
;
487 /* We decrease the watermark only if have actually
488 * been woken up by a timeout. If something else woke
489 * us up it's too easy to fulfill the deadlines... */
492 decrease_watermark(u
);
496 if (reset_not_before
)
497 u
->watermark_dec_not_before
= 0;
503 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
504 pa_bool_t work_done
= FALSE
;
505 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
510 pa_sink_assert_ref(u
->sink
);
513 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
519 pa_bool_t after_avail
= TRUE
;
521 /* First we determine how many samples are missing to fill the
522 * buffer up to 100% */
524 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
526 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
532 n_bytes
= (size_t) n
* u
->frame_size
;
535 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
538 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
543 /* We won't fill up the playback buffer before at least
544 * half the sleep time is over because otherwise we might
545 * ask for more data from the clients then they expect. We
546 * need to guarantee that clients only have to keep around
547 * a single hw buffer length. */
550 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
552 pa_log_debug("Not filling up, because too early.");
557 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
561 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
562 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
563 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
564 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
570 pa_log_debug("Not filling up, because not necessary.");
578 pa_log_debug("Not filling up, because already too many iterations.");
584 n_bytes
-= u
->hwbuf_unused
;
588 pa_log_debug("Filling up");
595 const snd_pcm_channel_area_t
*areas
;
596 snd_pcm_uframes_t offset
, frames
;
597 snd_pcm_sframes_t sframes
;
599 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
600 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
602 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
604 if (!after_avail
&& err
== -EAGAIN
)
607 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
613 /* Make sure that if these memblocks need to be copied they will fit into one slot */
614 if (frames
> pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
)
615 frames
= pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
;
617 if (!after_avail
&& frames
== 0)
620 pa_assert(frames
> 0);
623 /* Check these are multiples of 8 bit */
624 pa_assert((areas
[0].first
& 7) == 0);
625 pa_assert((areas
[0].step
& 7)== 0);
627 /* We assume a single interleaved memory buffer */
628 pa_assert((areas
[0].first
>> 3) == 0);
629 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
631 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
633 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
634 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
637 pa_sink_render_into_full(u
->sink
, &chunk
);
638 pa_memblock_unref_fixed(chunk
.memblock
);
640 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
642 if (!after_avail
&& (int) sframes
== -EAGAIN
)
645 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
653 u
->write_count
+= frames
* u
->frame_size
;
654 u
->since_start
+= frames
* u
->frame_size
;
657 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
660 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
663 n_bytes
-= (size_t) frames
* u
->frame_size
;
668 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
669 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
671 if (*sleep_usec
> process_usec
)
672 *sleep_usec
-= process_usec
;
678 return work_done
? 1 : 0;
681 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
682 pa_bool_t work_done
= FALSE
;
683 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
688 pa_sink_assert_ref(u
->sink
);
691 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
697 pa_bool_t after_avail
= TRUE
;
699 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
701 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
707 n_bytes
= (size_t) n
* u
->frame_size
;
708 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
713 /* We won't fill up the playback buffer before at least
714 * half the sleep time is over because otherwise we might
715 * ask for more data from the clients then they expect. We
716 * need to guarantee that clients only have to keep around
717 * a single hw buffer length. */
720 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
723 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
727 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
728 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
729 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
730 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
740 pa_log_debug("Not filling up, because already too many iterations.");
746 n_bytes
-= u
->hwbuf_unused
;
750 snd_pcm_sframes_t frames
;
753 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
755 if (u
->memchunk
.length
<= 0)
756 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
758 pa_assert(u
->memchunk
.length
> 0);
760 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
762 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
763 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
765 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
766 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
767 pa_memblock_release(u
->memchunk
.memblock
);
769 if (PA_UNLIKELY(frames
< 0)) {
771 if (!after_avail
&& (int) frames
== -EAGAIN
)
774 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
780 if (!after_avail
&& frames
== 0)
783 pa_assert(frames
> 0);
786 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
787 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
789 if (u
->memchunk
.length
<= 0) {
790 pa_memblock_unref(u
->memchunk
.memblock
);
791 pa_memchunk_reset(&u
->memchunk
);
796 u
->write_count
+= frames
* u
->frame_size
;
797 u
->since_start
+= frames
* u
->frame_size
;
799 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
801 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
804 n_bytes
-= (size_t) frames
* u
->frame_size
;
809 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
810 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
812 if (*sleep_usec
> process_usec
)
813 *sleep_usec
-= process_usec
;
819 return work_done
? 1 : 0;
822 static void update_smoother(struct userdata
*u
) {
823 snd_pcm_sframes_t delay
= 0;
826 pa_usec_t now1
= 0, now2
;
827 snd_pcm_status_t
*status
;
829 snd_pcm_status_alloca(&status
);
832 pa_assert(u
->pcm_handle
);
834 /* Let's update the time smoother */
836 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
, FALSE
)) < 0)) {
837 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
841 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
842 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
844 snd_htimestamp_t htstamp
= { 0, 0 };
845 snd_pcm_status_get_htstamp(status
, &htstamp
);
846 now1
= pa_timespec_load(&htstamp
);
849 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
851 now1
= pa_rtclock_now();
853 /* check if the time since the last update is bigger than the interval */
854 if (u
->last_smoother_update
> 0)
855 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
858 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
860 if (PA_UNLIKELY(position
< 0))
863 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
865 pa_smoother_put(u
->smoother
, now1
, now2
);
867 u
->last_smoother_update
= now1
;
868 /* exponentially increase the update interval up to the MAX limit */
869 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
872 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
875 pa_usec_t now1
, now2
;
879 now1
= pa_rtclock_now();
880 now2
= pa_smoother_get(u
->smoother
, now1
);
882 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
884 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
886 if (u
->memchunk
.memblock
)
887 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
892 static int build_pollfd(struct userdata
*u
) {
894 pa_assert(u
->pcm_handle
);
896 if (u
->alsa_rtpoll_item
)
897 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
899 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
905 /* Called from IO context */
906 static int suspend(struct userdata
*u
) {
907 const char *mod_name
;
910 pa_assert(u
->pcm_handle
);
912 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
914 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
915 * take awfully long with our long buffer sizes today. */
916 snd_pcm_close(u
->pcm_handle
);
917 u
->pcm_handle
= NULL
;
919 if ((mod_name
= pa_proplist_gets(u
->sink
->proplist
, PA_ALSA_PROP_UCM_MODIFIER
))) {
920 pa_log_info("Disable ucm modifier %s", mod_name
);
922 if (snd_use_case_set(u
->ucm_context
->ucm
->ucm_mgr
, "_dismod", mod_name
) < 0)
923 pa_log("Failed to disable ucm modifier %s", mod_name
);
926 if (u
->alsa_rtpoll_item
) {
927 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
928 u
->alsa_rtpoll_item
= NULL
;
931 /* We reset max_rewind/max_request here to make sure that while we
932 * are suspended the old max_request/max_rewind values set before
933 * the suspend can influence the per-stream buffer of newly
934 * created streams, without their requirements having any
935 * influence on them. */
936 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
937 pa_sink_set_max_request_within_thread(u
->sink
, 0);
939 pa_log_info("Device suspended...");
944 /* Called from IO context */
945 static int update_sw_params(struct userdata
*u
) {
946 snd_pcm_uframes_t avail_min
;
951 /* Use the full buffer if no one asked us for anything specific */
957 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
960 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
962 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
964 /* We need at least one sample in our buffer */
966 if (PA_UNLIKELY(b
< u
->frame_size
))
969 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
972 fix_min_sleep_wakeup(u
);
973 fix_tsched_watermark(u
);
976 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
978 /* We need at last one frame in the used part of the buffer */
979 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
982 pa_usec_t sleep_usec
, process_usec
;
984 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
985 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
988 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
990 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
, !u
->use_tsched
)) < 0) {
991 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
995 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
996 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
997 pa_sink_set_max_rewind_within_thread(u
->sink
, u
->hwbuf_size
);
999 pa_log_info("Disabling rewind_within_thread for device %s", u
->device_name
);
1000 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
1006 /* Called from IO Context on unsuspend or from main thread when creating sink */
1007 static void reset_watermark(struct userdata
*u
, size_t tsched_watermark
, pa_sample_spec
*ss
,
1008 pa_bool_t in_thread
)
1010 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, ss
),
1011 &u
->sink
->sample_spec
);
1013 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->sink
->sample_spec
);
1014 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->sink
->sample_spec
);
1016 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1017 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1019 fix_min_sleep_wakeup(u
);
1020 fix_tsched_watermark(u
);
1023 pa_sink_set_latency_range_within_thread(u
->sink
,
1025 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
1027 pa_sink_set_latency_range(u
->sink
,
1029 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
1031 /* work-around assert in pa_sink_set_latency_within_thead,
1032 keep track of min_latency and reuse it when
1033 this routine is called from IO context */
1034 u
->min_latency_ref
= u
->sink
->thread_info
.min_latency
;
1037 pa_log_info("Time scheduling watermark is %0.2fms",
1038 (double) pa_bytes_to_usec(u
->tsched_watermark
, ss
) / PA_USEC_PER_MSEC
);
1041 /* Called from IO context */
1042 static int unsuspend(struct userdata
*u
) {
1046 snd_pcm_uframes_t period_size
, buffer_size
;
1047 char *device_name
= NULL
;
1048 const char *mod_name
;
1051 pa_assert(!u
->pcm_handle
);
1053 pa_log_info("Trying resume...");
1055 if ((mod_name
= pa_proplist_gets(u
->sink
->proplist
, PA_ALSA_PROP_UCM_MODIFIER
))) {
1056 pa_log_info("Enable ucm modifier %s", mod_name
);
1058 if (snd_use_case_set(u
->ucm_context
->ucm
->ucm_mgr
, "_enamod", mod_name
) < 0)
1059 pa_log("Failed to enable ucm modifier %s", mod_name
);
1062 if ((is_iec958(u
) || is_hdmi(u
)) && pa_sink_is_passthrough(u
->sink
)) {
1063 /* Need to open device in NONAUDIO mode */
1064 int len
= strlen(u
->device_name
) + 8;
1066 device_name
= pa_xmalloc(len
);
1067 pa_snprintf(device_name
, len
, "%s,AES0=6", u
->device_name
);
1070 if ((err
= snd_pcm_open(&u
->pcm_handle
, device_name
? device_name
: u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
1072 SND_PCM_NO_AUTO_RESAMPLE
|
1073 SND_PCM_NO_AUTO_CHANNELS
|
1074 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
1075 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
1079 ss
= u
->sink
->sample_spec
;
1080 period_size
= u
->fragment_size
/ u
->frame_size
;
1081 buffer_size
= u
->hwbuf_size
/ u
->frame_size
;
1085 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &period_size
, &buffer_size
, 0, &b
, &d
, TRUE
)) < 0) {
1086 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
1090 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
1091 pa_log_warn("Resume failed, couldn't get original access mode.");
1095 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
1096 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1100 if (period_size
*u
->frame_size
!= u
->fragment_size
||
1101 buffer_size
*u
->frame_size
!= u
->hwbuf_size
) {
1102 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1103 (unsigned long) u
->hwbuf_size
, (unsigned long) u
->fragment_size
,
1104 (unsigned long) (buffer_size
*u
->frame_size
), (unsigned long) (period_size
*u
->frame_size
));
1108 if (update_sw_params(u
) < 0)
1111 if (build_pollfd(u
) < 0)
1115 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
1116 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1117 u
->last_smoother_update
= 0;
1122 /* reset the watermark to the value defined when sink was created */
1124 reset_watermark(u
, u
->tsched_watermark_ref
, &u
->sink
->sample_spec
, TRUE
);
1126 pa_log_info("Resumed successfully...");
1128 pa_xfree(device_name
);
1132 if (u
->pcm_handle
) {
1133 snd_pcm_close(u
->pcm_handle
);
1134 u
->pcm_handle
= NULL
;
1137 pa_xfree(device_name
);
1142 /* Called from IO context */
1143 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1144 struct userdata
*u
= PA_SINK(o
)->userdata
;
1148 case PA_SINK_MESSAGE_GET_LATENCY
: {
1152 r
= sink_get_latency(u
);
1154 *((pa_usec_t
*) data
) = r
;
1159 case PA_SINK_MESSAGE_SET_STATE
:
1161 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1163 case PA_SINK_SUSPENDED
: {
1166 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1168 if ((r
= suspend(u
)) < 0)
1175 case PA_SINK_RUNNING
: {
1178 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1179 if (build_pollfd(u
) < 0)
1183 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1184 if ((r
= unsuspend(u
)) < 0)
1191 case PA_SINK_UNLINKED
:
1193 case PA_SINK_INVALID_STATE
:
1200 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1203 /* Called from main context */
1204 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1205 pa_sink_state_t old_state
;
1208 pa_sink_assert_ref(s
);
1209 pa_assert_se(u
= s
->userdata
);
1211 old_state
= pa_sink_get_state(u
->sink
);
1213 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1215 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1216 if (reserve_init(u
, u
->device_name
) < 0)
1217 return -PA_ERR_BUSY
;
1222 static int ctl_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1223 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1226 pa_assert(u
->mixer_handle
);
1228 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1231 if (!PA_SINK_IS_LINKED(u
->sink
->state
))
1234 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
) {
1235 pa_sink_set_mixer_dirty(u
->sink
, TRUE
);
1239 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1240 pa_sink_get_volume(u
->sink
, TRUE
);
1241 pa_sink_get_mute(u
->sink
, TRUE
);
1247 static int io_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1248 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1251 pa_assert(u
->mixer_handle
);
1253 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1256 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
) {
1257 pa_sink_set_mixer_dirty(u
->sink
, TRUE
);
1261 if (mask
& SND_CTL_EVENT_MASK_VALUE
)
1262 pa_sink_update_volume_and_mute(u
->sink
);
/* Read the current hardware volume from the mixer path and mirror it
 * into the sink. If the hardware volume changed behind our back, also
 * reset the software volume (dB case) so the combined volume stays
 * consistent. */
1267 static void sink_get_volume_cb(pa_sink
*s
) {
1268 struct userdata
*u
= s
->userdata
;
1270 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1273 pa_assert(u
->mixer_path
);
1274 pa_assert(u
->mixer_handle
);
/* NOTE(review): 'r' is a pa_cvolume declared on a source line not
 * visible in this extract. */
1276 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1279 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1280 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1282 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1284 if (u
->mixer_path
->has_dB
) {
1285 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1287 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &r
));
/* Nothing changed since the last read: leave sink state untouched. */
1290 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1293 s
->real_volume
= u
->hardware_volume
= r
;
1295 /* Hmm, so the hardware volume changed, let's reset our software volume */
1296 if (u
->mixer_path
->has_dB
)
1297 pa_sink_set_soft_volume(s
, NULL
);
/* Push the sink's requested real_volume into the hardware mixer. If
 * the hardware has a dB scale, compute the residual software volume
 * needed to match the user's request exactly, skipping it when the
 * hardware already got close enough (within VOLUME_ACCURACY). */
1300 static void sink_set_volume_cb(pa_sink
*s
) {
1301 struct userdata
*u
= s
->userdata
;
1303 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1304 pa_bool_t deferred_volume
= !!(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1307 pa_assert(u
->mixer_path
);
1308 pa_assert(u
->mixer_handle
);
/* NOTE(review): 'r' is a pa_cvolume declared on a source line not
 * visible in this extract. */
1310 /* Shift up by the base volume */
1311 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1313 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
, deferred_volume
, !deferred_volume
) < 0)
1316 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1317 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
/* 'r' now holds what the hardware actually accepted. */
1319 u
->hardware_volume
= r
;
1321 if (u
->mixer_path
->has_dB
) {
1322 pa_cvolume new_soft_volume
;
1323 pa_bool_t accurate_enough
;
1324 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1326 /* Match exactly what the user requested by software */
1327 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1329 /* If the adjustment to do in software is only minimal we
1330 * can skip it. That saves us CPU at the expense of a bit of
1333 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1334 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1336 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &s
->real_volume
));
1337 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &s
->real_volume
));
1338 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &u
->hardware_volume
));
1339 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &u
->hardware_volume
));
1340 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1341 pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &new_soft_volume
),
1342 pa_yes_no(accurate_enough
));
1343 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &new_soft_volume
));
1345 if (!accurate_enough
)
1346 s
->soft_volume
= new_soft_volume
;
1349 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1351 /* We can't match exactly what the user requested, hence let's
1352 * at least tell the user about it */
/* Deferred-volume writer: called from the IO thread to apply the
 * previously computed hardware volume to the mixer at the scheduled
 * time, then verify (within VOLUME_ACCURACY) that the hardware took
 * the value we asked for, logging a debug diff if it did not. */
1358 static void sink_write_volume_cb(pa_sink
*s
) {
1359 struct userdata
*u
= s
->userdata
;
1360 pa_cvolume hw_vol
= s
->thread_info
.current_hw_volume
;
1363 pa_assert(u
->mixer_path
);
1364 pa_assert(u
->mixer_handle
);
1365 pa_assert(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1367 /* Shift up by the base volume */
1368 pa_sw_cvolume_divide_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1370 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &hw_vol
, TRUE
, TRUE
) < 0)
1371 pa_log_error("Writing HW volume failed");
1374 pa_bool_t accurate_enough
;
/* NOTE(review): 'tmp_vol' and the 'vol' struct used below are declared
 * on source lines not visible in this extract. */
1376 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1377 pa_sw_cvolume_multiply_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1379 pa_sw_cvolume_divide(&tmp_vol
, &hw_vol
, &s
->thread_info
.current_hw_volume
);
1381 (pa_cvolume_min(&tmp_vol
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1382 (pa_cvolume_max(&tmp_vol
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1384 if (!accurate_enough
) {
1386 char db
[2][PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1387 char pcnt
[2][PA_CVOLUME_SNPRINT_MAX
];
1390 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1391 pa_cvolume_snprint(vol
.pcnt
[0], sizeof(vol
.pcnt
[0]), &s
->thread_info
.current_hw_volume
),
1392 pa_cvolume_snprint(vol
.pcnt
[1], sizeof(vol
.pcnt
[1]), &hw_vol
));
1393 pa_log_debug(" in dB: %s (request) != %s",
1394 pa_sw_cvolume_snprint_dB(vol
.db
[0], sizeof(vol
.db
[0]), &s
->thread_info
.current_hw_volume
),
1395 pa_sw_cvolume_snprint_dB(vol
.db
[1], sizeof(vol
.db
[1]), &hw_vol
));
1400 static void sink_get_mute_cb(pa_sink
*s
) {
1401 struct userdata
*u
= s
->userdata
;
1405 pa_assert(u
->mixer_path
);
1406 pa_assert(u
->mixer_handle
);
1408 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1414 static void sink_set_mute_cb(pa_sink
*s
) {
1415 struct userdata
*u
= s
->userdata
;
1418 pa_assert(u
->mixer_path
);
1419 pa_assert(u
->mixer_handle
);
1421 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
/* Wire up (or clear) the sink's volume and mute callbacks according to
 * what the probed mixer path supports: hardware dB volume, raw hardware
 * volume, deferred volume writing, and hardware mute. Falls back to
 * software volume/mute when the driver offers no control. */
1424 static void mixer_volume_init(struct userdata
*u
) {
1427 if (!u
->mixer_path
->has_volume
) {
/* No hardware volume at all: clear every volume callback. */
1428 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1429 pa_sink_set_get_volume_callback(u
->sink
, NULL
);
1430 pa_sink_set_set_volume_callback(u
->sink
, NULL
);
1432 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1434 pa_sink_set_get_volume_callback(u
->sink
, sink_get_volume_cb
);
1435 pa_sink_set_set_volume_callback(u
->sink
, sink_set_volume_cb
);
/* Deferred volume needs a dB scale so that timing-accurate writes
 * can be matched against requested values. */
1437 if (u
->mixer_path
->has_dB
&& u
->deferred_volume
) {
1438 pa_sink_set_write_volume_callback(u
->sink
, sink_write_volume_cb
);
1439 pa_log_info("Successfully enabled deferred volume.");
1441 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1443 if (u
->mixer_path
->has_dB
) {
1444 pa_sink_enable_decibel_volume(u
->sink
, TRUE
);
1445 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
/* Base volume chosen so the hardware's max_dB maps to 0 dB. */
1447 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1448 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1450 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1452 pa_sink_enable_decibel_volume(u
->sink
, FALSE
);
1453 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1455 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1456 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1459 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1462 if (!u
->mixer_path
->has_mute
) {
1463 pa_sink_set_get_mute_callback(u
->sink
, NULL
);
1464 pa_sink_set_set_mute_callback(u
->sink
, NULL
);
1465 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1467 pa_sink_set_get_mute_callback(u
->sink
, sink_get_mute_cb
);
1468 pa_sink_set_set_mute_callback(u
->sink
, sink_set_mute_cb
);
1469 pa_log_info("Using hardware mute control.");
1473 static int sink_set_port_ucm_cb(pa_sink
*s
, pa_device_port
*p
) {
1474 struct userdata
*u
= s
->userdata
;
1478 pa_assert(u
->ucm_context
);
1480 return pa_alsa_ucm_set_port(u
->ucm_context
, p
, TRUE
);
/* Port-switch callback for mixer-path-managed devices: select the
 * path attached to the new port, re-init the volume/mute callbacks
 * for it, and re-sync volume state with the hardware. */
1483 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1484 struct userdata
*u
= s
->userdata
;
1485 pa_alsa_port_data
*data
;
1489 pa_assert(u
->mixer_handle
);
1491 data
= PA_DEVICE_PORT_DATA(p
);
/* The port must carry the mixer path that is to become active. */
1493 pa_assert_se(u
->mixer_path
= data
->path
);
1494 pa_alsa_path_select(u
->mixer_path
, data
->setting
, u
->mixer_handle
, s
->muted
);
1496 mixer_volume_init(u
);
1500 if (s
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1501 if (s
->write_volume
)
1511 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1512 struct userdata
*u
= s
->userdata
;
1515 pa_assert(u
->use_tsched
); /* only when timer scheduling is used
1516 * we can dynamically adjust the
1522 before
= u
->hwbuf_unused
;
1523 update_sw_params(u
);
1525 /* Let's check whether we now use only a smaller part of the
1526 buffer then before. If so, we need to make sure that subsequent
1527 rewinds are relative to the new maximum fill level and not to the
1528 current fill level. Thus, let's do a full rewind once, to clear
1531 if (u
->hwbuf_unused
> before
) {
1532 pa_log_debug("Requesting rewind due to latency change.");
1533 pa_sink_request_rewind(s
, (size_t) -1);
1537 static pa_idxset
* sink_get_formats(pa_sink
*s
) {
1538 struct userdata
*u
= s
->userdata
;
1539 pa_idxset
*ret
= pa_idxset_new(NULL
, NULL
);
1545 PA_IDXSET_FOREACH(f
, u
->formats
, idx
) {
1546 pa_idxset_put(ret
, pa_format_info_copy(f
), NULL
);
/* Replace the sink's supported-format set with the given one,
 * rejecting formats the transport cannot carry (EAC3 over S/PDIF),
 * annotating non-PCM formats with the device's supported sample
 * rates, and accepting PCM formats unconditionally (unsupported PCM
 * rates get a resampler). */
1552 static pa_bool_t
sink_set_formats(pa_sink
*s
, pa_idxset
*formats
) {
1553 struct userdata
*u
= s
->userdata
;
1554 pa_format_info
*f
, *g
;
1559 /* FIXME: also validate sample rates against what the device supports */
1560 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1561 if (is_iec958(u
) && f
->encoding
== PA_ENCODING_EAC3_IEC61937
)
1562 /* EAC3 cannot be sent over over S/PDIF */
/* Drop the old format set and start from scratch. */
1566 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
1567 u
->formats
= pa_idxset_new(NULL
, NULL
);
1569 /* Note: the logic below won't apply if we're using software encoding.
1570 * This is fine for now since we don't support that via the passthrough
1571 * framework, but this must be changed if we do. */
1573 /* Count how many sample rates we support */
1574 for (idx
= 0, n
= 0; u
->rates
[idx
]; idx
++)
1577 /* First insert non-PCM formats since we prefer those. */
1578 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1579 if (!pa_format_info_is_pcm(f
)) {
1580 g
= pa_format_info_copy(f
);
1581 pa_format_info_set_prop_int_array(g
, PA_PROP_FORMAT_RATE
, (int *) u
->rates
, n
);
1582 pa_idxset_put(u
->formats
, g
, NULL
);
1586 /* Now add any PCM formats */
1587 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1588 if (pa_format_info_is_pcm(f
)) {
1589 /* We don't set rates here since we'll just tack on a resampler for
1590 * unsupported rates */
1591 pa_idxset_put(u
->formats
, pa_format_info_copy(f
), NULL
);
/* Switch the sink to a new sample rate, but only if the device
 * supports that rate and the sink is not currently opened (so the
 * rate change cannot glitch running streams). */
1598 static pa_bool_t
sink_update_rate_cb(pa_sink
*s
, uint32_t rate
)
1600 struct userdata
*u
= s
->userdata
;
1602 pa_bool_t supported
= FALSE
;
/* Scan the NULL/0-terminated list of device-supported rates. */
1606 for (i
= 0; u
->rates
[i
]; i
++) {
1607 if (u
->rates
[i
] == rate
) {
1614 pa_log_info("Sink does not support sample rate of %d Hz", rate
);
/* Only apply while the sink is suspended/closed. */
1618 if (!PA_SINK_IS_OPENED(s
->state
)) {
1619 pa_log_info("Updating rate for device %s, new rate is %d",u
->device_name
, rate
);
1620 u
->sink
->sample_spec
.rate
= rate
;
/* Carry out a rewind previously requested on the sink: clamp the
 * requested byte count to what the hardware buffer can actually give
 * back (minus a DMA safety margin), ask ALSA to rewind, and report
 * the achieved amount back to the sink core. Returns 0 on success,
 * negative if ALSA recovery failed. */
1627 static int process_rewind(struct userdata
*u
) {
1628 snd_pcm_sframes_t unused
;
1629 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1632 /* Figure out how much we shall rewind and reset the counter */
1633 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1635 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1637 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1638 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1642 unused_nbytes
= (size_t) unused
* u
->frame_size
;
1644 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1645 unused_nbytes
+= u
->rewind_safeguard
;
1647 if (u
->hwbuf_size
> unused_nbytes
)
1648 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1652 if (rewind_nbytes
> limit_nbytes
)
1653 rewind_nbytes
= limit_nbytes
;
1655 if (rewind_nbytes
> 0) {
1656 snd_pcm_sframes_t in_frames
, out_frames
;
1658 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1660 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1661 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1662 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1663 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1664 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1669 pa_log_debug("after: %lu", (unsigned long) out_frames
);
/* ALSA may rewind fewer frames than asked; use the actual count. */
1671 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1673 if (rewind_nbytes
<= 0)
1674 pa_log_info("Tried rewind, but was apparently not possible.");
1676 u
->write_count
-= rewind_nbytes
;
1677 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1678 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1680 u
->after_rewind
= TRUE
;
1684 pa_log_debug("Mhmm, actually there is nothing to rewind.");
/* Still notify the core so the rewind request gets cleared. */
1686 pa_sink_process_rewind(u
->sink
, 0);
/* IO thread main loop: renders audio into the ALSA buffer (mmap or
 * unix write), handles rewinds, computes the next wakeup time in
 * timer-scheduling mode, applies deferred volume changes, and
 * processes poll events/recovery from the PCM descriptors. Runs until
 * pa_rtpoll_run() signals shutdown or an error forces module unload. */
1690 static void thread_func(void *userdata
) {
1691 struct userdata
*u
= userdata
;
1692 unsigned short revents
= 0;
1696 pa_log_debug("Thread starting up");
1698 if (u
->core
->realtime_scheduling
)
1699 pa_make_realtime(u
->core
->realtime_priority
);
1701 pa_thread_mq_install(&u
->thread_mq
);
1705 pa_usec_t rtpoll_sleep
= 0;
1708 pa_log_debug("Loop");
1711 /* Render some data and write it to the dsp */
1712 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1714 pa_usec_t sleep_usec
= 0;
1715 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
1717 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1718 if (process_rewind(u
) < 0)
/* Choose the write path according to the negotiated access mode. */
1722 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1724 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1729 /* pa_log_debug("work_done = %i", work_done); */
1734 pa_log_info("Starting playback.");
1735 snd_pcm_start(u
->pcm_handle
);
1737 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1745 if (u
->use_tsched
) {
1748 if (u
->since_start
<= u
->hwbuf_size
) {
1750 /* USB devices on ALSA seem to hit a buffer
1751 * underrun during the first iterations much
1752 * quicker then we calculate here, probably due to
1753 * the transport latency. To accommodate for that
1754 * we artificially decrease the sleep time until
1755 * we have filled the buffer at least once
1758 if (pa_log_ratelimit(PA_LOG_DEBUG
))
1759 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1763 /* OK, the playback buffer is now full, let's
1764 * calculate when to wake up next */
1766 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec
/ PA_USEC_PER_MSEC
);
1769 /* Convert from the sound card time domain to the
1770 * system time domain */
1771 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1774 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec
/ PA_USEC_PER_MSEC
);
1777 /* We don't trust the conversion, so we wake up whatever comes first */
1778 rtpoll_sleep
= PA_MIN(sleep_usec
, cusec
);
1781 u
->after_rewind
= FALSE
;
/* Deferred volume: wake up early enough to apply pending changes. */
1785 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1786 pa_usec_t volume_sleep
;
1787 pa_sink_volume_change_apply(u
->sink
, &volume_sleep
);
1788 if (volume_sleep
> 0) {
1789 if (rtpoll_sleep
> 0)
1790 rtpoll_sleep
= PA_MIN(volume_sleep
, rtpoll_sleep
);
1792 rtpoll_sleep
= volume_sleep
;
1796 if (rtpoll_sleep
> 0)
1797 pa_rtpoll_set_timer_relative(u
->rtpoll
, rtpoll_sleep
);
1799 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1801 /* Hmm, nothing to do. Let's sleep */
1802 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1805 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
)
1806 pa_sink_volume_change_apply(u
->sink
, NULL
);
1811 /* Tell ALSA about this and process its response */
1812 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1813 struct pollfd
*pollfd
;
1817 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1819 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1820 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
/* Anything besides POLLOUT means trouble (XRUN/suspend); recover. */
1824 if (revents
& ~POLLOUT
) {
1825 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1831 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit(PA_LOG_DEBUG
))
1832 pa_log_debug("Wakeup from ALSA!");
1839 /* If this was no regular exit from the loop we have to continue
1840 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1841 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1842 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1845 pa_log_debug("Thread shutting down");
/* Pick a name for the new sink: an explicit "sink_name"/"name"
 * module argument wins (and makes name registration strict);
 * otherwise synthesize "alsa_output.<device>[.<mapping>]" from the
 * device id/name and optional mapping. */
1848 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1854 pa_assert(device_name
);
1856 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1857 pa_sink_new_data_set_name(data
, n
);
/* User-chosen name: fail instead of mangling it on collision. */
1858 data
->namereg_fail
= TRUE
;
1862 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1863 data
->namereg_fail
= TRUE
;
1865 n
= device_id
? device_id
: device_name
;
1866 data
->namereg_fail
= FALSE
;
1870 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1872 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1874 pa_sink_new_data_set_name(data
, t
);
/* Locate a usable mixer for the PCM: open the mixer device, then
 * either synthesize a single path from an explicit "control" element
 * or take the mapping's probed output path set. On any failure,
 * tear down whatever was opened so the sink falls back to software
 * volume. */
1878 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
/* Nothing to look for without a mapping or an explicit element. */
1881 if (!mapping
&& !element
)
1884 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
, &hctl
))) {
1885 pa_log_info("Failed to find a working mixer device.");
1891 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1894 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, hctl
, ignore_dB
) < 0)
1897 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1898 pa_alsa_path_dump(u
->mixer_path
);
1899 } else if (!(u
->mixer_path_set
= mapping
->output_path_set
))
/* Failure path: release any partially initialized mixer state. */
1906 if (u
->mixer_path
) {
1907 pa_alsa_path_free(u
->mixer_path
);
1908 u
->mixer_path
= NULL
;
1911 if (u
->mixer_handle
) {
1912 snd_mixer_close(u
->mixer_handle
);
1913 u
->mixer_handle
= NULL
;
/* Activate the mixer path chosen for the sink (the active port's
 * path, or the sole probed path), initialize volume/mute callbacks,
 * and register mixer-event callbacks -- IO-thread based for deferred
 * volume, mainloop fdlist based otherwise. Returns 0 on success,
 * negative if event monitoring could not be set up. */
1917 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1918 pa_bool_t need_mixer_callback
= FALSE
;
/* No mixer found earlier -- nothing to set up. */
1922 if (!u
->mixer_handle
)
1925 if (u
->sink
->active_port
) {
1926 pa_alsa_port_data
*data
;
1928 /* We have a list of supported paths, so let's activate the
1929 * one that has been chosen as active */
1931 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1932 u
->mixer_path
= data
->path
;
1934 pa_alsa_path_select(data
->path
, data
->setting
, u
->mixer_handle
, u
->sink
->muted
);
1938 if (!u
->mixer_path
&& u
->mixer_path_set
)
1939 u
->mixer_path
= pa_hashmap_first(u
->mixer_path_set
->paths
);
1941 if (u
->mixer_path
) {
1942 /* Hmm, we have only a single path, then let's activate it */
1944 pa_alsa_path_select(u
->mixer_path
, u
->mixer_path
->settings
, u
->mixer_handle
, u
->sink
->muted
);
1950 mixer_volume_init(u
);
1952 /* Will we need to register callbacks? */
1953 if (u
->mixer_path_set
&& u
->mixer_path_set
->paths
) {
1957 PA_HASHMAP_FOREACH(p
, u
->mixer_path_set
->paths
, state
) {
1958 if (p
->has_volume
|| p
->has_mute
)
1959 need_mixer_callback
= TRUE
;
1962 else if (u
->mixer_path
)
1963 need_mixer_callback
= u
->mixer_path
->has_volume
|| u
->mixer_path
->has_mute
;
1965 if (need_mixer_callback
) {
1966 int (*mixer_callback
)(snd_mixer_elem_t
*, unsigned int);
/* Deferred volume: mixer events are handled in the IO thread. */
1967 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1968 u
->mixer_pd
= pa_alsa_mixer_pdata_new();
1969 mixer_callback
= io_mixer_callback
;
1971 if (pa_alsa_set_mixer_rtpoll(u
->mixer_pd
, u
->mixer_handle
, u
->rtpoll
) < 0) {
1972 pa_log("Failed to initialize file descriptor monitoring");
1976 u
->mixer_fdl
= pa_alsa_fdlist_new();
1977 mixer_callback
= ctl_mixer_callback
;
1979 if (pa_alsa_fdlist_set_handle(u
->mixer_fdl
, u
->mixer_handle
, NULL
, u
->core
->mainloop
) < 0) {
1980 pa_log("Failed to initialize file descriptor monitoring");
1985 if (u
->mixer_path_set
)
1986 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1988 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
/* Create and publish a new ALSA sink: parse module arguments, open
 * the PCM device (via mapping, device_id auto-probe, or raw device
 * string), probe the mixer, create the pa_sink, configure buffering
 * and latency, spawn the IO thread, and apply initial volume/mute.
 * Returns the new sink, or NULL on failure (error paths not fully
 * visible in this extract). */
1994 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1996 struct userdata
*u
= NULL
;
1997 const char *dev_id
= NULL
, *key
, *mod_name
;
1999 uint32_t alternate_sample_rate
;
2001 uint32_t nfrags
, frag_size
, buffer_size
, tsched_size
, tsched_watermark
, rewind_safeguard
;
2002 snd_pcm_uframes_t period_frames
, buffer_frames
, tsched_frames
;
2004 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
, namereg_fail
= FALSE
, deferred_volume
= FALSE
, set_formats
= FALSE
, fixed_latency_range
= FALSE
;
2005 pa_sink_new_data data
;
2006 pa_alsa_profile_set
*profile_set
= NULL
;
/* --- Module argument parsing --- */
2012 ss
= m
->core
->default_sample_spec
;
2013 map
= m
->core
->default_channel_map
;
2014 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
2015 pa_log("Failed to parse sample specification and channel map");
2019 alternate_sample_rate
= m
->core
->alternate_sample_rate
;
2020 if (pa_modargs_get_alternate_sample_rate(ma
, &alternate_sample_rate
) < 0) {
2021 pa_log("Failed to parse alternate sample rate");
2025 frame_size
= pa_frame_size(&ss
);
2027 nfrags
= m
->core
->default_n_fragments
;
2028 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
2030 frag_size
= (uint32_t) frame_size
;
2031 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
2032 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
2034 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
2035 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
2036 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
2037 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
2038 pa_log("Failed to parse buffer metrics");
2042 buffer_size
= nfrags
* frag_size
;
2044 period_frames
= frag_size
/frame_size
;
2045 buffer_frames
= buffer_size
/frame_size
;
2046 tsched_frames
= tsched_size
/frame_size
;
2048 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
2049 pa_log("Failed to parse mmap argument.");
2053 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
2054 pa_log("Failed to parse tsched argument.");
2058 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
2059 pa_log("Failed to parse ignore_dB argument.");
2063 rewind_safeguard
= PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES
, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC
, &ss
));
2064 if (pa_modargs_get_value_u32(ma
, "rewind_safeguard", &rewind_safeguard
) < 0) {
2065 pa_log("Failed to parse rewind_safeguard argument");
2069 deferred_volume
= m
->core
->deferred_volume
;
2070 if (pa_modargs_get_value_boolean(ma
, "deferred_volume", &deferred_volume
) < 0) {
2071 pa_log("Failed to parse deferred_volume argument.");
2075 if (pa_modargs_get_value_boolean(ma
, "fixed_latency_range", &fixed_latency_range
) < 0) {
2076 pa_log("Failed to parse fixed_latency_range argument.");
2080 use_tsched
= pa_alsa_may_tsched(use_tsched
);
/* --- Userdata allocation and basic state --- */
2082 u
= pa_xnew0(struct userdata
, 1);
2085 u
->use_mmap
= use_mmap
;
2086 u
->use_tsched
= use_tsched
;
2087 u
->deferred_volume
= deferred_volume
;
2088 u
->fixed_latency_range
= fixed_latency_range
;
2090 u
->rewind_safeguard
= rewind_safeguard
;
2091 u
->rtpoll
= pa_rtpoll_new();
2092 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
2094 u
->smoother
= pa_smoother_new(
2095 SMOOTHER_ADJUST_USEC
,
2096 SMOOTHER_WINDOW_USEC
,
2102 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
2105 if (mapping
&& mapping
->ucm_context
.ucm
)
2106 u
->ucm_context
= &mapping
->ucm_context
;
2108 dev_id
= pa_modargs_get_value(
2110 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
2112 u
->paths_dir
= pa_xstrdup(pa_modargs_get_value(ma
, "paths_dir", NULL
));
2114 if (reserve_init(u
, dev_id
) < 0)
2117 if (reserve_monitor_init(u
, dev_id
) < 0)
/* --- Open the PCM device (three alternative paths) --- */
2125 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2126 pa_log("device_id= not set");
2130 if ((mod_name
= pa_proplist_gets(mapping
->proplist
, PA_ALSA_PROP_UCM_MODIFIER
))) {
2131 if (snd_use_case_set(u
->ucm_context
->ucm
->ucm_mgr
, "_enamod", mod_name
) < 0)
2132 pa_log("Failed to enable ucm modifier %s", mod_name
);
2134 pa_log_debug("Enabled ucm modifier %s", mod_name
);
2137 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
2141 SND_PCM_STREAM_PLAYBACK
,
2142 &period_frames
, &buffer_frames
, tsched_frames
,
2146 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2148 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
2151 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
2155 SND_PCM_STREAM_PLAYBACK
,
2156 &period_frames
, &buffer_frames
, tsched_frames
,
2157 &b
, &d
, profile_set
, &mapping
)))
2162 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
2163 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
2166 SND_PCM_STREAM_PLAYBACK
,
2167 &period_frames
, &buffer_frames
, tsched_frames
,
2172 pa_assert(u
->device_name
);
2173 pa_log_info("Successfully opened device %s.", u
->device_name
);
2175 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
2176 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
2181 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
/* Downgrade mmap/tsched if the device turned out not to support them. */
2183 if (use_mmap
&& !b
) {
2184 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2185 u
->use_mmap
= use_mmap
= FALSE
;
2188 if (use_tsched
&& (!b
|| !d
)) {
2189 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2190 u
->use_tsched
= use_tsched
= FALSE
;
2194 pa_log_info("Successfully enabled mmap() mode.");
2196 if (u
->use_tsched
) {
2197 pa_log_info("Successfully enabled timer-based scheduling mode.");
2199 if (u
->fixed_latency_range
)
2200 pa_log_info("Disabling latency range changes on underrun");
2203 if (is_iec958(u
) || is_hdmi(u
))
2206 u
->rates
= pa_alsa_get_supported_rates(u
->pcm_handle
);
2208 pa_log_error("Failed to find any supported sample rates.");
2212 /* ALSA might tweak the sample spec, so recalculate the frame size */
2213 frame_size
= pa_frame_size(&ss
);
2215 if (!u
->ucm_context
)
2216 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
/* --- pa_sink creation: name, proplist, ports --- */
2218 pa_sink_new_data_init(&data
);
2219 data
.driver
= driver
;
2222 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
2224 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2225 * variable instead of using &data.namereg_fail directly, because
2226 * data.namereg_fail is a bitfield and taking the address of a bitfield
2227 * variable is impossible. */
2228 namereg_fail
= data
.namereg_fail
;
2229 if (pa_modargs_get_value_boolean(ma
, "namereg_fail", &namereg_fail
) < 0) {
2230 pa_log("Failed to parse namereg_fail argument.");
2231 pa_sink_new_data_done(&data
);
2234 data
.namereg_fail
= namereg_fail
;
2236 pa_sink_new_data_set_sample_spec(&data
, &ss
);
2237 pa_sink_new_data_set_channel_map(&data
, &map
);
2238 pa_sink_new_data_set_alternate_sample_rate(&data
, alternate_sample_rate
);
2240 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
2241 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
2242 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (buffer_frames
* frame_size
));
2243 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
2244 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
2247 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
2248 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
2250 while ((key
= pa_proplist_iterate(mapping
->proplist
, &state
)))
2251 pa_proplist_sets(data
.proplist
, key
, pa_proplist_gets(mapping
->proplist
, key
));
2254 pa_alsa_init_description(data
.proplist
);
2256 if (u
->control_device
)
2257 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
2259 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
2260 pa_log("Invalid properties");
2261 pa_sink_new_data_done(&data
);
2266 pa_alsa_ucm_add_ports(&data
.ports
, data
.proplist
, u
->ucm_context
, TRUE
, card
);
2267 else if (u
->mixer_path_set
)
2268 pa_alsa_add_ports(&data
, u
->mixer_path_set
, card
);
2270 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
| PA_SINK_LATENCY
| (u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0) |
2271 (set_formats
? PA_SINK_SET_FORMATS
: 0));
2272 pa_sink_new_data_done(&data
);
2275 pa_log("Failed to create sink object");
2279 if (pa_modargs_get_value_u32(ma
, "deferred_volume_safety_margin",
2280 &u
->sink
->thread_info
.volume_change_safety_margin
) < 0) {
2281 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2285 if (pa_modargs_get_value_s32(ma
, "deferred_volume_extra_delay",
2286 &u
->sink
->thread_info
.volume_change_extra_delay
) < 0) {
2287 pa_log("Failed to parse deferred_volume_extra_delay parameter");
/* --- Hook up sink callbacks and buffer metrics --- */
2291 u
->sink
->parent
.process_msg
= sink_process_msg
;
2293 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
2294 u
->sink
->set_state
= sink_set_state_cb
;
2296 u
->sink
->set_port
= sink_set_port_ucm_cb
;
2298 u
->sink
->set_port
= sink_set_port_cb
;
2299 if (u
->sink
->alternate_sample_rate
)
2300 u
->sink
->update_rate
= sink_update_rate_cb
;
2301 u
->sink
->userdata
= u
;
2303 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
2304 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
2306 u
->frame_size
= frame_size
;
2307 u
->fragment_size
= frag_size
= (size_t) (period_frames
* frame_size
);
2308 u
->hwbuf_size
= buffer_size
= (size_t) (buffer_frames
* frame_size
);
2309 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
2311 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2312 (double) u
->hwbuf_size
/ (double) u
->fragment_size
,
2313 (long unsigned) u
->fragment_size
,
2314 (double) pa_bytes_to_usec(u
->fragment_size
, &ss
) / PA_USEC_PER_MSEC
,
2315 (long unsigned) u
->hwbuf_size
,
2316 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
2318 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
2319 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
2320 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
2322 pa_log_info("Disabling rewind for device %s", u
->device_name
);
2323 pa_sink_set_max_rewind(u
->sink
, 0);
2326 if (u
->use_tsched
) {
2327 u
->tsched_watermark_ref
= tsched_watermark
;
2328 reset_watermark(u
, u
->tsched_watermark_ref
, &ss
, FALSE
);
2330 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
2334 if (update_sw_params(u
) < 0)
2337 if (u
->ucm_context
) {
2338 if (u
->sink
->active_port
&& pa_alsa_ucm_set_port(u
->ucm_context
, u
->sink
->active_port
, TRUE
) < 0)
2340 } else if (setup_mixer(u
, ignore_dB
) < 0)
2343 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
/* --- Spawn IO thread and sync initial volume/mute --- */
2345 if (!(u
->thread
= pa_thread_new("alsa-sink", thread_func
, u
))) {
2346 pa_log("Failed to create thread.");
2350 /* Get initial mixer settings */
2351 if (data
.volume_is_set
) {
2352 if (u
->sink
->set_volume
)
2353 u
->sink
->set_volume(u
->sink
);
2355 if (u
->sink
->get_volume
)
2356 u
->sink
->get_volume(u
->sink
);
2359 if (data
.muted_is_set
) {
2360 if (u
->sink
->set_mute
)
2361 u
->sink
->set_mute(u
->sink
);
2363 if (u
->sink
->get_mute
)
2364 u
->sink
->get_mute(u
->sink
);
2367 if ((data
.volume_is_set
|| data
.muted_is_set
) && u
->sink
->write_volume
)
2368 u
->sink
->write_volume(u
->sink
);
2371 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2372 pa_format_info
*format
;
2374 /* To start with, we only support PCM formats. Other formats may be added
2375 * with pa_sink_set_formats().*/
2376 format
= pa_format_info_new();
2377 format
->encoding
= PA_ENCODING_PCM
;
2378 u
->formats
= pa_idxset_new(NULL
, NULL
);
2379 pa_idxset_put(u
->formats
, format
, NULL
);
2381 u
->sink
->get_formats
= sink_get_formats
;
2382 u
->sink
->set_formats
= sink_set_formats
;
2385 pa_sink_put(u
->sink
);
2388 pa_alsa_profile_set_free(profile_set
);
/* NOTE(review): the second profile_set free below belongs to the
 * failure path of this function (source lines between are missing
 * from this extract). */
2398 pa_alsa_profile_set_free(profile_set
);
2403 static void userdata_free(struct userdata
*u
) {
2407 pa_sink_unlink(u
->sink
);
2410 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
2411 pa_thread_free(u
->thread
);
2414 pa_thread_mq_done(&u
->thread_mq
);
2417 pa_sink_unref(u
->sink
);
2419 if (u
->memchunk
.memblock
)
2420 pa_memblock_unref(u
->memchunk
.memblock
);
2423 pa_alsa_mixer_pdata_free(u
->mixer_pd
);
2425 if (u
->alsa_rtpoll_item
)
2426 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
2429 pa_rtpoll_free(u
->rtpoll
);
2431 if (u
->pcm_handle
) {
2432 snd_pcm_drop(u
->pcm_handle
);
2433 snd_pcm_close(u
->pcm_handle
);
2437 pa_alsa_fdlist_free(u
->mixer_fdl
);
2439 if (u
->mixer_path
&& !u
->mixer_path_set
)
2440 pa_alsa_path_free(u
->mixer_path
);
2442 if (u
->mixer_handle
)
2443 snd_mixer_close(u
->mixer_handle
);
2446 pa_smoother_free(u
->smoother
);
2449 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
2457 pa_xfree(u
->device_name
);
2458 pa_xfree(u
->control_device
);
2459 pa_xfree(u
->paths_dir
);
2463 void pa_alsa_sink_free(pa_sink
*s
) {
2466 pa_sink_assert_ref(s
);
2467 pa_assert_se(u
= s
->userdata
);