2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/module.h>
44 #include <pulsecore/memchunk.h>
45 #include <pulsecore/sink.h>
46 #include <pulsecore/modargs.h>
47 #include <pulsecore/core-rtclock.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/sample-util.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/macro.h>
52 #include <pulsecore/thread.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
/* Enable to get verbose per-iteration timing logs from the IO thread. */
/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

/* Buffer/watermark tuning for timer-based scheduling (tsched). All values
 * are in microseconds and converted to bytes per the sink's sample spec at
 * runtime. */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a drop out recheck if things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level ever below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wakeup at least this long before the buffer runs empty */

/* Tuning for the latency time-smoother. */
#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s -- smoother windows size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)                       /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U)                      /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)                       /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
98 pa_thread_mq thread_mq
;
101 snd_pcm_t
*pcm_handle
;
103 pa_alsa_fdlist
*mixer_fdl
;
104 pa_alsa_mixer_pdata
*mixer_pd
;
105 snd_mixer_t
*mixer_handle
;
106 pa_alsa_path_set
*mixer_path_set
;
107 pa_alsa_path
*mixer_path
;
109 pa_cvolume hardware_volume
;
118 tsched_watermark_ref
,
124 watermark_inc_threshold
,
125 watermark_dec_threshold
,
128 pa_usec_t watermark_dec_not_before
;
129 pa_usec_t min_latency_ref
;
131 pa_memchunk memchunk
;
133 char *device_name
; /* name of the PCM device */
134 char *control_device
; /* name of the control device */
136 pa_bool_t use_mmap
:1, use_tsched
:1, deferred_volume
:1;
138 pa_bool_t first
, after_rewind
;
140 pa_rtpoll_item
*alsa_rtpoll_item
;
142 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
144 pa_smoother
*smoother
;
145 uint64_t write_count
;
146 uint64_t since_start
;
147 pa_usec_t smoother_interval
;
148 pa_usec_t last_smoother_update
;
152 pa_reserve_wrapper
*reserve
;
153 pa_hook_slot
*reserve_slot
;
154 pa_reserve_monitor_wrapper
*monitor
;
155 pa_hook_slot
*monitor_slot
;
158 static void userdata_free(struct userdata
*u
);
160 /* FIXME: Is there a better way to do this than device names? */
161 static pa_bool_t
is_iec958(struct userdata
*u
) {
162 return (strncmp("iec958", u
->device_name
, 6) == 0);
165 static pa_bool_t
is_hdmi(struct userdata
*u
) {
166 return (strncmp("hdmi", u
->device_name
, 4) == 0);
169 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
173 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
174 return PA_HOOK_CANCEL
;
179 static void reserve_done(struct userdata
*u
) {
182 if (u
->reserve_slot
) {
183 pa_hook_slot_free(u
->reserve_slot
);
184 u
->reserve_slot
= NULL
;
188 pa_reserve_wrapper_unref(u
->reserve
);
193 static void reserve_update(struct userdata
*u
) {
194 const char *description
;
197 if (!u
->sink
|| !u
->reserve
)
200 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
201 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
204 static int reserve_init(struct userdata
*u
, const char *dname
) {
213 if (pa_in_system_mode())
216 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
219 /* We are resuming, try to lock the device */
220 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
228 pa_assert(!u
->reserve_slot
);
229 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
234 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
240 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
242 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
246 static void monitor_done(struct userdata
*u
) {
249 if (u
->monitor_slot
) {
250 pa_hook_slot_free(u
->monitor_slot
);
251 u
->monitor_slot
= NULL
;
255 pa_reserve_monitor_wrapper_unref(u
->monitor
);
260 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
266 if (pa_in_system_mode())
269 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
272 /* We are resuming, try to lock the device */
273 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
279 pa_assert(!u
->monitor_slot
);
280 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
285 static void fix_min_sleep_wakeup(struct userdata
*u
) {
286 size_t max_use
, max_use_2
;
289 pa_assert(u
->use_tsched
);
291 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
292 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
294 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
295 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
297 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
298 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
301 static void fix_tsched_watermark(struct userdata
*u
) {
304 pa_assert(u
->use_tsched
);
306 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
308 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
309 u
->tsched_watermark
= max_use
- u
->min_sleep
;
311 if (u
->tsched_watermark
< u
->min_wakeup
)
312 u
->tsched_watermark
= u
->min_wakeup
;
315 static void increase_watermark(struct userdata
*u
) {
316 size_t old_watermark
;
317 pa_usec_t old_min_latency
, new_min_latency
;
320 pa_assert(u
->use_tsched
);
322 /* First, just try to increase the watermark */
323 old_watermark
= u
->tsched_watermark
;
324 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
325 fix_tsched_watermark(u
);
327 if (old_watermark
!= u
->tsched_watermark
) {
328 pa_log_info("Increasing wakeup watermark to %0.2f ms",
329 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
333 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
334 old_min_latency
= u
->sink
->thread_info
.min_latency
;
335 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
336 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
338 if (old_min_latency
!= new_min_latency
) {
339 pa_log_info("Increasing minimal latency to %0.2f ms",
340 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
342 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
345 /* When we reach this we're officialy fucked! */
348 static void decrease_watermark(struct userdata
*u
) {
349 size_t old_watermark
;
353 pa_assert(u
->use_tsched
);
355 now
= pa_rtclock_now();
357 if (u
->watermark_dec_not_before
<= 0)
360 if (u
->watermark_dec_not_before
> now
)
363 old_watermark
= u
->tsched_watermark
;
365 if (u
->tsched_watermark
< u
->watermark_dec_step
)
366 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
368 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
370 fix_tsched_watermark(u
);
372 if (old_watermark
!= u
->tsched_watermark
)
373 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
374 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
376 /* We don't change the latency range*/
379 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
382 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
385 pa_assert(sleep_usec
);
386 pa_assert(process_usec
);
389 pa_assert(u
->use_tsched
);
391 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
393 if (usec
== (pa_usec_t
) -1)
394 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
396 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
401 *sleep_usec
= usec
- wm
;
405 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
406 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
407 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
408 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
412 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
417 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
419 pa_assert(err
!= -EAGAIN
);
422 pa_log_debug("%s: Buffer underrun!", call
);
424 if (err
== -ESTRPIPE
)
425 pa_log_debug("%s: System suspended!", call
);
427 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
428 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
437 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
439 pa_bool_t underrun
= FALSE
;
441 /* We use <= instead of < for this check here because an underrun
442 * only happens after the last sample was processed, not already when
443 * it is removed from the buffer. This is particularly important
444 * when block transfer is used. */
446 if (n_bytes
<= u
->hwbuf_size
)
447 left_to_play
= u
->hwbuf_size
- n_bytes
;
450 /* We got a dropout. What a mess! */
458 if (!u
->first
&& !u
->after_rewind
)
459 if (pa_log_ratelimit(PA_LOG_INFO
))
460 pa_log_info("Underrun!");
464 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
465 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
466 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
467 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
471 pa_bool_t reset_not_before
= TRUE
;
473 if (!u
->first
&& !u
->after_rewind
) {
474 if (underrun
|| left_to_play
< u
->watermark_inc_threshold
)
475 increase_watermark(u
);
476 else if (left_to_play
> u
->watermark_dec_threshold
) {
477 reset_not_before
= FALSE
;
479 /* We decrease the watermark only if have actually
480 * been woken up by a timeout. If something else woke
481 * us up it's too easy to fulfill the deadlines... */
484 decrease_watermark(u
);
488 if (reset_not_before
)
489 u
->watermark_dec_not_before
= 0;
495 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
496 pa_bool_t work_done
= FALSE
;
497 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
502 pa_sink_assert_ref(u
->sink
);
505 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
511 pa_bool_t after_avail
= TRUE
;
513 /* First we determine how many samples are missing to fill the
514 * buffer up to 100% */
516 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
518 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
524 n_bytes
= (size_t) n
* u
->frame_size
;
527 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
530 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
535 /* We won't fill up the playback buffer before at least
536 * half the sleep time is over because otherwise we might
537 * ask for more data from the clients then they expect. We
538 * need to guarantee that clients only have to keep around
539 * a single hw buffer length. */
542 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
544 pa_log_debug("Not filling up, because too early.");
549 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
553 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
554 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
555 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
556 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
562 pa_log_debug("Not filling up, because not necessary.");
570 pa_log_debug("Not filling up, because already too many iterations.");
576 n_bytes
-= u
->hwbuf_unused
;
580 pa_log_debug("Filling up");
587 const snd_pcm_channel_area_t
*areas
;
588 snd_pcm_uframes_t offset
, frames
;
589 snd_pcm_sframes_t sframes
;
591 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
592 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
594 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
596 if (!after_avail
&& err
== -EAGAIN
)
599 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
605 /* Make sure that if these memblocks need to be copied they will fit into one slot */
606 if (frames
> pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
)
607 frames
= pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
;
609 if (!after_avail
&& frames
== 0)
612 pa_assert(frames
> 0);
615 /* Check these are multiples of 8 bit */
616 pa_assert((areas
[0].first
& 7) == 0);
617 pa_assert((areas
[0].step
& 7)== 0);
619 /* We assume a single interleaved memory buffer */
620 pa_assert((areas
[0].first
>> 3) == 0);
621 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
623 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
625 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
626 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
629 pa_sink_render_into_full(u
->sink
, &chunk
);
630 pa_memblock_unref_fixed(chunk
.memblock
);
632 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
634 if (!after_avail
&& (int) sframes
== -EAGAIN
)
637 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
645 u
->write_count
+= frames
* u
->frame_size
;
646 u
->since_start
+= frames
* u
->frame_size
;
649 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
652 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
655 n_bytes
-= (size_t) frames
* u
->frame_size
;
660 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
661 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
663 if (*sleep_usec
> process_usec
)
664 *sleep_usec
-= process_usec
;
670 return work_done
? 1 : 0;
673 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
674 pa_bool_t work_done
= FALSE
;
675 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
680 pa_sink_assert_ref(u
->sink
);
683 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
689 pa_bool_t after_avail
= TRUE
;
691 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
693 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
699 n_bytes
= (size_t) n
* u
->frame_size
;
700 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
705 /* We won't fill up the playback buffer before at least
706 * half the sleep time is over because otherwise we might
707 * ask for more data from the clients then they expect. We
708 * need to guarantee that clients only have to keep around
709 * a single hw buffer length. */
712 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
715 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
719 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
720 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
721 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
722 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
732 pa_log_debug("Not filling up, because already too many iterations.");
738 n_bytes
-= u
->hwbuf_unused
;
742 snd_pcm_sframes_t frames
;
745 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
747 if (u
->memchunk
.length
<= 0)
748 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
750 pa_assert(u
->memchunk
.length
> 0);
752 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
754 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
755 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
757 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
758 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
759 pa_memblock_release(u
->memchunk
.memblock
);
761 if (PA_UNLIKELY(frames
< 0)) {
763 if (!after_avail
&& (int) frames
== -EAGAIN
)
766 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
772 if (!after_avail
&& frames
== 0)
775 pa_assert(frames
> 0);
778 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
779 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
781 if (u
->memchunk
.length
<= 0) {
782 pa_memblock_unref(u
->memchunk
.memblock
);
783 pa_memchunk_reset(&u
->memchunk
);
788 u
->write_count
+= frames
* u
->frame_size
;
789 u
->since_start
+= frames
* u
->frame_size
;
791 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
793 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
796 n_bytes
-= (size_t) frames
* u
->frame_size
;
801 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
802 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
804 if (*sleep_usec
> process_usec
)
805 *sleep_usec
-= process_usec
;
811 return work_done
? 1 : 0;
814 static void update_smoother(struct userdata
*u
) {
815 snd_pcm_sframes_t delay
= 0;
818 pa_usec_t now1
= 0, now2
;
819 snd_pcm_status_t
*status
;
821 snd_pcm_status_alloca(&status
);
824 pa_assert(u
->pcm_handle
);
826 /* Let's update the time smoother */
828 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
, FALSE
)) < 0)) {
829 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
833 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
834 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
836 snd_htimestamp_t htstamp
= { 0, 0 };
837 snd_pcm_status_get_htstamp(status
, &htstamp
);
838 now1
= pa_timespec_load(&htstamp
);
841 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
843 now1
= pa_rtclock_now();
845 /* check if the time since the last update is bigger than the interval */
846 if (u
->last_smoother_update
> 0)
847 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
850 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
852 if (PA_UNLIKELY(position
< 0))
855 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
857 pa_smoother_put(u
->smoother
, now1
, now2
);
859 u
->last_smoother_update
= now1
;
860 /* exponentially increase the update interval up to the MAX limit */
861 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
864 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
867 pa_usec_t now1
, now2
;
871 now1
= pa_rtclock_now();
872 now2
= pa_smoother_get(u
->smoother
, now1
);
874 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
876 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
878 if (u
->memchunk
.memblock
)
879 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
884 static int build_pollfd(struct userdata
*u
) {
886 pa_assert(u
->pcm_handle
);
888 if (u
->alsa_rtpoll_item
)
889 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
891 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
897 /* Called from IO context */
898 static int suspend(struct userdata
*u
) {
900 pa_assert(u
->pcm_handle
);
902 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
904 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
905 * take awfully long with our long buffer sizes today. */
906 snd_pcm_close(u
->pcm_handle
);
907 u
->pcm_handle
= NULL
;
909 if (u
->alsa_rtpoll_item
) {
910 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
911 u
->alsa_rtpoll_item
= NULL
;
914 /* We reset max_rewind/max_request here to make sure that while we
915 * are suspended the old max_request/max_rewind values set before
916 * the suspend can influence the per-stream buffer of newly
917 * created streams, without their requirements having any
918 * influence on them. */
919 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
920 pa_sink_set_max_request_within_thread(u
->sink
, 0);
922 pa_log_info("Device suspended...");
927 /* Called from IO context */
928 static int update_sw_params(struct userdata
*u
) {
929 snd_pcm_uframes_t avail_min
;
934 /* Use the full buffer if no one asked us for anything specific */
940 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
943 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
945 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
947 /* We need at least one sample in our buffer */
949 if (PA_UNLIKELY(b
< u
->frame_size
))
952 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
955 fix_min_sleep_wakeup(u
);
956 fix_tsched_watermark(u
);
959 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
961 /* We need at last one frame in the used part of the buffer */
962 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
965 pa_usec_t sleep_usec
, process_usec
;
967 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
968 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
971 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
973 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
, !u
->use_tsched
)) < 0) {
974 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
978 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
979 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
980 pa_sink_set_max_rewind_within_thread(u
->sink
, u
->hwbuf_size
);
982 pa_log_info("Disabling rewind_within_thread for device %s", u
->device_name
);
983 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
989 /* Called from IO Context on unsuspend or from main thread when creating sink */
990 static void reset_watermark(struct userdata
*u
, size_t tsched_watermark
, pa_sample_spec
*ss
,
993 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, ss
),
994 &u
->sink
->sample_spec
);
996 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->sink
->sample_spec
);
997 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->sink
->sample_spec
);
999 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1000 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1002 fix_min_sleep_wakeup(u
);
1003 fix_tsched_watermark(u
);
1006 pa_sink_set_latency_range_within_thread(u
->sink
,
1008 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
1010 pa_sink_set_latency_range(u
->sink
,
1012 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
1014 /* work-around assert in pa_sink_set_latency_within_thead,
1015 keep track of min_latency and reuse it when
1016 this routine is called from IO context */
1017 u
->min_latency_ref
= u
->sink
->thread_info
.min_latency
;
1020 pa_log_info("Time scheduling watermark is %0.2fms",
1021 (double) pa_bytes_to_usec(u
->tsched_watermark
, ss
) / PA_USEC_PER_MSEC
);
1024 /* Called from IO context */
1025 static int unsuspend(struct userdata
*u
) {
1029 snd_pcm_uframes_t period_size
, buffer_size
;
1030 char *device_name
= NULL
;
1033 pa_assert(!u
->pcm_handle
);
1035 pa_log_info("Trying resume...");
1037 if ((is_iec958(u
) || is_hdmi(u
)) && pa_sink_is_passthrough(u
->sink
)) {
1038 /* Need to open device in NONAUDIO mode */
1039 int len
= strlen(u
->device_name
) + 8;
1041 device_name
= pa_xmalloc(len
);
1042 pa_snprintf(device_name
, len
, "%s,AES0=6", u
->device_name
);
1045 if ((err
= snd_pcm_open(&u
->pcm_handle
, device_name
? device_name
: u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
1047 SND_PCM_NO_AUTO_RESAMPLE
|
1048 SND_PCM_NO_AUTO_CHANNELS
|
1049 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
1050 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
1054 ss
= u
->sink
->sample_spec
;
1055 period_size
= u
->fragment_size
/ u
->frame_size
;
1056 buffer_size
= u
->hwbuf_size
/ u
->frame_size
;
1060 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &period_size
, &buffer_size
, 0, &b
, &d
, TRUE
)) < 0) {
1061 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
1065 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
1066 pa_log_warn("Resume failed, couldn't get original access mode.");
1070 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
1071 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1075 if (period_size
*u
->frame_size
!= u
->fragment_size
||
1076 buffer_size
*u
->frame_size
!= u
->hwbuf_size
) {
1077 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1078 (unsigned long) u
->hwbuf_size
, (unsigned long) u
->fragment_size
,
1079 (unsigned long) (buffer_size
*u
->frame_size
), (unsigned long) (period_size
*u
->frame_size
));
1083 if (update_sw_params(u
) < 0)
1086 if (build_pollfd(u
) < 0)
1090 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
1091 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1092 u
->last_smoother_update
= 0;
1097 /* reset the watermark to the value defined when sink was created */
1099 reset_watermark(u
, u
->tsched_watermark_ref
, &u
->sink
->sample_spec
, TRUE
);
1101 pa_log_info("Resumed successfully...");
1103 pa_xfree(device_name
);
1107 if (u
->pcm_handle
) {
1108 snd_pcm_close(u
->pcm_handle
);
1109 u
->pcm_handle
= NULL
;
1112 pa_xfree(device_name
);
1117 /* Called from IO context */
1118 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1119 struct userdata
*u
= PA_SINK(o
)->userdata
;
1123 case PA_SINK_MESSAGE_FINISH_MOVE
:
1124 case PA_SINK_MESSAGE_ADD_INPUT
: {
1125 pa_sink_input
*i
= PA_SINK_INPUT(data
);
1128 if (PA_LIKELY(!pa_sink_input_is_passthrough(i
)))
1131 u
->old_rate
= u
->sink
->sample_spec
.rate
;
1133 /* Passthrough format, see if we need to reset sink sample rate */
1134 if (u
->sink
->sample_spec
.rate
== i
->thread_info
.sample_spec
.rate
)
1138 if ((r
= suspend(u
)) < 0)
1141 u
->sink
->sample_spec
.rate
= i
->thread_info
.sample_spec
.rate
;
1143 if ((r
= unsuspend(u
)) < 0)
1149 case PA_SINK_MESSAGE_START_MOVE
:
1150 case PA_SINK_MESSAGE_REMOVE_INPUT
: {
1151 pa_sink_input
*i
= PA_SINK_INPUT(data
);
1154 if (PA_LIKELY(!pa_sink_input_is_passthrough(i
)))
1157 /* Passthrough format, see if we need to reset sink sample rate */
1158 if (u
->sink
->sample_spec
.rate
== u
->old_rate
)
1162 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
) && ((r
= suspend(u
)) < 0))
1165 u
->sink
->sample_spec
.rate
= u
->old_rate
;
1167 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
) && ((r
= unsuspend(u
)) < 0))
1173 case PA_SINK_MESSAGE_GET_LATENCY
: {
1177 r
= sink_get_latency(u
);
1179 *((pa_usec_t
*) data
) = r
;
1184 case PA_SINK_MESSAGE_SET_STATE
:
1186 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1188 case PA_SINK_SUSPENDED
: {
1191 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1193 if ((r
= suspend(u
)) < 0)
1200 case PA_SINK_RUNNING
: {
1203 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1204 if (build_pollfd(u
) < 0)
1208 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1209 if ((r
= unsuspend(u
)) < 0)
1216 case PA_SINK_UNLINKED
:
1218 case PA_SINK_INVALID_STATE
:
1225 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1228 /* Called from main context */
1229 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1230 pa_sink_state_t old_state
;
1233 pa_sink_assert_ref(s
);
1234 pa_assert_se(u
= s
->userdata
);
1236 old_state
= pa_sink_get_state(u
->sink
);
1238 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1240 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1241 if (reserve_init(u
, u
->device_name
) < 0)
1242 return -PA_ERR_BUSY
;
1247 static int ctl_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1248 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1251 pa_assert(u
->mixer_handle
);
1253 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1256 if (!PA_SINK_IS_LINKED(u
->sink
->state
))
1259 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
)
1262 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1263 pa_sink_get_volume(u
->sink
, TRUE
);
1264 pa_sink_get_mute(u
->sink
, TRUE
);
1270 static int io_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1271 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1274 pa_assert(u
->mixer_handle
);
1276 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1279 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
)
1282 if (mask
& SND_CTL_EVENT_MASK_VALUE
)
1283 pa_sink_update_volume_and_mute(u
->sink
);
1288 static void sink_get_volume_cb(pa_sink
*s
) {
1289 struct userdata
*u
= s
->userdata
;
1291 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1294 pa_assert(u
->mixer_path
);
1295 pa_assert(u
->mixer_handle
);
1297 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1300 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1301 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1303 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1305 if (u
->mixer_path
->has_dB
) {
1306 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1308 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &r
));
1311 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1314 s
->real_volume
= u
->hardware_volume
= r
;
1316 /* Hmm, so the hardware volume changed, let's reset our software volume */
1317 if (u
->mixer_path
->has_dB
)
1318 pa_sink_set_soft_volume(s
, NULL
);
1321 static void sink_set_volume_cb(pa_sink
*s
) {
1322 struct userdata
*u
= s
->userdata
;
1324 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1325 pa_bool_t deferred_volume
= !!(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1328 pa_assert(u
->mixer_path
);
1329 pa_assert(u
->mixer_handle
);
1331 /* Shift up by the base volume */
1332 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1334 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
, deferred_volume
, !deferred_volume
) < 0)
1337 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1338 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1340 u
->hardware_volume
= r
;
1342 if (u
->mixer_path
->has_dB
) {
1343 pa_cvolume new_soft_volume
;
1344 pa_bool_t accurate_enough
;
1345 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1347 /* Match exactly what the user requested by software */
1348 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1350 /* If the adjustment to do in software is only minimal we
1351 * can skip it. That saves us CPU at the expense of a bit of
1354 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1355 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1357 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &s
->real_volume
));
1358 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &s
->real_volume
));
1359 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &u
->hardware_volume
));
1360 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &u
->hardware_volume
));
1361 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1362 pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &new_soft_volume
),
1363 pa_yes_no(accurate_enough
));
1364 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &new_soft_volume
));
1366 if (!accurate_enough
)
1367 s
->soft_volume
= new_soft_volume
;
1370 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1372 /* We can't match exactly what the user requested, hence let's
1373 * at least tell the user about it */
1379 static void sink_write_volume_cb(pa_sink
*s
) {
1380 struct userdata
*u
= s
->userdata
;
1381 pa_cvolume hw_vol
= s
->thread_info
.current_hw_volume
;
1384 pa_assert(u
->mixer_path
);
1385 pa_assert(u
->mixer_handle
);
1386 pa_assert(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1388 /* Shift up by the base volume */
1389 pa_sw_cvolume_divide_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1391 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &hw_vol
, TRUE
, TRUE
) < 0)
1392 pa_log_error("Writing HW volume failed");
1395 pa_bool_t accurate_enough
;
1397 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1398 pa_sw_cvolume_multiply_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1400 pa_sw_cvolume_divide(&tmp_vol
, &hw_vol
, &s
->thread_info
.current_hw_volume
);
1402 (pa_cvolume_min(&tmp_vol
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1403 (pa_cvolume_max(&tmp_vol
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1405 if (!accurate_enough
) {
1407 char db
[2][PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1408 char pcnt
[2][PA_CVOLUME_SNPRINT_MAX
];
1411 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1412 pa_cvolume_snprint(vol
.pcnt
[0], sizeof(vol
.pcnt
[0]), &s
->thread_info
.current_hw_volume
),
1413 pa_cvolume_snprint(vol
.pcnt
[1], sizeof(vol
.pcnt
[1]), &hw_vol
));
1414 pa_log_debug(" in dB: %s (request) != %s",
1415 pa_sw_cvolume_snprint_dB(vol
.db
[0], sizeof(vol
.db
[0]), &s
->thread_info
.current_hw_volume
),
1416 pa_sw_cvolume_snprint_dB(vol
.db
[1], sizeof(vol
.db
[1]), &hw_vol
));
1421 static void sink_get_mute_cb(pa_sink
*s
) {
1422 struct userdata
*u
= s
->userdata
;
1426 pa_assert(u
->mixer_path
);
1427 pa_assert(u
->mixer_handle
);
1429 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1435 static void sink_set_mute_cb(pa_sink
*s
) {
1436 struct userdata
*u
= s
->userdata
;
1439 pa_assert(u
->mixer_path
);
1440 pa_assert(u
->mixer_handle
);
1442 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1445 static void mixer_volume_init(struct userdata
*u
) {
1448 if (!u
->mixer_path
->has_volume
) {
1449 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1450 pa_sink_set_get_volume_callback(u
->sink
, NULL
);
1451 pa_sink_set_set_volume_callback(u
->sink
, NULL
);
1453 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1455 pa_sink_set_get_volume_callback(u
->sink
, sink_get_volume_cb
);
1456 pa_sink_set_set_volume_callback(u
->sink
, sink_set_volume_cb
);
1458 if (u
->mixer_path
->has_dB
&& u
->deferred_volume
) {
1459 pa_sink_set_write_volume_callback(u
->sink
, sink_write_volume_cb
);
1460 pa_log_info("Successfully enabled synchronous volume.");
1462 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1464 if (u
->mixer_path
->has_dB
) {
1465 pa_sink_enable_decibel_volume(u
->sink
, TRUE
);
1466 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1468 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1469 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1471 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1473 pa_sink_enable_decibel_volume(u
->sink
, FALSE
);
1474 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1476 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1477 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1480 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1483 if (!u
->mixer_path
->has_mute
) {
1484 pa_sink_set_get_mute_callback(u
->sink
, NULL
);
1485 pa_sink_set_set_mute_callback(u
->sink
, NULL
);
1486 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1488 pa_sink_set_get_mute_callback(u
->sink
, sink_get_mute_cb
);
1489 pa_sink_set_set_mute_callback(u
->sink
, sink_set_mute_cb
);
1490 pa_log_info("Using hardware mute control.");
1494 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1495 struct userdata
*u
= s
->userdata
;
1496 pa_alsa_port_data
*data
;
1500 pa_assert(u
->mixer_handle
);
1502 data
= PA_DEVICE_PORT_DATA(p
);
1504 pa_assert_se(u
->mixer_path
= data
->path
);
1505 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1507 mixer_volume_init(u
);
1510 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1520 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1521 struct userdata
*u
= s
->userdata
;
1524 pa_assert(u
->use_tsched
); /* only when timer scheduling is used
1525 * we can dynamically adjust the
1531 before
= u
->hwbuf_unused
;
1532 update_sw_params(u
);
1534 /* Let's check whether we now use only a smaller part of the
1535 buffer then before. If so, we need to make sure that subsequent
1536 rewinds are relative to the new maximum fill level and not to the
1537 current fill level. Thus, let's do a full rewind once, to clear
1540 if (u
->hwbuf_unused
> before
) {
1541 pa_log_debug("Requesting rewind due to latency change.");
1542 pa_sink_request_rewind(s
, (size_t) -1);
1546 static pa_idxset
* sink_get_formats(pa_sink
*s
) {
1547 struct userdata
*u
= s
->userdata
;
1548 pa_idxset
*ret
= pa_idxset_new(NULL
, NULL
);
1554 PA_IDXSET_FOREACH(f
, u
->formats
, idx
) {
1555 pa_idxset_put(ret
, pa_format_info_copy(f
), NULL
);
1561 static pa_bool_t
sink_set_formats(pa_sink
*s
, pa_idxset
*formats
) {
1562 struct userdata
*u
= s
->userdata
;
1568 /* FIXME: also validate sample rates against what the device supports */
1569 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1570 if (is_iec958(u
) && f
->encoding
== PA_ENCODING_EAC3_IEC61937
)
1571 /* EAC3 cannot be sent over over S/PDIF */
1575 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
1576 u
->formats
= pa_idxset_new(NULL
, NULL
);
1578 /* Note: the logic below won't apply if we're using software encoding.
1579 * This is fine for now since we don't support that via the passthrough
1580 * framework, but this must be changed if we do. */
1582 /* First insert non-PCM formats since we prefer those. */
1583 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1584 if (!pa_format_info_is_pcm(f
))
1585 pa_idxset_put(u
->formats
, pa_format_info_copy(f
), NULL
);
1588 /* Now add any PCM formats */
1589 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1590 if (pa_format_info_is_pcm(f
))
1591 pa_idxset_put(u
->formats
, pa_format_info_copy(f
), NULL
);
1597 static int process_rewind(struct userdata
*u
) {
1598 snd_pcm_sframes_t unused
;
1599 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1602 /* Figure out how much we shall rewind and reset the counter */
1603 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1605 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1607 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1608 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1612 unused_nbytes
= (size_t) unused
* u
->frame_size
;
1614 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1615 unused_nbytes
+= u
->rewind_safeguard
;
1617 if (u
->hwbuf_size
> unused_nbytes
)
1618 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1622 if (rewind_nbytes
> limit_nbytes
)
1623 rewind_nbytes
= limit_nbytes
;
1625 if (rewind_nbytes
> 0) {
1626 snd_pcm_sframes_t in_frames
, out_frames
;
1628 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1630 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1631 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1632 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1633 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1634 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1639 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1641 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1643 if (rewind_nbytes
<= 0)
1644 pa_log_info("Tried rewind, but was apparently not possible.");
1646 u
->write_count
-= rewind_nbytes
;
1647 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1648 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1650 u
->after_rewind
= TRUE
;
1654 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1656 pa_sink_process_rewind(u
->sink
, 0);
1660 static void thread_func(void *userdata
) {
1661 struct userdata
*u
= userdata
;
1662 unsigned short revents
= 0;
1666 pa_log_debug("Thread starting up");
1668 if (u
->core
->realtime_scheduling
)
1669 pa_make_realtime(u
->core
->realtime_priority
);
1671 pa_thread_mq_install(&u
->thread_mq
);
1675 pa_usec_t rtpoll_sleep
= 0;
1678 pa_log_debug("Loop");
1681 /* Render some data and write it to the dsp */
1682 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1684 pa_usec_t sleep_usec
= 0;
1685 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
1687 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1688 if (process_rewind(u
) < 0)
1692 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1694 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1699 /* pa_log_debug("work_done = %i", work_done); */
1704 pa_log_info("Starting playback.");
1705 snd_pcm_start(u
->pcm_handle
);
1707 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1715 if (u
->use_tsched
) {
1718 if (u
->since_start
<= u
->hwbuf_size
) {
1720 /* USB devices on ALSA seem to hit a buffer
1721 * underrun during the first iterations much
1722 * quicker then we calculate here, probably due to
1723 * the transport latency. To accommodate for that
1724 * we artificially decrease the sleep time until
1725 * we have filled the buffer at least once
1728 if (pa_log_ratelimit(PA_LOG_DEBUG
))
1729 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1733 /* OK, the playback buffer is now full, let's
1734 * calculate when to wake up next */
1735 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1737 /* Convert from the sound card time domain to the
1738 * system time domain */
1739 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1741 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1743 /* We don't trust the conversion, so we wake up whatever comes first */
1744 rtpoll_sleep
= PA_MIN(sleep_usec
, cusec
);
1747 u
->after_rewind
= FALSE
;
1751 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1752 pa_usec_t volume_sleep
;
1753 pa_sink_volume_change_apply(u
->sink
, &volume_sleep
);
1754 if (volume_sleep
> 0)
1755 rtpoll_sleep
= PA_MIN(volume_sleep
, rtpoll_sleep
);
1758 if (rtpoll_sleep
> 0)
1759 pa_rtpoll_set_timer_relative(u
->rtpoll
, rtpoll_sleep
);
1761 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1763 /* Hmm, nothing to do. Let's sleep */
1764 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1767 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
)
1768 pa_sink_volume_change_apply(u
->sink
, NULL
);
1773 /* Tell ALSA about this and process its response */
1774 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1775 struct pollfd
*pollfd
;
1779 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1781 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1782 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
1786 if (revents
& ~POLLOUT
) {
1787 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1793 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit(PA_LOG_DEBUG
))
1794 pa_log_debug("Wakeup from ALSA!");
1801 /* If this was no regular exit from the loop we have to continue
1802 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1803 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1804 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1807 pa_log_debug("Thread shutting down");
1810 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1816 pa_assert(device_name
);
1818 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1819 pa_sink_new_data_set_name(data
, n
);
1820 data
->namereg_fail
= TRUE
;
1824 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1825 data
->namereg_fail
= TRUE
;
1827 n
= device_id
? device_id
: device_name
;
1828 data
->namereg_fail
= FALSE
;
1832 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1834 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1836 pa_sink_new_data_set_name(data
, t
);
1840 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
1842 if (!mapping
&& !element
)
1845 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1846 pa_log_info("Failed to find a working mixer device.");
1852 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1855 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1858 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1859 pa_alsa_path_dump(u
->mixer_path
);
1862 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
)))
1865 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1872 if (u
->mixer_path_set
) {
1873 pa_alsa_path_set_free(u
->mixer_path_set
);
1874 u
->mixer_path_set
= NULL
;
1875 } else if (u
->mixer_path
) {
1876 pa_alsa_path_free(u
->mixer_path
);
1877 u
->mixer_path
= NULL
;
1880 if (u
->mixer_handle
) {
1881 snd_mixer_close(u
->mixer_handle
);
1882 u
->mixer_handle
= NULL
;
1887 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1888 pa_bool_t need_mixer_callback
= FALSE
;
1892 if (!u
->mixer_handle
)
1895 if (u
->sink
->active_port
) {
1896 pa_alsa_port_data
*data
;
1898 /* We have a list of supported paths, so let's activate the
1899 * one that has been chosen as active */
1901 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1902 u
->mixer_path
= data
->path
;
1904 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1907 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1911 if (!u
->mixer_path
&& u
->mixer_path_set
)
1912 u
->mixer_path
= u
->mixer_path_set
->paths
;
1914 if (u
->mixer_path
) {
1915 /* Hmm, we have only a single path, then let's activate it */
1917 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1919 if (u
->mixer_path
->settings
)
1920 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1925 mixer_volume_init(u
);
1927 /* Will we need to register callbacks? */
1928 if (u
->mixer_path_set
&& u
->mixer_path_set
->paths
) {
1931 PA_LLIST_FOREACH(p
, u
->mixer_path_set
->paths
) {
1932 if (p
->has_volume
|| p
->has_mute
)
1933 need_mixer_callback
= TRUE
;
1936 else if (u
->mixer_path
)
1937 need_mixer_callback
= u
->mixer_path
->has_volume
|| u
->mixer_path
->has_mute
;
1939 if (need_mixer_callback
) {
1940 int (*mixer_callback
)(snd_mixer_elem_t
*, unsigned int);
1941 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1942 u
->mixer_pd
= pa_alsa_mixer_pdata_new();
1943 mixer_callback
= io_mixer_callback
;
1945 if (pa_alsa_set_mixer_rtpoll(u
->mixer_pd
, u
->mixer_handle
, u
->rtpoll
) < 0) {
1946 pa_log("Failed to initialize file descriptor monitoring");
1950 u
->mixer_fdl
= pa_alsa_fdlist_new();
1951 mixer_callback
= ctl_mixer_callback
;
1953 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1954 pa_log("Failed to initialize file descriptor monitoring");
1959 if (u
->mixer_path_set
)
1960 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1962 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1968 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1970 struct userdata
*u
= NULL
;
1971 const char *dev_id
= NULL
;
1974 uint32_t nfrags
, frag_size
, buffer_size
, tsched_size
, tsched_watermark
, rewind_safeguard
;
1975 snd_pcm_uframes_t period_frames
, buffer_frames
, tsched_frames
;
1977 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
, namereg_fail
= FALSE
, deferred_volume
= FALSE
, set_formats
= FALSE
;
1978 pa_sink_new_data data
;
1979 pa_alsa_profile_set
*profile_set
= NULL
;
1984 ss
= m
->core
->default_sample_spec
;
1985 map
= m
->core
->default_channel_map
;
1986 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1987 pa_log("Failed to parse sample specification and channel map");
1991 frame_size
= pa_frame_size(&ss
);
1993 nfrags
= m
->core
->default_n_fragments
;
1994 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1996 frag_size
= (uint32_t) frame_size
;
1997 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1998 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
2000 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
2001 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
2002 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
2003 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
2004 pa_log("Failed to parse buffer metrics");
2008 buffer_size
= nfrags
* frag_size
;
2010 period_frames
= frag_size
/frame_size
;
2011 buffer_frames
= buffer_size
/frame_size
;
2012 tsched_frames
= tsched_size
/frame_size
;
2014 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
2015 pa_log("Failed to parse mmap argument.");
2019 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
2020 pa_log("Failed to parse tsched argument.");
2024 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
2025 pa_log("Failed to parse ignore_dB argument.");
2029 rewind_safeguard
= PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES
, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC
, &ss
));
2030 if (pa_modargs_get_value_u32(ma
, "rewind_safeguard", &rewind_safeguard
) < 0) {
2031 pa_log("Failed to parse rewind_safeguard argument");
2035 deferred_volume
= m
->core
->deferred_volume
;
2036 if (pa_modargs_get_value_boolean(ma
, "deferred_volume", &deferred_volume
) < 0) {
2037 pa_log("Failed to parse deferred_volume argument.");
2041 use_tsched
= pa_alsa_may_tsched(use_tsched
);
2043 u
= pa_xnew0(struct userdata
, 1);
2046 u
->use_mmap
= use_mmap
;
2047 u
->use_tsched
= use_tsched
;
2048 u
->deferred_volume
= deferred_volume
;
2050 u
->rewind_safeguard
= rewind_safeguard
;
2051 u
->rtpoll
= pa_rtpoll_new();
2052 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
2054 u
->smoother
= pa_smoother_new(
2055 SMOOTHER_ADJUST_USEC
,
2056 SMOOTHER_WINDOW_USEC
,
2062 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
2064 dev_id
= pa_modargs_get_value(
2066 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
2068 if (reserve_init(u
, dev_id
) < 0)
2071 if (reserve_monitor_init(u
, dev_id
) < 0)
2079 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2080 pa_log("device_id= not set");
2084 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
2088 SND_PCM_STREAM_PLAYBACK
,
2089 &period_frames
, &buffer_frames
, tsched_frames
,
2093 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2095 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
2098 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
2102 SND_PCM_STREAM_PLAYBACK
,
2103 &period_frames
, &buffer_frames
, tsched_frames
,
2104 &b
, &d
, profile_set
, &mapping
)))
2109 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
2110 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
2113 SND_PCM_STREAM_PLAYBACK
,
2114 &period_frames
, &buffer_frames
, tsched_frames
,
2119 pa_assert(u
->device_name
);
2120 pa_log_info("Successfully opened device %s.", u
->device_name
);
2122 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
2123 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
2128 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
2130 if (use_mmap
&& !b
) {
2131 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2132 u
->use_mmap
= use_mmap
= FALSE
;
2135 if (use_tsched
&& (!b
|| !d
)) {
2136 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2137 u
->use_tsched
= use_tsched
= FALSE
;
2141 pa_log_info("Successfully enabled mmap() mode.");
2144 pa_log_info("Successfully enabled timer-based scheduling mode.");
2146 if (is_iec958(u
) || is_hdmi(u
))
2149 /* ALSA might tweak the sample spec, so recalculate the frame size */
2150 frame_size
= pa_frame_size(&ss
);
2152 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
2154 pa_sink_new_data_init(&data
);
2155 data
.driver
= driver
;
2158 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
2160 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2161 * variable instead of using &data.namereg_fail directly, because
2162 * data.namereg_fail is a bitfield and taking the address of a bitfield
2163 * variable is impossible. */
2164 namereg_fail
= data
.namereg_fail
;
2165 if (pa_modargs_get_value_boolean(ma
, "namereg_fail", &namereg_fail
) < 0) {
2166 pa_log("Failed to parse namereg_fail argument.");
2167 pa_sink_new_data_done(&data
);
2170 data
.namereg_fail
= namereg_fail
;
2172 pa_sink_new_data_set_sample_spec(&data
, &ss
);
2173 pa_sink_new_data_set_channel_map(&data
, &map
);
2175 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
2176 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
2177 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (buffer_frames
* frame_size
));
2178 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
2179 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
2182 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
2183 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
2186 pa_alsa_init_description(data
.proplist
);
2188 if (u
->control_device
)
2189 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
2191 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
2192 pa_log("Invalid properties");
2193 pa_sink_new_data_done(&data
);
2197 if (u
->mixer_path_set
)
2198 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
2200 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
| PA_SINK_LATENCY
| (u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0) |
2201 (set_formats
? PA_SINK_SET_FORMATS
: 0));
2202 pa_sink_new_data_done(&data
);
2205 pa_log("Failed to create sink object");
2209 if (pa_modargs_get_value_u32(ma
, "deferred_volume_safety_margin",
2210 &u
->sink
->thread_info
.volume_change_safety_margin
) < 0) {
2211 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2215 if (pa_modargs_get_value_s32(ma
, "deferred_volume_extra_delay",
2216 &u
->sink
->thread_info
.volume_change_extra_delay
) < 0) {
2217 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2221 u
->sink
->parent
.process_msg
= sink_process_msg
;
2223 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
2224 u
->sink
->set_state
= sink_set_state_cb
;
2225 u
->sink
->set_port
= sink_set_port_cb
;
2226 u
->sink
->userdata
= u
;
2228 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
2229 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
2231 u
->frame_size
= frame_size
;
2232 u
->fragment_size
= frag_size
= (size_t) (period_frames
* frame_size
);
2233 u
->hwbuf_size
= buffer_size
= (size_t) (buffer_frames
* frame_size
);
2234 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
2236 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2237 (double) u
->hwbuf_size
/ (double) u
->fragment_size
,
2238 (long unsigned) u
->fragment_size
,
2239 (double) pa_bytes_to_usec(u
->fragment_size
, &ss
) / PA_USEC_PER_MSEC
,
2240 (long unsigned) u
->hwbuf_size
,
2241 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
2243 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
2244 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
2245 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
2247 pa_log_info("Disabling rewind for device %s", u
->device_name
);
2248 pa_sink_set_max_rewind(u
->sink
, 0);
2251 if (u
->use_tsched
) {
2252 u
->tsched_watermark_ref
= tsched_watermark
;
2253 reset_watermark(u
, u
->tsched_watermark_ref
, &ss
, FALSE
);
2255 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
2259 if (update_sw_params(u
) < 0)
2262 if (setup_mixer(u
, ignore_dB
) < 0)
2265 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
2267 if (!(u
->thread
= pa_thread_new("alsa-sink", thread_func
, u
))) {
2268 pa_log("Failed to create thread.");
2272 /* Get initial mixer settings */
2273 if (data
.volume_is_set
) {
2274 if (u
->sink
->set_volume
)
2275 u
->sink
->set_volume(u
->sink
);
2277 if (u
->sink
->get_volume
)
2278 u
->sink
->get_volume(u
->sink
);
2281 if (data
.muted_is_set
) {
2282 if (u
->sink
->set_mute
)
2283 u
->sink
->set_mute(u
->sink
);
2285 if (u
->sink
->get_mute
)
2286 u
->sink
->get_mute(u
->sink
);
2289 if ((data
.volume_is_set
|| data
.muted_is_set
) && u
->sink
->write_volume
)
2290 u
->sink
->write_volume(u
->sink
);
2293 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2294 pa_format_info
*format
;
2296 /* To start with, we only support PCM formats. Other formats may be added
2297 * with pa_sink_set_formats().*/
2298 format
= pa_format_info_new();
2299 format
->encoding
= PA_ENCODING_PCM
;
2300 u
->formats
= pa_idxset_new(NULL
, NULL
);
2301 pa_idxset_put(u
->formats
, format
, NULL
);
2303 u
->sink
->get_formats
= sink_get_formats
;
2304 u
->sink
->set_formats
= sink_set_formats
;
2307 pa_sink_put(u
->sink
);
2310 pa_alsa_profile_set_free(profile_set
);
2320 pa_alsa_profile_set_free(profile_set
);
2325 static void userdata_free(struct userdata
*u
) {
2329 pa_sink_unlink(u
->sink
);
2332 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
2333 pa_thread_free(u
->thread
);
2336 pa_thread_mq_done(&u
->thread_mq
);
2339 pa_sink_unref(u
->sink
);
2341 if (u
->memchunk
.memblock
)
2342 pa_memblock_unref(u
->memchunk
.memblock
);
2345 pa_alsa_mixer_pdata_free(u
->mixer_pd
);
2347 if (u
->alsa_rtpoll_item
)
2348 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
2351 pa_rtpoll_free(u
->rtpoll
);
2353 if (u
->pcm_handle
) {
2354 snd_pcm_drop(u
->pcm_handle
);
2355 snd_pcm_close(u
->pcm_handle
);
2359 pa_alsa_fdlist_free(u
->mixer_fdl
);
2361 if (u
->mixer_path_set
)
2362 pa_alsa_path_set_free(u
->mixer_path_set
);
2363 else if (u
->mixer_path
)
2364 pa_alsa_path_free(u
->mixer_path
);
2366 if (u
->mixer_handle
)
2367 snd_mixer_close(u
->mixer_handle
);
2370 pa_smoother_free(u
->smoother
);
2373 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
2378 pa_xfree(u
->device_name
);
2379 pa_xfree(u
->control_device
);
2383 void pa_alsa_sink_free(pa_sink
*s
) {
2386 pa_sink_assert_ref(s
);
2387 pa_assert_se(u
= s
->userdata
);