2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC) /* 1ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
76 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty */
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
89 pa_thread_mq thread_mq
;
92 snd_pcm_t
*pcm_handle
;
94 pa_alsa_fdlist
*mixer_fdl
;
95 snd_mixer_t
*mixer_handle
;
96 pa_alsa_path_set
*mixer_path_set
;
97 pa_alsa_path
*mixer_path
;
99 pa_cvolume hardware_volume
;
111 watermark_inc_threshold
,
112 watermark_dec_threshold
;
114 pa_usec_t watermark_dec_not_before
;
117 pa_memchunk memchunk
;
119 char *device_name
; /* name of the PCM device */
120 char *control_device
; /* name of the control device */
122 pa_bool_t use_mmap
:1, use_tsched
:1;
124 pa_bool_t first
, after_rewind
;
126 pa_rtpoll_item
*alsa_rtpoll_item
;
128 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
130 pa_smoother
*smoother
;
131 uint64_t write_count
;
132 uint64_t since_start
;
133 pa_usec_t smoother_interval
;
134 pa_usec_t last_smoother_update
;
136 pa_reserve_wrapper
*reserve
;
137 pa_hook_slot
*reserve_slot
;
138 pa_reserve_monitor_wrapper
*monitor
;
139 pa_hook_slot
*monitor_slot
;
142 static void userdata_free(struct userdata
*u
);
144 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
148 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
149 return PA_HOOK_CANCEL
;
154 static void reserve_done(struct userdata
*u
) {
157 if (u
->reserve_slot
) {
158 pa_hook_slot_free(u
->reserve_slot
);
159 u
->reserve_slot
= NULL
;
163 pa_reserve_wrapper_unref(u
->reserve
);
168 static void reserve_update(struct userdata
*u
) {
169 const char *description
;
172 if (!u
->sink
|| !u
->reserve
)
175 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
176 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
179 static int reserve_init(struct userdata
*u
, const char *dname
) {
188 if (pa_in_system_mode())
191 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
194 /* We are resuming, try to lock the device */
195 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
203 pa_assert(!u
->reserve_slot
);
204 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
209 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
215 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
217 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
221 static void monitor_done(struct userdata
*u
) {
224 if (u
->monitor_slot
) {
225 pa_hook_slot_free(u
->monitor_slot
);
226 u
->monitor_slot
= NULL
;
230 pa_reserve_monitor_wrapper_unref(u
->monitor
);
235 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
241 if (pa_in_system_mode())
244 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
247 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
253 pa_assert(!u
->monitor_slot
);
254 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
259 static void fix_min_sleep_wakeup(struct userdata
*u
) {
260 size_t max_use
, max_use_2
;
263 pa_assert(u
->use_tsched
);
265 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
266 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
268 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
269 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
271 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
272 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
275 static void fix_tsched_watermark(struct userdata
*u
) {
278 pa_assert(u
->use_tsched
);
280 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
282 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
283 u
->tsched_watermark
= max_use
- u
->min_sleep
;
285 if (u
->tsched_watermark
< u
->min_wakeup
)
286 u
->tsched_watermark
= u
->min_wakeup
;
289 static void increase_watermark(struct userdata
*u
) {
290 size_t old_watermark
;
291 pa_usec_t old_min_latency
, new_min_latency
;
294 pa_assert(u
->use_tsched
);
296 /* First, just try to increase the watermark */
297 old_watermark
= u
->tsched_watermark
;
298 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
299 fix_tsched_watermark(u
);
301 if (old_watermark
!= u
->tsched_watermark
) {
302 pa_log_info("Increasing wakeup watermark to %0.2f ms",
303 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
307 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
308 old_min_latency
= u
->sink
->thread_info
.min_latency
;
309 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
310 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
312 if (old_min_latency
!= new_min_latency
) {
313 pa_log_info("Increasing minimal latency to %0.2f ms",
314 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
316 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
319 /* When we reach this we're officially fucked! */
322 static void decrease_watermark(struct userdata
*u
) {
323 size_t old_watermark
;
327 pa_assert(u
->use_tsched
);
329 now
= pa_rtclock_now();
331 if (u
->watermark_dec_not_before
<= 0)
334 if (u
->watermark_dec_not_before
> now
)
337 old_watermark
= u
->tsched_watermark
;
339 if (u
->tsched_watermark
< u
->watermark_dec_step
)
340 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
342 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
344 fix_tsched_watermark(u
);
346 if (old_watermark
!= u
->tsched_watermark
)
347 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
348 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
350 /* We don't change the latency range */
353 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
356 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
359 pa_assert(sleep_usec
);
360 pa_assert(process_usec
);
363 pa_assert(u
->use_tsched
);
365 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
367 if (usec
== (pa_usec_t
) -1)
368 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
370 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
375 *sleep_usec
= usec
- wm
;
379 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
380 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
381 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
382 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
386 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
391 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
393 pa_assert(err
!= -EAGAIN
);
396 pa_log_debug("%s: Buffer underrun!", call
);
398 if (err
== -ESTRPIPE
)
399 pa_log_debug("%s: System suspended!", call
);
401 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
402 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
411 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
413 pa_bool_t underrun
= FALSE
;
415 /* We use <= instead of < for this check here because an underrun
416 * only happens after the last sample was processed, not already when
417 * it is removed from the buffer. This is particularly important
418 * when block transfer is used. */
420 if (n_bytes
<= u
->hwbuf_size
)
421 left_to_play
= u
->hwbuf_size
- n_bytes
;
424 /* We got a dropout. What a mess! */
432 if (!u
->first
&& !u
->after_rewind
)
433 if (pa_log_ratelimit())
434 pa_log_info("Underrun!");
438 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
439 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
440 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
441 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
445 pa_bool_t reset_not_before
= TRUE
;
447 if (!u
->first
&& !u
->after_rewind
) {
448 if (underrun
|| left_to_play
< u
->watermark_inc_threshold
)
449 increase_watermark(u
);
450 else if (left_to_play
> u
->watermark_dec_threshold
) {
451 reset_not_before
= FALSE
;
453 /* We decrease the watermark only if we have actually
454 * been woken up by a timeout. If something else woke
455 * us up it's too easy to fulfill the deadlines... */
458 decrease_watermark(u
);
462 if (reset_not_before
)
463 u
->watermark_dec_not_before
= 0;
469 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
470 pa_bool_t work_done
= TRUE
;
471 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
476 pa_sink_assert_ref(u
->sink
);
479 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
485 pa_bool_t after_avail
= TRUE
;
487 /* First we determine how many samples are missing to fill the
488 * buffer up to 100% */
490 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
492 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
498 n_bytes
= (size_t) n
* u
->frame_size
;
501 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
504 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
509 /* We won't fill up the playback buffer before at least
510 * half the sleep time is over because otherwise we might
511 * ask for more data from the clients than they expect. We
512 * need to guarantee that clients only have to keep around
513 * a single hw buffer length. */
516 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
518 pa_log_debug("Not filling up, because too early.");
523 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
527 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
528 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
529 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
530 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
536 pa_log_debug("Not filling up, because not necessary.");
544 pa_log_debug("Not filling up, because already too many iterations.");
550 n_bytes
-= u
->hwbuf_unused
;
554 pa_log_debug("Filling up");
561 const snd_pcm_channel_area_t
*areas
;
562 snd_pcm_uframes_t offset
, frames
;
563 snd_pcm_sframes_t sframes
;
565 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
566 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
568 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
570 if (!after_avail
&& err
== -EAGAIN
)
573 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
579 /* Make sure that if these memblocks need to be copied they will fit into one slot */
580 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
581 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
583 if (!after_avail
&& frames
== 0)
586 pa_assert(frames
> 0);
589 /* Check these are multiples of 8 bit */
590 pa_assert((areas
[0].first
& 7) == 0);
591 pa_assert((areas
[0].step
& 7)== 0);
593 /* We assume a single interleaved memory buffer */
594 pa_assert((areas
[0].first
>> 3) == 0);
595 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
597 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
599 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
600 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
603 pa_sink_render_into_full(u
->sink
, &chunk
);
604 pa_memblock_unref_fixed(chunk
.memblock
);
606 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
608 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
616 u
->write_count
+= frames
* u
->frame_size
;
617 u
->since_start
+= frames
* u
->frame_size
;
620 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
623 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
626 n_bytes
-= (size_t) frames
* u
->frame_size
;
630 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
632 if (*sleep_usec
> process_usec
)
633 *sleep_usec
-= process_usec
;
637 return work_done
? 1 : 0;
640 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
641 pa_bool_t work_done
= FALSE
;
642 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
647 pa_sink_assert_ref(u
->sink
);
650 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
657 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
659 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
665 n_bytes
= (size_t) n
* u
->frame_size
;
666 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
671 /* We won't fill up the playback buffer before at least
672 * half the sleep time is over because otherwise we might
673 * ask for more data from the clients than they expect. We
674 * need to guarantee that clients only have to keep around
675 * a single hw buffer length. */
678 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
681 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
685 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
686 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
687 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
688 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
698 pa_log_debug("Not filling up, because already too many iterations.");
704 n_bytes
-= u
->hwbuf_unused
;
708 snd_pcm_sframes_t frames
;
710 pa_bool_t after_avail
= TRUE
;
712 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
714 if (u
->memchunk
.length
<= 0)
715 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
717 pa_assert(u
->memchunk
.length
> 0);
719 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
721 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
722 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
724 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
725 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
726 pa_memblock_release(u
->memchunk
.memblock
);
728 if (PA_UNLIKELY(frames
< 0)) {
730 if (!after_avail
&& (int) frames
== -EAGAIN
)
733 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
739 if (!after_avail
&& frames
== 0)
742 pa_assert(frames
> 0);
745 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
746 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
748 if (u
->memchunk
.length
<= 0) {
749 pa_memblock_unref(u
->memchunk
.memblock
);
750 pa_memchunk_reset(&u
->memchunk
);
755 u
->write_count
+= frames
* u
->frame_size
;
756 u
->since_start
+= frames
* u
->frame_size
;
758 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
760 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
763 n_bytes
-= (size_t) frames
* u
->frame_size
;
767 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
769 if (*sleep_usec
> process_usec
)
770 *sleep_usec
-= process_usec
;
774 return work_done
? 1 : 0;
777 static void update_smoother(struct userdata
*u
) {
778 snd_pcm_sframes_t delay
= 0;
781 pa_usec_t now1
= 0, now2
;
782 snd_pcm_status_t
*status
;
784 snd_pcm_status_alloca(&status
);
787 pa_assert(u
->pcm_handle
);
789 /* Let's update the time smoother */
791 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
792 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
796 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
797 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
799 snd_htimestamp_t htstamp
= { 0, 0 };
800 snd_pcm_status_get_htstamp(status
, &htstamp
);
801 now1
= pa_timespec_load(&htstamp
);
804 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
806 now1
= pa_rtclock_now();
808 /* check if the time since the last update is bigger than the interval */
809 if (u
->last_smoother_update
> 0)
810 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
813 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
815 if (PA_UNLIKELY(position
< 0))
818 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
820 pa_smoother_put(u
->smoother
, now1
, now2
);
822 u
->last_smoother_update
= now1
;
823 /* exponentially increase the update interval up to the MAX limit */
824 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
827 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
830 pa_usec_t now1
, now2
;
834 now1
= pa_rtclock_now();
835 now2
= pa_smoother_get(u
->smoother
, now1
);
837 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
839 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
841 if (u
->memchunk
.memblock
)
842 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
847 static int build_pollfd(struct userdata
*u
) {
849 pa_assert(u
->pcm_handle
);
851 if (u
->alsa_rtpoll_item
)
852 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
854 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
860 /* Called from IO context */
861 static int suspend(struct userdata
*u
) {
863 pa_assert(u
->pcm_handle
);
865 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
867 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
868 * take awfully long with our long buffer sizes today. */
869 snd_pcm_close(u
->pcm_handle
);
870 u
->pcm_handle
= NULL
;
872 if (u
->alsa_rtpoll_item
) {
873 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
874 u
->alsa_rtpoll_item
= NULL
;
877 pa_log_info("Device suspended...");
882 /* Called from IO context */
883 static int update_sw_params(struct userdata
*u
) {
884 snd_pcm_uframes_t avail_min
;
889 /* Use the full buffer if no one asked us for anything specific */
895 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
898 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
900 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
902 /* We need at least one sample in our buffer */
904 if (PA_UNLIKELY(b
< u
->frame_size
))
907 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
910 fix_min_sleep_wakeup(u
);
911 fix_tsched_watermark(u
);
914 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
916 /* We need at least one frame in the used part of the buffer */
917 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
920 pa_usec_t sleep_usec
, process_usec
;
922 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
923 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
926 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
928 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
929 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
933 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
938 /* Called from IO context */
939 static int unsuspend(struct userdata
*u
) {
944 snd_pcm_uframes_t period_size
;
947 pa_assert(!u
->pcm_handle
);
949 pa_log_info("Trying resume...");
951 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
952 /*SND_PCM_NONBLOCK|*/
953 SND_PCM_NO_AUTO_RESAMPLE
|
954 SND_PCM_NO_AUTO_CHANNELS
|
955 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
956 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
960 ss
= u
->sink
->sample_spec
;
961 nfrags
= u
->nfragments
;
962 period_size
= u
->fragment_size
/ u
->frame_size
;
966 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
967 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
971 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
972 pa_log_warn("Resume failed, couldn't get original access mode.");
976 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
977 pa_log_warn("Resume failed, couldn't restore original sample settings.");
981 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
982 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
983 (unsigned long) u
->nfragments
, (unsigned long) u
->fragment_size
,
984 (unsigned long) nfrags
, period_size
* u
->frame_size
);
988 if (update_sw_params(u
) < 0)
991 if (build_pollfd(u
) < 0)
995 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
996 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
997 u
->last_smoother_update
= 0;
1002 pa_log_info("Resumed successfully...");
1007 if (u
->pcm_handle
) {
1008 snd_pcm_close(u
->pcm_handle
);
1009 u
->pcm_handle
= NULL
;
1015 /* Called from IO context */
1016 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1017 struct userdata
*u
= PA_SINK(o
)->userdata
;
1021 case PA_SINK_MESSAGE_GET_LATENCY
: {
1025 r
= sink_get_latency(u
);
1027 *((pa_usec_t
*) data
) = r
;
1032 case PA_SINK_MESSAGE_SET_STATE
:
1034 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1036 case PA_SINK_SUSPENDED
:
1037 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1045 case PA_SINK_RUNNING
:
1047 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1048 if (build_pollfd(u
) < 0)
1052 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1053 if (unsuspend(u
) < 0)
1059 case PA_SINK_UNLINKED
:
1061 case PA_SINK_INVALID_STATE
:
1068 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1071 /* Called from main context */
1072 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1073 pa_sink_state_t old_state
;
1076 pa_sink_assert_ref(s
);
1077 pa_assert_se(u
= s
->userdata
);
1079 old_state
= pa_sink_get_state(u
->sink
);
1081 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1083 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1084 if (reserve_init(u
, u
->device_name
) < 0)
1090 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1091 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1094 pa_assert(u
->mixer_handle
);
1096 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1099 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1100 pa_sink_get_volume(u
->sink
, TRUE
);
1101 pa_sink_get_mute(u
->sink
, TRUE
);
1107 static void sink_get_volume_cb(pa_sink
*s
) {
1108 struct userdata
*u
= s
->userdata
;
1110 char t
[PA_CVOLUME_SNPRINT_MAX
];
1113 pa_assert(u
->mixer_path
);
1114 pa_assert(u
->mixer_handle
);
1116 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1119 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1120 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1122 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1124 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1127 s
->real_volume
= u
->hardware_volume
= r
;
1129 /* Hmm, so the hardware volume changed, let's reset our software volume */
1130 if (u
->mixer_path
->has_dB
)
1131 pa_sink_set_soft_volume(s
, NULL
);
1134 static void sink_set_volume_cb(pa_sink
*s
) {
1135 struct userdata
*u
= s
->userdata
;
1137 char t
[PA_CVOLUME_SNPRINT_MAX
];
1140 pa_assert(u
->mixer_path
);
1141 pa_assert(u
->mixer_handle
);
1143 /* Shift up by the base volume */
1144 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1146 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1149 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1150 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1152 u
->hardware_volume
= r
;
1154 if (u
->mixer_path
->has_dB
) {
1155 pa_cvolume new_soft_volume
;
1156 pa_bool_t accurate_enough
;
1158 /* Match exactly what the user requested by software */
1159 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1161 /* If the adjustment to do in software is only minimal we
1162 * can skip it. That saves us CPU at the expense of a bit of
1165 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1166 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1168 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->real_volume
));
1169 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
1170 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t
, sizeof(t
), &new_soft_volume
),
1171 pa_yes_no(accurate_enough
));
1173 if (!accurate_enough
)
1174 s
->soft_volume
= new_soft_volume
;
1177 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1179 /* We can't match exactly what the user requested, hence let's
1180 * at least tell the user about it */
1186 static void sink_get_mute_cb(pa_sink
*s
) {
1187 struct userdata
*u
= s
->userdata
;
1191 pa_assert(u
->mixer_path
);
1192 pa_assert(u
->mixer_handle
);
1194 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1200 static void sink_set_mute_cb(pa_sink
*s
) {
1201 struct userdata
*u
= s
->userdata
;
1204 pa_assert(u
->mixer_path
);
1205 pa_assert(u
->mixer_handle
);
1207 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1210 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1211 struct userdata
*u
= s
->userdata
;
1212 pa_alsa_port_data
*data
;
1216 pa_assert(u
->mixer_handle
);
1218 data
= PA_DEVICE_PORT_DATA(p
);
1220 pa_assert_se(u
->mixer_path
= data
->path
);
1221 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1223 if (u
->mixer_path
->has_volume
&& u
->mixer_path
->has_dB
) {
1224 s
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1225 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
1227 if (u
->mixer_path
->max_dB
> 0.0)
1228 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s
->base_volume
));
1230 pa_log_info("No particular base volume set, fixing to 0 dB");
1232 s
->base_volume
= PA_VOLUME_NORM
;
1233 s
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1237 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1247 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1248 struct userdata
*u
= s
->userdata
;
1255 before
= u
->hwbuf_unused
;
1256 update_sw_params(u
);
1258 /* Let's check whether we now use only a smaller part of the
1259 buffer then before. If so, we need to make sure that subsequent
1260 rewinds are relative to the new maximum fill level and not to the
1261 current fill level. Thus, let's do a full rewind once, to clear
1264 if (u
->hwbuf_unused
> before
) {
1265 pa_log_debug("Requesting rewind due to latency change.");
1266 pa_sink_request_rewind(s
, (size_t) -1);
1270 static int process_rewind(struct userdata
*u
) {
1271 snd_pcm_sframes_t unused
;
1272 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1275 /* Figure out how much we shall rewind and reset the counter */
1276 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1278 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1280 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1281 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1285 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
1287 if (u
->hwbuf_size
> unused_nbytes
)
1288 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1292 if (rewind_nbytes
> limit_nbytes
)
1293 rewind_nbytes
= limit_nbytes
;
1295 if (rewind_nbytes
> 0) {
1296 snd_pcm_sframes_t in_frames
, out_frames
;
1298 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1300 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1301 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1302 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1303 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1304 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1309 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1311 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1313 if (rewind_nbytes
<= 0)
1314 pa_log_info("Tried rewind, but was apparently not possible.");
1316 u
->write_count
-= rewind_nbytes
;
1317 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1318 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1320 u
->after_rewind
= TRUE
;
1324 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1326 pa_sink_process_rewind(u
->sink
, 0);
1330 static void thread_func(void *userdata
) {
1331 struct userdata
*u
= userdata
;
1332 unsigned short revents
= 0;
1336 pa_log_debug("Thread starting up");
1338 if (u
->core
->realtime_scheduling
)
1339 pa_make_realtime(u
->core
->realtime_priority
);
1341 pa_thread_mq_install(&u
->thread_mq
);
1347 pa_log_debug("Loop");
1350 /* Render some data and write it to the dsp */
1351 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1353 pa_usec_t sleep_usec
= 0;
1354 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
1356 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1357 if (process_rewind(u
) < 0)
1361 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1363 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1368 /* pa_log_debug("work_done = %i", work_done); */
1373 pa_log_info("Starting playback.");
1374 snd_pcm_start(u
->pcm_handle
);
1376 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1382 if (u
->use_tsched
) {
1385 if (u
->since_start
<= u
->hwbuf_size
) {
1387 /* USB devices on ALSA seem to hit a buffer
1388 * underrun during the first iterations much
1389 * quicker then we calculate here, probably due to
1390 * the transport latency. To accommodate for that
1391 * we artificially decrease the sleep time until
1392 * we have filled the buffer at least once
1395 if (pa_log_ratelimit())
1396 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1400 /* OK, the playback buffer is now full, let's
1401 * calculate when to wake up next */
1402 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1404 /* Convert from the sound card time domain to the
1405 * system time domain */
1406 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1408 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1410 /* We don't trust the conversion, so we wake up whatever comes first */
1411 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1415 u
->after_rewind
= FALSE
;
1417 } else if (u
->use_tsched
)
1419 /* OK, we're in an invalid state, let's disable our timers */
1420 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1422 /* Hmm, nothing to do. Let's sleep */
1423 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1429 /* Tell ALSA about this and process its response */
1430 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1431 struct pollfd
*pollfd
;
1435 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1437 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1438 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
1442 if (revents
& ~POLLOUT
) {
1443 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1448 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit())
1449 pa_log_debug("Wakeup from ALSA!");
1456 /* If this was no regular exit from the loop we have to continue
1457 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1458 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1459 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1462 pa_log_debug("Thread shutting down");
1465 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1471 pa_assert(device_name
);
1473 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1474 pa_sink_new_data_set_name(data
, n
);
1475 data
->namereg_fail
= TRUE
;
1479 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1480 data
->namereg_fail
= TRUE
;
1482 n
= device_id
? device_id
: device_name
;
1483 data
->namereg_fail
= FALSE
;
1487 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1489 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1491 pa_sink_new_data_set_name(data
, t
);
1495 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
1497 if (!mapping
&& !element
)
1500 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1501 pa_log_info("Failed to find a working mixer device.");
1507 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1510 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1513 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1514 pa_alsa_path_dump(u
->mixer_path
);
1517 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
)))
1520 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1522 pa_log_debug("Probed mixer paths:");
1523 pa_alsa_path_set_dump(u
->mixer_path_set
);
1530 if (u
->mixer_path_set
) {
1531 pa_alsa_path_set_free(u
->mixer_path_set
);
1532 u
->mixer_path_set
= NULL
;
1533 } else if (u
->mixer_path
) {
1534 pa_alsa_path_free(u
->mixer_path
);
1535 u
->mixer_path
= NULL
;
1538 if (u
->mixer_handle
) {
1539 snd_mixer_close(u
->mixer_handle
);
1540 u
->mixer_handle
= NULL
;
1544 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1547 if (!u
->mixer_handle
)
1550 if (u
->sink
->active_port
) {
1551 pa_alsa_port_data
*data
;
1553 /* We have a list of supported paths, so let's activate the
1554 * one that has been chosen as active */
1556 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1557 u
->mixer_path
= data
->path
;
1559 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1562 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1566 if (!u
->mixer_path
&& u
->mixer_path_set
)
1567 u
->mixer_path
= u
->mixer_path_set
->paths
;
1569 if (u
->mixer_path
) {
1570 /* Hmm, we have only a single path, then let's activate it */
1572 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1574 if (u
->mixer_path
->settings
)
1575 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1580 if (!u
->mixer_path
->has_volume
)
1581 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1584 if (u
->mixer_path
->has_dB
) {
1585 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1587 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1588 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1590 if (u
->mixer_path
->max_dB
> 0.0)
1591 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1593 pa_log_info("No particular base volume set, fixing to 0 dB");
1596 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1597 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1598 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1601 u
->sink
->get_volume
= sink_get_volume_cb
;
1602 u
->sink
->set_volume
= sink_set_volume_cb
;
1604 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->mixer_path
->has_dB
? PA_SINK_DECIBEL_VOLUME
: 0);
1605 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1608 if (!u
->mixer_path
->has_mute
) {
1609 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1611 u
->sink
->get_mute
= sink_get_mute_cb
;
1612 u
->sink
->set_mute
= sink_set_mute_cb
;
1613 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1614 pa_log_info("Using hardware mute control.");
1617 u
->mixer_fdl
= pa_alsa_fdlist_new();
1619 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1620 pa_log("Failed to initialize file descriptor monitoring");
1624 if (u
->mixer_path_set
)
1625 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1627 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1632 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1634 struct userdata
*u
= NULL
;
1635 const char *dev_id
= NULL
;
1636 pa_sample_spec ss
, requested_ss
;
1638 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1639 snd_pcm_uframes_t period_frames
, tsched_frames
;
1641 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
;
1642 pa_sink_new_data data
;
1643 pa_alsa_profile_set
*profile_set
= NULL
;
1648 ss
= m
->core
->default_sample_spec
;
1649 map
= m
->core
->default_channel_map
;
1650 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1651 pa_log("Failed to parse sample specification and channel map");
1656 frame_size
= pa_frame_size(&ss
);
1658 nfrags
= m
->core
->default_n_fragments
;
1659 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1661 frag_size
= (uint32_t) frame_size
;
1662 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1663 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1665 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1666 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1667 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1668 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1669 pa_log("Failed to parse buffer metrics");
1673 hwbuf_size
= frag_size
* nfrags
;
1674 period_frames
= frag_size
/frame_size
;
1675 tsched_frames
= tsched_size
/frame_size
;
1677 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1678 pa_log("Failed to parse mmap argument.");
1682 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1683 pa_log("Failed to parse tsched argument.");
1687 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1688 pa_log("Failed to parse ignore_dB argument.");
1692 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1693 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1697 u
= pa_xnew0(struct userdata
, 1);
1700 u
->use_mmap
= use_mmap
;
1701 u
->use_tsched
= use_tsched
;
1703 u
->rtpoll
= pa_rtpoll_new();
1704 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1706 u
->smoother
= pa_smoother_new(
1707 DEFAULT_TSCHED_BUFFER_USEC
*2,
1708 DEFAULT_TSCHED_BUFFER_USEC
*2,
1714 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1716 dev_id
= pa_modargs_get_value(
1718 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
1720 if (reserve_init(u
, dev_id
) < 0)
1723 if (reserve_monitor_init(u
, dev_id
) < 0)
1731 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1732 pa_log("device_id= not set");
1736 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
1740 SND_PCM_STREAM_PLAYBACK
,
1741 &nfrags
, &period_frames
, tsched_frames
,
1746 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1748 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
1751 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1755 SND_PCM_STREAM_PLAYBACK
,
1756 &nfrags
, &period_frames
, tsched_frames
,
1757 &b
, &d
, profile_set
, &mapping
)))
1763 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1764 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1767 SND_PCM_STREAM_PLAYBACK
,
1768 &nfrags
, &period_frames
, tsched_frames
,
1773 pa_assert(u
->device_name
);
1774 pa_log_info("Successfully opened device %s.", u
->device_name
);
1776 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
1777 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
1782 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
1784 if (use_mmap
&& !b
) {
1785 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1786 u
->use_mmap
= use_mmap
= FALSE
;
1789 if (use_tsched
&& (!b
|| !d
)) {
1790 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1791 u
->use_tsched
= use_tsched
= FALSE
;
1794 if (use_tsched
&& !pa_alsa_pcm_is_hw(u
->pcm_handle
)) {
1795 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1796 u
->use_tsched
= use_tsched
= FALSE
;
1800 pa_log_info("Successfully enabled mmap() mode.");
1803 pa_log_info("Successfully enabled timer-based scheduling mode.");
1805 /* ALSA might tweak the sample spec, so recalculate the frame size */
1806 frame_size
= pa_frame_size(&ss
);
1808 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
1810 pa_sink_new_data_init(&data
);
1811 data
.driver
= driver
;
1814 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
1815 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1816 pa_sink_new_data_set_channel_map(&data
, &map
);
1818 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
1819 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1820 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1821 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1822 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1825 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
1826 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
1829 pa_alsa_init_description(data
.proplist
);
1831 if (u
->control_device
)
1832 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
1834 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
1835 pa_log("Invalid properties");
1836 pa_sink_new_data_done(&data
);
1840 if (u
->mixer_path_set
)
1841 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
1843 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
|(u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0));
1844 pa_sink_new_data_done(&data
);
1847 pa_log("Failed to create sink object");
1851 u
->sink
->parent
.process_msg
= sink_process_msg
;
1852 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1853 u
->sink
->set_state
= sink_set_state_cb
;
1854 u
->sink
->set_port
= sink_set_port_cb
;
1855 u
->sink
->userdata
= u
;
1857 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1858 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
1860 u
->frame_size
= frame_size
;
1861 u
->fragment_size
= frag_size
= (uint32_t) (period_frames
* frame_size
);
1862 u
->nfragments
= nfrags
;
1863 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
1864 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1866 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1867 nfrags
, (long unsigned) u
->fragment_size
,
1868 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1870 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
1871 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
1873 if (u
->use_tsched
) {
1874 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->sink
->sample_spec
);
1876 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->sink
->sample_spec
);
1877 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->sink
->sample_spec
);
1879 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1880 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1882 fix_min_sleep_wakeup(u
);
1883 fix_tsched_watermark(u
);
1885 pa_sink_set_latency_range(u
->sink
,
1887 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1889 pa_log_info("Time scheduling watermark is %0.2fms",
1890 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1892 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1897 if (update_sw_params(u
) < 0)
1900 if (setup_mixer(u
, ignore_dB
) < 0)
1903 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
1905 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1906 pa_log("Failed to create thread.");
1910 /* Get initial mixer settings */
1911 if (data
.volume_is_set
) {
1912 if (u
->sink
->set_volume
)
1913 u
->sink
->set_volume(u
->sink
);
1915 if (u
->sink
->get_volume
)
1916 u
->sink
->get_volume(u
->sink
);
1919 if (data
.muted_is_set
) {
1920 if (u
->sink
->set_mute
)
1921 u
->sink
->set_mute(u
->sink
);
1923 if (u
->sink
->get_mute
)
1924 u
->sink
->get_mute(u
->sink
);
1927 pa_sink_put(u
->sink
);
1930 pa_alsa_profile_set_free(profile_set
);
1940 pa_alsa_profile_set_free(profile_set
);
1945 static void userdata_free(struct userdata
*u
) {
1949 pa_sink_unlink(u
->sink
);
1952 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1953 pa_thread_free(u
->thread
);
1956 pa_thread_mq_done(&u
->thread_mq
);
1959 pa_sink_unref(u
->sink
);
1961 if (u
->memchunk
.memblock
)
1962 pa_memblock_unref(u
->memchunk
.memblock
);
1964 if (u
->alsa_rtpoll_item
)
1965 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1968 pa_rtpoll_free(u
->rtpoll
);
1970 if (u
->pcm_handle
) {
1971 snd_pcm_drop(u
->pcm_handle
);
1972 snd_pcm_close(u
->pcm_handle
);
1976 pa_alsa_fdlist_free(u
->mixer_fdl
);
1978 if (u
->mixer_path_set
)
1979 pa_alsa_path_set_free(u
->mixer_path_set
);
1980 else if (u
->mixer_path
)
1981 pa_alsa_path_free(u
->mixer_path
);
1983 if (u
->mixer_handle
)
1984 snd_mixer_close(u
->mixer_handle
);
1987 pa_smoother_free(u
->smoother
);
1992 pa_xfree(u
->device_name
);
1993 pa_xfree(u
->control_device
);
1997 void pa_alsa_sink_free(pa_sink
*s
) {
2000 pa_sink_assert_ref(s
);
2001 pa_assert_se(u
= s
->userdata
);