2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/module.h>
44 #include <pulsecore/memchunk.h>
45 #include <pulsecore/sink.h>
46 #include <pulsecore/modargs.h>
47 #include <pulsecore/core-rtclock.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/sample-util.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/macro.h>
52 #include <pulsecore/thread.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother windows size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
/* Fields of "struct userdata" (excerpt -- the opening of the struct and
 * several bookkeeping fields are elided in this view of the file). */

    pa_thread_mq thread_mq;                 /* message queue linking the IO thread to the main thread */

    snd_pcm_t *pcm_handle;                  /* ALSA PCM handle of the playback device (NULL while suspended) */

    pa_alsa_fdlist *mixer_fdl;              /* fd list used to watch the mixer from the main thread */
    pa_alsa_mixer_pdata *mixer_pd;          /* rtpoll data used to watch the mixer from the IO thread */
    snd_mixer_t *mixer_handle;              /* ALSA mixer handle */
    pa_alsa_path_set *mixer_path_set;       /* candidate mixer element paths */
    pa_alsa_path *mixer_path;               /* mixer element path actually in use */

    pa_cvolume hardware_volume;             /* last volume written to / read from the hw mixer */

    /* NOTE(review): the type specifier for the following declarator list is
     * elided in this excerpt -- presumably part of a size_t byte-count
     * declaration list; confirm against the full file. */
    tsched_watermark_ref,                   /* watermark configured at sink creation, kept for reset on resume */
    watermark_inc_threshold,                /* raise the watermark if the buffer level ever drops below this */
    watermark_dec_threshold,                /* lower the watermark if the level never dropped below this */

    pa_usec_t watermark_dec_not_before;     /* earliest time at which a watermark decrease is allowed */
    pa_usec_t min_latency_ref;              /* min_latency remembered so it can be re-applied from IO context */

    pa_memchunk memchunk;                   /* partially-written chunk for non-mmap (snd_pcm_writei) transfers */

    char *device_name;                      /* name of the PCM device */
    char *control_device;                   /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first, after_rewind;          /* suppress underrun handling right after start / rewind */

    pa_rtpoll_item *alsa_rtpoll_item;       /* rtpoll item wrapping the PCM poll fds */

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;                  /* time smoother interpolating the playback position */
    uint64_t write_count;                   /* total bytes written to the device */
    uint64_t since_start;                   /* bytes written since last start/recovery */
    pa_usec_t smoother_interval;            /* current smoother update interval (grows exponentially) */
    pa_usec_t last_smoother_update;         /* timestamp of the last smoother update */

    pa_reserve_wrapper *reserve;            /* device reservation handle */
    pa_hook_slot *reserve_slot;             /* hook: another app asked us to release the device */
    pa_reserve_monitor_wrapper *monitor;    /* watches whether someone else holds the device */
    pa_hook_slot *monitor_slot;             /* hook: device busy state changed */
159 static void userdata_free(struct userdata
*u
);
161 /* FIXME: Is there a better way to do this than device names? */
162 static pa_bool_t
is_iec958(struct userdata
*u
) {
163 return (strncmp("iec958", u
->device_name
, 6) == 0);
166 static pa_bool_t
is_hdmi(struct userdata
*u
) {
167 return (strncmp("hdmi", u
->device_name
, 4) == 0);
170 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
174 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
175 return PA_HOOK_CANCEL
;
180 static void reserve_done(struct userdata
*u
) {
183 if (u
->reserve_slot
) {
184 pa_hook_slot_free(u
->reserve_slot
);
185 u
->reserve_slot
= NULL
;
189 pa_reserve_wrapper_unref(u
->reserve
);
194 static void reserve_update(struct userdata
*u
) {
195 const char *description
;
198 if (!u
->sink
|| !u
->reserve
)
201 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
202 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
205 static int reserve_init(struct userdata
*u
, const char *dname
) {
214 if (pa_in_system_mode())
217 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
220 /* We are resuming, try to lock the device */
221 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
229 pa_assert(!u
->reserve_slot
);
230 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
235 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
241 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
243 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
247 static void monitor_done(struct userdata
*u
) {
250 if (u
->monitor_slot
) {
251 pa_hook_slot_free(u
->monitor_slot
);
252 u
->monitor_slot
= NULL
;
256 pa_reserve_monitor_wrapper_unref(u
->monitor
);
261 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
267 if (pa_in_system_mode())
270 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
273 /* We are resuming, try to lock the device */
274 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
280 pa_assert(!u
->monitor_slot
);
281 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
286 static void fix_min_sleep_wakeup(struct userdata
*u
) {
287 size_t max_use
, max_use_2
;
290 pa_assert(u
->use_tsched
);
292 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
293 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
295 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
296 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
298 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
299 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
302 static void fix_tsched_watermark(struct userdata
*u
) {
305 pa_assert(u
->use_tsched
);
307 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
309 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
310 u
->tsched_watermark
= max_use
- u
->min_sleep
;
312 if (u
->tsched_watermark
< u
->min_wakeup
)
313 u
->tsched_watermark
= u
->min_wakeup
;
316 static void increase_watermark(struct userdata
*u
) {
317 size_t old_watermark
;
318 pa_usec_t old_min_latency
, new_min_latency
;
321 pa_assert(u
->use_tsched
);
323 /* First, just try to increase the watermark */
324 old_watermark
= u
->tsched_watermark
;
325 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
326 fix_tsched_watermark(u
);
328 if (old_watermark
!= u
->tsched_watermark
) {
329 pa_log_info("Increasing wakeup watermark to %0.2f ms",
330 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
334 /* Hmm, we cannot increase the watermark any further, hence let's
335 raise the latency, unless doing so was disabled in
337 if (u
->fixed_latency_range
)
340 old_min_latency
= u
->sink
->thread_info
.min_latency
;
341 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
342 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
344 if (old_min_latency
!= new_min_latency
) {
345 pa_log_info("Increasing minimal latency to %0.2f ms",
346 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
348 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
351 /* When we reach this we're officialy fucked! */
354 static void decrease_watermark(struct userdata
*u
) {
355 size_t old_watermark
;
359 pa_assert(u
->use_tsched
);
361 now
= pa_rtclock_now();
363 if (u
->watermark_dec_not_before
<= 0)
366 if (u
->watermark_dec_not_before
> now
)
369 old_watermark
= u
->tsched_watermark
;
371 if (u
->tsched_watermark
< u
->watermark_dec_step
)
372 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
374 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
376 fix_tsched_watermark(u
);
378 if (old_watermark
!= u
->tsched_watermark
)
379 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
380 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
382 /* We don't change the latency range*/
385 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
388 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
391 pa_assert(sleep_usec
);
392 pa_assert(process_usec
);
395 pa_assert(u
->use_tsched
);
397 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
399 if (usec
== (pa_usec_t
) -1)
400 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
402 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
407 *sleep_usec
= usec
- wm
;
411 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
412 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
413 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
414 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
418 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
423 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
425 pa_assert(err
!= -EAGAIN
);
428 pa_log_debug("%s: Buffer underrun!", call
);
430 if (err
== -ESTRPIPE
)
431 pa_log_debug("%s: System suspended!", call
);
433 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
434 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
443 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
445 pa_bool_t underrun
= FALSE
;
447 /* We use <= instead of < for this check here because an underrun
448 * only happens after the last sample was processed, not already when
449 * it is removed from the buffer. This is particularly important
450 * when block transfer is used. */
452 if (n_bytes
<= u
->hwbuf_size
)
453 left_to_play
= u
->hwbuf_size
- n_bytes
;
456 /* We got a dropout. What a mess! */
464 if (!u
->first
&& !u
->after_rewind
)
465 if (pa_log_ratelimit(PA_LOG_INFO
))
466 pa_log_info("Underrun!");
470 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
471 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
472 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
473 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
477 pa_bool_t reset_not_before
= TRUE
;
479 if (!u
->first
&& !u
->after_rewind
) {
480 if (underrun
|| left_to_play
< u
->watermark_inc_threshold
)
481 increase_watermark(u
);
482 else if (left_to_play
> u
->watermark_dec_threshold
) {
483 reset_not_before
= FALSE
;
485 /* We decrease the watermark only if have actually
486 * been woken up by a timeout. If something else woke
487 * us up it's too easy to fulfill the deadlines... */
490 decrease_watermark(u
);
494 if (reset_not_before
)
495 u
->watermark_dec_not_before
= 0;
501 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
502 pa_bool_t work_done
= FALSE
;
503 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
508 pa_sink_assert_ref(u
->sink
);
511 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
517 pa_bool_t after_avail
= TRUE
;
519 /* First we determine how many samples are missing to fill the
520 * buffer up to 100% */
522 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
524 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
530 n_bytes
= (size_t) n
* u
->frame_size
;
533 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
536 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
541 /* We won't fill up the playback buffer before at least
542 * half the sleep time is over because otherwise we might
543 * ask for more data from the clients then they expect. We
544 * need to guarantee that clients only have to keep around
545 * a single hw buffer length. */
548 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
550 pa_log_debug("Not filling up, because too early.");
555 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
559 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
560 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
561 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
562 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
568 pa_log_debug("Not filling up, because not necessary.");
576 pa_log_debug("Not filling up, because already too many iterations.");
582 n_bytes
-= u
->hwbuf_unused
;
586 pa_log_debug("Filling up");
593 const snd_pcm_channel_area_t
*areas
;
594 snd_pcm_uframes_t offset
, frames
;
595 snd_pcm_sframes_t sframes
;
597 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
598 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
600 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
602 if (!after_avail
&& err
== -EAGAIN
)
605 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
611 /* Make sure that if these memblocks need to be copied they will fit into one slot */
612 if (frames
> pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
)
613 frames
= pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
;
615 if (!after_avail
&& frames
== 0)
618 pa_assert(frames
> 0);
621 /* Check these are multiples of 8 bit */
622 pa_assert((areas
[0].first
& 7) == 0);
623 pa_assert((areas
[0].step
& 7)== 0);
625 /* We assume a single interleaved memory buffer */
626 pa_assert((areas
[0].first
>> 3) == 0);
627 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
629 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
631 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
632 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
635 pa_sink_render_into_full(u
->sink
, &chunk
);
636 pa_memblock_unref_fixed(chunk
.memblock
);
638 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
640 if (!after_avail
&& (int) sframes
== -EAGAIN
)
643 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
651 u
->write_count
+= frames
* u
->frame_size
;
652 u
->since_start
+= frames
* u
->frame_size
;
655 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
658 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
661 n_bytes
-= (size_t) frames
* u
->frame_size
;
666 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
667 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
669 if (*sleep_usec
> process_usec
)
670 *sleep_usec
-= process_usec
;
676 return work_done
? 1 : 0;
679 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
680 pa_bool_t work_done
= FALSE
;
681 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
686 pa_sink_assert_ref(u
->sink
);
689 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
695 pa_bool_t after_avail
= TRUE
;
697 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
699 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
705 n_bytes
= (size_t) n
* u
->frame_size
;
706 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
711 /* We won't fill up the playback buffer before at least
712 * half the sleep time is over because otherwise we might
713 * ask for more data from the clients then they expect. We
714 * need to guarantee that clients only have to keep around
715 * a single hw buffer length. */
718 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
721 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
725 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
726 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
727 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
728 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
738 pa_log_debug("Not filling up, because already too many iterations.");
744 n_bytes
-= u
->hwbuf_unused
;
748 snd_pcm_sframes_t frames
;
751 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
753 if (u
->memchunk
.length
<= 0)
754 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
756 pa_assert(u
->memchunk
.length
> 0);
758 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
760 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
761 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
763 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
764 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
765 pa_memblock_release(u
->memchunk
.memblock
);
767 if (PA_UNLIKELY(frames
< 0)) {
769 if (!after_avail
&& (int) frames
== -EAGAIN
)
772 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
778 if (!after_avail
&& frames
== 0)
781 pa_assert(frames
> 0);
784 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
785 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
787 if (u
->memchunk
.length
<= 0) {
788 pa_memblock_unref(u
->memchunk
.memblock
);
789 pa_memchunk_reset(&u
->memchunk
);
794 u
->write_count
+= frames
* u
->frame_size
;
795 u
->since_start
+= frames
* u
->frame_size
;
797 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
799 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
802 n_bytes
-= (size_t) frames
* u
->frame_size
;
807 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
808 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
810 if (*sleep_usec
> process_usec
)
811 *sleep_usec
-= process_usec
;
817 return work_done
? 1 : 0;
820 static void update_smoother(struct userdata
*u
) {
821 snd_pcm_sframes_t delay
= 0;
824 pa_usec_t now1
= 0, now2
;
825 snd_pcm_status_t
*status
;
827 snd_pcm_status_alloca(&status
);
830 pa_assert(u
->pcm_handle
);
832 /* Let's update the time smoother */
834 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
, FALSE
)) < 0)) {
835 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
839 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
840 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
842 snd_htimestamp_t htstamp
= { 0, 0 };
843 snd_pcm_status_get_htstamp(status
, &htstamp
);
844 now1
= pa_timespec_load(&htstamp
);
847 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
849 now1
= pa_rtclock_now();
851 /* check if the time since the last update is bigger than the interval */
852 if (u
->last_smoother_update
> 0)
853 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
856 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
858 if (PA_UNLIKELY(position
< 0))
861 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
863 pa_smoother_put(u
->smoother
, now1
, now2
);
865 u
->last_smoother_update
= now1
;
866 /* exponentially increase the update interval up to the MAX limit */
867 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
870 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
873 pa_usec_t now1
, now2
;
877 now1
= pa_rtclock_now();
878 now2
= pa_smoother_get(u
->smoother
, now1
);
880 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
882 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
884 if (u
->memchunk
.memblock
)
885 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
890 static int build_pollfd(struct userdata
*u
) {
892 pa_assert(u
->pcm_handle
);
894 if (u
->alsa_rtpoll_item
)
895 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
897 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
903 /* Called from IO context */
904 static int suspend(struct userdata
*u
) {
906 pa_assert(u
->pcm_handle
);
908 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
910 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
911 * take awfully long with our long buffer sizes today. */
912 snd_pcm_close(u
->pcm_handle
);
913 u
->pcm_handle
= NULL
;
915 if (u
->alsa_rtpoll_item
) {
916 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
917 u
->alsa_rtpoll_item
= NULL
;
920 /* We reset max_rewind/max_request here to make sure that while we
921 * are suspended the old max_request/max_rewind values set before
922 * the suspend can influence the per-stream buffer of newly
923 * created streams, without their requirements having any
924 * influence on them. */
925 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
926 pa_sink_set_max_request_within_thread(u
->sink
, 0);
928 pa_log_info("Device suspended...");
933 /* Called from IO context */
934 static int update_sw_params(struct userdata
*u
) {
935 snd_pcm_uframes_t avail_min
;
940 /* Use the full buffer if no one asked us for anything specific */
946 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
949 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
951 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
953 /* We need at least one sample in our buffer */
955 if (PA_UNLIKELY(b
< u
->frame_size
))
958 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
961 fix_min_sleep_wakeup(u
);
962 fix_tsched_watermark(u
);
965 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
967 /* We need at last one frame in the used part of the buffer */
968 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
971 pa_usec_t sleep_usec
, process_usec
;
973 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
974 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
977 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
979 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
, !u
->use_tsched
)) < 0) {
980 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
984 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
985 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
986 pa_sink_set_max_rewind_within_thread(u
->sink
, u
->hwbuf_size
);
988 pa_log_info("Disabling rewind_within_thread for device %s", u
->device_name
);
989 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
995 /* Called from IO Context on unsuspend or from main thread when creating sink */
996 static void reset_watermark(struct userdata
*u
, size_t tsched_watermark
, pa_sample_spec
*ss
,
999 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, ss
),
1000 &u
->sink
->sample_spec
);
1002 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->sink
->sample_spec
);
1003 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->sink
->sample_spec
);
1005 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1006 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1008 fix_min_sleep_wakeup(u
);
1009 fix_tsched_watermark(u
);
1012 pa_sink_set_latency_range_within_thread(u
->sink
,
1014 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
1016 pa_sink_set_latency_range(u
->sink
,
1018 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
1020 /* work-around assert in pa_sink_set_latency_within_thead,
1021 keep track of min_latency and reuse it when
1022 this routine is called from IO context */
1023 u
->min_latency_ref
= u
->sink
->thread_info
.min_latency
;
1026 pa_log_info("Time scheduling watermark is %0.2fms",
1027 (double) pa_bytes_to_usec(u
->tsched_watermark
, ss
) / PA_USEC_PER_MSEC
);
1030 /* Called from IO context */
1031 static int unsuspend(struct userdata
*u
) {
1035 snd_pcm_uframes_t period_size
, buffer_size
;
1036 char *device_name
= NULL
;
1039 pa_assert(!u
->pcm_handle
);
1041 pa_log_info("Trying resume...");
1043 if ((is_iec958(u
) || is_hdmi(u
)) && pa_sink_is_passthrough(u
->sink
)) {
1044 /* Need to open device in NONAUDIO mode */
1045 int len
= strlen(u
->device_name
) + 8;
1047 device_name
= pa_xmalloc(len
);
1048 pa_snprintf(device_name
, len
, "%s,AES0=6", u
->device_name
);
1051 if ((err
= snd_pcm_open(&u
->pcm_handle
, device_name
? device_name
: u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
1053 SND_PCM_NO_AUTO_RESAMPLE
|
1054 SND_PCM_NO_AUTO_CHANNELS
|
1055 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
1056 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
1060 ss
= u
->sink
->sample_spec
;
1061 period_size
= u
->fragment_size
/ u
->frame_size
;
1062 buffer_size
= u
->hwbuf_size
/ u
->frame_size
;
1066 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &period_size
, &buffer_size
, 0, &b
, &d
, TRUE
)) < 0) {
1067 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
1071 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
1072 pa_log_warn("Resume failed, couldn't get original access mode.");
1076 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
1077 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1081 if (period_size
*u
->frame_size
!= u
->fragment_size
||
1082 buffer_size
*u
->frame_size
!= u
->hwbuf_size
) {
1083 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1084 (unsigned long) u
->hwbuf_size
, (unsigned long) u
->fragment_size
,
1085 (unsigned long) (buffer_size
*u
->frame_size
), (unsigned long) (period_size
*u
->frame_size
));
1089 if (update_sw_params(u
) < 0)
1092 if (build_pollfd(u
) < 0)
1096 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
1097 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1098 u
->last_smoother_update
= 0;
1103 /* reset the watermark to the value defined when sink was created */
1105 reset_watermark(u
, u
->tsched_watermark_ref
, &u
->sink
->sample_spec
, TRUE
);
1107 pa_log_info("Resumed successfully...");
1109 pa_xfree(device_name
);
1113 if (u
->pcm_handle
) {
1114 snd_pcm_close(u
->pcm_handle
);
1115 u
->pcm_handle
= NULL
;
1118 pa_xfree(device_name
);
1123 /* Called from IO context */
1124 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1125 struct userdata
*u
= PA_SINK(o
)->userdata
;
1129 case PA_SINK_MESSAGE_GET_LATENCY
: {
1133 r
= sink_get_latency(u
);
1135 *((pa_usec_t
*) data
) = r
;
1140 case PA_SINK_MESSAGE_SET_STATE
:
1142 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1144 case PA_SINK_SUSPENDED
: {
1147 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1149 if ((r
= suspend(u
)) < 0)
1156 case PA_SINK_RUNNING
: {
1159 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1160 if (build_pollfd(u
) < 0)
1164 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1165 if ((r
= unsuspend(u
)) < 0)
1172 case PA_SINK_UNLINKED
:
1174 case PA_SINK_INVALID_STATE
:
1181 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1184 /* Called from main context */
1185 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1186 pa_sink_state_t old_state
;
1189 pa_sink_assert_ref(s
);
1190 pa_assert_se(u
= s
->userdata
);
1192 old_state
= pa_sink_get_state(u
->sink
);
1194 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1196 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1197 if (reserve_init(u
, u
->device_name
) < 0)
1198 return -PA_ERR_BUSY
;
1203 static int ctl_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1204 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1207 pa_assert(u
->mixer_handle
);
1209 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1212 if (!PA_SINK_IS_LINKED(u
->sink
->state
))
1215 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
)
1218 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1219 pa_sink_get_volume(u
->sink
, TRUE
);
1220 pa_sink_get_mute(u
->sink
, TRUE
);
1226 static int io_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1227 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1230 pa_assert(u
->mixer_handle
);
1232 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1235 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
)
1238 if (mask
& SND_CTL_EVENT_MASK_VALUE
)
1239 pa_sink_update_volume_and_mute(u
->sink
);
1244 static void sink_get_volume_cb(pa_sink
*s
) {
1245 struct userdata
*u
= s
->userdata
;
1247 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1250 pa_assert(u
->mixer_path
);
1251 pa_assert(u
->mixer_handle
);
1253 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1256 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1257 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1259 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1261 if (u
->mixer_path
->has_dB
) {
1262 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1264 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &r
));
1267 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1270 s
->real_volume
= u
->hardware_volume
= r
;
1272 /* Hmm, so the hardware volume changed, let's reset our software volume */
1273 if (u
->mixer_path
->has_dB
)
1274 pa_sink_set_soft_volume(s
, NULL
);
1277 static void sink_set_volume_cb(pa_sink
*s
) {
1278 struct userdata
*u
= s
->userdata
;
1280 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1281 pa_bool_t deferred_volume
= !!(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1284 pa_assert(u
->mixer_path
);
1285 pa_assert(u
->mixer_handle
);
1287 /* Shift up by the base volume */
1288 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1290 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
, deferred_volume
, !deferred_volume
) < 0)
1293 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1294 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1296 u
->hardware_volume
= r
;
1298 if (u
->mixer_path
->has_dB
) {
1299 pa_cvolume new_soft_volume
;
1300 pa_bool_t accurate_enough
;
1301 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1303 /* Match exactly what the user requested by software */
1304 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1306 /* If the adjustment to do in software is only minimal we
1307 * can skip it. That saves us CPU at the expense of a bit of
1310 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1311 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1313 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &s
->real_volume
));
1314 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &s
->real_volume
));
1315 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &u
->hardware_volume
));
1316 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &u
->hardware_volume
));
1317 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1318 pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &new_soft_volume
),
1319 pa_yes_no(accurate_enough
));
1320 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &new_soft_volume
));
1322 if (!accurate_enough
)
1323 s
->soft_volume
= new_soft_volume
;
1326 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1328 /* We can't match exactly what the user requested, hence let's
1329 * at least tell the user about it */
1335 static void sink_write_volume_cb(pa_sink
*s
) {
1336 struct userdata
*u
= s
->userdata
;
1337 pa_cvolume hw_vol
= s
->thread_info
.current_hw_volume
;
1340 pa_assert(u
->mixer_path
);
1341 pa_assert(u
->mixer_handle
);
1342 pa_assert(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1344 /* Shift up by the base volume */
1345 pa_sw_cvolume_divide_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1347 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &hw_vol
, TRUE
, TRUE
) < 0)
1348 pa_log_error("Writing HW volume failed");
1351 pa_bool_t accurate_enough
;
1353 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1354 pa_sw_cvolume_multiply_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1356 pa_sw_cvolume_divide(&tmp_vol
, &hw_vol
, &s
->thread_info
.current_hw_volume
);
1358 (pa_cvolume_min(&tmp_vol
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1359 (pa_cvolume_max(&tmp_vol
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1361 if (!accurate_enough
) {
1363 char db
[2][PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1364 char pcnt
[2][PA_CVOLUME_SNPRINT_MAX
];
1367 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1368 pa_cvolume_snprint(vol
.pcnt
[0], sizeof(vol
.pcnt
[0]), &s
->thread_info
.current_hw_volume
),
1369 pa_cvolume_snprint(vol
.pcnt
[1], sizeof(vol
.pcnt
[1]), &hw_vol
));
1370 pa_log_debug(" in dB: %s (request) != %s",
1371 pa_sw_cvolume_snprint_dB(vol
.db
[0], sizeof(vol
.db
[0]), &s
->thread_info
.current_hw_volume
),
1372 pa_sw_cvolume_snprint_dB(vol
.db
[1], sizeof(vol
.db
[1]), &hw_vol
));
1377 static void sink_get_mute_cb(pa_sink
*s
) {
1378 struct userdata
*u
= s
->userdata
;
1382 pa_assert(u
->mixer_path
);
1383 pa_assert(u
->mixer_handle
);
1385 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1391 static void sink_set_mute_cb(pa_sink
*s
) {
1392 struct userdata
*u
= s
->userdata
;
1395 pa_assert(u
->mixer_path
);
1396 pa_assert(u
->mixer_handle
);
1398 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1401 static void mixer_volume_init(struct userdata
*u
) {
1404 if (!u
->mixer_path
->has_volume
) {
1405 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1406 pa_sink_set_get_volume_callback(u
->sink
, NULL
);
1407 pa_sink_set_set_volume_callback(u
->sink
, NULL
);
1409 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1411 pa_sink_set_get_volume_callback(u
->sink
, sink_get_volume_cb
);
1412 pa_sink_set_set_volume_callback(u
->sink
, sink_set_volume_cb
);
1414 if (u
->mixer_path
->has_dB
&& u
->deferred_volume
) {
1415 pa_sink_set_write_volume_callback(u
->sink
, sink_write_volume_cb
);
1416 pa_log_info("Successfully enabled synchronous volume.");
1418 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1420 if (u
->mixer_path
->has_dB
) {
1421 pa_sink_enable_decibel_volume(u
->sink
, TRUE
);
1422 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1424 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1425 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1427 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1429 pa_sink_enable_decibel_volume(u
->sink
, FALSE
);
1430 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1432 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1433 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1436 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1439 if (!u
->mixer_path
->has_mute
) {
1440 pa_sink_set_get_mute_callback(u
->sink
, NULL
);
1441 pa_sink_set_set_mute_callback(u
->sink
, NULL
);
1442 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1444 pa_sink_set_get_mute_callback(u
->sink
, sink_get_mute_cb
);
1445 pa_sink_set_set_mute_callback(u
->sink
, sink_set_mute_cb
);
1446 pa_log_info("Using hardware mute control.");
1450 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1451 struct userdata
*u
= s
->userdata
;
1452 pa_alsa_port_data
*data
;
1456 pa_assert(u
->mixer_handle
);
1458 data
= PA_DEVICE_PORT_DATA(p
);
1460 pa_assert_se(u
->mixer_path
= data
->path
);
1461 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1463 mixer_volume_init(u
);
1466 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1476 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1477 struct userdata
*u
= s
->userdata
;
1480 pa_assert(u
->use_tsched
); /* only when timer scheduling is used
1481 * we can dynamically adjust the
1487 before
= u
->hwbuf_unused
;
1488 update_sw_params(u
);
1490 /* Let's check whether we now use only a smaller part of the
1491 buffer then before. If so, we need to make sure that subsequent
1492 rewinds are relative to the new maximum fill level and not to the
1493 current fill level. Thus, let's do a full rewind once, to clear
1496 if (u
->hwbuf_unused
> before
) {
1497 pa_log_debug("Requesting rewind due to latency change.");
1498 pa_sink_request_rewind(s
, (size_t) -1);
1502 static pa_idxset
* sink_get_formats(pa_sink
*s
) {
1503 struct userdata
*u
= s
->userdata
;
1504 pa_idxset
*ret
= pa_idxset_new(NULL
, NULL
);
1510 PA_IDXSET_FOREACH(f
, u
->formats
, idx
) {
1511 pa_idxset_put(ret
, pa_format_info_copy(f
), NULL
);
1517 static pa_bool_t
sink_set_formats(pa_sink
*s
, pa_idxset
*formats
) {
1518 struct userdata
*u
= s
->userdata
;
1519 pa_format_info
*f
, *g
;
1524 /* FIXME: also validate sample rates against what the device supports */
1525 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1526 if (is_iec958(u
) && f
->encoding
== PA_ENCODING_EAC3_IEC61937
)
1527 /* EAC3 cannot be sent over over S/PDIF */
1531 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
1532 u
->formats
= pa_idxset_new(NULL
, NULL
);
1534 /* Note: the logic below won't apply if we're using software encoding.
1535 * This is fine for now since we don't support that via the passthrough
1536 * framework, but this must be changed if we do. */
1538 /* Count how many sample rates we support */
1539 for (idx
= 0, n
= 0; u
->rates
[idx
]; idx
++)
1542 /* First insert non-PCM formats since we prefer those. */
1543 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1544 if (!pa_format_info_is_pcm(f
)) {
1545 g
= pa_format_info_copy(f
);
1546 pa_format_info_set_prop_int_array(g
, PA_PROP_FORMAT_RATE
, (int *) u
->rates
, n
);
1547 pa_idxset_put(u
->formats
, g
, NULL
);
1551 /* Now add any PCM formats */
1552 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1553 if (pa_format_info_is_pcm(f
)) {
1554 /* We don't set rates here since we'll just tack on a resampler for
1555 * unsupported rates */
1556 pa_idxset_put(u
->formats
, pa_format_info_copy(f
), NULL
);
1563 static pa_bool_t
sink_update_rate_cb(pa_sink
*s
, uint32_t rate
)
1565 struct userdata
*u
= s
->userdata
;
1567 pa_bool_t supported
= FALSE
;
1571 for (i
= 0; u
->rates
[i
]; i
++) {
1572 if (u
->rates
[i
] == rate
) {
1579 pa_log_info("Sink does not support sample rate of %d Hz", rate
);
1583 if (!PA_SINK_IS_OPENED(s
->state
)) {
1584 pa_log_info("Updating rate for device %s, new rate is %d",u
->device_name
, rate
);
1585 u
->sink
->sample_spec
.rate
= rate
;
1592 static int process_rewind(struct userdata
*u
) {
1593 snd_pcm_sframes_t unused
;
1594 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1597 /* Figure out how much we shall rewind and reset the counter */
1598 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1600 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1602 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1603 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1607 unused_nbytes
= (size_t) unused
* u
->frame_size
;
1609 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1610 unused_nbytes
+= u
->rewind_safeguard
;
1612 if (u
->hwbuf_size
> unused_nbytes
)
1613 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1617 if (rewind_nbytes
> limit_nbytes
)
1618 rewind_nbytes
= limit_nbytes
;
1620 if (rewind_nbytes
> 0) {
1621 snd_pcm_sframes_t in_frames
, out_frames
;
1623 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1625 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1626 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1627 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1628 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1629 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1634 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1636 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1638 if (rewind_nbytes
<= 0)
1639 pa_log_info("Tried rewind, but was apparently not possible.");
1641 u
->write_count
-= rewind_nbytes
;
1642 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1643 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1645 u
->after_rewind
= TRUE
;
1649 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1651 pa_sink_process_rewind(u
->sink
, 0);
1655 static void thread_func(void *userdata
) {
1656 struct userdata
*u
= userdata
;
1657 unsigned short revents
= 0;
1661 pa_log_debug("Thread starting up");
1663 if (u
->core
->realtime_scheduling
)
1664 pa_make_realtime(u
->core
->realtime_priority
);
1666 pa_thread_mq_install(&u
->thread_mq
);
1670 pa_usec_t rtpoll_sleep
= 0;
1673 pa_log_debug("Loop");
1676 /* Render some data and write it to the dsp */
1677 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1679 pa_usec_t sleep_usec
= 0;
1680 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
1682 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1683 if (process_rewind(u
) < 0)
1687 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1689 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1694 /* pa_log_debug("work_done = %i", work_done); */
1699 pa_log_info("Starting playback.");
1700 snd_pcm_start(u
->pcm_handle
);
1702 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1710 if (u
->use_tsched
) {
1713 if (u
->since_start
<= u
->hwbuf_size
) {
1715 /* USB devices on ALSA seem to hit a buffer
1716 * underrun during the first iterations much
1717 * quicker then we calculate here, probably due to
1718 * the transport latency. To accommodate for that
1719 * we artificially decrease the sleep time until
1720 * we have filled the buffer at least once
1723 if (pa_log_ratelimit(PA_LOG_DEBUG
))
1724 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1728 /* OK, the playback buffer is now full, let's
1729 * calculate when to wake up next */
1731 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec
/ PA_USEC_PER_MSEC
);
1734 /* Convert from the sound card time domain to the
1735 * system time domain */
1736 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1739 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec
/ PA_USEC_PER_MSEC
);
1742 /* We don't trust the conversion, so we wake up whatever comes first */
1743 rtpoll_sleep
= PA_MIN(sleep_usec
, cusec
);
1746 u
->after_rewind
= FALSE
;
1750 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1751 pa_usec_t volume_sleep
;
1752 pa_sink_volume_change_apply(u
->sink
, &volume_sleep
);
1753 if (volume_sleep
> 0) {
1754 if (rtpoll_sleep
> 0)
1755 rtpoll_sleep
= PA_MIN(volume_sleep
, rtpoll_sleep
);
1757 rtpoll_sleep
= volume_sleep
;
1761 if (rtpoll_sleep
> 0)
1762 pa_rtpoll_set_timer_relative(u
->rtpoll
, rtpoll_sleep
);
1764 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1766 /* Hmm, nothing to do. Let's sleep */
1767 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1770 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
)
1771 pa_sink_volume_change_apply(u
->sink
, NULL
);
1776 /* Tell ALSA about this and process its response */
1777 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1778 struct pollfd
*pollfd
;
1782 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1784 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1785 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
1789 if (revents
& ~POLLOUT
) {
1790 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1796 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit(PA_LOG_DEBUG
))
1797 pa_log_debug("Wakeup from ALSA!");
1804 /* If this was no regular exit from the loop we have to continue
1805 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1806 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1807 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1810 pa_log_debug("Thread shutting down");
1813 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1819 pa_assert(device_name
);
1821 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1822 pa_sink_new_data_set_name(data
, n
);
1823 data
->namereg_fail
= TRUE
;
1827 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1828 data
->namereg_fail
= TRUE
;
1830 n
= device_id
? device_id
: device_name
;
1831 data
->namereg_fail
= FALSE
;
1835 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1837 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1839 pa_sink_new_data_set_name(data
, t
);
1843 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
1845 if (!mapping
&& !element
)
1848 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1849 pa_log_info("Failed to find a working mixer device.");
1855 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1858 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1861 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1862 pa_alsa_path_dump(u
->mixer_path
);
1865 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
, u
->paths_dir
)))
1868 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1875 if (u
->mixer_path_set
) {
1876 pa_alsa_path_set_free(u
->mixer_path_set
);
1877 u
->mixer_path_set
= NULL
;
1878 } else if (u
->mixer_path
) {
1879 pa_alsa_path_free(u
->mixer_path
);
1880 u
->mixer_path
= NULL
;
1883 if (u
->mixer_handle
) {
1884 snd_mixer_close(u
->mixer_handle
);
1885 u
->mixer_handle
= NULL
;
1890 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1891 pa_bool_t need_mixer_callback
= FALSE
;
1895 if (!u
->mixer_handle
)
1898 if (u
->sink
->active_port
) {
1899 pa_alsa_port_data
*data
;
1901 /* We have a list of supported paths, so let's activate the
1902 * one that has been chosen as active */
1904 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1905 u
->mixer_path
= data
->path
;
1907 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1910 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1914 if (!u
->mixer_path
&& u
->mixer_path_set
)
1915 u
->mixer_path
= u
->mixer_path_set
->paths
;
1917 if (u
->mixer_path
) {
1918 /* Hmm, we have only a single path, then let's activate it */
1920 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1922 if (u
->mixer_path
->settings
)
1923 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1928 mixer_volume_init(u
);
1930 /* Will we need to register callbacks? */
1931 if (u
->mixer_path_set
&& u
->mixer_path_set
->paths
) {
1934 PA_LLIST_FOREACH(p
, u
->mixer_path_set
->paths
) {
1935 if (p
->has_volume
|| p
->has_mute
)
1936 need_mixer_callback
= TRUE
;
1939 else if (u
->mixer_path
)
1940 need_mixer_callback
= u
->mixer_path
->has_volume
|| u
->mixer_path
->has_mute
;
1942 if (need_mixer_callback
) {
1943 int (*mixer_callback
)(snd_mixer_elem_t
*, unsigned int);
1944 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1945 u
->mixer_pd
= pa_alsa_mixer_pdata_new();
1946 mixer_callback
= io_mixer_callback
;
1948 if (pa_alsa_set_mixer_rtpoll(u
->mixer_pd
, u
->mixer_handle
, u
->rtpoll
) < 0) {
1949 pa_log("Failed to initialize file descriptor monitoring");
1953 u
->mixer_fdl
= pa_alsa_fdlist_new();
1954 mixer_callback
= ctl_mixer_callback
;
1956 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1957 pa_log("Failed to initialize file descriptor monitoring");
1962 if (u
->mixer_path_set
)
1963 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1965 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1971 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1973 struct userdata
*u
= NULL
;
1974 const char *dev_id
= NULL
;
1976 uint32_t alternate_sample_rate
;
1978 uint32_t nfrags
, frag_size
, buffer_size
, tsched_size
, tsched_watermark
, rewind_safeguard
;
1979 snd_pcm_uframes_t period_frames
, buffer_frames
, tsched_frames
;
1981 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
, namereg_fail
= FALSE
, deferred_volume
= FALSE
, set_formats
= FALSE
, fixed_latency_range
= FALSE
;
1982 pa_sink_new_data data
;
1983 pa_alsa_profile_set
*profile_set
= NULL
;
1988 ss
= m
->core
->default_sample_spec
;
1989 map
= m
->core
->default_channel_map
;
1990 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1991 pa_log("Failed to parse sample specification and channel map");
1995 alternate_sample_rate
= m
->core
->alternate_sample_rate
;
1996 if (pa_modargs_get_alternate_sample_rate(ma
, &alternate_sample_rate
) < 0) {
1997 pa_log("Failed to parse alternate sample rate");
2001 frame_size
= pa_frame_size(&ss
);
2003 nfrags
= m
->core
->default_n_fragments
;
2004 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
2006 frag_size
= (uint32_t) frame_size
;
2007 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
2008 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
2010 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
2011 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
2012 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
2013 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
2014 pa_log("Failed to parse buffer metrics");
2018 buffer_size
= nfrags
* frag_size
;
2020 period_frames
= frag_size
/frame_size
;
2021 buffer_frames
= buffer_size
/frame_size
;
2022 tsched_frames
= tsched_size
/frame_size
;
2024 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
2025 pa_log("Failed to parse mmap argument.");
2029 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
2030 pa_log("Failed to parse tsched argument.");
2034 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
2035 pa_log("Failed to parse ignore_dB argument.");
2039 rewind_safeguard
= PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES
, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC
, &ss
));
2040 if (pa_modargs_get_value_u32(ma
, "rewind_safeguard", &rewind_safeguard
) < 0) {
2041 pa_log("Failed to parse rewind_safeguard argument");
2045 deferred_volume
= m
->core
->deferred_volume
;
2046 if (pa_modargs_get_value_boolean(ma
, "deferred_volume", &deferred_volume
) < 0) {
2047 pa_log("Failed to parse deferred_volume argument.");
2051 if (pa_modargs_get_value_boolean(ma
, "fixed_latency_range", &fixed_latency_range
) < 0) {
2052 pa_log("Failed to parse fixed_latency_range argument.");
2056 use_tsched
= pa_alsa_may_tsched(use_tsched
);
2058 u
= pa_xnew0(struct userdata
, 1);
2061 u
->use_mmap
= use_mmap
;
2062 u
->use_tsched
= use_tsched
;
2063 u
->deferred_volume
= deferred_volume
;
2064 u
->fixed_latency_range
= fixed_latency_range
;
2066 u
->rewind_safeguard
= rewind_safeguard
;
2067 u
->rtpoll
= pa_rtpoll_new();
2068 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
2070 u
->smoother
= pa_smoother_new(
2071 SMOOTHER_ADJUST_USEC
,
2072 SMOOTHER_WINDOW_USEC
,
2078 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
2080 dev_id
= pa_modargs_get_value(
2082 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
2084 u
->paths_dir
= pa_xstrdup(pa_modargs_get_value(ma
, "paths_dir", NULL
));
2086 if (reserve_init(u
, dev_id
) < 0)
2089 if (reserve_monitor_init(u
, dev_id
) < 0)
2097 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2098 pa_log("device_id= not set");
2102 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
2106 SND_PCM_STREAM_PLAYBACK
,
2107 &period_frames
, &buffer_frames
, tsched_frames
,
2111 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2113 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
2116 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
2120 SND_PCM_STREAM_PLAYBACK
,
2121 &period_frames
, &buffer_frames
, tsched_frames
,
2122 &b
, &d
, profile_set
, &mapping
)))
2127 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
2128 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
2131 SND_PCM_STREAM_PLAYBACK
,
2132 &period_frames
, &buffer_frames
, tsched_frames
,
2137 pa_assert(u
->device_name
);
2138 pa_log_info("Successfully opened device %s.", u
->device_name
);
2140 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
2141 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
2146 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
2148 if (use_mmap
&& !b
) {
2149 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2150 u
->use_mmap
= use_mmap
= FALSE
;
2153 if (use_tsched
&& (!b
|| !d
)) {
2154 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2155 u
->use_tsched
= use_tsched
= FALSE
;
2159 pa_log_info("Successfully enabled mmap() mode.");
2161 if (u
->use_tsched
) {
2162 pa_log_info("Successfully enabled timer-based scheduling mode.");
2164 if (u
->fixed_latency_range
)
2165 pa_log_info("Disabling latency range changes on underrun");
2168 if (is_iec958(u
) || is_hdmi(u
))
2171 u
->rates
= pa_alsa_get_supported_rates(u
->pcm_handle
);
2173 pa_log_error("Failed to find any supported sample rates.");
2177 /* ALSA might tweak the sample spec, so recalculate the frame size */
2178 frame_size
= pa_frame_size(&ss
);
2180 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
2182 pa_sink_new_data_init(&data
);
2183 data
.driver
= driver
;
2186 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
2188 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2189 * variable instead of using &data.namereg_fail directly, because
2190 * data.namereg_fail is a bitfield and taking the address of a bitfield
2191 * variable is impossible. */
2192 namereg_fail
= data
.namereg_fail
;
2193 if (pa_modargs_get_value_boolean(ma
, "namereg_fail", &namereg_fail
) < 0) {
2194 pa_log("Failed to parse namereg_fail argument.");
2195 pa_sink_new_data_done(&data
);
2198 data
.namereg_fail
= namereg_fail
;
2200 pa_sink_new_data_set_sample_spec(&data
, &ss
);
2201 pa_sink_new_data_set_channel_map(&data
, &map
);
2202 pa_sink_new_data_set_alternate_sample_rate(&data
, alternate_sample_rate
);
2204 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
2205 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
2206 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (buffer_frames
* frame_size
));
2207 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
2208 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
2211 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
2212 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
2215 pa_alsa_init_description(data
.proplist
);
2217 if (u
->control_device
)
2218 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
2220 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
2221 pa_log("Invalid properties");
2222 pa_sink_new_data_done(&data
);
2226 if (u
->mixer_path_set
)
2227 pa_alsa_add_ports(u
->core
, &data
.ports
, u
->mixer_path_set
);
2229 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
| PA_SINK_LATENCY
| (u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0) |
2230 (set_formats
? PA_SINK_SET_FORMATS
: 0));
2231 pa_sink_new_data_done(&data
);
2234 pa_log("Failed to create sink object");
2238 if (pa_modargs_get_value_u32(ma
, "deferred_volume_safety_margin",
2239 &u
->sink
->thread_info
.volume_change_safety_margin
) < 0) {
2240 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2244 if (pa_modargs_get_value_s32(ma
, "deferred_volume_extra_delay",
2245 &u
->sink
->thread_info
.volume_change_extra_delay
) < 0) {
2246 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2250 u
->sink
->parent
.process_msg
= sink_process_msg
;
2252 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
2253 u
->sink
->set_state
= sink_set_state_cb
;
2254 u
->sink
->set_port
= sink_set_port_cb
;
2255 if (u
->sink
->alternate_sample_rate
)
2256 u
->sink
->update_rate
= sink_update_rate_cb
;
2257 u
->sink
->userdata
= u
;
2259 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
2260 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
2262 u
->frame_size
= frame_size
;
2263 u
->fragment_size
= frag_size
= (size_t) (period_frames
* frame_size
);
2264 u
->hwbuf_size
= buffer_size
= (size_t) (buffer_frames
* frame_size
);
2265 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
2267 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2268 (double) u
->hwbuf_size
/ (double) u
->fragment_size
,
2269 (long unsigned) u
->fragment_size
,
2270 (double) pa_bytes_to_usec(u
->fragment_size
, &ss
) / PA_USEC_PER_MSEC
,
2271 (long unsigned) u
->hwbuf_size
,
2272 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
2274 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
2275 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
2276 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
2278 pa_log_info("Disabling rewind for device %s", u
->device_name
);
2279 pa_sink_set_max_rewind(u
->sink
, 0);
2282 if (u
->use_tsched
) {
2283 u
->tsched_watermark_ref
= tsched_watermark
;
2284 reset_watermark(u
, u
->tsched_watermark_ref
, &ss
, FALSE
);
2286 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
2290 if (update_sw_params(u
) < 0)
2293 if (setup_mixer(u
, ignore_dB
) < 0)
2296 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
2298 if (!(u
->thread
= pa_thread_new("alsa-sink", thread_func
, u
))) {
2299 pa_log("Failed to create thread.");
2303 /* Get initial mixer settings */
2304 if (data
.volume_is_set
) {
2305 if (u
->sink
->set_volume
)
2306 u
->sink
->set_volume(u
->sink
);
2308 if (u
->sink
->get_volume
)
2309 u
->sink
->get_volume(u
->sink
);
2312 if (data
.muted_is_set
) {
2313 if (u
->sink
->set_mute
)
2314 u
->sink
->set_mute(u
->sink
);
2316 if (u
->sink
->get_mute
)
2317 u
->sink
->get_mute(u
->sink
);
2320 if ((data
.volume_is_set
|| data
.muted_is_set
) && u
->sink
->write_volume
)
2321 u
->sink
->write_volume(u
->sink
);
2324 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2325 pa_format_info
*format
;
2327 /* To start with, we only support PCM formats. Other formats may be added
2328 * with pa_sink_set_formats().*/
2329 format
= pa_format_info_new();
2330 format
->encoding
= PA_ENCODING_PCM
;
2331 u
->formats
= pa_idxset_new(NULL
, NULL
);
2332 pa_idxset_put(u
->formats
, format
, NULL
);
2334 u
->sink
->get_formats
= sink_get_formats
;
2335 u
->sink
->set_formats
= sink_set_formats
;
2338 pa_sink_put(u
->sink
);
2341 pa_alsa_profile_set_free(profile_set
);
2351 pa_alsa_profile_set_free(profile_set
);
2356 static void userdata_free(struct userdata
*u
) {
2360 pa_sink_unlink(u
->sink
);
2363 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
2364 pa_thread_free(u
->thread
);
2367 pa_thread_mq_done(&u
->thread_mq
);
2370 pa_sink_unref(u
->sink
);
2372 if (u
->memchunk
.memblock
)
2373 pa_memblock_unref(u
->memchunk
.memblock
);
2376 pa_alsa_mixer_pdata_free(u
->mixer_pd
);
2378 if (u
->alsa_rtpoll_item
)
2379 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
2382 pa_rtpoll_free(u
->rtpoll
);
2384 if (u
->pcm_handle
) {
2385 snd_pcm_drop(u
->pcm_handle
);
2386 snd_pcm_close(u
->pcm_handle
);
2390 pa_alsa_fdlist_free(u
->mixer_fdl
);
2392 if (u
->mixer_path_set
)
2393 pa_alsa_path_set_free(u
->mixer_path_set
);
2394 else if (u
->mixer_path
)
2395 pa_alsa_path_free(u
->mixer_path
);
2397 if (u
->mixer_handle
)
2398 snd_mixer_close(u
->mixer_handle
);
2401 pa_smoother_free(u
->smoother
);
2404 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
2412 pa_xfree(u
->device_name
);
2413 pa_xfree(u
->control_device
);
2414 pa_xfree(u
->paths_dir
);
2418 void pa_alsa_sink_free(pa_sink
*s
) {
2421 pa_sink_assert_ref(s
);
2422 pa_assert_se(u
= s
->userdata
);