2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39 #include <pulse/internal.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/module.h>
44 #include <pulsecore/memchunk.h>
45 #include <pulsecore/sink.h>
46 #include <pulsecore/modargs.h>
47 #include <pulsecore/core-rtclock.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/sample-util.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/macro.h>
52 #include <pulsecore/thread.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level is ever below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
98 pa_thread_mq thread_mq
;
101 snd_pcm_t
*pcm_handle
;
103 pa_alsa_fdlist
*mixer_fdl
;
104 pa_alsa_mixer_pdata
*mixer_pd
;
105 snd_mixer_t
*mixer_handle
;
106 pa_alsa_path_set
*mixer_path_set
;
107 pa_alsa_path
*mixer_path
;
109 pa_cvolume hardware_volume
;
123 watermark_inc_threshold
,
124 watermark_dec_threshold
,
127 pa_usec_t watermark_dec_not_before
;
129 pa_memchunk memchunk
;
131 char *device_name
; /* name of the PCM device */
132 char *control_device
; /* name of the control device */
134 pa_bool_t use_mmap
:1, use_tsched
:1, deferred_volume
:1;
136 pa_bool_t first
, after_rewind
;
138 pa_rtpoll_item
*alsa_rtpoll_item
;
140 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
142 pa_smoother
*smoother
;
143 uint64_t write_count
;
144 uint64_t since_start
;
145 pa_usec_t smoother_interval
;
146 pa_usec_t last_smoother_update
;
150 pa_reserve_wrapper
*reserve
;
151 pa_hook_slot
*reserve_slot
;
152 pa_reserve_monitor_wrapper
*monitor
;
153 pa_hook_slot
*monitor_slot
;
156 static void userdata_free(struct userdata
*u
);
158 /* FIXME: Is there a better way to do this than device names? */
159 static pa_bool_t
is_iec958(struct userdata
*u
) {
160 return (strncmp("iec958", u
->device_name
, 6) == 0);
163 static pa_bool_t
is_hdmi(struct userdata
*u
) {
164 return (strncmp("hdmi", u
->device_name
, 4) == 0);
/* Device-reservation hook: fires when another application forcibly claims
 * the audio device; we suspend our sink, and cancel the hand-over if the
 * suspend fails.
 * NOTE(review): this extraction is missing interior lines (the embedded
 * numbering jumps), e.g. the success-path return is not visible here --
 * verify against upstream before relying on this text. */
167 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
171 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
172 return PA_HOOK_CANCEL
;
/* Tear down the device-reservation state: free the hook slot (if any) and
 * drop our reference on the reservation wrapper.
 * NOTE(review): extraction gaps -- interior lines (e.g. a guard around the
 * unref and NULL-ing of u->reserve) are not visible; verify upstream. */
177 static void reserve_done(struct userdata
*u
) {
180 if (u
->reserve_slot
) {
181 pa_hook_slot_free(u
->reserve_slot
);
182 u
->reserve_slot
= NULL
;
186 pa_reserve_wrapper_unref(u
->reserve
);
/* Push the sink's human-readable description (PA_PROP_DEVICE_DESCRIPTION)
 * to the reservation wrapper so other applications can show who holds the
 * device. No-op unless both the sink and a reservation exist.
 * NOTE(review): extraction gaps -- interior lines missing; verify upstream. */
191 static void reserve_update(struct userdata
*u
) {
192 const char *description
;
195 if (!u
->sink
|| !u
->reserve
)
198 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
199 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
/* Acquire the device-reservation lock for device 'dname' and hook up
 * reserve_cb so we get told when somebody else wants the device. Skipped
 * entirely in system mode. Returns an int status.
 * NOTE(review): extraction gaps -- declarations (rname) and the early
 * returns' bodies are not visible; verify against upstream. */
202 static int reserve_init(struct userdata
*u
, const char *dname
) {
211 if (pa_in_system_mode())
214 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
217 /* We are resuming, try to lock the device */
218 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
226 pa_assert(!u
->reserve_slot
);
227 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
/* Reservation-monitor hook: 'busy' tells us whether another application
 * currently holds the device. Suspend the sink while the device is busy
 * (unless we hold the reservation ourselves), resume otherwise.
 * NOTE(review): extraction gaps -- the declaration of 'b' and the return
 * are not visible; verify against upstream. */
232 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
238 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
240 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
/* Tear down the reservation-monitor state: free the hook slot (if any) and
 * drop our reference on the monitor wrapper.
 * NOTE(review): extraction gaps -- interior lines missing (e.g. a guard
 * around the unref); verify against upstream. */
244 static void monitor_done(struct userdata
*u
) {
247 if (u
->monitor_slot
) {
248 pa_hook_slot_free(u
->monitor_slot
);
249 u
->monitor_slot
= NULL
;
253 pa_reserve_monitor_wrapper_unref(u
->monitor
);
/* Set up a monitor on the device reservation for 'dname' and hook up
 * monitor_cb so we auto-suspend/resume as other applications take and
 * release the device. Skipped in system mode. Returns an int status.
 * NOTE(review): extraction gaps -- declarations and early-return bodies
 * are not visible; verify against upstream. */
258 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
264 if (pa_in_system_mode())
267 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
270 /* We are resuming, try to lock the device */
271 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
277 pa_assert(!u
->monitor_slot
);
278 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
/* Recompute u->min_sleep and u->min_wakeup (in bytes) for timer-based
 * scheduling: convert the TSCHED_MIN_* constants to bytes for the sink's
 * sample spec, then clamp each between one frame and half the usable
 * buffer (frame-aligned). Only valid in tsched mode.
 * NOTE(review): extraction gaps -- interior lines missing; verify upstream. */
283 static void fix_min_sleep_wakeup(struct userdata
*u
) {
284 size_t max_use
, max_use_2
;
287 pa_assert(u
->use_tsched
);
289 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
290 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
292 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
293 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
295 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
296 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
/* Clamp u->tsched_watermark into its valid window: no higher than the
 * usable buffer minus min_sleep, no lower than min_wakeup. Only valid in
 * tsched mode.
 * NOTE(review): extraction gaps -- the declaration of max_use and asserts
 * are not visible here; verify against upstream. */
299 static void fix_tsched_watermark(struct userdata
*u
) {
302 pa_assert(u
->use_tsched
);
304 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
306 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
307 u
->tsched_watermark
= max_use
- u
->min_sleep
;
309 if (u
->tsched_watermark
< u
->min_wakeup
)
310 u
->tsched_watermark
= u
->min_wakeup
;
/* React to an underrun: first try to grow the wakeup watermark (doubling,
 * capped at +watermark_inc_step, then re-clamped); if the watermark could
 * not grow, raise the sink's minimum latency instead, up to max_latency.
 * Only valid in tsched mode.
 * NOTE(review): extraction gaps -- asserts, braces and returns between the
 * visible statements are missing; verify against upstream. */
313 static void increase_watermark(struct userdata
*u
) {
314 size_t old_watermark
;
315 pa_usec_t old_min_latency
, new_min_latency
;
318 pa_assert(u
->use_tsched
);
320 /* First, just try to increase the watermark */
321 old_watermark
= u
->tsched_watermark
;
322 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
323 fix_tsched_watermark(u
);
325 if (old_watermark
!= u
->tsched_watermark
) {
326 pa_log_info("Increasing wakeup watermark to %0.2f ms",
327 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
331 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
332 old_min_latency
= u
->sink
->thread_info
.min_latency
;
333 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
334 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
336 if (old_min_latency
!= new_min_latency
) {
337 pa_log_info("Increasing minimal latency to %0.2f ms",
338 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
340 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
343 /* When we reach this we're officially fucked! */
/* Opportunistically shrink the wakeup watermark when playback has been
 * stable: halve it, but subtract at most watermark_dec_step, then re-clamp.
 * A hold-off timestamp (watermark_dec_not_before) rate-limits decreases to
 * once per TSCHED_WATERMARK_VERIFY_AFTER_USEC. Only valid in tsched mode.
 * NOTE(review): extraction gaps -- the declaration of 'now', asserts and
 * the bodies of the early returns are not visible; verify upstream. */
346 static void decrease_watermark(struct userdata
*u
) {
347 size_t old_watermark
;
351 pa_assert(u
->use_tsched
);
353 now
= pa_rtclock_now();
355 if (u
->watermark_dec_not_before
<= 0)
358 if (u
->watermark_dec_not_before
> now
)
361 old_watermark
= u
->tsched_watermark
;
363 if (u
->tsched_watermark
< u
->watermark_dec_step
)
364 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
366 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
368 fix_tsched_watermark(u
);
370 if (old_watermark
!= u
->tsched_watermark
)
371 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
372 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
374 /* We don't change the latency range */
377 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
/* Compute how long the IO thread may sleep (*sleep_usec) and how much time
 * to budget for processing (*process_usec), from the requested sink latency
 * (falling back to the full hw buffer) and the watermark. Only valid in
 * tsched mode.
 * NOTE(review): extraction gaps -- the declarations of usec/wm and the
 * watermark-vs-latency clamping lines are not visible; verify upstream. */
380 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
383 pa_assert(sleep_usec
);
384 pa_assert(process_usec
);
387 pa_assert(u
->use_tsched
);
389 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
391 if (usec
== (pa_usec_t
) -1)
392 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
394 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
399 *sleep_usec
= usec
- wm
;
403 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
404 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
405 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
406 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
/* Attempt to recover the PCM device from error 'err' (reported by ALSA
 * call 'call') via snd_pcm_recover(); logs underrun/suspend conditions.
 * -EAGAIN is asserted away because callers handle it themselves.
 * NOTE(review): extraction gaps -- the -EPIPE check and the return
 * statements are not visible; verify against upstream. */
410 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
415 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
417 pa_assert(err
!= -EAGAIN
);
420 pa_log_debug("%s: Buffer underrun!", call
);
422 if (err
== -ESTRPIPE
)
423 pa_log_debug("%s: System suspended!", call
);
425 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
426 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
/* Given 'n_bytes' reported free in the hw buffer, compute how many bytes
 * are still queued to play; detect underruns (n_bytes >= hwbuf_size) and
 * adapt the watermark: increase it on underrun or when below the inc
 * threshold, decrease it (only on timeout wakeups) when safely above the
 * dec threshold.
 * NOTE(review): extraction gaps -- the left_to_play declaration, the
 * underrun branch and the return are not visible; verify upstream. */
435 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
437 pa_bool_t underrun
= FALSE
;
439 /* We use <= instead of < for this check here because an underrun
440 * only happens after the last sample was processed, not already when
441 * it is removed from the buffer. This is particularly important
442 * when block transfer is used. */
444 if (n_bytes
<= u
->hwbuf_size
)
445 left_to_play
= u
->hwbuf_size
- n_bytes
;
448 /* We got a dropout. What a mess! */
456 if (!u
->first
&& !u
->after_rewind
)
457 if (pa_log_ratelimit(PA_LOG_INFO
))
458 pa_log_info("Underrun!");
462 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
463 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
464 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
465 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
469 pa_bool_t reset_not_before
= TRUE
;
471 if (!u
->first
&& !u
->after_rewind
) {
472 if (underrun
|| left_to_play
< u
->watermark_inc_threshold
)
473 increase_watermark(u
);
474 else if (left_to_play
> u
->watermark_dec_threshold
) {
475 reset_not_before
= FALSE
;
477 /* We decrease the watermark only if we have actually
478 * been woken up by a timeout. If something else woke
479 * us up it's too easy to fulfill the deadlines... */
482 decrease_watermark(u
);
486 if (reset_not_before
)
487 u
->watermark_dec_not_before
= 0;
/* Fill the hw buffer via the ALSA mmap interface: query available space,
 * update the watermark via check_left_to_play(), then render sink audio
 * directly into the mapped DMA area (mmap_begin / render_into_full /
 * mmap_commit), updating write_count/since_start and finally computing the
 * next sleep time from what is left to play. Returns nonzero if work was
 * done.
 * NOTE(review): extraction gaps -- loop headers ('for (;;)'), several
 * declarations (n, r, err, n_bytes, left_to_play, p, chunk), braces and
 * returns are not visible in this text; verify against upstream. */
493 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
494 pa_bool_t work_done
= FALSE
;
495 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
500 pa_sink_assert_ref(u
->sink
);
503 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
509 pa_bool_t after_avail
= TRUE
;
511 /* First we determine how many samples are missing to fill the
512 * buffer up to 100% */
514 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
516 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
522 n_bytes
= (size_t) n
* u
->frame_size
;
525 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
528 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
533 /* We won't fill up the playback buffer before at least
534 * half the sleep time is over because otherwise we might
535 * ask for more data from the clients than they expect. We
536 * need to guarantee that clients only have to keep around
537 * a single hw buffer length. */
540 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
542 pa_log_debug("Not filling up, because too early.");
547 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
551 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
552 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
553 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
554 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
560 pa_log_debug("Not filling up, because not necessary.");
568 pa_log_debug("Not filling up, because already too many iterations.");
574 n_bytes
-= u
->hwbuf_unused
;
578 pa_log_debug("Filling up");
585 const snd_pcm_channel_area_t
*areas
;
586 snd_pcm_uframes_t offset
, frames
;
587 snd_pcm_sframes_t sframes
;
589 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
590 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
592 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
594 if (!after_avail
&& err
== -EAGAIN
)
597 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
603 /* Make sure that if these memblocks need to be copied they will fit into one slot */
604 if (frames
> pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
)
605 frames
= pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
;
607 if (!after_avail
&& frames
== 0)
610 pa_assert(frames
> 0);
613 /* Check these are multiples of 8 bit */
614 pa_assert((areas
[0].first
& 7) == 0);
615 pa_assert((areas
[0].step
& 7)== 0);
617 /* We assume a single interleaved memory buffer */
618 pa_assert((areas
[0].first
>> 3) == 0);
619 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
621 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
623 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
624 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
627 pa_sink_render_into_full(u
->sink
, &chunk
);
628 pa_memblock_unref_fixed(chunk
.memblock
);
630 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
632 if (!after_avail
&& (int) sframes
== -EAGAIN
)
635 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
643 u
->write_count
+= frames
* u
->frame_size
;
644 u
->since_start
+= frames
* u
->frame_size
;
647 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
650 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
653 n_bytes
-= (size_t) frames
* u
->frame_size
;
658 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
659 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
661 if (*sleep_usec
> process_usec
)
662 *sleep_usec
-= process_usec
;
668 return work_done
? 1 : 0;
/* Fill the hw buffer via classic snd_pcm_writei(): render into u->memchunk
 * when it is empty, write as many frames as the device will accept, advance
 * the memchunk index/length, update write_count/since_start, and compute
 * the next sleep time from what is left to play. Returns nonzero if work
 * was done. Non-mmap counterpart of mmap_write().
 * NOTE(review): extraction gaps -- loop headers, several declarations
 * (n, r, n_bytes, left_to_play, p), braces and returns are not visible in
 * this text; verify against upstream. */
671 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
672 pa_bool_t work_done
= FALSE
;
673 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
678 pa_sink_assert_ref(u
->sink
);
681 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
687 pa_bool_t after_avail
= TRUE
;
689 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
691 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
697 n_bytes
= (size_t) n
* u
->frame_size
;
698 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
703 /* We won't fill up the playback buffer before at least
704 * half the sleep time is over because otherwise we might
705 * ask for more data from the clients than they expect. We
706 * need to guarantee that clients only have to keep around
707 * a single hw buffer length. */
710 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
713 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
717 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
718 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
719 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
720 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
730 pa_log_debug("Not filling up, because already too many iterations.");
736 n_bytes
-= u
->hwbuf_unused
;
740 snd_pcm_sframes_t frames
;
743 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
745 if (u
->memchunk
.length
<= 0)
746 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
748 pa_assert(u
->memchunk
.length
> 0);
750 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
752 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
753 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
755 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
756 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
757 pa_memblock_release(u
->memchunk
.memblock
);
759 if (PA_UNLIKELY(frames
< 0)) {
761 if (!after_avail
&& (int) frames
== -EAGAIN
)
764 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
770 if (!after_avail
&& frames
== 0)
773 pa_assert(frames
> 0);
776 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
777 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
779 if (u
->memchunk
.length
<= 0) {
780 pa_memblock_unref(u
->memchunk
.memblock
);
781 pa_memchunk_reset(&u
->memchunk
);
786 u
->write_count
+= frames
* u
->frame_size
;
787 u
->since_start
+= frames
* u
->frame_size
;
789 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
791 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
794 n_bytes
-= (size_t) frames
* u
->frame_size
;
799 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
800 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
802 if (*sleep_usec
> process_usec
)
803 *sleep_usec
-= process_usec
;
809 return work_done
? 1 : 0;
/* Feed the time smoother with a fresh (system time, playback position)
 * sample: query the PCM delay and status timestamp, rate-limit updates to
 * u->smoother_interval, compute position = write_count - delay bytes, and
 * exponentially back off the update interval up to SMOOTHER_MAX_INTERVAL.
 * NOTE(review): extraction gaps -- declarations (err, position), braces
 * and returns between the visible statements are missing; verify upstream. */
812 static void update_smoother(struct userdata
*u
) {
813 snd_pcm_sframes_t delay
= 0;
816 pa_usec_t now1
= 0, now2
;
817 snd_pcm_status_t
*status
;
819 snd_pcm_status_alloca(&status
);
822 pa_assert(u
->pcm_handle
);
824 /* Let's update the time smoother */
826 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
, FALSE
)) < 0)) {
827 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
831 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
832 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
834 snd_htimestamp_t htstamp
= { 0, 0 };
835 snd_pcm_status_get_htstamp(status
, &htstamp
);
836 now1
= pa_timespec_load(&htstamp
);
839 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
841 now1
= pa_rtclock_now();
843 /* check if the time since the last update is bigger than the interval */
844 if (u
->last_smoother_update
> 0)
845 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
848 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
850 if (PA_UNLIKELY(position
< 0))
853 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
855 pa_smoother_put(u
->smoother
, now1
, now2
);
857 u
->last_smoother_update
= now1
;
858 /* exponentially increase the update interval up to the MAX limit */
859 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
/* Estimate the current sink latency: smoothed playback position subtracted
 * from total bytes written (both in usec), clamped at zero, plus whatever
 * is still queued in u->memchunk.
 * NOTE(review): extraction gaps -- declarations (delay, r) and the final
 * return are not visible; verify against upstream. */
862 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
865 pa_usec_t now1
, now2
;
869 now1
= pa_rtclock_now();
870 now2
= pa_smoother_get(u
->smoother
, now1
);
872 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
874 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
876 if (u
->memchunk
.memblock
)
877 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
/* (Re)build the rtpoll item for the PCM device's poll fds, freeing any
 * previous item first. Returns an int status.
 * NOTE(review): extraction gaps -- the failure/success returns are not
 * visible; verify against upstream. */
882 static int build_pollfd(struct userdata
*u
) {
884 pa_assert(u
->pcm_handle
);
886 if (u
->alsa_rtpoll_item
)
887 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
889 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
/* Called from IO context */
/* Suspend the device: pause the smoother, close the PCM handle (explicitly
 * skipping snd_pcm_drain, see comment below), drop the rtpoll item and
 * reset max_rewind/max_request so suspended-state values don't leak into
 * newly created streams.
 * NOTE(review): extraction gaps -- asserts, braces and the return are not
 * visible; verify against upstream. */
896 static int suspend(struct userdata
*u
) {
898 pa_assert(u
->pcm_handle
);
900 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
902 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
903 * take awfully long with our long buffer sizes today. */
904 snd_pcm_close(u
->pcm_handle
);
905 u
->pcm_handle
= NULL
;
907 if (u
->alsa_rtpoll_item
) {
908 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
909 u
->alsa_rtpoll_item
= NULL
;
912 /* We reset max_rewind/max_request here to make sure that while we
913 * are suspended the old max_request/max_rewind values set before
914 * the suspend can influence the per-stream buffer of newly
915 * created streams, without their requirements having any
916 * influence on them. */
917 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
918 pa_sink_set_max_request_within_thread(u
->sink
, 0);
920 pa_log_info("Device suspended...");
/* Called from IO context */
/* Recompute software parameters: derive hwbuf_unused from the requested
 * latency (tsched only), refresh min sleep/wakeup and the watermark, set
 * avail_min via pa_alsa_set_sw_params, and propagate max_request /
 * max_rewind to the sink (rewinding only on real hw devices).
 * NOTE(review): extraction gaps -- declarations (latency, b, err), braces
 * and returns are not visible in this text; verify against upstream. */
926 static int update_sw_params(struct userdata
*u
) {
927 snd_pcm_uframes_t avail_min
;
932 /* Use the full buffer if no one asked us for anything specific */
938 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
941 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
943 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
945 /* We need at least one sample in our buffer */
947 if (PA_UNLIKELY(b
< u
->frame_size
))
950 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
953 fix_min_sleep_wakeup(u
);
954 fix_tsched_watermark(u
);
957 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
959 /* We need at least one frame in the used part of the buffer */
960 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
963 pa_usec_t sleep_usec
, process_usec
;
965 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
966 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
969 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
971 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
, !u
->use_tsched
)) < 0) {
972 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
976 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
977 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
978 pa_sink_set_max_rewind_within_thread(u
->sink
, u
->hwbuf_size
);
980 pa_log_info("Disabling rewind_within_thread for device %s", u
->device_name
);
981 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
/* Called from IO context */
/* Resume the device after a suspend: reopen the PCM (with ",AES0=6"
 * appended for passthrough on IEC958/HDMI devices to get NONAUDIO mode),
 * restore the previous hw params and verify access mode / sample spec /
 * fragment geometry survived, then refresh sw params, rebuild the pollfd
 * and reset the smoother. The trailing lines are the error path (close
 * the handle, free the temporary device name).
 * NOTE(review): extraction gaps -- declarations (ss, b, d, err), goto
 * labels, braces and returns are not visible; verify against upstream. */
988 static int unsuspend(struct userdata
*u
) {
992 snd_pcm_uframes_t period_size
, buffer_size
;
993 char *device_name
= NULL
;
996 pa_assert(!u
->pcm_handle
);
998 pa_log_info("Trying resume...");
1000 if ((is_iec958(u
) || is_hdmi(u
)) && pa_sink_is_passthrough(u
->sink
)) {
1001 /* Need to open device in NONAUDIO mode */
1002 int len
= strlen(u
->device_name
) + 8;
1004 device_name
= pa_xmalloc(len
);
1005 pa_snprintf(device_name
, len
, "%s,AES0=6", u
->device_name
);
1008 if ((err
= snd_pcm_open(&u
->pcm_handle
, device_name
? device_name
: u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
1010 SND_PCM_NO_AUTO_RESAMPLE
|
1011 SND_PCM_NO_AUTO_CHANNELS
|
1012 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
1013 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
1017 ss
= u
->sink
->sample_spec
;
1018 period_size
= u
->fragment_size
/ u
->frame_size
;
1019 buffer_size
= u
->hwbuf_size
/ u
->frame_size
;
1023 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &period_size
, &buffer_size
, 0, &b
, &d
, TRUE
)) < 0) {
1024 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
1028 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
1029 pa_log_warn("Resume failed, couldn't get original access mode.");
1033 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
1034 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1038 if (period_size
*u
->frame_size
!= u
->fragment_size
||
1039 buffer_size
*u
->frame_size
!= u
->hwbuf_size
) {
1040 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1041 (unsigned long) u
->hwbuf_size
, (unsigned long) u
->fragment_size
,
1042 (unsigned long) (buffer_size
*u
->frame_size
), (unsigned long) (period_size
*u
->frame_size
));
1046 if (update_sw_params(u
) < 0)
1049 if (build_pollfd(u
) < 0)
1053 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
1054 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1055 u
->last_smoother_update
= 0;
1060 pa_log_info("Resumed successfully...");
1062 pa_xfree(device_name
);
1066 if (u
->pcm_handle
) {
1067 snd_pcm_close(u
->pcm_handle
);
1068 u
->pcm_handle
= NULL
;
1071 pa_xfree(device_name
);
/* Called from IO context */
/* Sink message handler: on ADD_INPUT/FINISH_MOVE, if the input is
 * passthrough, remember the old sample rate and suspend/unsuspend the
 * device to switch the sink rate to the input's; on START_MOVE/
 * REMOVE_INPUT, restore the remembered rate the same way; GET_LATENCY
 * reports sink_get_latency(); SET_STATE drives suspend()/build_pollfd()/
 * unsuspend() according to the target state. Falls through to
 * pa_sink_process_msg() at the end.
 * NOTE(review): extraction gaps -- the enclosing switch header, local
 * declarations (r), braces, breaks and intermediate returns are not
 * visible in this text; verify against upstream. */
1077 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1078 struct userdata
*u
= PA_SINK(o
)->userdata
;
1082 case PA_SINK_MESSAGE_FINISH_MOVE
:
1083 case PA_SINK_MESSAGE_ADD_INPUT
: {
1084 pa_sink_input
*i
= PA_SINK_INPUT(data
);
1087 if (PA_LIKELY(!pa_sink_input_is_passthrough(i
)))
1090 u
->old_rate
= u
->sink
->sample_spec
.rate
;
1092 /* Passthrough format, see if we need to reset sink sample rate */
1093 if (u
->sink
->sample_spec
.rate
== i
->thread_info
.sample_spec
.rate
)
1097 if ((r
= suspend(u
)) < 0)
1100 u
->sink
->sample_spec
.rate
= i
->thread_info
.sample_spec
.rate
;
1102 if ((r
= unsuspend(u
)) < 0)
1108 case PA_SINK_MESSAGE_START_MOVE
:
1109 case PA_SINK_MESSAGE_REMOVE_INPUT
: {
1110 pa_sink_input
*i
= PA_SINK_INPUT(data
);
1113 if (PA_LIKELY(!pa_sink_input_is_passthrough(i
)))
1116 /* Passthrough format, see if we need to reset sink sample rate */
1117 if (u
->sink
->sample_spec
.rate
== u
->old_rate
)
1121 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
) && ((r
= suspend(u
)) < 0))
1124 u
->sink
->sample_spec
.rate
= u
->old_rate
;
1126 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
) && ((r
= unsuspend(u
)) < 0))
1132 case PA_SINK_MESSAGE_GET_LATENCY
: {
1136 r
= sink_get_latency(u
);
1138 *((pa_usec_t
*) data
) = r
;
1143 case PA_SINK_MESSAGE_SET_STATE
:
1145 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1147 case PA_SINK_SUSPENDED
: {
1150 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1152 if ((r
= suspend(u
)) < 0)
1159 case PA_SINK_RUNNING
: {
1162 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1163 if (build_pollfd(u
) < 0)
1167 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1168 if ((r
= unsuspend(u
)) < 0)
1175 case PA_SINK_UNLINKED
:
1177 case PA_SINK_INVALID_STATE
:
1184 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
/* Called from main context */
/* State-change callback: when moving from an open state to SUSPENDED the
 * reservation is (presumably) released, and when resuming from SUSPENDED
 * the device reservation is re-acquired, failing with -PA_ERR_BUSY if the
 * device cannot be locked.
 * NOTE(review): extraction gaps -- the declaration of u, the suspended
 * branch body and the final return are not visible; verify upstream. */
1188 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1189 pa_sink_state_t old_state
;
1192 pa_sink_assert_ref(s
);
1193 pa_assert_se(u
= s
->userdata
);
1195 old_state
= pa_sink_get_state(u
->sink
);
1197 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1199 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1200 if (reserve_init(u
, u
->device_name
) < 0)
1201 return -PA_ERR_BUSY
;
/* Mixer element callback (main-loop/ctl variant): on a value-change event,
 * re-read hardware volume and mute into the sink. Events are ignored for
 * removed elements, unlinked sinks, or while suspended by the session.
 * NOTE(review): extraction gaps -- the early-return bodies and final
 * return are not visible; verify against upstream. */
1206 static int ctl_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1207 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1210 pa_assert(u
->mixer_handle
);
1212 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1215 if (!PA_SINK_IS_LINKED(u
->sink
->state
))
1218 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
)
1221 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1222 pa_sink_get_volume(u
->sink
, TRUE
);
1223 pa_sink_get_mute(u
->sink
, TRUE
);
/* Mixer element callback (IO-thread variant, used with deferred volume):
 * on a value-change event, update the sink's volume and mute in one go.
 * Removed elements and session-suspended sinks are ignored.
 * NOTE(review): extraction gaps -- the early-return bodies and final
 * return are not visible; verify against upstream. */
1229 static int io_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1230 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1233 pa_assert(u
->mixer_handle
);
1235 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1238 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
)
1241 if (mask
& SND_CTL_EVENT_MASK_VALUE
)
1242 pa_sink_update_volume_and_mute(u
->sink
);
/* Read the hardware volume from the mixer path, scale by the sink's base
 * volume (so 0dB maps to maximum), and if it differs from our cached
 * hardware_volume, adopt it as the real volume — resetting the software
 * volume when the path has dB information.
 * NOTE(review): extraction gaps -- the declaration of r, asserts and
 * early returns are not visible in this text; verify against upstream. */
1247 static void sink_get_volume_cb(pa_sink
*s
) {
1248 struct userdata
*u
= s
->userdata
;
1250 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1253 pa_assert(u
->mixer_path
);
1254 pa_assert(u
->mixer_handle
);
1256 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1259 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1260 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1262 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1264 if (u
->mixer_path
->has_dB
) {
1265 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1267 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &r
));
1270 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1273 s
->real_volume
= u
->hardware_volume
= r
;
1275 /* Hmm, so the hardware volume changed, let's reset our software volume */
1276 if (u
->mixer_path
->has_dB
)
1277 pa_sink_set_soft_volume(s
, NULL
);
/* Write the requested volume to hardware: divide out the base volume,
 * push it through the mixer path (write deferred to the IO thread when
 * PA_SINK_DEFERRED_VOLUME is set), record what the hardware actually took,
 * and — when dB info is available — compute a residual software volume so
 * the combination matches the user's request, skipping it when it is
 * within VOLUME_ACCURACY of unity.
 * NOTE(review): extraction gaps -- the declaration of r, the
 * accurate_enough assignment's first lines, braces and returns are not
 * visible in this text; verify against upstream. */
1280 static void sink_set_volume_cb(pa_sink
*s
) {
1281 struct userdata
*u
= s
->userdata
;
1283 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1284 pa_bool_t deferred_volume
= !!(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1287 pa_assert(u
->mixer_path
);
1288 pa_assert(u
->mixer_handle
);
1290 /* Shift up by the base volume */
1291 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1293 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
, deferred_volume
, !deferred_volume
) < 0)
1296 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1297 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1299 u
->hardware_volume
= r
;
1301 if (u
->mixer_path
->has_dB
) {
1302 pa_cvolume new_soft_volume
;
1303 pa_bool_t accurate_enough
;
1304 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1306 /* Match exactly what the user requested by software */
1307 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1309 /* If the adjustment to do in software is only minimal we
1310 * can skip it. That saves us CPU at the expense of a bit of
1313 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1314 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1316 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &s
->real_volume
));
1317 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &s
->real_volume
));
1318 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &u
->hardware_volume
));
1319 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &u
->hardware_volume
));
1320 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1321 pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &new_soft_volume
),
1322 pa_yes_no(accurate_enough
));
1323 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &new_soft_volume
));
1325 if (!accurate_enough
)
1326 s
->soft_volume
= new_soft_volume
;
1329 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1331 /* We can't match exactly what the user requested, hence let's
1332 * at least tell the user about it */
1338 static void sink_write_volume_cb(pa_sink
*s
) {
1339 struct userdata
*u
= s
->userdata
;
1340 pa_cvolume hw_vol
= s
->thread_info
.current_hw_volume
;
1343 pa_assert(u
->mixer_path
);
1344 pa_assert(u
->mixer_handle
);
1345 pa_assert(s
->flags
& PA_SINK_DEFERRED_VOLUME
);
1347 /* Shift up by the base volume */
1348 pa_sw_cvolume_divide_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1350 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &hw_vol
, TRUE
, TRUE
) < 0)
1351 pa_log_error("Writing HW volume failed");
1354 pa_bool_t accurate_enough
;
1356 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1357 pa_sw_cvolume_multiply_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1359 pa_sw_cvolume_divide(&tmp_vol
, &hw_vol
, &s
->thread_info
.current_hw_volume
);
1361 (pa_cvolume_min(&tmp_vol
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1362 (pa_cvolume_max(&tmp_vol
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1364 if (!accurate_enough
) {
1366 char db
[2][PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1367 char pcnt
[2][PA_CVOLUME_SNPRINT_MAX
];
1370 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1371 pa_cvolume_snprint(vol
.pcnt
[0], sizeof(vol
.pcnt
[0]), &s
->thread_info
.current_hw_volume
),
1372 pa_cvolume_snprint(vol
.pcnt
[1], sizeof(vol
.pcnt
[1]), &hw_vol
));
1373 pa_log_debug(" in dB: %s (request) != %s",
1374 pa_sw_cvolume_snprint_dB(vol
.db
[0], sizeof(vol
.db
[0]), &s
->thread_info
.current_hw_volume
),
1375 pa_sw_cvolume_snprint_dB(vol
.db
[1], sizeof(vol
.db
[1]), &hw_vol
));
1380 static void sink_get_mute_cb(pa_sink
*s
) {
1381 struct userdata
*u
= s
->userdata
;
1385 pa_assert(u
->mixer_path
);
1386 pa_assert(u
->mixer_handle
);
1388 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1394 static void sink_set_mute_cb(pa_sink
*s
) {
1395 struct userdata
*u
= s
->userdata
;
1398 pa_assert(u
->mixer_path
);
1399 pa_assert(u
->mixer_handle
);
1401 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1404 static void mixer_volume_init(struct userdata
*u
) {
1407 if (!u
->mixer_path
->has_volume
) {
1408 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1409 pa_sink_set_get_volume_callback(u
->sink
, NULL
);
1410 pa_sink_set_set_volume_callback(u
->sink
, NULL
);
1412 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1414 pa_sink_set_get_volume_callback(u
->sink
, sink_get_volume_cb
);
1415 pa_sink_set_set_volume_callback(u
->sink
, sink_set_volume_cb
);
1417 if (u
->mixer_path
->has_dB
&& u
->deferred_volume
) {
1418 pa_sink_set_write_volume_callback(u
->sink
, sink_write_volume_cb
);
1419 pa_log_info("Successfully enabled synchronous volume.");
1421 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1423 if (u
->mixer_path
->has_dB
) {
1424 pa_sink_enable_decibel_volume(u
->sink
, TRUE
);
1425 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1427 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1428 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1430 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1432 pa_sink_enable_decibel_volume(u
->sink
, FALSE
);
1433 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1435 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1436 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1439 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1442 if (!u
->mixer_path
->has_mute
) {
1443 pa_sink_set_get_mute_callback(u
->sink
, NULL
);
1444 pa_sink_set_set_mute_callback(u
->sink
, NULL
);
1445 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1447 pa_sink_set_get_mute_callback(u
->sink
, sink_get_mute_cb
);
1448 pa_sink_set_set_mute_callback(u
->sink
, sink_set_mute_cb
);
1449 pa_log_info("Using hardware mute control.");
1453 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1454 struct userdata
*u
= s
->userdata
;
1455 pa_alsa_port_data
*data
;
1459 pa_assert(u
->mixer_handle
);
1461 data
= PA_DEVICE_PORT_DATA(p
);
1463 pa_assert_se(u
->mixer_path
= data
->path
);
1464 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1466 mixer_volume_init(u
);
1469 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1479 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1480 struct userdata
*u
= s
->userdata
;
1483 pa_assert(u
->use_tsched
); /* only when timer scheduling is used
1484 * we can dynamically adjust the
1490 before
= u
->hwbuf_unused
;
1491 update_sw_params(u
);
1493 /* Let's check whether we now use only a smaller part of the
1494 buffer then before. If so, we need to make sure that subsequent
1495 rewinds are relative to the new maximum fill level and not to the
1496 current fill level. Thus, let's do a full rewind once, to clear
1499 if (u
->hwbuf_unused
> before
) {
1500 pa_log_debug("Requesting rewind due to latency change.");
1501 pa_sink_request_rewind(s
, (size_t) -1);
1505 static pa_idxset
* sink_get_formats(pa_sink
*s
) {
1506 struct userdata
*u
= s
->userdata
;
1507 pa_idxset
*ret
= pa_idxset_new(NULL
, NULL
);
1513 PA_IDXSET_FOREACH(f
, u
->formats
, idx
) {
1514 pa_idxset_put(ret
, pa_format_info_copy(f
), NULL
);
1520 static pa_bool_t
sink_set_formats(pa_sink
*s
, pa_idxset
*formats
) {
1521 struct userdata
*u
= s
->userdata
;
1527 /* FIXME: also validate sample rates against what the device supports */
1528 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1529 if (is_iec958(u
) && f
->encoding
== PA_ENCODING_EAC3_IEC61937
)
1530 /* EAC3 cannot be sent over over S/PDIF */
1534 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
1535 u
->formats
= pa_idxset_new(NULL
, NULL
);
1537 /* Note: the logic below won't apply if we're using software encoding.
1538 * This is fine for now since we don't support that via the passthrough
1539 * framework, but this must be changed if we do. */
1541 /* First insert non-PCM formats since we prefer those. */
1542 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1543 if (!pa_format_info_is_pcm(f
))
1544 pa_idxset_put(u
->formats
, pa_format_info_copy(f
), NULL
);
1547 /* Now add any PCM formats */
1548 PA_IDXSET_FOREACH(f
, formats
, idx
) {
1549 if (pa_format_info_is_pcm(f
))
1550 pa_idxset_put(u
->formats
, pa_format_info_copy(f
), NULL
);
1556 static int process_rewind(struct userdata
*u
) {
1557 snd_pcm_sframes_t unused
;
1558 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1561 /* Figure out how much we shall rewind and reset the counter */
1562 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1564 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1566 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1567 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1571 unused_nbytes
= (size_t) unused
* u
->frame_size
;
1573 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1574 unused_nbytes
+= u
->rewind_safeguard
;
1576 if (u
->hwbuf_size
> unused_nbytes
)
1577 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1581 if (rewind_nbytes
> limit_nbytes
)
1582 rewind_nbytes
= limit_nbytes
;
1584 if (rewind_nbytes
> 0) {
1585 snd_pcm_sframes_t in_frames
, out_frames
;
1587 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1589 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1590 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1591 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1592 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1593 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1598 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1600 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1602 if (rewind_nbytes
<= 0)
1603 pa_log_info("Tried rewind, but was apparently not possible.");
1605 u
->write_count
-= rewind_nbytes
;
1606 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1607 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1609 u
->after_rewind
= TRUE
;
1613 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1615 pa_sink_process_rewind(u
->sink
, 0);
1619 static void thread_func(void *userdata
) {
1620 struct userdata
*u
= userdata
;
1621 unsigned short revents
= 0;
1625 pa_log_debug("Thread starting up");
1627 if (u
->core
->realtime_scheduling
)
1628 pa_make_realtime(u
->core
->realtime_priority
);
1630 pa_thread_mq_install(&u
->thread_mq
);
1634 pa_usec_t rtpoll_sleep
= 0;
1637 pa_log_debug("Loop");
1640 /* Render some data and write it to the dsp */
1641 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1643 pa_usec_t sleep_usec
= 0;
1644 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
1646 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1647 if (process_rewind(u
) < 0)
1651 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1653 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1658 /* pa_log_debug("work_done = %i", work_done); */
1663 pa_log_info("Starting playback.");
1664 snd_pcm_start(u
->pcm_handle
);
1666 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1674 if (u
->use_tsched
) {
1677 if (u
->since_start
<= u
->hwbuf_size
) {
1679 /* USB devices on ALSA seem to hit a buffer
1680 * underrun during the first iterations much
1681 * quicker then we calculate here, probably due to
1682 * the transport latency. To accommodate for that
1683 * we artificially decrease the sleep time until
1684 * we have filled the buffer at least once
1687 if (pa_log_ratelimit(PA_LOG_DEBUG
))
1688 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1692 /* OK, the playback buffer is now full, let's
1693 * calculate when to wake up next */
1694 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1696 /* Convert from the sound card time domain to the
1697 * system time domain */
1698 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1700 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1702 /* We don't trust the conversion, so we wake up whatever comes first */
1703 rtpoll_sleep
= PA_MIN(sleep_usec
, cusec
);
1706 u
->after_rewind
= FALSE
;
1710 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1711 pa_usec_t volume_sleep
;
1712 pa_sink_volume_change_apply(u
->sink
, &volume_sleep
);
1713 if (volume_sleep
> 0)
1714 rtpoll_sleep
= PA_MIN(volume_sleep
, rtpoll_sleep
);
1717 if (rtpoll_sleep
> 0)
1718 pa_rtpoll_set_timer_relative(u
->rtpoll
, rtpoll_sleep
);
1720 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1722 /* Hmm, nothing to do. Let's sleep */
1723 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1726 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
)
1727 pa_sink_volume_change_apply(u
->sink
, NULL
);
1732 /* Tell ALSA about this and process its response */
1733 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1734 struct pollfd
*pollfd
;
1738 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1740 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1741 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
1745 if (revents
& ~POLLOUT
) {
1746 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1752 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit(PA_LOG_DEBUG
))
1753 pa_log_debug("Wakeup from ALSA!");
1760 /* If this was no regular exit from the loop we have to continue
1761 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1762 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1763 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1766 pa_log_debug("Thread shutting down");
1769 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1775 pa_assert(device_name
);
1777 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1778 pa_sink_new_data_set_name(data
, n
);
1779 data
->namereg_fail
= TRUE
;
1783 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1784 data
->namereg_fail
= TRUE
;
1786 n
= device_id
? device_id
: device_name
;
1787 data
->namereg_fail
= FALSE
;
1791 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1793 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1795 pa_sink_new_data_set_name(data
, t
);
1799 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
1801 if (!mapping
&& !element
)
1804 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1805 pa_log_info("Failed to find a working mixer device.");
1811 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1814 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1817 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1818 pa_alsa_path_dump(u
->mixer_path
);
1821 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
)))
1824 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1831 if (u
->mixer_path_set
) {
1832 pa_alsa_path_set_free(u
->mixer_path_set
);
1833 u
->mixer_path_set
= NULL
;
1834 } else if (u
->mixer_path
) {
1835 pa_alsa_path_free(u
->mixer_path
);
1836 u
->mixer_path
= NULL
;
1839 if (u
->mixer_handle
) {
1840 snd_mixer_close(u
->mixer_handle
);
1841 u
->mixer_handle
= NULL
;
1846 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1847 pa_bool_t need_mixer_callback
= FALSE
;
1851 if (!u
->mixer_handle
)
1854 if (u
->sink
->active_port
) {
1855 pa_alsa_port_data
*data
;
1857 /* We have a list of supported paths, so let's activate the
1858 * one that has been chosen as active */
1860 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1861 u
->mixer_path
= data
->path
;
1863 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1866 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1870 if (!u
->mixer_path
&& u
->mixer_path_set
)
1871 u
->mixer_path
= u
->mixer_path_set
->paths
;
1873 if (u
->mixer_path
) {
1874 /* Hmm, we have only a single path, then let's activate it */
1876 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1878 if (u
->mixer_path
->settings
)
1879 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1884 mixer_volume_init(u
);
1886 /* Will we need to register callbacks? */
1887 if (u
->mixer_path_set
&& u
->mixer_path_set
->paths
) {
1890 PA_LLIST_FOREACH(p
, u
->mixer_path_set
->paths
) {
1891 if (p
->has_volume
|| p
->has_mute
)
1892 need_mixer_callback
= TRUE
;
1895 else if (u
->mixer_path
)
1896 need_mixer_callback
= u
->mixer_path
->has_volume
|| u
->mixer_path
->has_mute
;
1898 if (need_mixer_callback
) {
1899 int (*mixer_callback
)(snd_mixer_elem_t
*, unsigned int);
1900 if (u
->sink
->flags
& PA_SINK_DEFERRED_VOLUME
) {
1901 u
->mixer_pd
= pa_alsa_mixer_pdata_new();
1902 mixer_callback
= io_mixer_callback
;
1904 if (pa_alsa_set_mixer_rtpoll(u
->mixer_pd
, u
->mixer_handle
, u
->rtpoll
) < 0) {
1905 pa_log("Failed to initialize file descriptor monitoring");
1909 u
->mixer_fdl
= pa_alsa_fdlist_new();
1910 mixer_callback
= ctl_mixer_callback
;
1912 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1913 pa_log("Failed to initialize file descriptor monitoring");
1918 if (u
->mixer_path_set
)
1919 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1921 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1927 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1929 struct userdata
*u
= NULL
;
1930 const char *dev_id
= NULL
;
1931 pa_sample_spec ss
, requested_ss
;
1933 uint32_t nfrags
, frag_size
, buffer_size
, tsched_size
, tsched_watermark
, rewind_safeguard
;
1934 snd_pcm_uframes_t period_frames
, buffer_frames
, tsched_frames
;
1936 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
, namereg_fail
= FALSE
, deferred_volume
= FALSE
, set_formats
= FALSE
;
1937 pa_sink_new_data data
;
1938 pa_alsa_profile_set
*profile_set
= NULL
;
1943 ss
= m
->core
->default_sample_spec
;
1944 map
= m
->core
->default_channel_map
;
1945 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1946 pa_log("Failed to parse sample specification and channel map");
1951 frame_size
= pa_frame_size(&ss
);
1953 nfrags
= m
->core
->default_n_fragments
;
1954 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1956 frag_size
= (uint32_t) frame_size
;
1957 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1958 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1960 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1961 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1962 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1963 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1964 pa_log("Failed to parse buffer metrics");
1968 buffer_size
= nfrags
* frag_size
;
1970 period_frames
= frag_size
/frame_size
;
1971 buffer_frames
= buffer_size
/frame_size
;
1972 tsched_frames
= tsched_size
/frame_size
;
1974 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1975 pa_log("Failed to parse mmap argument.");
1979 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1980 pa_log("Failed to parse tsched argument.");
1984 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1985 pa_log("Failed to parse ignore_dB argument.");
1989 rewind_safeguard
= PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES
, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC
, &ss
));
1990 if (pa_modargs_get_value_u32(ma
, "rewind_safeguard", &rewind_safeguard
) < 0) {
1991 pa_log("Failed to parse rewind_safeguard argument");
1995 deferred_volume
= m
->core
->deferred_volume
;
1996 if (pa_modargs_get_value_boolean(ma
, "deferred_volume", &deferred_volume
) < 0) {
1997 pa_log("Failed to parse deferred_volume argument.");
2001 use_tsched
= pa_alsa_may_tsched(use_tsched
);
2003 u
= pa_xnew0(struct userdata
, 1);
2006 u
->use_mmap
= use_mmap
;
2007 u
->use_tsched
= use_tsched
;
2008 u
->deferred_volume
= deferred_volume
;
2010 u
->rewind_safeguard
= rewind_safeguard
;
2011 u
->rtpoll
= pa_rtpoll_new();
2012 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
2014 u
->smoother
= pa_smoother_new(
2015 SMOOTHER_ADJUST_USEC
,
2016 SMOOTHER_WINDOW_USEC
,
2022 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
2024 dev_id
= pa_modargs_get_value(
2026 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
2028 if (reserve_init(u
, dev_id
) < 0)
2031 if (reserve_monitor_init(u
, dev_id
) < 0)
2039 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2040 pa_log("device_id= not set");
2044 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
2048 SND_PCM_STREAM_PLAYBACK
,
2049 &period_frames
, &buffer_frames
, tsched_frames
,
2053 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
2055 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
2058 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
2062 SND_PCM_STREAM_PLAYBACK
,
2063 &period_frames
, &buffer_frames
, tsched_frames
,
2064 &b
, &d
, profile_set
, &mapping
)))
2069 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
2070 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
2073 SND_PCM_STREAM_PLAYBACK
,
2074 &period_frames
, &buffer_frames
, tsched_frames
,
2079 pa_assert(u
->device_name
);
2080 pa_log_info("Successfully opened device %s.", u
->device_name
);
2082 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
2083 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
2088 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
2090 if (use_mmap
&& !b
) {
2091 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2092 u
->use_mmap
= use_mmap
= FALSE
;
2095 if (use_tsched
&& (!b
|| !d
)) {
2096 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2097 u
->use_tsched
= use_tsched
= FALSE
;
2101 pa_log_info("Successfully enabled mmap() mode.");
2104 pa_log_info("Successfully enabled timer-based scheduling mode.");
2106 if (is_iec958(u
) || is_hdmi(u
))
2109 /* ALSA might tweak the sample spec, so recalculate the frame size */
2110 frame_size
= pa_frame_size(&ss
);
2112 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
2114 pa_sink_new_data_init(&data
);
2115 data
.driver
= driver
;
2118 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
2120 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2121 * variable instead of using &data.namereg_fail directly, because
2122 * data.namereg_fail is a bitfield and taking the address of a bitfield
2123 * variable is impossible. */
2124 namereg_fail
= data
.namereg_fail
;
2125 if (pa_modargs_get_value_boolean(ma
, "namereg_fail", &namereg_fail
) < 0) {
2126 pa_log("Failed to parse namereg_fail argument.");
2127 pa_sink_new_data_done(&data
);
2130 data
.namereg_fail
= namereg_fail
;
2132 pa_sink_new_data_set_sample_spec(&data
, &ss
);
2133 pa_sink_new_data_set_channel_map(&data
, &map
);
2135 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
2136 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
2137 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (buffer_frames
* frame_size
));
2138 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
2139 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
2142 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
2143 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
2146 pa_alsa_init_description(data
.proplist
);
2148 if (u
->control_device
)
2149 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
2151 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
2152 pa_log("Invalid properties");
2153 pa_sink_new_data_done(&data
);
2157 if (u
->mixer_path_set
)
2158 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
2160 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
| PA_SINK_LATENCY
| (u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0) |
2161 (set_formats
? PA_SINK_SET_FORMATS
: 0));
2162 pa_sink_new_data_done(&data
);
2165 pa_log("Failed to create sink object");
2169 if (pa_modargs_get_value_u32(ma
, "deferred_volume_safety_margin",
2170 &u
->sink
->thread_info
.volume_change_safety_margin
) < 0) {
2171 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2175 if (pa_modargs_get_value_s32(ma
, "deferred_volume_extra_delay",
2176 &u
->sink
->thread_info
.volume_change_extra_delay
) < 0) {
2177 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2181 u
->sink
->parent
.process_msg
= sink_process_msg
;
2183 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
2184 u
->sink
->set_state
= sink_set_state_cb
;
2185 u
->sink
->set_port
= sink_set_port_cb
;
2186 u
->sink
->userdata
= u
;
2188 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
2189 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
2191 u
->frame_size
= frame_size
;
2192 u
->fragment_size
= frag_size
= (size_t) (period_frames
* frame_size
);
2193 u
->hwbuf_size
= buffer_size
= (size_t) (buffer_frames
* frame_size
);
2194 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
2196 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2197 (double) u
->hwbuf_size
/ (double) u
->fragment_size
,
2198 (long unsigned) u
->fragment_size
,
2199 (double) pa_bytes_to_usec(u
->fragment_size
, &ss
) / PA_USEC_PER_MSEC
,
2200 (long unsigned) u
->hwbuf_size
,
2201 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
2203 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
2204 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
2205 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
2207 pa_log_info("Disabling rewind for device %s", u
->device_name
);
2208 pa_sink_set_max_rewind(u
->sink
, 0);
2211 if (u
->use_tsched
) {
2212 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->sink
->sample_spec
);
2214 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->sink
->sample_spec
);
2215 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->sink
->sample_spec
);
2217 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
2218 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
2220 fix_min_sleep_wakeup(u
);
2221 fix_tsched_watermark(u
);
2223 pa_sink_set_latency_range(u
->sink
,
2225 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
2227 pa_log_info("Time scheduling watermark is %0.2fms",
2228 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
2230 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
2234 if (update_sw_params(u
) < 0)
2237 if (setup_mixer(u
, ignore_dB
) < 0)
2240 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
2242 if (!(u
->thread
= pa_thread_new("alsa-sink", thread_func
, u
))) {
2243 pa_log("Failed to create thread.");
2247 /* Get initial mixer settings */
2248 if (data
.volume_is_set
) {
2249 if (u
->sink
->set_volume
)
2250 u
->sink
->set_volume(u
->sink
);
2252 if (u
->sink
->get_volume
)
2253 u
->sink
->get_volume(u
->sink
);
2256 if (data
.muted_is_set
) {
2257 if (u
->sink
->set_mute
)
2258 u
->sink
->set_mute(u
->sink
);
2260 if (u
->sink
->get_mute
)
2261 u
->sink
->get_mute(u
->sink
);
2264 if ((data
.volume_is_set
|| data
.muted_is_set
) && u
->sink
->write_volume
)
2265 u
->sink
->write_volume(u
->sink
);
2268 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2269 pa_format_info
*format
;
2271 /* To start with, we only support PCM formats. Other formats may be added
2272 * with pa_sink_set_formats().*/
2273 format
= pa_format_info_new();
2274 format
->encoding
= PA_ENCODING_PCM
;
2275 u
->formats
= pa_idxset_new(NULL
, NULL
);
2276 pa_idxset_put(u
->formats
, format
, NULL
);
2278 u
->sink
->get_formats
= sink_get_formats
;
2279 u
->sink
->set_formats
= sink_set_formats
;
2282 pa_sink_put(u
->sink
);
2285 pa_alsa_profile_set_free(profile_set
);
2295 pa_alsa_profile_set_free(profile_set
);
2300 static void userdata_free(struct userdata
*u
) {
2304 pa_sink_unlink(u
->sink
);
2307 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
2308 pa_thread_free(u
->thread
);
2311 pa_thread_mq_done(&u
->thread_mq
);
2314 pa_sink_unref(u
->sink
);
2316 if (u
->memchunk
.memblock
)
2317 pa_memblock_unref(u
->memchunk
.memblock
);
2320 pa_alsa_mixer_pdata_free(u
->mixer_pd
);
2322 if (u
->alsa_rtpoll_item
)
2323 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
2326 pa_rtpoll_free(u
->rtpoll
);
2328 if (u
->pcm_handle
) {
2329 snd_pcm_drop(u
->pcm_handle
);
2330 snd_pcm_close(u
->pcm_handle
);
2334 pa_alsa_fdlist_free(u
->mixer_fdl
);
2336 if (u
->mixer_path_set
)
2337 pa_alsa_path_set_free(u
->mixer_path_set
);
2338 else if (u
->mixer_path
)
2339 pa_alsa_path_free(u
->mixer_path
);
2341 if (u
->mixer_handle
)
2342 snd_mixer_close(u
->mixer_handle
);
2345 pa_smoother_free(u
->smoother
);
2348 pa_idxset_free(u
->formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
2353 pa_xfree(u
->device_name
);
2354 pa_xfree(u
->control_device
);
2358 void pa_alsa_sink_free(pa_sink
*s
) {
2361 pa_sink_assert_ref(s
);
2362 pa_assert_se(u
= s
->userdata
);