2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
39 #include <pulsecore/core.h>
40 #include <pulsecore/module.h>
41 #include <pulsecore/memchunk.h>
42 #include <pulsecore/sink.h>
43 #include <pulsecore/modargs.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/rtclock.h>
53 #include <pulsecore/time-smoother.h>
55 #include "alsa-util.h"
56 #include "module-alsa-sink-symdef.h"
58 PA_MODULE_AUTHOR("Lennart Poettering");
59 PA_MODULE_DESCRIPTION("ALSA Sink");
60 PA_MODULE_VERSION(PACKAGE_VERSION
);
61 PA_MODULE_LOAD_ONCE(FALSE
);
63 "sink_name=<name for the sink> "
64 "device=<ALSA device> "
65 "device_id=<ALSA card index> "
66 "format=<sample format> "
68 "channels=<number of channels> "
69 "channel_map=<channel map> "
70 "fragments=<number of fragments> "
71 "fragment_size=<fragment size> "
72 "mmap=<enable memory mapping?> "
73 "tsched=<enable system timer based scheduling mode?> "
74 "tsched_buffer_size=<buffer size when using timer based scheduling> "
75 "tsched_buffer_watermark=<lower fill watermark>");
77 static const char* const valid_modargs
[] = {
90 "tsched_buffer_watermark",
94 #define DEFAULT_DEVICE "default"
95 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
96 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
97 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
98 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
106 pa_thread_mq thread_mq
;
109 snd_pcm_t
*pcm_handle
;
111 pa_alsa_fdlist
*mixer_fdl
;
112 snd_mixer_t
*mixer_handle
;
113 snd_mixer_elem_t
*mixer_elem
;
114 long hw_volume_max
, hw_volume_min
;
115 long hw_dB_max
, hw_dB_min
;
116 pa_bool_t hw_dB_supported
;
117 pa_bool_t mixer_seperate_channels
;
118 pa_cvolume hardware_volume
;
120 size_t frame_size
, fragment_size
, hwbuf_size
, tsched_watermark
;
122 pa_memchunk memchunk
;
126 pa_bool_t use_mmap
, use_tsched
;
128 pa_bool_t first
, after_rewind
;
130 pa_rtpoll_item
*alsa_rtpoll_item
;
132 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
134 pa_smoother
*smoother
;
136 uint64_t since_start
;
138 snd_pcm_sframes_t hwbuf_unused_frames
;
141 static void fix_tsched_watermark(struct userdata
*u
) {
143 size_t min_sleep
, min_wakeup
;
146 max_use
= u
->hwbuf_size
- u
->hwbuf_unused_frames
* u
->frame_size
;
148 min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
149 min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
151 if (min_sleep
> max_use
/2)
152 min_sleep
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
153 if (min_sleep
< u
->frame_size
)
154 min_sleep
= u
->frame_size
;
156 if (min_wakeup
> max_use
/2)
157 min_wakeup
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
158 if (min_wakeup
< u
->frame_size
)
159 min_wakeup
= u
->frame_size
;
161 if (u
->tsched_watermark
> max_use
-min_sleep
)
162 u
->tsched_watermark
= max_use
-min_sleep
;
164 if (u
->tsched_watermark
< min_wakeup
)
165 u
->tsched_watermark
= min_wakeup
;
168 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
171 pa_assert(sleep_usec
);
172 pa_assert(process_usec
);
176 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
178 if (usec
== (pa_usec_t
) -1)
179 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
181 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
183 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
186 *sleep_usec
= usec
- wm
;
189 *process_usec
= *sleep_usec
= usec
/ 2;
191 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
194 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
199 pa_log_debug("%s: %s", call
, snd_strerror(err
));
201 pa_assert(err
!= -EAGAIN
);
204 pa_log_debug("%s: Buffer underrun!", call
);
206 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) == 0) {
212 pa_log("%s: %s", call
, snd_strerror(err
));
216 static size_t check_left_to_play(struct userdata
*u
, snd_pcm_sframes_t n
) {
219 if (n
*u
->frame_size
< u
->hwbuf_size
)
220 left_to_play
= u
->hwbuf_size
- (n
*u
->frame_size
);
224 if (left_to_play
> 0) {
225 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
226 } else if (!u
->first
&& !u
->after_rewind
) {
227 pa_log_info("Underrun!");
230 size_t old_watermark
= u
->tsched_watermark
;
232 u
->tsched_watermark
*= 2;
233 fix_tsched_watermark(u
);
235 if (old_watermark
!= u
->tsched_watermark
)
236 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
237 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
244 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
) {
246 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
250 pa_sink_assert_ref(u
->sink
);
253 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
259 snd_pcm_hwsync(u
->pcm_handle
);
261 /* First we determine how many samples are missing to fill the
262 * buffer up to 100% */
264 if (PA_UNLIKELY((n
= snd_pcm_avail_update(u
->pcm_handle
)) < 0)) {
266 if ((r
= try_recover(u
, "snd_pcm_avail_update", n
)) == 0)
272 left_to_play
= check_left_to_play(u
, n
);
276 /* We won't fill up the playback buffer before at least
277 * half the sleep time is over because otherwise we might
278 * ask for more data from the clients then they expect. We
279 * need to guarantee that clients only have to keep around
280 * a single hw buffer length. */
282 if (pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
285 if (PA_UNLIKELY(n
<= u
->hwbuf_unused_frames
))
288 n
-= u
->hwbuf_unused_frames
;
290 /* pa_log_debug("Filling up"); */
296 const snd_pcm_channel_area_t
*areas
;
297 snd_pcm_uframes_t offset
, frames
= (snd_pcm_uframes_t
) n
;
299 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
301 if (PA_UNLIKELY((err
= snd_pcm_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
)) < 0)) {
303 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
309 /* Make sure that if these memblocks need to be copied they will fit into one slot */
310 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
311 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
313 /* Check these are multiples of 8 bit */
314 pa_assert((areas
[0].first
& 7) == 0);
315 pa_assert((areas
[0].step
& 7)== 0);
317 /* We assume a single interleaved memory buffer */
318 pa_assert((areas
[0].first
>> 3) == 0);
319 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
321 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
323 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
324 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
327 pa_sink_render_into_full(u
->sink
, &chunk
);
329 /* FIXME: Maybe we can do something to keep this memory block
330 * a little bit longer around? */
331 pa_memblock_unref_fixed(chunk
.memblock
);
333 if (PA_UNLIKELY((err
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
335 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", err
)) == 0)
343 u
->frame_index
+= frames
;
344 u
->since_start
+= frames
* u
->frame_size
;
346 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
348 if (frames
>= (snd_pcm_uframes_t
) n
)
355 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) - process_usec
;
359 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
) {
361 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
365 pa_sink_assert_ref(u
->sink
);
368 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
374 snd_pcm_hwsync(u
->pcm_handle
);
376 if (PA_UNLIKELY((n
= snd_pcm_avail_update(u
->pcm_handle
)) < 0)) {
378 if ((r
= try_recover(u
, "snd_pcm_avail_update", n
)) == 0)
384 left_to_play
= check_left_to_play(u
, n
);
388 /* We won't fill up the playback buffer before at least
389 * half the sleep time is over because otherwise we might
390 * ask for more data from the clients then they expect. We
391 * need to guarantee that clients only have to keep around
392 * a single hw buffer length. */
394 if (pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
397 if (PA_UNLIKELY(n
<= u
->hwbuf_unused_frames
))
400 n
-= u
->hwbuf_unused_frames
;
403 snd_pcm_sframes_t frames
;
406 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
408 if (u
->memchunk
.length
<= 0)
409 pa_sink_render(u
->sink
, n
* u
->frame_size
, &u
->memchunk
);
411 pa_assert(u
->memchunk
.length
> 0);
413 frames
= u
->memchunk
.length
/ u
->frame_size
;
418 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
419 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, frames
);
420 pa_memblock_release(u
->memchunk
.memblock
);
422 pa_assert(frames
!= 0);
424 if (PA_UNLIKELY(frames
< 0)) {
426 if ((r
= try_recover(u
, "snd_pcm_writei", n
)) == 0)
432 u
->memchunk
.index
+= frames
* u
->frame_size
;
433 u
->memchunk
.length
-= frames
* u
->frame_size
;
435 if (u
->memchunk
.length
<= 0) {
436 pa_memblock_unref(u
->memchunk
.memblock
);
437 pa_memchunk_reset(&u
->memchunk
);
442 u
->frame_index
+= frames
;
443 u
->since_start
+= frames
* u
->frame_size
;
445 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
454 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) - process_usec
;
458 static void update_smoother(struct userdata
*u
) {
459 snd_pcm_sframes_t delay
= 0;
462 pa_usec_t now1
, now2
;
463 /* struct timeval timestamp; */
464 snd_pcm_status_t
*status
;
466 snd_pcm_status_alloca(&status
);
469 pa_assert(u
->pcm_handle
);
471 /* Let's update the time smoother */
473 snd_pcm_hwsync(u
->pcm_handle
);
474 snd_pcm_avail_update(u
->pcm_handle
);
476 /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
477 /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
481 /* delay = snd_pcm_status_get_delay(status); */
483 if (PA_UNLIKELY((err
= snd_pcm_delay(u
->pcm_handle
, &delay
)) < 0)) {
484 pa_log("Failed to query DSP status data: %s", snd_strerror(err
));
488 frames
= u
->frame_index
- delay
;
490 /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */
492 /* snd_pcm_status_get_tstamp(status, ×tamp); */
493 /* pa_rtclock_from_wallclock(×tamp); */
494 /* now1 = pa_timeval_load(×tamp); */
496 now1
= pa_rtclock_usec();
497 now2
= pa_bytes_to_usec(frames
* u
->frame_size
, &u
->sink
->sample_spec
);
498 pa_smoother_put(u
->smoother
, now1
, now2
);
501 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
504 pa_usec_t now1
, now2
;
508 now1
= pa_rtclock_usec();
509 now2
= pa_smoother_get(u
->smoother
, now1
);
511 delay
= (int64_t) pa_bytes_to_usec(u
->frame_index
* u
->frame_size
, &u
->sink
->sample_spec
) - (int64_t) now2
;
514 r
= (pa_usec_t
) delay
;
516 if (u
->memchunk
.memblock
)
517 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
522 static int build_pollfd(struct userdata
*u
) {
524 pa_assert(u
->pcm_handle
);
526 if (u
->alsa_rtpoll_item
)
527 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
529 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
535 static int suspend(struct userdata
*u
) {
537 pa_assert(u
->pcm_handle
);
539 pa_smoother_pause(u
->smoother
, pa_rtclock_usec());
542 snd_pcm_drain(u
->pcm_handle
);
543 snd_pcm_close(u
->pcm_handle
);
544 u
->pcm_handle
= NULL
;
546 if (u
->alsa_rtpoll_item
) {
547 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
548 u
->alsa_rtpoll_item
= NULL
;
551 pa_log_info("Device suspended...");
556 static int update_sw_params(struct userdata
*u
) {
557 snd_pcm_uframes_t avail_min
;
562 /* Use the full buffer if noone asked us for anything specific */
563 u
->hwbuf_unused_frames
= 0;
568 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
571 pa_log_debug("latency set to %0.2f", (double) latency
/ PA_USEC_PER_MSEC
);
573 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
575 /* We need at least one sample in our buffer */
577 if (PA_UNLIKELY(b
< u
->frame_size
))
580 u
->hwbuf_unused_frames
=
581 PA_LIKELY(b
< u
->hwbuf_size
) ?
582 ((u
->hwbuf_size
- b
) / u
->frame_size
) : 0;
584 fix_tsched_watermark(u
);
588 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u
->hwbuf_unused_frames
);
590 /* We need at last one frame in the used part of the buffer */
591 avail_min
= u
->hwbuf_unused_frames
+ 1;
594 pa_usec_t sleep_usec
, process_usec
;
596 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
597 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
);
600 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
602 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
603 pa_log("Failed to set software parameters: %s", snd_strerror(err
));
607 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused_frames
* u
->frame_size
);
612 static int unsuspend(struct userdata
*u
) {
617 snd_pcm_uframes_t period_size
;
620 pa_assert(!u
->pcm_handle
);
622 pa_log_info("Trying resume...");
624 snd_config_update_free_global();
625 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
, SND_PCM_NONBLOCK
)) < 0) {
626 pa_log("Error opening PCM device %s: %s", u
->device_name
, snd_strerror(err
));
630 ss
= u
->sink
->sample_spec
;
631 nfrags
= u
->nfragments
;
632 period_size
= u
->fragment_size
/ u
->frame_size
;
636 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
637 pa_log("Failed to set hardware parameters: %s", snd_strerror(err
));
641 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
642 pa_log_warn("Resume failed, couldn't get original access mode.");
646 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
647 pa_log_warn("Resume failed, couldn't restore original sample settings.");
651 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
652 pa_log_warn("Resume failed, couldn't restore original fragment settings.");
656 if (update_sw_params(u
) < 0)
659 if (build_pollfd(u
) < 0)
662 /* FIXME: We need to reload the volume somehow */
667 pa_log_info("Resumed successfully...");
673 snd_pcm_close(u
->pcm_handle
);
674 u
->pcm_handle
= NULL
;
680 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
681 struct userdata
*u
= PA_SINK(o
)->userdata
;
685 case PA_SINK_MESSAGE_GET_LATENCY
: {
689 r
= sink_get_latency(u
);
691 *((pa_usec_t
*) data
) = r
;
696 case PA_SINK_MESSAGE_SET_STATE
:
698 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
700 case PA_SINK_SUSPENDED
:
701 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
709 case PA_SINK_RUNNING
:
711 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
712 if (build_pollfd(u
) < 0)
716 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
717 if (unsuspend(u
) < 0)
723 case PA_SINK_UNLINKED
:
731 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
734 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
735 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
738 pa_assert(u
->mixer_handle
);
740 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
743 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
744 pa_sink_get_volume(u
->sink
, TRUE
);
745 pa_sink_get_mute(u
->sink
, TRUE
);
751 static int sink_get_volume_cb(pa_sink
*s
) {
752 struct userdata
*u
= s
->userdata
;
756 char t
[PA_CVOLUME_SNPRINT_MAX
];
759 pa_assert(u
->mixer_elem
);
761 if (u
->mixer_seperate_channels
) {
763 r
.channels
= s
->sample_spec
.channels
;
765 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
768 if (u
->hw_dB_supported
) {
770 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
773 #ifdef HAVE_VALGRIND_MEMCHECK_H
774 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
777 r
.values
[i
] = pa_sw_volume_from_dB((double) alsa_vol
/ 100.0);
780 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
783 r
.values
[i
] = (pa_volume_t
) round(((double) (alsa_vol
- u
->hw_volume_min
) * PA_VOLUME_NORM
) / (u
->hw_volume_max
- u
->hw_volume_min
));
790 pa_assert(u
->hw_dB_supported
);
792 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
795 #ifdef HAVE_VALGRIND_MEMCHECK_H
796 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
799 pa_cvolume_set(&r
, s
->sample_spec
.channels
, pa_sw_volume_from_dB((double) alsa_vol
/ 100.0));
802 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
804 if (!pa_cvolume_equal(&u
->hardware_volume
, &r
)) {
806 u
->hardware_volume
= s
->volume
= r
;
808 if (u
->hw_dB_supported
) {
811 /* Hmm, so the hardware volume changed, let's reset our software volume */
813 pa_cvolume_reset(&reset
, s
->sample_spec
.channels
);
814 pa_sink_set_soft_volume(s
, &reset
);
821 pa_log_error("Unable to read volume: %s", snd_strerror(err
));
826 static int sink_set_volume_cb(pa_sink
*s
) {
827 struct userdata
*u
= s
->userdata
;
833 pa_assert(u
->mixer_elem
);
835 if (u
->mixer_seperate_channels
) {
837 r
.channels
= s
->sample_spec
.channels
;
839 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
843 vol
= s
->volume
.values
[i
];
845 if (u
->hw_dB_supported
) {
847 alsa_vol
= (long) (pa_sw_volume_to_dB(vol
) * 100);
848 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_dB_min
, u
->hw_dB_max
);
850 if ((err
= snd_mixer_selem_set_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
, 1)) < 0)
853 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
856 r
.values
[i
] = pa_sw_volume_from_dB((double) alsa_vol
/ 100.0);
859 alsa_vol
= (long) round(((double) vol
* (u
->hw_volume_max
- u
->hw_volume_min
)) / PA_VOLUME_NORM
) + u
->hw_volume_min
;
860 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_volume_min
, u
->hw_volume_max
);
862 if ((err
= snd_mixer_selem_set_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
)) < 0)
865 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
868 r
.values
[i
] = (pa_volume_t
) round(((double) (alsa_vol
- u
->hw_volume_min
) * PA_VOLUME_NORM
) / (u
->hw_volume_max
- u
->hw_volume_min
));
876 pa_assert(u
->hw_dB_supported
);
878 vol
= pa_cvolume_max(&s
->volume
);
880 alsa_vol
= (long) (pa_sw_volume_to_dB(vol
) * 100);
881 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_dB_min
, u
->hw_dB_max
);
883 if ((err
= snd_mixer_selem_set_playback_dB_all(u
->mixer_elem
, alsa_vol
, 1)) < 0)
886 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
889 pa_cvolume_set(&r
, s
->volume
.channels
, pa_sw_volume_from_dB((double) alsa_vol
/ 100.0));
892 u
->hardware_volume
= r
;
894 if (u
->hw_dB_supported
) {
895 char t
[PA_CVOLUME_SNPRINT_MAX
];
897 /* Match exactly what the user requested by software */
899 pa_alsa_volume_divide(&r
, &s
->volume
);
900 pa_sink_set_soft_volume(s
, &r
);
902 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->volume
));
903 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
904 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
908 /* We can't match exactly what the user requested, hence let's
909 * at least tell the user about it */
916 pa_log_error("Unable to set volume: %s", snd_strerror(err
));
921 static int sink_get_mute_cb(pa_sink
*s
) {
922 struct userdata
*u
= s
->userdata
;
926 pa_assert(u
->mixer_elem
);
928 if ((err
= snd_mixer_selem_get_playback_switch(u
->mixer_elem
, 0, &sw
)) < 0) {
929 pa_log_error("Unable to get switch: %s", snd_strerror(err
));
938 static int sink_set_mute_cb(pa_sink
*s
) {
939 struct userdata
*u
= s
->userdata
;
943 pa_assert(u
->mixer_elem
);
945 if ((err
= snd_mixer_selem_set_playback_switch_all(u
->mixer_elem
, !s
->muted
)) < 0) {
946 pa_log_error("Unable to set switch: %s", snd_strerror(err
));
953 static void sink_update_requested_latency_cb(pa_sink
*s
) {
954 struct userdata
*u
= s
->userdata
;
955 snd_pcm_sframes_t before
;
961 before
= u
->hwbuf_unused_frames
;
964 /* Let's check whether we now use only a smaller part of the
965 buffer then before. If so, we need to make sure that subsequent
966 rewinds are relative to the new maxium fill level and not to the
967 current fill level. Thus, let's do a full rewind once, to clear
970 if (u
->hwbuf_unused_frames
> before
) {
971 pa_log_debug("Requesting rewind due to latency change.");
972 pa_sink_request_rewind(s
, (size_t) -1);
976 static int process_rewind(struct userdata
*u
) {
977 snd_pcm_sframes_t unused
;
978 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
981 /* Figure out how much we shall rewind and reset the counter */
982 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
983 u
->sink
->thread_info
.rewind_nbytes
= 0;
985 if (rewind_nbytes
<= 0)
988 pa_assert(rewind_nbytes
> 0);
989 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
991 snd_pcm_hwsync(u
->pcm_handle
);
992 if ((unused
= snd_pcm_avail_update(u
->pcm_handle
)) < 0) {
993 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused
));
997 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
999 if (u
->hwbuf_size
> unused_nbytes
)
1000 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1004 if (rewind_nbytes
> limit_nbytes
)
1005 rewind_nbytes
= limit_nbytes
;
1007 if (rewind_nbytes
> 0) {
1008 snd_pcm_sframes_t in_frames
, out_frames
;
1010 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1012 in_frames
= (snd_pcm_sframes_t
) rewind_nbytes
/ u
->frame_size
;
1013 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1014 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, in_frames
)) < 0) {
1015 pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames
));
1018 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1020 rewind_nbytes
= out_frames
* u
->frame_size
;
1022 if (rewind_nbytes
<= 0)
1023 pa_log_info("Tried rewind, but was apparently not possible.");
1025 u
->frame_index
-= out_frames
;
1026 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1027 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1029 u
->after_rewind
= TRUE
;
1033 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1037 pa_sink_process_rewind(u
->sink
, 0);
1043 static void thread_func(void *userdata
) {
1044 struct userdata
*u
= userdata
;
1048 pa_log_debug("Thread starting up");
1050 if (u
->core
->realtime_scheduling
)
1051 pa_make_realtime(u
->core
->realtime_priority
);
1053 pa_thread_mq_install(&u
->thread_mq
);
1054 pa_rtpoll_install(u
->rtpoll
);
1059 /* pa_log_debug("loop"); */
1061 /* Render some data and write it to the dsp */
1062 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1064 pa_usec_t sleep_usec
= 0;
1066 if (u
->sink
->thread_info
.rewind_requested
)
1067 if (process_rewind(u
) < 0)
1071 work_done
= mmap_write(u
, &sleep_usec
);
1073 work_done
= unix_write(u
, &sleep_usec
);
1078 /* pa_log_debug("work_done = %i", work_done); */
1083 pa_log_info("Starting playback.");
1084 snd_pcm_start(u
->pcm_handle
);
1086 pa_smoother_resume(u
->smoother
, pa_rtclock_usec());
1092 if (u
->use_tsched
) {
1095 if (u
->since_start
<= u
->hwbuf_size
) {
1097 /* USB devices on ALSA seem to hit a buffer
1098 * underrun during the first iterations much
1099 * quicker then we calculate here, probably due to
1100 * the transport latency. To accomodate for that
1101 * we artificially decrease the sleep time until
1102 * we have filled the buffer at least once
1105 /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
1109 /* OK, the playback buffer is now full, let's
1110 * calculate when to wake up next */
1111 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1113 /* Convert from the sound card time domain to the
1114 * system time domain */
1115 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_usec(), sleep_usec
);
1117 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1119 /* We don't trust the conversion, so we wake up whatever comes first */
1120 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1124 u
->after_rewind
= FALSE
;
1126 } else if (u
->use_tsched
)
1128 /* OK, we're in an invalid state, let's disable our timers */
1129 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1131 /* Hmm, nothing to do. Let's sleep */
1132 if ((ret
= pa_rtpoll_run(u
->rtpoll
, 1)) < 0)
1138 /* Tell ALSA about this and process its response */
1139 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1140 struct pollfd
*pollfd
;
1141 unsigned short revents
= 0;
1145 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1147 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1148 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err
));
1152 if (revents
& (POLLERR
|POLLNVAL
|POLLHUP
)) {
1153 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1160 if (revents
&& u
->use_tsched
)
1161 pa_log_debug("Wakeup from ALSA! (%i)", revents
);
1166 /* If this was no regular exit from the loop we have to continue
1167 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1168 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1169 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1172 pa_log_debug("Thread shutting down");
1175 int pa__init(pa_module
*m
) {
1177 pa_modargs
*ma
= NULL
;
1178 struct userdata
*u
= NULL
;
1182 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1183 snd_pcm_uframes_t period_frames
, tsched_frames
;
1185 snd_pcm_info_t
*pcm_info
= NULL
;
1188 char *name_buf
= NULL
;
1189 pa_bool_t namereg_fail
;
1190 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
;
1192 pa_sink_new_data data
;
1194 snd_pcm_info_alloca(&pcm_info
);
1198 pa_alsa_redirect_errors_inc();
1200 if (!(ma
= pa_modargs_new(m
->argument
, valid_modargs
))) {
1201 pa_log("Failed to parse module arguments");
1205 ss
= m
->core
->default_sample_spec
;
1206 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1207 pa_log("Failed to parse sample specification and channel map");
1211 frame_size
= pa_frame_size(&ss
);
1213 nfrags
= m
->core
->default_n_fragments
;
1214 frag_size
= pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1216 frag_size
= frame_size
;
1217 tsched_size
= pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1218 tsched_watermark
= pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1220 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1221 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1222 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1223 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1224 pa_log("Failed to parse buffer metrics");
1228 hwbuf_size
= frag_size
* nfrags
;
1229 period_frames
= frag_size
/frame_size
;
1230 tsched_frames
= tsched_size
/frame_size
;
1232 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1233 pa_log("Failed to parse mmap argument.");
1237 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1238 pa_log("Failed to parse tsched argument.");
1242 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1243 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1247 u
= pa_xnew0(struct userdata
, 1);
1251 u
->use_mmap
= use_mmap
;
1252 u
->use_tsched
= use_tsched
;
1255 u
->after_rewind
= FALSE
;
1256 u
->rtpoll
= pa_rtpoll_new();
1257 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1258 u
->alsa_rtpoll_item
= NULL
;
1260 u
->smoother
= pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC
*2, DEFAULT_TSCHED_BUFFER_USEC
*2, TRUE
, 5);
1261 usec
= pa_rtclock_usec();
1262 pa_smoother_set_time_offset(u
->smoother
, usec
);
1263 pa_smoother_pause(u
->smoother
, usec
);
1265 snd_config_update_free_global();
1270 if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1272 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id(
1276 SND_PCM_STREAM_PLAYBACK
,
1277 &nfrags
, &period_frames
, tsched_frames
,
1284 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1285 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1288 SND_PCM_STREAM_PLAYBACK
,
1289 &nfrags
, &period_frames
, tsched_frames
,
1295 pa_assert(u
->device_name
);
1296 pa_log_info("Successfully opened device %s.", u
->device_name
);
1298 if (use_mmap
&& !b
) {
1299 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1300 u
->use_mmap
= use_mmap
= FALSE
;
1303 if (use_tsched
&& (!b
|| !d
)) {
1304 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1305 u
->use_tsched
= use_tsched
= FALSE
;
1309 pa_log_info("Successfully enabled mmap() mode.");
1312 pa_log_info("Successfully enabled timer-based scheduling mode.");
1314 if ((err
= snd_pcm_info(u
->pcm_handle
, pcm_info
)) < 0) {
1315 pa_log("Error fetching PCM info: %s", snd_strerror(err
));
1319 /* ALSA might tweak the sample spec, so recalculate the frame size */
1320 frame_size
= pa_frame_size(&ss
);
1322 if ((err
= snd_mixer_open(&u
->mixer_handle
, 0)) < 0)
1323 pa_log_warn("Error opening mixer: %s", snd_strerror(err
));
1325 pa_bool_t found
= FALSE
;
1327 if (pa_alsa_prepare_mixer(u
->mixer_handle
, u
->device_name
) >= 0)
1330 snd_pcm_info_t
*info
;
1332 snd_pcm_info_alloca(&info
);
1334 if (snd_pcm_info(u
->pcm_handle
, info
) >= 0) {
1338 if ((card
= snd_pcm_info_get_card(info
)) >= 0) {
1340 md
= pa_sprintf_malloc("hw:%i", card
);
1342 if (strcmp(u
->device_name
, md
))
1343 if (pa_alsa_prepare_mixer(u
->mixer_handle
, md
) >= 0)
1351 if (!(u
->mixer_elem
= pa_alsa_find_elem(u
->mixer_handle
, "Master", "PCM")))
1355 snd_mixer_close(u
->mixer_handle
);
1356 u
->mixer_handle
= NULL
;
1360 if ((name
= pa_modargs_get_value(ma
, "sink_name", NULL
)))
1361 namereg_fail
= TRUE
;
1363 name
= name_buf
= pa_sprintf_malloc("alsa_output.%s", u
->device_name
);
1364 namereg_fail
= FALSE
;
1367 pa_sink_new_data_init(&data
);
1368 data
.driver
= __FILE__
;
1370 pa_sink_new_data_set_name(&data
, name
);
1371 data
.namereg_fail
= namereg_fail
;
1372 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1373 pa_sink_new_data_set_channel_map(&data
, &map
);
1375 pa_alsa_init_proplist(data
.proplist
, pcm_info
);
1376 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1377 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1378 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1379 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1381 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
);
1382 pa_sink_new_data_done(&data
);
1386 pa_log("Failed to create sink object");
1390 u
->sink
->parent
.process_msg
= sink_process_msg
;
1391 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1392 u
->sink
->userdata
= u
;
1394 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1395 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
1397 u
->frame_size
= frame_size
;
1398 u
->fragment_size
= frag_size
= period_frames
* frame_size
;
1399 u
->nfragments
= nfrags
;
1400 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
1401 u
->hwbuf_unused_frames
= 0;
1402 u
->tsched_watermark
= tsched_watermark
;
1404 u
->hw_dB_supported
= FALSE
;
1405 u
->hw_dB_min
= u
->hw_dB_max
= 0;
1406 u
->hw_volume_min
= u
->hw_volume_max
= 0;
1407 u
->mixer_seperate_channels
= FALSE
;
1408 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1411 fix_tsched_watermark(u
);
1413 u
->sink
->thread_info
.max_rewind
= use_tsched
? u
->hwbuf_size
: 0;
1414 u
->sink
->thread_info
.max_request
= u
->hwbuf_size
;
1416 pa_sink_set_latency_range(u
->sink
,
1417 !use_tsched
? pa_bytes_to_usec(u
->hwbuf_size
, &ss
) : (pa_usec_t
) -1,
1418 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1420 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1421 nfrags
, (long unsigned) u
->fragment_size
,
1422 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1425 pa_log_info("Time scheduling watermark is %0.2fms",
1426 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1428 if (update_sw_params(u
) < 0)
1431 pa_memchunk_reset(&u
->memchunk
);
1433 if (u
->mixer_handle
) {
1434 pa_assert(u
->mixer_elem
);
1436 if (snd_mixer_selem_has_playback_volume(u
->mixer_elem
)) {
1437 pa_bool_t suitable
= TRUE
;
1439 if (snd_mixer_selem_get_playback_volume_range(u
->mixer_elem
, &u
->hw_volume_min
, &u
->hw_volume_max
) < 0) {
1440 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1443 pa_log_info("Volume ranges from %li to %li.", u
->hw_volume_min
, u
->hw_volume_max
);
1444 pa_assert(u
->hw_volume_min
< u
->hw_volume_max
);
1447 if (snd_mixer_selem_get_playback_dB_range(u
->mixer_elem
, &u
->hw_dB_min
, &u
->hw_dB_max
) < 0)
1448 pa_log_info("Mixer doesn't support dB information.");
1450 #ifdef HAVE_VALGRIND_MEMCHECK_H
1451 VALGRIND_MAKE_MEM_DEFINED(&u
->hw_dB_min
, sizeof(u
->hw_dB_min
));
1452 VALGRIND_MAKE_MEM_DEFINED(&u
->hw_dB_max
, sizeof(u
->hw_dB_max
));
1455 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u
->hw_dB_min
/100.0, u
->hw_dB_max
/100.0);
1456 pa_assert(u
->hw_dB_min
< u
->hw_dB_max
);
1457 u
->hw_dB_supported
= TRUE
;
1461 !u
->hw_dB_supported
&&
1462 u
->hw_volume_max
- u
->hw_volume_min
< 3) {
1464 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1469 u
->mixer_seperate_channels
= pa_alsa_calc_mixer_map(u
->mixer_elem
, &map
, u
->mixer_map
, TRUE
) >= 0;
1471 u
->sink
->get_volume
= sink_get_volume_cb
;
1472 u
->sink
->set_volume
= sink_set_volume_cb
;
1473 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->hw_dB_supported
? PA_SINK_DECIBEL_VOLUME
: 0);
1474 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->hw_dB_supported
? "supported" : "not supported");
1477 pa_log_info("Using software volume control.");
1480 if (snd_mixer_selem_has_playback_switch(u
->mixer_elem
)) {
1481 u
->sink
->get_mute
= sink_get_mute_cb
;
1482 u
->sink
->set_mute
= sink_set_mute_cb
;
1483 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1485 pa_log_info("Using software mute control.");
1487 u
->mixer_fdl
= pa_alsa_fdlist_new();
1489 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, m
->core
->mainloop
) < 0) {
1490 pa_log("Failed to initialize file descriptor monitoring");
1494 snd_mixer_elem_set_callback(u
->mixer_elem
, mixer_callback
);
1495 snd_mixer_elem_set_callback_private(u
->mixer_elem
, u
);
1497 u
->mixer_fdl
= NULL
;
1499 pa_alsa_dump(u
->pcm_handle
);
1501 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1502 pa_log("Failed to create thread.");
1506 /* Get initial mixer settings */
1507 if (data
.volume_is_set
) {
1508 if (u
->sink
->set_volume
)
1509 u
->sink
->set_volume(u
->sink
);
1511 if (u
->sink
->get_volume
)
1512 u
->sink
->get_volume(u
->sink
);
1515 if (data
.muted_is_set
) {
1516 if (u
->sink
->set_mute
)
1517 u
->sink
->set_mute(u
->sink
);
1519 if (u
->sink
->get_mute
)
1520 u
->sink
->get_mute(u
->sink
);
1523 pa_sink_put(u
->sink
);
1525 pa_modargs_free(ma
);
1532 pa_modargs_free(ma
);
1539 void pa__done(pa_module
*m
) {
1544 if (!(u
= m
->userdata
)) {
1545 pa_alsa_redirect_errors_dec();
1550 pa_sink_unlink(u
->sink
);
1553 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1554 pa_thread_free(u
->thread
);
1557 pa_thread_mq_done(&u
->thread_mq
);
1560 pa_sink_unref(u
->sink
);
1562 if (u
->memchunk
.memblock
)
1563 pa_memblock_unref(u
->memchunk
.memblock
);
1565 if (u
->alsa_rtpoll_item
)
1566 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1569 pa_rtpoll_free(u
->rtpoll
);
1572 pa_alsa_fdlist_free(u
->mixer_fdl
);
1574 if (u
->mixer_handle
)
1575 snd_mixer_close(u
->mixer_handle
);
1577 if (u
->pcm_handle
) {
1578 snd_pcm_drop(u
->pcm_handle
);
1579 snd_pcm_close(u
->pcm_handle
);
1583 pa_smoother_free(u
->smoother
);
1585 pa_xfree(u
->device_name
);
1588 snd_config_update_free_global();
1590 pa_alsa_redirect_errors_dec();