2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
30 #include <asoundlib.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
53 #include <modules/reserve-wrap.h>
55 #include "alsa-util.h"
56 #include "alsa-source.h"
58 /* #define DEBUG_TIMING */
60 #define DEFAULT_DEVICE "default"
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
75 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
76 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
89 pa_thread_mq thread_mq
;
92 snd_pcm_t
*pcm_handle
;
95 pa_alsa_fdlist
*mixer_fdl
;
96 pa_alsa_mixer_pdata
*mixer_pd
;
97 snd_mixer_t
*mixer_handle
;
98 pa_alsa_path_set
*mixer_path_set
;
99 pa_alsa_path
*mixer_path
;
101 pa_cvolume hardware_volume
;
110 tsched_watermark_ref
,
116 watermark_inc_threshold
,
117 watermark_dec_threshold
;
119 pa_usec_t watermark_dec_not_before
;
120 pa_usec_t min_latency_ref
;
122 char *device_name
; /* name of the PCM device */
123 char *control_device
; /* name of the control device */
125 pa_bool_t use_mmap
:1, use_tsched
:1, deferred_volume
:1, fixed_latency_range
:1;
129 pa_rtpoll_item
*alsa_rtpoll_item
;
131 pa_smoother
*smoother
;
133 pa_usec_t smoother_interval
;
134 pa_usec_t last_smoother_update
;
136 pa_reserve_wrapper
*reserve
;
137 pa_hook_slot
*reserve_slot
;
138 pa_reserve_monitor_wrapper
*monitor
;
139 pa_hook_slot
*monitor_slot
;
142 pa_alsa_ucm_mapping_context
*ucm_context
;
145 static void userdata_free(struct userdata
*u
);
/* NOTE(review): extraction artifact — interior/trailing lines of this function
 * (the success return and closing brace) are missing from this view. */
/* Device-reservation hook: called when another client forcibly takes the ALSA
 * device; suspends our source. Cancels the hook chain if suspending fails. */
147 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
151 if (pa_source_suspend(u
->source
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
152 return PA_HOOK_CANCEL
;
/* Tear down the device-reservation state: free the hook slot (if connected)
 * and drop our reference on the reserve wrapper.
 * NOTE(review): lines are missing here — presumably the unref is guarded by a
 * `if (u->reserve)` check and followed by `u->reserve = NULL;` — confirm
 * against the full file. */
157 static void reserve_done(struct userdata
*u
) {
160 if (u
->reserve_slot
) {
161 pa_hook_slot_free(u
->reserve_slot
);
162 u
->reserve_slot
= NULL
;
166 pa_reserve_wrapper_unref(u
->reserve
);
/* Push the source's human-readable description (PA_PROP_DEVICE_DESCRIPTION)
 * to the reservation wrapper so other clients can show who holds the device.
 * No-op when the source or the reservation does not exist. */
171 static void reserve_update(struct userdata
*u
) {
172 const char *description
;
175 if (!u
->source
|| !u
->reserve
)
178 if ((description
= pa_proplist_gets(u
->source
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
179 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
/* Acquire the session device-reservation for PCM device `dname` and connect
 * reserve_cb() so we get notified when another client claims the device.
 * Skipped entirely in system mode.
 * NOTE(review): the declaration of `rname`, the early-return bodies, its
 * freeing (pa_xfree), and the final return are missing from this extraction —
 * do not edit without the full file. */
182 static int reserve_init(struct userdata
*u
, const char *dname
) {
191 if (pa_in_system_mode())
194 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
197 /* We are resuming, try to lock the device */
198 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
206 pa_assert(!u
->reserve_slot
);
207 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
/* Reservation-monitor hook: suspend the source while some other client holds
 * the device (busy && we do not hold the reservation ourselves), resume it
 * otherwise. NOTE(review): the declaration of `b` and the trailing
 * `return PA_HOOK_OK;` are missing from this extraction. */
212 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
218 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
220 pa_source_suspend(u
->source
, b
, PA_SUSPEND_APPLICATION
);
/* Tear down the reservation-monitor state: free the hook slot (if connected)
 * and drop the monitor wrapper reference.
 * NOTE(review): mirrors reserve_done(); the guard around the unref and the
 * `u->monitor = NULL;` reset are likely among the dropped lines — confirm. */
224 static void monitor_done(struct userdata
*u
) {
227 if (u
->monitor_slot
) {
228 pa_hook_slot_free(u
->monitor_slot
);
229 u
->monitor_slot
= NULL
;
233 pa_reserve_monitor_wrapper_unref(u
->monitor
);
/* Set up passive monitoring of the device reservation for `dname` and connect
 * monitor_cb() so we auto-suspend/resume as other clients take/release the
 * device. Skipped in system mode. NOTE(review): `rname` declaration, error
 * paths and final return were dropped by the extraction. */
238 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
244 if (pa_in_system_mode())
247 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
250 /* We are resuming, try to lock the device */
251 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
257 pa_assert(!u
->monitor_slot
);
258 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
/* Recompute the tsched minimum sleep/wakeup byte thresholds from the current
 * usable buffer size. Both are clamped to [one frame, half the usable buffer]
 * so the timer-based scheduler can never sleep past (or wake inside) the
 * hardware buffer. Only valid under timer-based scheduling (asserted). */
263 static void fix_min_sleep_wakeup(struct userdata
*u
) {
264 size_t max_use
, max_use_2
;
267 pa_assert(u
->use_tsched
);
269 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
270 max_use_2
= pa_frame_align(max_use
/2, &u
->source
->sample_spec
);
272 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->source
->sample_spec
);
273 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
275 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->source
->sample_spec
);
276 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
/* Clamp the tsched wakeup watermark into [min_wakeup, max_use - min_sleep]
 * after any change to the watermark or buffer geometry.
 * NOTE(review): the declaration of `max_use` (a size_t local in the full
 * file) was dropped by the extraction. */
279 static void fix_tsched_watermark(struct userdata
*u
) {
282 pa_assert(u
->use_tsched
);
284 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
286 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
287 u
->tsched_watermark
= max_use
- u
->min_sleep
;
289 if (u
->tsched_watermark
< u
->min_wakeup
)
290 u
->tsched_watermark
= u
->min_wakeup
;
293 static void increase_watermark(struct userdata
*u
) {
294 size_t old_watermark
;
295 pa_usec_t old_min_latency
, new_min_latency
;
298 pa_assert(u
->use_tsched
);
300 /* First, just try to increase the watermark */
301 old_watermark
= u
->tsched_watermark
;
302 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
303 fix_tsched_watermark(u
);
305 if (old_watermark
!= u
->tsched_watermark
) {
306 pa_log_info("Increasing wakeup watermark to %0.2f ms",
307 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->source
->sample_spec
) / PA_USEC_PER_MSEC
);
311 /* Hmm, we cannot increase the watermark any further, hence let's
312 raise the latency unless doing so was disabled in
314 if (u
->fixed_latency_range
)
317 old_min_latency
= u
->source
->thread_info
.min_latency
;
318 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
319 new_min_latency
= PA_MIN(new_min_latency
, u
->source
->thread_info
.max_latency
);
321 if (old_min_latency
!= new_min_latency
) {
322 pa_log_info("Increasing minimal latency to %0.2f ms",
323 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
325 pa_source_set_latency_range_within_thread(u
->source
, new_min_latency
, u
->source
->thread_info
.max_latency
);
328 /* When we reach this we're officialy fucked! */
331 static void decrease_watermark(struct userdata
*u
) {
332 size_t old_watermark
;
336 pa_assert(u
->use_tsched
);
338 now
= pa_rtclock_now();
340 if (u
->watermark_dec_not_before
<= 0)
343 if (u
->watermark_dec_not_before
> now
)
346 old_watermark
= u
->tsched_watermark
;
348 if (u
->tsched_watermark
< u
->watermark_dec_step
)
349 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
351 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
353 fix_tsched_watermark(u
);
355 if (old_watermark
!= u
->tsched_watermark
)
356 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
357 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->source
->sample_spec
) / PA_USEC_PER_MSEC
);
359 /* We don't change the latency range*/
362 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
/* Compute how long the tsched IO thread may sleep (*sleep_usec) before it
 * must wake up and read, based on the requested latency (falling back to the
 * full hardware buffer when no latency was requested) minus the watermark.
 * NOTE(review): declarations of `usec`/`wm`, the assignment of *process_usec,
 * and the DEBUG_TIMING guard around the log call are missing from this
 * extraction — *process_usec is logged but never visibly written here. */
365 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
368 pa_assert(sleep_usec
);
369 pa_assert(process_usec
);
372 pa_assert(u
->use_tsched
);
374 usec
= pa_source_get_requested_latency_within_thread(u
->source
);
376 if (usec
== (pa_usec_t
) -1)
377 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->source
->sample_spec
);
379 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->source
->sample_spec
);
384 *sleep_usec
= usec
- wm
;
388 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
389 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
390 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
391 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
/* Attempt to recover the PCM from an ALSA error (`err`, negative errno) via
 * snd_pcm_recover() with silent=1. `call` names the failing ALSA call for
 * logging. -EAGAIN must be handled by the caller (asserted here); -ESTRPIPE
 * indicates system suspend. NOTE(review): the -EPIPE (overrun) guard around
 * the "Buffer overrun!" log line, the error return and the success return
 * were dropped by the extraction. */
395 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
400 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
402 pa_assert(err
!= -EAGAIN
);
405 pa_log_debug("%s: Buffer overrun!", call
);
407 if (err
== -ESTRPIPE
)
408 pa_log_debug("%s: System suspended!", call
);
410 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
411 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
419 static size_t check_left_to_record(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
420 size_t left_to_record
;
421 size_t rec_space
= u
->hwbuf_size
- u
->hwbuf_unused
;
422 pa_bool_t overrun
= FALSE
;
424 /* We use <= instead of < for this check here because an overrun
425 * only happens after the last sample was processed, not already when
426 * it is removed from the buffer. This is particularly important
427 * when block transfer is used. */
429 if (n_bytes
<= rec_space
)
430 left_to_record
= rec_space
- n_bytes
;
433 /* We got a dropout. What a mess! */
441 if (pa_log_ratelimit(PA_LOG_INFO
))
442 pa_log_info("Overrun!");
446 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
) / PA_USEC_PER_MSEC
);
450 pa_bool_t reset_not_before
= TRUE
;
452 if (overrun
|| left_to_record
< u
->watermark_inc_threshold
)
453 increase_watermark(u
);
454 else if (left_to_record
> u
->watermark_dec_threshold
) {
455 reset_not_before
= FALSE
;
457 /* We decrease the watermark only if have actually
458 * been woken up by a timeout. If something else woke
459 * us up it's too easy to fulfill the deadlines... */
462 decrease_watermark(u
);
465 if (reset_not_before
)
466 u
->watermark_dec_not_before
= 0;
469 return left_to_record
;
472 static int mmap_read(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
473 pa_bool_t work_done
= FALSE
;
474 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
475 size_t left_to_record
;
479 pa_source_assert_ref(u
->source
);
482 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
488 pa_bool_t after_avail
= TRUE
;
490 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->source
->sample_spec
)) < 0)) {
492 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
498 n_bytes
= (size_t) n
* u
->frame_size
;
501 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
504 left_to_record
= check_left_to_record(u
, n_bytes
, on_timeout
);
509 pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
511 pa_log_debug("Not reading, because too early.");
516 if (PA_UNLIKELY(n_bytes
<= 0)) {
520 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
521 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
522 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
523 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
529 pa_log_debug("Not reading, because not necessary.");
537 pa_log_debug("Not filling up, because already too many iterations.");
546 pa_log_debug("Reading");
553 const snd_pcm_channel_area_t
*areas
;
554 snd_pcm_uframes_t offset
, frames
;
555 snd_pcm_sframes_t sframes
;
557 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
558 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
560 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->source
->sample_spec
)) < 0)) {
562 if (!after_avail
&& err
== -EAGAIN
)
565 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
571 /* Make sure that if these memblocks need to be copied they will fit into one slot */
572 if (frames
> pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
)
573 frames
= pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
;
575 if (!after_avail
&& frames
== 0)
578 pa_assert(frames
> 0);
581 /* Check these are multiples of 8 bit */
582 pa_assert((areas
[0].first
& 7) == 0);
583 pa_assert((areas
[0].step
& 7)== 0);
585 /* We assume a single interleaved memory buffer */
586 pa_assert((areas
[0].first
>> 3) == 0);
587 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
589 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
591 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
592 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
595 pa_source_post(u
->source
, &chunk
);
596 pa_memblock_unref_fixed(chunk
.memblock
);
598 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
600 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
608 u
->read_count
+= frames
* u
->frame_size
;
611 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
614 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
617 n_bytes
-= (size_t) frames
* u
->frame_size
;
622 *sleep_usec
= pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
);
623 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->source
->sample_spec
);
625 if (*sleep_usec
> process_usec
)
626 *sleep_usec
-= process_usec
;
631 return work_done
? 1 : 0;
634 static int unix_read(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
635 int work_done
= FALSE
;
636 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
637 size_t left_to_record
;
641 pa_source_assert_ref(u
->source
);
644 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
650 pa_bool_t after_avail
= TRUE
;
652 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->source
->sample_spec
)) < 0)) {
654 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
660 n_bytes
= (size_t) n
* u
->frame_size
;
661 left_to_record
= check_left_to_record(u
, n_bytes
, on_timeout
);
666 pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
) > process_usec
+max_sleep_usec
/2)
669 if (PA_UNLIKELY(n_bytes
<= 0)) {
673 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
674 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
675 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
676 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
686 pa_log_debug("Not filling up, because already too many iterations.");
696 snd_pcm_sframes_t frames
;
699 chunk
.memblock
= pa_memblock_new(u
->core
->mempool
, (size_t) -1);
701 frames
= (snd_pcm_sframes_t
) (pa_memblock_get_length(chunk
.memblock
) / u
->frame_size
);
703 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
704 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
706 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
708 p
= pa_memblock_acquire(chunk
.memblock
);
709 frames
= snd_pcm_readi(u
->pcm_handle
, (uint8_t*) p
, (snd_pcm_uframes_t
) frames
);
710 pa_memblock_release(chunk
.memblock
);
712 if (PA_UNLIKELY(frames
< 0)) {
713 pa_memblock_unref(chunk
.memblock
);
715 if (!after_avail
&& (int) frames
== -EAGAIN
)
718 if ((r
= try_recover(u
, "snd_pcm_readi", (int) frames
)) == 0)
724 if (!after_avail
&& frames
== 0) {
725 pa_memblock_unref(chunk
.memblock
);
729 pa_assert(frames
> 0);
733 chunk
.length
= (size_t) frames
* u
->frame_size
;
735 pa_source_post(u
->source
, &chunk
);
736 pa_memblock_unref(chunk
.memblock
);
740 u
->read_count
+= frames
* u
->frame_size
;
742 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
744 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
747 n_bytes
-= (size_t) frames
* u
->frame_size
;
752 *sleep_usec
= pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
);
753 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->source
->sample_spec
);
755 if (*sleep_usec
> process_usec
)
756 *sleep_usec
-= process_usec
;
761 return work_done
? 1 : 0;
764 static void update_smoother(struct userdata
*u
) {
765 snd_pcm_sframes_t delay
= 0;
768 pa_usec_t now1
= 0, now2
;
769 snd_pcm_status_t
*status
;
771 snd_pcm_status_alloca(&status
);
774 pa_assert(u
->pcm_handle
);
776 /* Let's update the time smoother */
778 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->source
->sample_spec
, TRUE
)) < 0)) {
779 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err
));
783 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
784 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
786 snd_htimestamp_t htstamp
= { 0, 0 };
787 snd_pcm_status_get_htstamp(status
, &htstamp
);
788 now1
= pa_timespec_load(&htstamp
);
791 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
793 now1
= pa_rtclock_now();
795 /* check if the time since the last update is bigger than the interval */
796 if (u
->last_smoother_update
> 0)
797 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
800 position
= u
->read_count
+ ((uint64_t) delay
* (uint64_t) u
->frame_size
);
801 now2
= pa_bytes_to_usec(position
, &u
->source
->sample_spec
);
803 pa_smoother_put(u
->smoother
, now1
, now2
);
805 u
->last_smoother_update
= now1
;
806 /* exponentially increase the update interval up to the MAX limit */
807 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
/* Current capture latency: smoothed device time minus the stream time implied
 * by bytes already posted (read_count). Clamped at zero since the smoother
 * estimate may momentarily lag. NOTE(review): the declaration of `delay`
 * (int64_t in the full file) was dropped by the extraction. */
810 static pa_usec_t
source_get_latency(struct userdata
*u
) {
812 pa_usec_t now1
, now2
;
816 now1
= pa_rtclock_now();
817 now2
= pa_smoother_get(u
->smoother
, now1
);
819 delay
= (int64_t) now2
- (int64_t) pa_bytes_to_usec(u
->read_count
, &u
->source
->sample_spec
);
821 return delay
>= 0 ? (pa_usec_t
) delay
: 0;
/* (Re)build the rtpoll item wrapping the PCM's poll descriptors, freeing any
 * previous item first. NOTE(review): the failure body and the success
 * `return 0;` are missing from this extraction. */
824 static int build_pollfd(struct userdata
*u
) {
826 pa_assert(u
->pcm_handle
);
828 if (u
->alsa_rtpoll_item
)
829 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
831 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
/* Suspend the source: pause the time smoother, close the PCM handle, disable
 * the UCM modifier bound to this source (if any), and drop the rtpoll item.
 * NOTE(review): guards (e.g. around the UCM block, which dereferences
 * u->ucm_context) and the final `return 0;` were dropped by the extraction —
 * consult the full file before editing. */
837 /* Called from IO context */
838 static int suspend(struct userdata
*u
) {
839 const char *mod_name
;
842 pa_assert(u
->pcm_handle
);
844 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
847 snd_pcm_close(u
->pcm_handle
);
848 u
->pcm_handle
= NULL
;
850 if ((mod_name
= pa_proplist_gets(u
->source
->proplist
, PA_ALSA_PROP_UCM_MODIFIER
))) {
851 pa_log_info("Disable ucm modifier %s", mod_name
);
853 if (snd_use_case_set(u
->ucm_context
->ucm
->ucm_mgr
, "_dismod", mod_name
) < 0)
854 pa_log("Failed to disable ucm modifier %s", mod_name
);
857 if (u
->alsa_rtpoll_item
) {
858 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
859 u
->alsa_rtpoll_item
= NULL
;
862 pa_log_info("Device suspended...");
867 /* Called from IO context */
868 static int update_sw_params(struct userdata
*u
) {
869 snd_pcm_uframes_t avail_min
;
874 /* Use the full buffer if no one asked us for anything specific */
880 if ((latency
= pa_source_get_requested_latency_within_thread(u
->source
)) != (pa_usec_t
) -1) {
883 pa_log_debug("latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
885 b
= pa_usec_to_bytes(latency
, &u
->source
->sample_spec
);
887 /* We need at least one sample in our buffer */
889 if (PA_UNLIKELY(b
< u
->frame_size
))
892 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
895 fix_min_sleep_wakeup(u
);
896 fix_tsched_watermark(u
);
899 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
904 pa_usec_t sleep_usec
, process_usec
;
906 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
907 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->source
->sample_spec
) / u
->frame_size
;
910 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
912 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
, !u
->use_tsched
)) < 0) {
913 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
920 /* Called from IO Context on unsuspend or from main thread when creating source */
921 static void reset_watermark(struct userdata
*u
, size_t tsched_watermark
, pa_sample_spec
*ss
,
924 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, ss
),
925 &u
->source
->sample_spec
);
927 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->source
->sample_spec
);
928 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->source
->sample_spec
);
930 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->source
->sample_spec
);
931 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->source
->sample_spec
);
933 fix_min_sleep_wakeup(u
);
934 fix_tsched_watermark(u
);
937 pa_source_set_latency_range_within_thread(u
->source
,
939 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
941 pa_source_set_latency_range(u
->source
,
943 pa_bytes_to_usec(u
->hwbuf_size
, ss
));
945 /* work-around assert in pa_source_set_latency_within_thead,
946 keep track of min_latency and reuse it when
947 this routine is called from IO context */
948 u
->min_latency_ref
= u
->source
->thread_info
.min_latency
;
951 pa_log_info("Time scheduling watermark is %0.2fms",
952 (double) pa_bytes_to_usec(u
->tsched_watermark
, ss
) / PA_USEC_PER_MSEC
);
955 /* Called from IO context */
956 static int unsuspend(struct userdata
*u
) {
960 snd_pcm_uframes_t period_size
, buffer_size
;
961 const char *mod_name
;
964 pa_assert(!u
->pcm_handle
);
966 pa_log_info("Trying resume...");
968 if ((mod_name
= pa_proplist_gets(u
->source
->proplist
, PA_ALSA_PROP_UCM_MODIFIER
))) {
969 pa_log_info("Enable ucm modifier %s", mod_name
);
971 if (snd_use_case_set(u
->ucm_context
->ucm
->ucm_mgr
, "_enamod", mod_name
) < 0)
972 pa_log("Failed to enable ucm modifier %s", mod_name
);
975 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_CAPTURE
,
977 SND_PCM_NO_AUTO_RESAMPLE
|
978 SND_PCM_NO_AUTO_CHANNELS
|
979 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
980 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
984 ss
= u
->source
->sample_spec
;
985 period_size
= u
->fragment_size
/ u
->frame_size
;
986 buffer_size
= u
->hwbuf_size
/ u
->frame_size
;
990 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &period_size
, &buffer_size
, 0, &b
, &d
, TRUE
)) < 0) {
991 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
995 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
996 pa_log_warn("Resume failed, couldn't get original access mode.");
1000 if (!pa_sample_spec_equal(&ss
, &u
->source
->sample_spec
)) {
1001 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1005 if (period_size
*u
->frame_size
!= u
->fragment_size
||
1006 buffer_size
*u
->frame_size
!= u
->hwbuf_size
) {
1007 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1008 (unsigned long) u
->hwbuf_size
, (unsigned long) u
->fragment_size
,
1009 (unsigned long) (buffer_size
*u
->frame_size
), (unsigned long) (period_size
*u
->frame_size
));
1013 if (update_sw_params(u
) < 0)
1016 if (build_pollfd(u
) < 0)
1019 /* FIXME: We need to reload the volume somehow */
1022 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
1023 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1024 u
->last_smoother_update
= 0;
1028 /* reset the watermark to the value defined when source was created */
1030 reset_watermark(u
, u
->tsched_watermark_ref
, &u
->source
->sample_spec
, TRUE
);
1032 pa_log_info("Resumed successfully...");
1037 if (u
->pcm_handle
) {
1038 snd_pcm_close(u
->pcm_handle
);
1039 u
->pcm_handle
= NULL
;
1045 /* Called from IO context */
1046 static int source_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1047 struct userdata
*u
= PA_SOURCE(o
)->userdata
;
1051 case PA_SOURCE_MESSAGE_GET_LATENCY
: {
1055 r
= source_get_latency(u
);
1057 *((pa_usec_t
*) data
) = r
;
1062 case PA_SOURCE_MESSAGE_SET_STATE
:
1064 switch ((pa_source_state_t
) PA_PTR_TO_UINT(data
)) {
1066 case PA_SOURCE_SUSPENDED
: {
1069 pa_assert(PA_SOURCE_IS_OPENED(u
->source
->thread_info
.state
));
1071 if ((r
= suspend(u
)) < 0)
1077 case PA_SOURCE_IDLE
:
1078 case PA_SOURCE_RUNNING
: {
1081 if (u
->source
->thread_info
.state
== PA_SOURCE_INIT
) {
1082 if (build_pollfd(u
) < 0)
1086 if (u
->source
->thread_info
.state
== PA_SOURCE_SUSPENDED
) {
1087 if ((r
= unsuspend(u
)) < 0)
1094 case PA_SOURCE_UNLINKED
:
1095 case PA_SOURCE_INIT
:
1096 case PA_SOURCE_INVALID_STATE
:
1103 return pa_source_process_msg(o
, code
, data
, offset
, chunk
);
1106 /* Called from main context */
1107 static int source_set_state_cb(pa_source
*s
, pa_source_state_t new_state
) {
1108 pa_source_state_t old_state
;
1111 pa_source_assert_ref(s
);
1112 pa_assert_se(u
= s
->userdata
);
1114 old_state
= pa_source_get_state(u
->source
);
1116 if (PA_SOURCE_IS_OPENED(old_state
) && new_state
== PA_SOURCE_SUSPENDED
)
1118 else if (old_state
== PA_SOURCE_SUSPENDED
&& PA_SOURCE_IS_OPENED(new_state
))
1119 if (reserve_init(u
, u
->device_name
) < 0)
1120 return -PA_ERR_BUSY
;
/* Main-thread ALSA mixer element callback: on external volume/mute changes
 * (SND_CTL_EVENT_MASK_VALUE) re-read hardware volume and mute into the
 * source. While the session is suspended (PA_SUSPEND_SESSION) only marks the
 * mixer dirty for later sync. NOTE(review): early-return bodies and the final
 * `return 0;` are missing from this extraction. */
1125 static int ctl_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1126 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1129 pa_assert(u
->mixer_handle
);
1131 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1134 if (!PA_SOURCE_IS_LINKED(u
->source
->state
))
1137 if (u
->source
->suspend_cause
& PA_SUSPEND_SESSION
) {
1138 pa_source_set_mixer_dirty(u
->source
, TRUE
);
1142 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1143 pa_source_get_volume(u
->source
, TRUE
);
1144 pa_source_get_mute(u
->source
, TRUE
);
/* IO-thread ALSA mixer element callback (deferred-volume path): on value
 * changes triggers pa_source_update_volume_and_mute(). Marks the mixer dirty
 * instead while the session is suspended. NOTE(review): early-return bodies
 * and the final `return 0;` are missing from this extraction. */
1150 static int io_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1151 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1154 pa_assert(u
->mixer_handle
);
1156 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1159 if (u
->source
->suspend_cause
& PA_SUSPEND_SESSION
) {
1160 pa_source_set_mixer_dirty(u
->source
, TRUE
);
1164 if (mask
& SND_CTL_EVENT_MASK_VALUE
)
1165 pa_source_update_volume_and_mute(u
->source
);
1170 static void source_get_volume_cb(pa_source
*s
) {
1171 struct userdata
*u
= s
->userdata
;
1173 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1176 pa_assert(u
->mixer_path
);
1177 pa_assert(u
->mixer_handle
);
1179 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1182 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1183 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1185 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1187 if (u
->mixer_path
->has_dB
) {
1188 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1190 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &r
));
1193 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1196 s
->real_volume
= u
->hardware_volume
= r
;
1198 /* Hmm, so the hardware volume changed, let's reset our software volume */
1199 if (u
->mixer_path
->has_dB
)
1200 pa_source_set_soft_volume(s
, NULL
);
1203 static void source_set_volume_cb(pa_source
*s
) {
1204 struct userdata
*u
= s
->userdata
;
1206 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1207 pa_bool_t deferred_volume
= !!(s
->flags
& PA_SOURCE_DEFERRED_VOLUME
);
1210 pa_assert(u
->mixer_path
);
1211 pa_assert(u
->mixer_handle
);
1213 /* Shift up by the base volume */
1214 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1216 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
, deferred_volume
, !deferred_volume
) < 0)
1219 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1220 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1222 u
->hardware_volume
= r
;
1224 if (u
->mixer_path
->has_dB
) {
1225 pa_cvolume new_soft_volume
;
1226 pa_bool_t accurate_enough
;
1227 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1229 /* Match exactly what the user requested by software */
1230 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1232 /* If the adjustment to do in software is only minimal we
1233 * can skip it. That saves us CPU at the expense of a bit of
1236 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1237 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1239 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &s
->real_volume
));
1240 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &s
->real_volume
));
1241 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &u
->hardware_volume
));
1242 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &u
->hardware_volume
));
1243 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1244 pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &new_soft_volume
),
1245 pa_yes_no(accurate_enough
));
1246 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &new_soft_volume
));
1248 if (!accurate_enough
)
1249 s
->soft_volume
= new_soft_volume
;
1252 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1254 /* We can't match exactly what the user requested, hence let's
1255 * at least tell the user about it */
1261 static void source_write_volume_cb(pa_source
*s
) {
1262 struct userdata
*u
= s
->userdata
;
1263 pa_cvolume hw_vol
= s
->thread_info
.current_hw_volume
;
1266 pa_assert(u
->mixer_path
);
1267 pa_assert(u
->mixer_handle
);
1268 pa_assert(s
->flags
& PA_SOURCE_DEFERRED_VOLUME
);
1270 /* Shift up by the base volume */
1271 pa_sw_cvolume_divide_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1273 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &hw_vol
, TRUE
, TRUE
) < 0)
1274 pa_log_error("Writing HW volume failed");
1277 pa_bool_t accurate_enough
;
1279 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1280 pa_sw_cvolume_multiply_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1282 pa_sw_cvolume_divide(&tmp_vol
, &hw_vol
, &s
->thread_info
.current_hw_volume
);
1284 (pa_cvolume_min(&tmp_vol
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1285 (pa_cvolume_max(&tmp_vol
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1287 if (!accurate_enough
) {
1289 char db
[2][PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1290 char pcnt
[2][PA_CVOLUME_SNPRINT_MAX
];
1293 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1294 pa_cvolume_snprint(vol
.pcnt
[0], sizeof(vol
.pcnt
[0]), &s
->thread_info
.current_hw_volume
),
1295 pa_cvolume_snprint(vol
.pcnt
[1], sizeof(vol
.pcnt
[1]), &hw_vol
));
1296 pa_log_debug(" in dB: %s (request) != %s",
1297 pa_sw_cvolume_snprint_dB(vol
.db
[0], sizeof(vol
.db
[0]), &s
->thread_info
.current_hw_volume
),
1298 pa_sw_cvolume_snprint_dB(vol
.db
[1], sizeof(vol
.db
[1]), &hw_vol
));
1303 static void source_get_mute_cb(pa_source
*s
) {
1304 struct userdata
*u
= s
->userdata
;
1308 pa_assert(u
->mixer_path
);
1309 pa_assert(u
->mixer_handle
);
1311 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1317 static void source_set_mute_cb(pa_source
*s
) {
1318 struct userdata
*u
= s
->userdata
;
1321 pa_assert(u
->mixer_path
);
1322 pa_assert(u
->mixer_handle
);
1324 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1327 static void mixer_volume_init(struct userdata
*u
) {
1330 if (!u
->mixer_path
->has_volume
) {
1331 pa_source_set_write_volume_callback(u
->source
, NULL
);
1332 pa_source_set_get_volume_callback(u
->source
, NULL
);
1333 pa_source_set_set_volume_callback(u
->source
, NULL
);
1335 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1337 pa_source_set_get_volume_callback(u
->source
, source_get_volume_cb
);
1338 pa_source_set_set_volume_callback(u
->source
, source_set_volume_cb
);
1340 if (u
->mixer_path
->has_dB
&& u
->deferred_volume
) {
1341 pa_source_set_write_volume_callback(u
->source
, source_write_volume_cb
);
1342 pa_log_info("Successfully enabled deferred volume.");
1344 pa_source_set_write_volume_callback(u
->source
, NULL
);
1346 if (u
->mixer_path
->has_dB
) {
1347 pa_source_enable_decibel_volume(u
->source
, TRUE
);
1348 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1350 u
->source
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1351 u
->source
->n_volume_steps
= PA_VOLUME_NORM
+1;
1353 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->source
->base_volume
));
1355 pa_source_enable_decibel_volume(u
->source
, FALSE
);
1356 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1358 u
->source
->base_volume
= PA_VOLUME_NORM
;
1359 u
->source
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1362 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1365 if (!u
->mixer_path
->has_mute
) {
1366 pa_source_set_get_mute_callback(u
->source
, NULL
);
1367 pa_source_set_set_mute_callback(u
->source
, NULL
);
1368 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1370 pa_source_set_get_mute_callback(u
->source
, source_get_mute_cb
);
1371 pa_source_set_set_mute_callback(u
->source
, source_set_mute_cb
);
1372 pa_log_info("Using hardware mute control.");
1376 static int source_set_port_ucm_cb(pa_source
*s
, pa_device_port
*p
) {
1377 struct userdata
*u
= s
->userdata
;
1381 pa_assert(u
->ucm_context
);
1383 return pa_alsa_ucm_set_port(u
->ucm_context
, p
, FALSE
);
1386 static int source_set_port_cb(pa_source
*s
, pa_device_port
*p
) {
1387 struct userdata
*u
= s
->userdata
;
1388 pa_alsa_port_data
*data
;
1392 pa_assert(u
->mixer_handle
);
1394 data
= PA_DEVICE_PORT_DATA(p
);
1396 pa_assert_se(u
->mixer_path
= data
->path
);
1397 pa_alsa_path_select(u
->mixer_path
, data
->setting
, u
->mixer_handle
, s
->muted
);
1399 mixer_volume_init(u
);
1403 if (s
->flags
& PA_SOURCE_DEFERRED_VOLUME
) {
1404 if (s
->write_volume
)
1414 static void source_update_requested_latency_cb(pa_source
*s
) {
1415 struct userdata
*u
= s
->userdata
;
1417 pa_assert(u
->use_tsched
); /* only when timer scheduling is used
1418 * we can dynamically adjust the
1424 update_sw_params(u
);
1427 static pa_bool_t
source_update_rate_cb(pa_source
*s
, uint32_t rate
)
1429 struct userdata
*u
= s
->userdata
;
1431 pa_bool_t supported
= FALSE
;
1435 for (i
= 0; u
->rates
[i
]; i
++) {
1436 if (u
->rates
[i
] == rate
) {
1443 pa_log_info("Sink does not support sample rate of %d Hz", rate
);
1447 if (!PA_SOURCE_IS_OPENED(s
->state
)) {
1448 pa_log_info("Updating rate for device %s, new rate is %d", u
->device_name
, rate
);
1449 u
->source
->sample_spec
.rate
= rate
;
1456 static void thread_func(void *userdata
) {
1457 struct userdata
*u
= userdata
;
1458 unsigned short revents
= 0;
1462 pa_log_debug("Thread starting up");
1464 if (u
->core
->realtime_scheduling
)
1465 pa_make_realtime(u
->core
->realtime_priority
);
1467 pa_thread_mq_install(&u
->thread_mq
);
1471 pa_usec_t rtpoll_sleep
= 0;
1474 pa_log_debug("Loop");
1477 /* Read some data and pass it to the sources */
1478 if (PA_SOURCE_IS_OPENED(u
->source
->thread_info
.state
)) {
1480 pa_usec_t sleep_usec
= 0;
1481 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
1484 pa_log_info("Starting capture.");
1485 snd_pcm_start(u
->pcm_handle
);
1487 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1493 work_done
= mmap_read(u
, &sleep_usec
, revents
& POLLIN
, on_timeout
);
1495 work_done
= unix_read(u
, &sleep_usec
, revents
& POLLIN
, on_timeout
);
1500 /* pa_log_debug("work_done = %i", work_done); */
1505 if (u
->use_tsched
) {
1508 /* OK, the capture buffer is now empty, let's
1509 * calculate when to wake up next */
1511 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1513 /* Convert from the sound card time domain to the
1514 * system time domain */
1515 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1517 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1519 /* We don't trust the conversion, so we wake up whatever comes first */
1520 rtpoll_sleep
= PA_MIN(sleep_usec
, cusec
);
1524 if (u
->source
->flags
& PA_SOURCE_DEFERRED_VOLUME
) {
1525 pa_usec_t volume_sleep
;
1526 pa_source_volume_change_apply(u
->source
, &volume_sleep
);
1527 if (volume_sleep
> 0) {
1528 if (rtpoll_sleep
> 0)
1529 rtpoll_sleep
= PA_MIN(volume_sleep
, rtpoll_sleep
);
1531 rtpoll_sleep
= volume_sleep
;
1535 if (rtpoll_sleep
> 0)
1536 pa_rtpoll_set_timer_relative(u
->rtpoll
, rtpoll_sleep
);
1538 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1540 /* Hmm, nothing to do. Let's sleep */
1541 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1544 if (u
->source
->flags
& PA_SOURCE_DEFERRED_VOLUME
)
1545 pa_source_volume_change_apply(u
->source
, NULL
);
1550 /* Tell ALSA about this and process its response */
1551 if (PA_SOURCE_IS_OPENED(u
->source
->thread_info
.state
)) {
1552 struct pollfd
*pollfd
;
1556 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1558 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1559 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
1563 if (revents
& ~POLLIN
) {
1564 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1569 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit(PA_LOG_DEBUG
))
1570 pa_log_debug("Wakeup from ALSA!");
1577 /* If this was no regular exit from the loop we have to continue
1578 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1579 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1580 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1583 pa_log_debug("Thread shutting down");
1586 static void set_source_name(pa_source_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1592 pa_assert(device_name
);
1594 if ((n
= pa_modargs_get_value(ma
, "source_name", NULL
))) {
1595 pa_source_new_data_set_name(data
, n
);
1596 data
->namereg_fail
= TRUE
;
1600 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1601 data
->namereg_fail
= TRUE
;
1603 n
= device_id
? device_id
: device_name
;
1604 data
->namereg_fail
= FALSE
;
1608 t
= pa_sprintf_malloc("alsa_input.%s.%s", n
, mapping
->name
);
1610 t
= pa_sprintf_malloc("alsa_input.%s", n
);
1612 pa_source_new_data_set_name(data
, t
);
1616 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
1619 if (!mapping
&& !element
)
1622 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
, &hctl
))) {
1623 pa_log_info("Failed to find a working mixer device.");
1629 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_INPUT
)))
1632 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, hctl
, ignore_dB
) < 0)
1635 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1636 pa_alsa_path_dump(u
->mixer_path
);
1637 } else if (!(u
->mixer_path_set
= mapping
->input_path_set
))
1644 if (u
->mixer_path
) {
1645 pa_alsa_path_free(u
->mixer_path
);
1646 u
->mixer_path
= NULL
;
1649 if (u
->mixer_handle
) {
1650 snd_mixer_close(u
->mixer_handle
);
1651 u
->mixer_handle
= NULL
;
1655 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1656 pa_bool_t need_mixer_callback
= FALSE
;
1660 if (!u
->mixer_handle
)
1663 if (u
->source
->active_port
) {
1664 pa_alsa_port_data
*data
;
1666 /* We have a list of supported paths, so let's activate the
1667 * one that has been chosen as active */
1669 data
= PA_DEVICE_PORT_DATA(u
->source
->active_port
);
1670 u
->mixer_path
= data
->path
;
1672 pa_alsa_path_select(data
->path
, data
->setting
, u
->mixer_handle
, u
->source
->muted
);
1676 if (!u
->mixer_path
&& u
->mixer_path_set
)
1677 u
->mixer_path
= pa_hashmap_first(u
->mixer_path_set
->paths
);
1679 if (u
->mixer_path
) {
1680 /* Hmm, we have only a single path, then let's activate it */
1682 pa_alsa_path_select(u
->mixer_path
, u
->mixer_path
->settings
, u
->mixer_handle
, u
->source
->muted
);
1687 mixer_volume_init(u
);
1689 /* Will we need to register callbacks? */
1690 if (u
->mixer_path_set
&& u
->mixer_path_set
->paths
) {
1694 PA_HASHMAP_FOREACH(p
, u
->mixer_path_set
->paths
, state
) {
1695 if (p
->has_volume
|| p
->has_mute
)
1696 need_mixer_callback
= TRUE
;
1699 else if (u
->mixer_path
)
1700 need_mixer_callback
= u
->mixer_path
->has_volume
|| u
->mixer_path
->has_mute
;
1702 if (need_mixer_callback
) {
1703 int (*mixer_callback
)(snd_mixer_elem_t
*, unsigned int);
1704 if (u
->source
->flags
& PA_SOURCE_DEFERRED_VOLUME
) {
1705 u
->mixer_pd
= pa_alsa_mixer_pdata_new();
1706 mixer_callback
= io_mixer_callback
;
1708 if (pa_alsa_set_mixer_rtpoll(u
->mixer_pd
, u
->mixer_handle
, u
->rtpoll
) < 0) {
1709 pa_log("Failed to initialize file descriptor monitoring");
1713 u
->mixer_fdl
= pa_alsa_fdlist_new();
1714 mixer_callback
= ctl_mixer_callback
;
1716 if (pa_alsa_fdlist_set_handle(u
->mixer_fdl
, u
->mixer_handle
, NULL
, u
->core
->mainloop
) < 0) {
1717 pa_log("Failed to initialize file descriptor monitoring");
1722 if (u
->mixer_path_set
)
1723 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1725 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1731 pa_source
*pa_alsa_source_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1733 struct userdata
*u
= NULL
;
1734 const char *dev_id
= NULL
, *key
, *mod_name
;
1736 uint32_t alternate_sample_rate
;
1738 uint32_t nfrags
, frag_size
, buffer_size
, tsched_size
, tsched_watermark
;
1739 snd_pcm_uframes_t period_frames
, buffer_frames
, tsched_frames
;
1741 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
, namereg_fail
= FALSE
, deferred_volume
= FALSE
, fixed_latency_range
= FALSE
;
1742 pa_source_new_data data
;
1743 pa_alsa_profile_set
*profile_set
= NULL
;
1749 ss
= m
->core
->default_sample_spec
;
1750 map
= m
->core
->default_channel_map
;
1751 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1752 pa_log("Failed to parse sample specification and channel map");
1756 alternate_sample_rate
= m
->core
->alternate_sample_rate
;
1757 if (pa_modargs_get_alternate_sample_rate(ma
, &alternate_sample_rate
) < 0) {
1758 pa_log("Failed to parse alternate sample rate");
1762 frame_size
= pa_frame_size(&ss
);
1764 nfrags
= m
->core
->default_n_fragments
;
1765 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1767 frag_size
= (uint32_t) frame_size
;
1768 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1769 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1771 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1772 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1773 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1774 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1775 pa_log("Failed to parse buffer metrics");
1779 buffer_size
= nfrags
* frag_size
;
1781 period_frames
= frag_size
/frame_size
;
1782 buffer_frames
= buffer_size
/frame_size
;
1783 tsched_frames
= tsched_size
/frame_size
;
1785 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1786 pa_log("Failed to parse mmap argument.");
1790 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1791 pa_log("Failed to parse tsched argument.");
1795 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1796 pa_log("Failed to parse ignore_dB argument.");
1800 deferred_volume
= m
->core
->deferred_volume
;
1801 if (pa_modargs_get_value_boolean(ma
, "deferred_volume", &deferred_volume
) < 0) {
1802 pa_log("Failed to parse deferred_volume argument.");
1806 if (pa_modargs_get_value_boolean(ma
, "fixed_latency_range", &fixed_latency_range
) < 0) {
1807 pa_log("Failed to parse fixed_latency_range argument.");
1811 use_tsched
= pa_alsa_may_tsched(use_tsched
);
1813 u
= pa_xnew0(struct userdata
, 1);
1816 u
->use_mmap
= use_mmap
;
1817 u
->use_tsched
= use_tsched
;
1818 u
->deferred_volume
= deferred_volume
;
1819 u
->fixed_latency_range
= fixed_latency_range
;
1821 u
->rtpoll
= pa_rtpoll_new();
1822 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1824 u
->smoother
= pa_smoother_new(
1825 SMOOTHER_ADJUST_USEC
,
1826 SMOOTHER_WINDOW_USEC
,
1832 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1835 if (mapping
&& mapping
->ucm_context
.ucm
)
1836 u
->ucm_context
= &mapping
->ucm_context
;
1838 dev_id
= pa_modargs_get_value(
1840 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
1842 u
->paths_dir
= pa_xstrdup(pa_modargs_get_value(ma
, "paths_dir", NULL
));
1844 if (reserve_init(u
, dev_id
) < 0)
1847 if (reserve_monitor_init(u
, dev_id
) < 0)
1855 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1856 pa_log("device_id= not set");
1860 if ((mod_name
= pa_proplist_gets(mapping
->proplist
, PA_ALSA_PROP_UCM_MODIFIER
))) {
1861 if (snd_use_case_set(u
->ucm_context
->ucm
->ucm_mgr
, "_enamod", mod_name
) < 0)
1862 pa_log("Failed to enable ucm modifier %s", mod_name
);
1864 pa_log_debug("Enabled ucm modifier %s", mod_name
);
1867 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
1871 SND_PCM_STREAM_CAPTURE
,
1872 &period_frames
, &buffer_frames
, tsched_frames
,
1876 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1878 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
1881 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1885 SND_PCM_STREAM_CAPTURE
,
1886 &period_frames
, &buffer_frames
, tsched_frames
,
1887 &b
, &d
, profile_set
, &mapping
)))
1892 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1893 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1896 SND_PCM_STREAM_CAPTURE
,
1897 &period_frames
, &buffer_frames
, tsched_frames
,
1902 pa_assert(u
->device_name
);
1903 pa_log_info("Successfully opened device %s.", u
->device_name
);
1905 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
1906 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
1911 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
1913 if (use_mmap
&& !b
) {
1914 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1915 u
->use_mmap
= use_mmap
= FALSE
;
1918 if (use_tsched
&& (!b
|| !d
)) {
1919 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1920 u
->use_tsched
= use_tsched
= FALSE
;
1924 pa_log_info("Successfully enabled mmap() mode.");
1926 if (u
->use_tsched
) {
1927 pa_log_info("Successfully enabled timer-based scheduling mode.");
1928 if (u
->fixed_latency_range
)
1929 pa_log_info("Disabling latency range changes on overrun");
1932 u
->rates
= pa_alsa_get_supported_rates(u
->pcm_handle
);
1934 pa_log_error("Failed to find any supported sample rates.");
1938 /* ALSA might tweak the sample spec, so recalculate the frame size */
1939 frame_size
= pa_frame_size(&ss
);
1941 if (!u
->ucm_context
)
1942 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
1944 pa_source_new_data_init(&data
);
1945 data
.driver
= driver
;
1948 set_source_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
1950 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1951 * variable instead of using &data.namereg_fail directly, because
1952 * data.namereg_fail is a bitfield and taking the address of a bitfield
1953 * variable is impossible. */
1954 namereg_fail
= data
.namereg_fail
;
1955 if (pa_modargs_get_value_boolean(ma
, "namereg_fail", &namereg_fail
) < 0) {
1956 pa_log("Failed to parse namereg_fail argument.");
1957 pa_source_new_data_done(&data
);
1960 data
.namereg_fail
= namereg_fail
;
1962 pa_source_new_data_set_sample_spec(&data
, &ss
);
1963 pa_source_new_data_set_channel_map(&data
, &map
);
1964 pa_source_new_data_set_alternate_sample_rate(&data
, alternate_sample_rate
);
1966 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
1967 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1968 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (buffer_frames
* frame_size
));
1969 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1970 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1973 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
1974 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
1976 while ((key
= pa_proplist_iterate(mapping
->proplist
, &state
)))
1977 pa_proplist_sets(data
.proplist
, key
, pa_proplist_gets(mapping
->proplist
, key
));
1980 pa_alsa_init_description(data
.proplist
);
1982 if (u
->control_device
)
1983 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
1985 if (pa_modargs_get_proplist(ma
, "source_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
1986 pa_log("Invalid properties");
1987 pa_source_new_data_done(&data
);
1992 pa_alsa_ucm_add_ports(&data
.ports
, data
.proplist
, u
->ucm_context
, FALSE
, card
);
1993 else if (u
->mixer_path_set
)
1994 pa_alsa_add_ports(&data
, u
->mixer_path_set
, card
);
1996 u
->source
= pa_source_new(m
->core
, &data
, PA_SOURCE_HARDWARE
|PA_SOURCE_LATENCY
|(u
->use_tsched
? PA_SOURCE_DYNAMIC_LATENCY
: 0));
1997 pa_source_new_data_done(&data
);
2000 pa_log("Failed to create source object");
2004 if (pa_modargs_get_value_u32(ma
, "deferred_volume_safety_margin",
2005 &u
->source
->thread_info
.volume_change_safety_margin
) < 0) {
2006 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2010 if (pa_modargs_get_value_s32(ma
, "deferred_volume_extra_delay",
2011 &u
->source
->thread_info
.volume_change_extra_delay
) < 0) {
2012 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2016 u
->source
->parent
.process_msg
= source_process_msg
;
2018 u
->source
->update_requested_latency
= source_update_requested_latency_cb
;
2019 u
->source
->set_state
= source_set_state_cb
;
2021 u
->source
->set_port
= source_set_port_ucm_cb
;
2023 u
->source
->set_port
= source_set_port_cb
;
2024 if (u
->source
->alternate_sample_rate
)
2025 u
->source
->update_rate
= source_update_rate_cb
;
2026 u
->source
->userdata
= u
;
2028 pa_source_set_asyncmsgq(u
->source
, u
->thread_mq
.inq
);
2029 pa_source_set_rtpoll(u
->source
, u
->rtpoll
);
2031 u
->frame_size
= frame_size
;
2032 u
->fragment_size
= frag_size
= (size_t) (period_frames
* frame_size
);
2033 u
->hwbuf_size
= buffer_size
= (size_t) (buffer_frames
* frame_size
);
2034 pa_cvolume_mute(&u
->hardware_volume
, u
->source
->sample_spec
.channels
);
2036 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2037 (double) u
->hwbuf_size
/ (double) u
->fragment_size
,
2038 (long unsigned) u
->fragment_size
,
2039 (double) pa_bytes_to_usec(u
->fragment_size
, &ss
) / PA_USEC_PER_MSEC
,
2040 (long unsigned) u
->hwbuf_size
,
2041 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
2043 if (u
->use_tsched
) {
2044 u
->tsched_watermark_ref
= tsched_watermark
;
2045 reset_watermark(u
, u
->tsched_watermark_ref
, &ss
, FALSE
);
2048 pa_source_set_fixed_latency(u
->source
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
2052 if (update_sw_params(u
) < 0)
2055 if (u
->ucm_context
) {
2056 if (u
->source
->active_port
&& pa_alsa_ucm_set_port(u
->ucm_context
, u
->source
->active_port
, FALSE
) < 0)
2058 } else if (setup_mixer(u
, ignore_dB
) < 0)
2061 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
2063 if (!(u
->thread
= pa_thread_new("alsa-source", thread_func
, u
))) {
2064 pa_log("Failed to create thread.");
2068 /* Get initial mixer settings */
2069 if (data
.volume_is_set
) {
2070 if (u
->source
->set_volume
)
2071 u
->source
->set_volume(u
->source
);
2073 if (u
->source
->get_volume
)
2074 u
->source
->get_volume(u
->source
);
2077 if (data
.muted_is_set
) {
2078 if (u
->source
->set_mute
)
2079 u
->source
->set_mute(u
->source
);
2081 if (u
->source
->get_mute
)
2082 u
->source
->get_mute(u
->source
);
2085 if ((data
.volume_is_set
|| data
.muted_is_set
) && u
->source
->write_volume
)
2086 u
->source
->write_volume(u
->source
);
2088 pa_source_put(u
->source
);
2091 pa_alsa_profile_set_free(profile_set
);
2101 pa_alsa_profile_set_free(profile_set
);
2106 static void userdata_free(struct userdata
*u
) {
2110 pa_source_unlink(u
->source
);
2113 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
2114 pa_thread_free(u
->thread
);
2117 pa_thread_mq_done(&u
->thread_mq
);
2120 pa_source_unref(u
->source
);
2123 pa_alsa_mixer_pdata_free(u
->mixer_pd
);
2125 if (u
->alsa_rtpoll_item
)
2126 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
2129 pa_rtpoll_free(u
->rtpoll
);
2131 if (u
->pcm_handle
) {
2132 snd_pcm_drop(u
->pcm_handle
);
2133 snd_pcm_close(u
->pcm_handle
);
2137 pa_alsa_fdlist_free(u
->mixer_fdl
);
2139 if (u
->mixer_path
&& !u
->mixer_path_set
)
2140 pa_alsa_path_free(u
->mixer_path
);
2142 if (u
->mixer_handle
)
2143 snd_mixer_close(u
->mixer_handle
);
2146 pa_smoother_free(u
->smoother
);
2154 pa_xfree(u
->device_name
);
2155 pa_xfree(u
->control_device
);
2156 pa_xfree(u
->paths_dir
);
2160 void pa_alsa_source_free(pa_source
*s
) {
2163 pa_source_assert_ref(s
);
2164 pa_assert_se(u
= s
->userdata
);