2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
54 #include <modules/reserve-wrap.h>
56 #include "alsa-util.h"
57 #include "alsa-source.h"
59 /* #define DEBUG_TIMING */
61 #define DEFAULT_DEVICE "default"
63 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
64 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
66 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
67 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
68 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
69 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
70 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
71 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
73 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
74 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
76 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
77 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
79 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
80 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
82 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
90 pa_thread_mq thread_mq
;
93 snd_pcm_t
*pcm_handle
;
95 pa_alsa_fdlist
*mixer_fdl
;
96 pa_alsa_mixer_pdata
*mixer_pd
;
97 snd_mixer_t
*mixer_handle
;
98 pa_alsa_path_set
*mixer_path_set
;
99 pa_alsa_path
*mixer_path
;
101 pa_cvolume hardware_volume
;
113 watermark_inc_threshold
,
114 watermark_dec_threshold
;
116 pa_usec_t watermark_dec_not_before
;
118 char *device_name
; /* name of the PCM device */
119 char *control_device
; /* name of the control device */
121 pa_bool_t use_mmap
:1, use_tsched
:1;
125 pa_rtpoll_item
*alsa_rtpoll_item
;
127 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
129 pa_smoother
*smoother
;
131 pa_usec_t smoother_interval
;
132 pa_usec_t last_smoother_update
;
134 pa_reserve_wrapper
*reserve
;
135 pa_hook_slot
*reserve_slot
;
136 pa_reserve_monitor_wrapper
*monitor
;
137 pa_hook_slot
*monitor_slot
;
140 static void userdata_free(struct userdata
*u
);
/* Device-reservation hook: called by the reserve wrapper when another
 * process requests the audio device.  The visible fragment suspends the
 * source and cancels the hook chain if the suspend fails.
 * NOTE(review): this extraction is missing original lines 143-145 and
 * 148-151 (asserts and the normal PA_HOOK_OK return path) -- do not
 * assume the fragment below is the whole function. */
142 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
/* Suspend capture so the competing process can claim the device. */
146 if (pa_source_suspend(u
->source
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
147 return PA_HOOK_CANCEL
;
/* Tear down the device-reservation state: disconnect the hook slot (if
 * connected) and drop our reference on the reserve wrapper.
 * NOTE(review): extraction is missing original lines 153-154, 158-160
 * and 162+ (asserts, the closing brace of the if, and the u->reserve
 * NULL check/reset) -- fragment is incomplete. */
152 static void reserve_done(struct userdata
*u
) {
155 if (u
->reserve_slot
) {
156 pa_hook_slot_free(u
->reserve_slot
);
/* Clear the slot pointer so a second reserve_done() is harmless. */
157 u
->reserve_slot
= NULL
;
161 pa_reserve_wrapper_unref(u
->reserve
);
/* Push the source's human-readable description into the reservation
 * wrapper so other processes can show which application holds the
 * device.  No-op when there is no source or no reservation.
 * NOTE(review): original lines 168-169, 171-172 and the function
 * epilogue are missing from this extraction. */
166 static void reserve_update(struct userdata
*u
) {
167 const char *description
;
/* Nothing to publish without both a source and a reservation. */
170 if (!u
->source
|| !u
->reserve
)
173 if ((description
= pa_proplist_gets(u
->source
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
174 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
/* Acquire a device-reservation wrapper for PCM device `dname` and hook
 * reserve_cb() into it.  Visible logic: skip in system mode, map the
 * device name to a reservation name, obtain the wrapper, then connect
 * the hook exactly once (asserted).
 * NOTE(review): many original lines (178-185, 187-188, 190-191,
 * 194-200, 203+) are missing -- local declarations, early returns,
 * error handling and the return value are not visible here. */
177 static int reserve_init(struct userdata
*u
, const char *dname
) {
/* Reservation is presumably skipped for system-wide daemons -- the
 * branch body is not visible in this extraction; confirm upstream. */
186 if (pa_in_system_mode())
189 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
192 /* We are resuming, try to lock the device */
193 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
/* Hook must not already be connected. */
201 pa_assert(!u
->reserve_slot
);
202 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
/* Reservation-monitor hook: fired when the device's busy state changes.
 * Suspends the source while the device is busy elsewhere and we do not
 * hold the reservation ourselves; resumes it otherwise.
 * NOTE(review): original lines 208-212, 214 and 216+ (declaration of
 * `b`, asserts, return) are missing from this extraction. */
207 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
/* Busy AND not our own reservation => suspend. */
213 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
215 pa_source_suspend(u
->source
, b
, PA_SUSPEND_APPLICATION
);
/* Tear down the reservation monitor: free the hook slot (if any) and
 * unref the monitor wrapper.  Mirrors reserve_done().
 * NOTE(review): original lines 220-221, 225-227 and 229+ (asserts,
 * brace closures, NULL reset of u->monitor) are missing here. */
219 static void monitor_done(struct userdata
*u
) {
222 if (u
->monitor_slot
) {
223 pa_hook_slot_free(u
->monitor_slot
);
/* Clear so repeated teardown is safe. */
224 u
->monitor_slot
= NULL
;
228 pa_reserve_monitor_wrapper_unref(u
->monitor
);
/* Set up a reservation *monitor* for device `dname` and connect
 * monitor_cb() to its hook.  Structure parallels reserve_init(): skip
 * in system mode, resolve the reservation name, get the monitor
 * wrapper, connect the hook exactly once (asserted).
 * NOTE(review): original lines 234-238, 240-241, 243-244, 247-251 and
 * 254+ are missing -- declarations, early returns, error handling and
 * the return value are not visible in this extraction. */
233 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
239 if (pa_in_system_mode())
242 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
245 /* We are resuming, try to lock the device */
246 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
252 pa_assert(!u
->monitor_slot
);
253 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
/* Recompute the minimum sleep and wakeup byte thresholds used by the
 * timer-based scheduler.  Both TSCHED_MIN_SLEEP_USEC (10ms) and
 * TSCHED_MIN_WAKEUP_USEC (4ms) are converted to bytes for the source's
 * sample spec, then clamped to [frame_size, half of the usable hw
 * buffer] (the half is frame-aligned first).  Only valid with tsched
 * enabled (asserted).
 * NOTE(review): original lines 260-261, 263, 266, 269 and the closing
 * brace (272) are missing from this extraction. */
258 static void fix_min_sleep_wakeup(struct userdata
*u
) {
259 size_t max_use
, max_use_2
;
262 pa_assert(u
->use_tsched
);
/* Usable buffer = total hw buffer minus the part we keep unused. */
264 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
/* Half the usable buffer, aligned to a whole frame. */
265 max_use_2
= pa_frame_align(max_use
/2, &u
->source
->sample_spec
);
267 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->source
->sample_spec
);
268 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
270 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->source
->sample_spec
);
271 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
/* Clamp the tsched wakeup watermark into its valid window:
 * at most (usable buffer - min_sleep), at least min_wakeup.
 * Only valid with tsched enabled (asserted).
 * NOTE(review): original lines 275-276, 278, 280, 283 and the closing
 * brace (286) are missing -- in particular the declaration of
 * `max_use` is not visible in this extraction. */
274 static void fix_tsched_watermark(struct userdata
*u
) {
277 pa_assert(u
->use_tsched
);
279 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
/* Upper bound: leave at least min_sleep of headroom. */
281 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
282 u
->tsched_watermark
= max_use
- u
->min_sleep
;
/* Lower bound: never wake later than min_wakeup allows. */
284 if (u
->tsched_watermark
< u
->min_wakeup
)
285 u
->tsched_watermark
= u
->min_wakeup
;
288 static void increase_watermark(struct userdata
*u
) {
289 size_t old_watermark
;
290 pa_usec_t old_min_latency
, new_min_latency
;
293 pa_assert(u
->use_tsched
);
295 /* First, just try to increase the watermark */
296 old_watermark
= u
->tsched_watermark
;
297 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
298 fix_tsched_watermark(u
);
300 if (old_watermark
!= u
->tsched_watermark
) {
301 pa_log_info("Increasing wakeup watermark to %0.2f ms",
302 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->source
->sample_spec
) / PA_USEC_PER_MSEC
);
306 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
307 old_min_latency
= u
->source
->thread_info
.min_latency
;
308 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
309 new_min_latency
= PA_MIN(new_min_latency
, u
->source
->thread_info
.max_latency
);
311 if (old_min_latency
!= new_min_latency
) {
312 pa_log_info("Increasing minimal latency to %0.2f ms",
313 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
315 pa_source_set_latency_range_within_thread(u
->source
, new_min_latency
, u
->source
->thread_info
.max_latency
);
318 /* When we reach this we're officialy fucked! */
321 static void decrease_watermark(struct userdata
*u
) {
322 size_t old_watermark
;
326 pa_assert(u
->use_tsched
);
328 now
= pa_rtclock_now();
330 if (u
->watermark_dec_not_before
<= 0)
333 if (u
->watermark_dec_not_before
> now
)
336 old_watermark
= u
->tsched_watermark
;
338 if (u
->tsched_watermark
< u
->watermark_dec_step
)
339 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
341 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
343 fix_tsched_watermark(u
);
345 if (old_watermark
!= u
->tsched_watermark
)
346 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
347 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->source
->sample_spec
) / PA_USEC_PER_MSEC
);
349 /* We don't change the latency range*/
352 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
/* Compute how long the IO thread may sleep (*sleep_usec) before the
 * next wakeup, given the requested latency and the current watermark.
 * Visible logic: take the requested latency (falling back to the full
 * hw buffer duration when none is set, i.e. (pa_usec_t) -1), convert
 * the watermark to usec, and set *sleep_usec = usec - wm.  The final
 * debug log also prints *process_usec.
 * NOTE(review): original lines 356-357, 360-361, 363, 365, 368,
 * 370-377 and 382+ are missing -- declarations of `usec`/`wm`, the
 * computation of *process_usec, and boundary handling are not visible
 * in this extraction. */
355 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
358 pa_assert(sleep_usec
);
359 pa_assert(process_usec
);
362 pa_assert(u
->use_tsched
);
364 usec
= pa_source_get_requested_latency_within_thread(u
->source
);
/* (pa_usec_t) -1 means "no latency requested": use the whole buffer. */
366 if (usec
== (pa_usec_t
) -1)
367 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->source
->sample_spec
);
369 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->source
->sample_spec
);
/* Sleep until the buffer fill reaches the watermark. */
374 *sleep_usec
= usec
- wm
;
378 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
379 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
380 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
381 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
/* Attempt to recover the PCM handle after an ALSA error `err` reported
 * by the call named `call`.  Logs the error, asserts it is not EAGAIN
 * (the caller must handle that itself), logs overrun/suspend cases,
 * then delegates to snd_pcm_recover() in silent mode (last arg 1).
 * NOTE(review): original lines 386-389, 391, 393-394, 396, 399 and
 * 402+ are missing -- the conditions guarding the overrun log, the
 * failure return and the success return are not visible here. */
385 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
390 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
/* EAGAIN is not an error condition recoverable here. */
392 pa_assert(err
!= -EAGAIN
);
395 pa_log_debug("%s: Buffer overrun!", call
);
/* ESTRPIPE: the system was suspended underneath us. */
397 if (err
== -ESTRPIPE
)
398 pa_log_debug("%s: System suspended!", call
);
400 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
401 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
409 static size_t check_left_to_record(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
410 size_t left_to_record
;
411 size_t rec_space
= u
->hwbuf_size
- u
->hwbuf_unused
;
412 pa_bool_t overrun
= FALSE
;
414 /* We use <= instead of < for this check here because an overrun
415 * only happens after the last sample was processed, not already when
416 * it is removed from the buffer. This is particularly important
417 * when block transfer is used. */
419 if (n_bytes
<= rec_space
)
420 left_to_record
= rec_space
- n_bytes
;
423 /* We got a dropout. What a mess! */
431 if (pa_log_ratelimit(PA_LOG_INFO
))
432 pa_log_info("Overrun!");
436 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
) / PA_USEC_PER_MSEC
);
440 pa_bool_t reset_not_before
= TRUE
;
442 if (overrun
|| left_to_record
< u
->watermark_inc_threshold
)
443 increase_watermark(u
);
444 else if (left_to_record
> u
->watermark_dec_threshold
) {
445 reset_not_before
= FALSE
;
447 /* We decrease the watermark only if have actually
448 * been woken up by a timeout. If something else woke
449 * us up it's too easy to fulfill the deadlines... */
452 decrease_watermark(u
);
455 if (reset_not_before
)
456 u
->watermark_dec_not_before
= 0;
459 return left_to_record
;
462 static int mmap_read(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
463 pa_bool_t work_done
= FALSE
;
464 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
465 size_t left_to_record
;
469 pa_source_assert_ref(u
->source
);
472 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
478 pa_bool_t after_avail
= TRUE
;
480 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->source
->sample_spec
)) < 0)) {
482 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
488 n_bytes
= (size_t) n
* u
->frame_size
;
491 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
494 left_to_record
= check_left_to_record(u
, n_bytes
, on_timeout
);
499 pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
501 pa_log_debug("Not reading, because too early.");
506 if (PA_UNLIKELY(n_bytes
<= 0)) {
510 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
511 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
512 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
513 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
519 pa_log_debug("Not reading, because not necessary.");
527 pa_log_debug("Not filling up, because already too many iterations.");
536 pa_log_debug("Reading");
543 const snd_pcm_channel_area_t
*areas
;
544 snd_pcm_uframes_t offset
, frames
;
545 snd_pcm_sframes_t sframes
;
547 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
548 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
550 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->source
->sample_spec
)) < 0)) {
552 if (!after_avail
&& err
== -EAGAIN
)
555 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
561 /* Make sure that if these memblocks need to be copied they will fit into one slot */
562 if (frames
> pa_mempool_block_size_max(u
->source
->core
->mempool
)/u
->frame_size
)
563 frames
= pa_mempool_block_size_max(u
->source
->core
->mempool
)/u
->frame_size
;
565 if (!after_avail
&& frames
== 0)
568 pa_assert(frames
> 0);
571 /* Check these are multiples of 8 bit */
572 pa_assert((areas
[0].first
& 7) == 0);
573 pa_assert((areas
[0].step
& 7)== 0);
575 /* We assume a single interleaved memory buffer */
576 pa_assert((areas
[0].first
>> 3) == 0);
577 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
579 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
581 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
582 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
585 pa_source_post(u
->source
, &chunk
);
586 pa_memblock_unref_fixed(chunk
.memblock
);
588 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
590 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
598 u
->read_count
+= frames
* u
->frame_size
;
601 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
604 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
607 n_bytes
-= (size_t) frames
* u
->frame_size
;
612 *sleep_usec
= pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
);
614 if (*sleep_usec
> process_usec
)
615 *sleep_usec
-= process_usec
;
620 return work_done
? 1 : 0;
623 static int unix_read(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
624 int work_done
= FALSE
;
625 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
626 size_t left_to_record
;
630 pa_source_assert_ref(u
->source
);
633 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
639 pa_bool_t after_avail
= TRUE
;
641 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->source
->sample_spec
)) < 0)) {
643 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
649 n_bytes
= (size_t) n
* u
->frame_size
;
650 left_to_record
= check_left_to_record(u
, n_bytes
, on_timeout
);
655 pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
) > process_usec
+max_sleep_usec
/2)
658 if (PA_UNLIKELY(n_bytes
<= 0)) {
662 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
663 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
664 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
665 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
675 pa_log_debug("Not filling up, because already too many iterations.");
685 snd_pcm_sframes_t frames
;
688 chunk
.memblock
= pa_memblock_new(u
->core
->mempool
, (size_t) -1);
690 frames
= (snd_pcm_sframes_t
) (pa_memblock_get_length(chunk
.memblock
) / u
->frame_size
);
692 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
693 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
695 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
697 p
= pa_memblock_acquire(chunk
.memblock
);
698 frames
= snd_pcm_readi(u
->pcm_handle
, (uint8_t*) p
, (snd_pcm_uframes_t
) frames
);
699 pa_memblock_release(chunk
.memblock
);
701 if (PA_UNLIKELY(frames
< 0)) {
702 pa_memblock_unref(chunk
.memblock
);
704 if (!after_avail
&& (int) frames
== -EAGAIN
)
707 if ((r
= try_recover(u
, "snd_pcm_readi", (int) frames
)) == 0)
713 if (!after_avail
&& frames
== 0) {
714 pa_memblock_unref(chunk
.memblock
);
718 pa_assert(frames
> 0);
722 chunk
.length
= (size_t) frames
* u
->frame_size
;
724 pa_source_post(u
->source
, &chunk
);
725 pa_memblock_unref(chunk
.memblock
);
729 u
->read_count
+= frames
* u
->frame_size
;
731 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
733 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
736 n_bytes
-= (size_t) frames
* u
->frame_size
;
741 *sleep_usec
= pa_bytes_to_usec(left_to_record
, &u
->source
->sample_spec
);
743 if (*sleep_usec
> process_usec
)
744 *sleep_usec
-= process_usec
;
749 return work_done
? 1 : 0;
752 static void update_smoother(struct userdata
*u
) {
753 snd_pcm_sframes_t delay
= 0;
756 pa_usec_t now1
= 0, now2
;
757 snd_pcm_status_t
*status
;
759 snd_pcm_status_alloca(&status
);
762 pa_assert(u
->pcm_handle
);
764 /* Let's update the time smoother */
766 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->source
->sample_spec
, TRUE
)) < 0)) {
767 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err
));
771 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
772 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
774 snd_htimestamp_t htstamp
= { 0, 0 };
775 snd_pcm_status_get_htstamp(status
, &htstamp
);
776 now1
= pa_timespec_load(&htstamp
);
779 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
781 now1
= pa_rtclock_now();
783 /* check if the time since the last update is bigger than the interval */
784 if (u
->last_smoother_update
> 0)
785 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
788 position
= u
->read_count
+ ((uint64_t) delay
* (uint64_t) u
->frame_size
);
789 now2
= pa_bytes_to_usec(position
, &u
->source
->sample_spec
);
791 pa_smoother_put(u
->smoother
, now1
, now2
);
793 u
->last_smoother_update
= now1
;
794 /* exponentially increase the update interval up to the MAX limit */
795 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
/* Estimate the source's current latency: smoothed device time (now2)
 * minus the wall-clock equivalent of everything already read
 * (read_count converted to usec).  Negative estimates are clamped to 0.
 * NOTE(review): original lines 799, 801-803, 806, 808 and 810+ are
 * missing -- the declaration of `delay` and any asserts are not
 * visible in this extraction. */
798 static pa_usec_t
source_get_latency(struct userdata
*u
) {
800 pa_usec_t now1
, now2
;
804 now1
= pa_rtclock_now();
/* Translate system time to the smoothed sound-card time domain. */
805 now2
= pa_smoother_get(u
->smoother
, now1
);
807 delay
= (int64_t) now2
- (int64_t) pa_bytes_to_usec(u
->read_count
, &u
->source
->sample_spec
);
/* Clamp: latency can come out slightly negative due to smoothing. */
809 return delay
>= 0 ? (pa_usec_t
) delay
: 0;
/* (Re)build the rtpoll item wrapping the PCM handle's poll
 * descriptors: free any existing item first, then create a fresh one
 * via pa_alsa_build_pollfd().
 * NOTE(review): original lines 813, 815, 818, 820+ are missing -- the
 * pa_assert(u), the failure return after the if, and the success
 * return are not visible in this extraction. */
812 static int build_pollfd(struct userdata
*u
) {
814 pa_assert(u
->pcm_handle
);
/* Drop the stale item before building a new one. */
816 if (u
->alsa_rtpoll_item
)
817 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
819 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
825 /* Called from IO context */
/* Called from IO context: suspend the device.  Pauses the time
 * smoother, closes and clears the PCM handle, and frees/clears the
 * rtpoll item so poll() no longer watches a dead descriptor.
 * NOTE(review): original lines 827, 829, 831-832, 835, 839-840 and
 * 842+ are missing -- pa_assert(u), brace closures and the return
 * value are not visible in this extraction. */
826 static int suspend(struct userdata
*u
) {
828 pa_assert(u
->pcm_handle
);
/* Freeze the smoother at the current time so latency estimates stay
 * consistent across the suspend. */
830 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
833 snd_pcm_close(u
->pcm_handle
);
/* Clear so unsuspend() can assert a clean state. */
834 u
->pcm_handle
= NULL
;
836 if (u
->alsa_rtpoll_item
) {
837 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
838 u
->alsa_rtpoll_item
= NULL
;
841 pa_log_info("Device suspended...");
846 /* Called from IO context */
847 static int update_sw_params(struct userdata
*u
) {
848 snd_pcm_uframes_t avail_min
;
853 /* Use the full buffer if noone asked us for anything specific */
859 if ((latency
= pa_source_get_requested_latency_within_thread(u
->source
)) != (pa_usec_t
) -1) {
862 pa_log_debug("latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
864 b
= pa_usec_to_bytes(latency
, &u
->source
->sample_spec
);
866 /* We need at least one sample in our buffer */
868 if (PA_UNLIKELY(b
< u
->frame_size
))
871 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
874 fix_min_sleep_wakeup(u
);
875 fix_tsched_watermark(u
);
878 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
883 pa_usec_t sleep_usec
, process_usec
;
885 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
886 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->source
->sample_spec
) / u
->frame_size
;
889 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
891 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
, !u
->use_tsched
)) < 0) {
892 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
899 /* Called from IO context */
900 static int unsuspend(struct userdata
*u
) {
904 snd_pcm_uframes_t period_size
, buffer_size
;
907 pa_assert(!u
->pcm_handle
);
909 pa_log_info("Trying resume...");
911 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_CAPTURE
,
913 SND_PCM_NO_AUTO_RESAMPLE
|
914 SND_PCM_NO_AUTO_CHANNELS
|
915 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
916 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
920 ss
= u
->source
->sample_spec
;
921 period_size
= u
->fragment_size
/ u
->frame_size
;
922 buffer_size
= u
->hwbuf_size
/ u
->frame_size
;
926 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &period_size
, &buffer_size
, 0, &b
, &d
, TRUE
)) < 0) {
927 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
931 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
932 pa_log_warn("Resume failed, couldn't get original access mode.");
936 if (!pa_sample_spec_equal(&ss
, &u
->source
->sample_spec
)) {
937 pa_log_warn("Resume failed, couldn't restore original sample settings.");
941 if (period_size
*u
->frame_size
!= u
->fragment_size
||
942 buffer_size
*u
->frame_size
!= u
->hwbuf_size
) {
943 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
944 (unsigned long) u
->hwbuf_size
, (unsigned long) u
->fragment_size
,
945 (unsigned long) (buffer_size
*u
->frame_size
), (unsigned long) (period_size
*u
->frame_size
));
949 if (update_sw_params(u
) < 0)
952 if (build_pollfd(u
) < 0)
955 /* FIXME: We need to reload the volume somehow */
958 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
959 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
960 u
->last_smoother_update
= 0;
964 pa_log_info("Resumed successfully...");
970 snd_pcm_close(u
->pcm_handle
);
971 u
->pcm_handle
= NULL
;
977 /* Called from IO context */
978 static int source_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
979 struct userdata
*u
= PA_SOURCE(o
)->userdata
;
983 case PA_SOURCE_MESSAGE_GET_LATENCY
: {
987 r
= source_get_latency(u
);
989 *((pa_usec_t
*) data
) = r
;
994 case PA_SOURCE_MESSAGE_SET_STATE
:
996 switch ((pa_source_state_t
) PA_PTR_TO_UINT(data
)) {
998 case PA_SOURCE_SUSPENDED
: {
1001 pa_assert(PA_SOURCE_IS_OPENED(u
->source
->thread_info
.state
));
1003 if ((r
= suspend(u
)) < 0)
1009 case PA_SOURCE_IDLE
:
1010 case PA_SOURCE_RUNNING
: {
1013 if (u
->source
->thread_info
.state
== PA_SOURCE_INIT
) {
1014 if (build_pollfd(u
) < 0)
1018 if (u
->source
->thread_info
.state
== PA_SOURCE_SUSPENDED
) {
1019 if ((r
= unsuspend(u
)) < 0)
1026 case PA_SOURCE_UNLINKED
:
1027 case PA_SOURCE_INIT
:
1028 case PA_SOURCE_INVALID_STATE
:
1035 return pa_source_process_msg(o
, code
, data
, offset
, chunk
);
1038 /* Called from main context */
1039 static int source_set_state_cb(pa_source
*s
, pa_source_state_t new_state
) {
1040 pa_source_state_t old_state
;
1043 pa_source_assert_ref(s
);
1044 pa_assert_se(u
= s
->userdata
);
1046 old_state
= pa_source_get_state(u
->source
);
1048 if (PA_SOURCE_IS_OPENED(old_state
) && new_state
== PA_SOURCE_SUSPENDED
)
1050 else if (old_state
== PA_SOURCE_SUSPENDED
&& PA_SOURCE_IS_OPENED(new_state
))
1051 if (reserve_init(u
, u
->device_name
) < 0)
1052 return -PA_ERR_BUSY
;
1057 static int ctl_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1058 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1061 pa_assert(u
->mixer_handle
);
1063 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1066 if (u
->source
->suspend_cause
& PA_SUSPEND_SESSION
)
1069 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1070 pa_source_get_volume(u
->source
, TRUE
);
1071 pa_source_get_mute(u
->source
, TRUE
);
1077 static int io_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1078 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1081 pa_assert(u
->mixer_handle
);
1083 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1086 if (u
->source
->suspend_cause
& PA_SUSPEND_SESSION
)
1089 if (mask
& SND_CTL_EVENT_MASK_VALUE
)
1090 pa_source_update_volume_and_mute(u
->source
);
1095 static void source_get_volume_cb(pa_source
*s
) {
1096 struct userdata
*u
= s
->userdata
;
1098 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1101 pa_assert(u
->mixer_path
);
1102 pa_assert(u
->mixer_handle
);
1104 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1107 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1108 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1110 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1112 if (u
->mixer_path
->has_dB
) {
1113 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1115 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &r
));
1118 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1121 s
->real_volume
= u
->hardware_volume
= r
;
1123 /* Hmm, so the hardware volume changed, let's reset our software volume */
1124 if (u
->mixer_path
->has_dB
)
1125 pa_source_set_soft_volume(s
, NULL
);
1128 static void source_set_volume_cb(pa_source
*s
) {
1129 struct userdata
*u
= s
->userdata
;
1131 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1132 pa_bool_t write_to_hw
= (s
->flags
& PA_SOURCE_SYNC_VOLUME
) ? FALSE
: TRUE
;
1135 pa_assert(u
->mixer_path
);
1136 pa_assert(u
->mixer_handle
);
1138 /* Shift up by the base volume */
1139 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1141 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
, write_to_hw
) < 0)
1144 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1145 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1147 u
->hardware_volume
= r
;
1149 if (u
->mixer_path
->has_dB
) {
1150 pa_cvolume new_soft_volume
;
1151 pa_bool_t accurate_enough
;
1152 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1154 /* Match exactly what the user requested by software */
1155 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1157 /* If the adjustment to do in software is only minimal we
1158 * can skip it. That saves us CPU at the expense of a bit of
1161 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1162 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1164 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &s
->real_volume
));
1165 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &s
->real_volume
));
1166 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &u
->hardware_volume
));
1167 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &u
->hardware_volume
));
1168 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1169 pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &new_soft_volume
),
1170 pa_yes_no(accurate_enough
));
1171 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &new_soft_volume
));
1173 if (!accurate_enough
)
1174 s
->soft_volume
= new_soft_volume
;
1177 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1179 /* We can't match exactly what the user requested, hence let's
1180 * at least tell the user about it */
1186 static void source_write_volume_cb(pa_source
*s
) {
1187 struct userdata
*u
= s
->userdata
;
1188 pa_cvolume hw_vol
= s
->thread_info
.current_hw_volume
;
1191 pa_assert(u
->mixer_path
);
1192 pa_assert(u
->mixer_handle
);
1193 pa_assert(s
->flags
& PA_SOURCE_SYNC_VOLUME
);
1195 /* Shift up by the base volume */
1196 pa_sw_cvolume_divide_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1198 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &hw_vol
, TRUE
) < 0)
1199 pa_log_error("Writing HW volume failed");
1202 pa_bool_t accurate_enough
;
1204 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1205 pa_sw_cvolume_multiply_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1207 pa_sw_cvolume_divide(&tmp_vol
, &hw_vol
, &s
->thread_info
.current_hw_volume
);
1209 (pa_cvolume_min(&tmp_vol
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1210 (pa_cvolume_max(&tmp_vol
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1212 if (!accurate_enough
) {
1214 char db
[2][PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1215 char pcnt
[2][PA_CVOLUME_SNPRINT_MAX
];
1218 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1219 pa_cvolume_snprint(vol
.pcnt
[0], sizeof(vol
.pcnt
[0]), &s
->thread_info
.current_hw_volume
),
1220 pa_cvolume_snprint(vol
.pcnt
[1], sizeof(vol
.pcnt
[1]), &hw_vol
));
1221 pa_log_debug(" in dB: %s (request) != %s",
1222 pa_sw_cvolume_snprint_dB(vol
.db
[0], sizeof(vol
.db
[0]), &s
->thread_info
.current_hw_volume
),
1223 pa_sw_cvolume_snprint_dB(vol
.db
[1], sizeof(vol
.db
[1]), &hw_vol
));
1228 static void source_get_mute_cb(pa_source
*s
) {
1229 struct userdata
*u
= s
->userdata
;
1233 pa_assert(u
->mixer_path
);
1234 pa_assert(u
->mixer_handle
);
1236 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1242 static void source_set_mute_cb(pa_source
*s
) {
1243 struct userdata
*u
= s
->userdata
;
1246 pa_assert(u
->mixer_path
);
1247 pa_assert(u
->mixer_handle
);
1249 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1252 static int source_set_port_cb(pa_source
*s
, pa_device_port
*p
) {
1253 struct userdata
*u
= s
->userdata
;
1254 pa_alsa_port_data
*data
;
1258 pa_assert(u
->mixer_handle
);
1260 data
= PA_DEVICE_PORT_DATA(p
);
1262 pa_assert_se(u
->mixer_path
= data
->path
);
1263 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1265 if (u
->mixer_path
->has_volume
&& u
->mixer_path
->has_dB
) {
1266 s
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1267 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
1269 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s
->base_volume
));
1271 s
->base_volume
= PA_VOLUME_NORM
;
1272 s
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1276 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1286 static void source_update_requested_latency_cb(pa_source
*s
) {
1287 struct userdata
*u
= s
->userdata
;
1289 pa_assert(u
->use_tsched
); /* only when timer scheduling is used
1290 * we can dynamically adjust the
1296 update_sw_params(u
);
/* Realtime I/O thread: runs the capture loop. Each iteration reads PCM
 * data (mmap or plain read), schedules the next wakeup (timer-based
 * scheduling), runs the rtpoll loop, and translates ALSA poll revents.
 * Exits when pa_rtpoll_run() signals quit or an unrecoverable error
 * occurs, in which case the module unloads itself.
 * NOTE(review): the surrounding for(;;) loop, several variable
 * declarations (work_done, cusec, ret, err, n) and many braces are not
 * visible in this truncated excerpt. */
1299 static void thread_func(void *userdata
) {
1300 struct userdata
*u
= userdata
;
1301 unsigned short revents
= 0;
1305 pa_log_debug("Thread starting up");
/* Elevate to realtime priority if the daemon is configured for it. */
1307 if (u
->core
->realtime_scheduling
)
1308 pa_make_realtime(u
->core
->realtime_priority
);
1310 pa_thread_mq_install(&u
->thread_mq
);
1314 pa_usec_t rtpoll_sleep
= 0;
1317 pa_log_debug("Loop");
1320 /* Read some data and pass it to the sources */
1321 if (PA_SOURCE_IS_OPENED(u
->source
->thread_info
.state
)) {
1323 pa_usec_t sleep_usec
= 0;
1324 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
/* First pass after opening: kick off capture and resume the clock
 * smoother. NOTE(review): the guarding condition (likely
 * u->first) is not visible here. */
1327 pa_log_info("Starting capture.");
1328 snd_pcm_start(u
->pcm_handle
);
1330 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
/* mmap vs. plain-read path — selected by u->use_mmap (the branch
 * keywords are not visible in this excerpt). */
1336 work_done
= mmap_read(u
, &sleep_usec
, revents
& POLLIN
, on_timeout
);
1338 work_done
= unix_read(u
, &sleep_usec
, revents
& POLLIN
, on_timeout
);
1343 /* pa_log_debug("work_done = %i", work_done); */
1348 if (u
->use_tsched
) {
1351 /* OK, the capture buffer is now empty, let's
1352 * calculate when to wake up next */
1354 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1356 /* Convert from the sound card time domain to the
1357 * system time domain */
1358 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1360 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1362 /* We don't trust the conversion, so we wake up whatever comes first */
1363 rtpoll_sleep
= PA_MIN(sleep_usec
, cusec
);
/* Deferred (synchronous) volume writes also constrain the wakeup. */
1367 if (u
->source
->flags
& PA_SOURCE_SYNC_VOLUME
) {
1368 pa_usec_t volume_sleep
;
1369 pa_source_volume_change_apply(u
->source
, &volume_sleep
);
1370 if (volume_sleep
> 0)
1371 rtpoll_sleep
= PA_MIN(volume_sleep
, rtpoll_sleep
);
1374 if (rtpoll_sleep
> 0)
1375 pa_rtpoll_set_timer_relative(u
->rtpoll
, rtpoll_sleep
);
1377 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1379 /* Hmm, nothing to do. Let's sleep */
1380 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1383 if (u
->source
->flags
& PA_SOURCE_SYNC_VOLUME
)
1384 pa_source_volume_change_apply(u
->source
, NULL
);
1389 /* Tell ALSA about this and process its response */
1390 if (PA_SOURCE_IS_OPENED(u
->source
->thread_info
.state
)) {
1391 struct pollfd
*pollfd
;
1395 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1397 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1398 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
/* Any event other than POLLIN indicates trouble — try recovery. */
1402 if (revents
& ~POLLIN
) {
1403 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1407 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit(PA_LOG_DEBUG
))
1408 pa_log_debug("Wakeup from ALSA!");
1415 /* If this was no regular exit from the loop we have to continue
1416 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1417 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1418 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1421 pa_log_debug("Thread shutting down");
/* Choose the source's registered name. Priority (as visible here):
 * explicit "source_name" modarg, then "name" modarg, otherwise a
 * synthesized "alsa_input.<dev>[.<mapping>]" name. namereg_fail is set
 * TRUE for user-chosen names (collision is a hard error) and FALSE for
 * synthesized ones (core may mangle them to be unique).
 * NOTE(review): returns, the 'n'/'t' declarations, the branch for using
 * the "name" modarg value, and pa_xfree(t) are not visible here. */
1424 static void set_source_name(pa_source_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1430 pa_assert(device_name
);
1432 if ((n
= pa_modargs_get_value(ma
, "source_name", NULL
))) {
1433 pa_source_new_data_set_name(data
, n
);
1434 data
->namereg_fail
= TRUE
;
1438 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1439 data
->namereg_fail
= TRUE
;
/* Fallback: build a name from the device id (preferred) or the
 * device name, optionally qualified by the mapping name. */
1441 n
= device_id
? device_id
: device_name
;
1442 data
->namereg_fail
= FALSE
;
1446 t
= pa_sprintf_malloc("alsa_input.%s.%s", n
, mapping
->name
);
1448 t
= pa_sprintf_malloc("alsa_input.%s", n
);
1450 pa_source_new_data_set_name(data
, t
);
/* Locate and probe the ALSA mixer for the opened PCM device.
 * If an explicit 'element' was given, synthesize a single input path
 * for it; otherwise build and probe a full path set from 'mapping'.
 * On any failure everything is torn down again (the trailing cleanup
 * section), leaving u->mixer_* all NULL.
 * NOTE(review): return statements, 'goto fail'-style jumps and the
 * element/mapping branch keywords are not visible in this excerpt. */
1454 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
/* Nothing to probe if neither a mapping nor an explicit element was
 * requested. */
1456 if (!mapping
&& !element
)
1459 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1460 pa_log_info("Failed to find a working mixer device.");
/* Explicit control element: synthesize a single capture path for it. */
1466 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_INPUT
)))
1469 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1472 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1473 pa_alsa_path_dump(u
->mixer_path
);
/* Mapping-driven case: probe the whole set of candidate paths. */
1476 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_INPUT
)))
1479 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1481 pa_log_debug("Probed mixer paths:");
1482 pa_alsa_path_set_dump(u
->mixer_path_set
);
/* Failure cleanup: free whichever of path-set / single path exists,
 * then close the mixer handle. */
1489 if (u
->mixer_path_set
) {
1490 pa_alsa_path_set_free(u
->mixer_path_set
);
1491 u
->mixer_path_set
= NULL
;
1492 } else if (u
->mixer_path
) {
1493 pa_alsa_path_free(u
->mixer_path
);
1494 u
->mixer_path
= NULL
;
1497 if (u
->mixer_handle
) {
1498 snd_mixer_close(u
->mixer_handle
);
1499 u
->mixer_handle
= NULL
;
/* Wire the probed mixer into the new source: select the active path,
 * derive base volume / volume steps, install the hardware volume and
 * mute callbacks, set the corresponding source flags, and start mixer
 * event monitoring (rtpoll-based for synchronous volume, fdlist/mainloop
 * otherwise).
 * NOTE(review): early returns, several 'else' keywords, closing braces
 * and the final 'return 0' are not visible in this truncated excerpt. */
1503 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
, pa_bool_t sync_volume
) {
/* No mixer was found by find_mixer() — nothing to set up. */
1506 if (!u
->mixer_handle
)
1509 if (u
->source
->active_port
) {
1510 pa_alsa_port_data
*data
;
1512 /* We have a list of supported paths, so let's activate the
1513 * one that has been chosen as active */
1515 data
= PA_DEVICE_PORT_DATA(u
->source
->active_port
);
1516 u
->mixer_path
= data
->path
;
1518 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1521 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
/* No active port: fall back to the first path of the probed set. */
1525 if (!u
->mixer_path
&& u
->mixer_path_set
)
1526 u
->mixer_path
= u
->mixer_path_set
->paths
;
1528 if (u
->mixer_path
) {
1529 /* Hmm, we have only a single path, then let's activate it */
1531 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1533 if (u
->mixer_path
->settings
)
1534 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1539 if (!u
->mixer_path
->has_volume
)
1540 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
/* dB-capable path: base volume compensates the path's max gain so the
 * full PA_VOLUME_NORM range maps onto the hardware dB scale. */
1543 if (u
->mixer_path
->has_dB
) {
1544 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1546 u
->source
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1547 u
->source
->n_volume_steps
= PA_VOLUME_NORM
+1;
1549 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->source
->base_volume
));
/* Raw (non-dB) path: step count comes from the integer mixer range.
 * NOTE(review): the 'else' introducing this branch is not visible. */
1552 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1553 u
->source
->base_volume
= PA_VOLUME_NORM
;
1554 u
->source
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
/* Install the hardware volume callbacks defined earlier in this file. */
1557 u
->source
->get_volume
= source_get_volume_cb
;
1558 u
->source
->set_volume
= source_set_volume_cb
;
1559 u
->source
->write_volume
= source_write_volume_cb
;
1561 u
->source
->flags
|= PA_SOURCE_HW_VOLUME_CTRL
;
1562 if (u
->mixer_path
->has_dB
) {
1563 u
->source
->flags
|= PA_SOURCE_DECIBEL_VOLUME
;
/* Synchronous (deferred, IO-thread) volume requires a dB scale; the
 * enabling condition on 'sync_volume' is presumably on a line not
 * visible here. */
1565 u
->source
->flags
|= PA_SOURCE_SYNC_VOLUME
;
1566 pa_log_info("Successfully enabled synchronous volume.");
1570 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1573 if (!u
->mixer_path
->has_mute
) {
1574 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
/* Hardware mute branch — NOTE(review): its 'else' is not visible. */
1576 u
->source
->get_mute
= source_get_mute_cb
;
1577 u
->source
->set_mute
= source_set_mute_cb
;
1578 u
->source
->flags
|= PA_SOURCE_HW_MUTE_CTRL
;
1579 pa_log_info("Using hardware mute control.");
/* Mixer event monitoring: rtpoll-driven when volume is applied from
 * the IO thread, otherwise main-loop fd list. */
1582 if (u
->source
->flags
& (PA_SOURCE_HW_VOLUME_CTRL
|PA_SOURCE_HW_MUTE_CTRL
)) {
1583 int (*mixer_callback
)(snd_mixer_elem_t
*, unsigned int);
1584 if (u
->source
->flags
& PA_SOURCE_SYNC_VOLUME
) {
1585 u
->mixer_pd
= pa_alsa_mixer_pdata_new();
1586 mixer_callback
= io_mixer_callback
;
1588 if (pa_alsa_set_mixer_rtpoll(u
->mixer_pd
, u
->mixer_handle
, u
->rtpoll
) < 0) {
1589 pa_log("Failed to initialize file descriptor monitoring");
1593 u
->mixer_fdl
= pa_alsa_fdlist_new();
1594 mixer_callback
= ctl_mixer_callback
;
1596 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1597 pa_log("Failed to initialize file descriptor monitoring");
1602 if (u
->mixer_path_set
)
1603 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1605 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
/* Create and initialize a new ALSA capture source.
 * Parses module arguments (sample spec, buffer metrics, mmap/tsched/
 * ignore_dB/sync_volume flags), opens the PCM device (by mapping, by
 * device id with auto-profile, or by device string), probes the mixer,
 * creates the pa_source object, configures latency/watermarks, sets up
 * the mixer callbacks and finally spawns the realtime I/O thread.
 * Returns the new pa_source, or (in the not-visible failure paths) NULL
 * after userdata_free()-style cleanup.
 * NOTE(review): many original lines are missing from this excerpt —
 * 'goto fail' jumps, 'else' keywords, closing braces, the declarations
 * of 'map' and 'frame_size', and the fail/error epilogue. Comments below
 * only describe what the visible lines do. */
1611 pa_source
*pa_alsa_source_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1613 struct userdata
*u
= NULL
;
1614 const char *dev_id
= NULL
;
1615 pa_sample_spec ss
, requested_ss
;
1617 uint32_t nfrags
, frag_size
, buffer_size
, tsched_size
, tsched_watermark
;
1618 snd_pcm_uframes_t period_frames
, buffer_frames
, tsched_frames
;
1620 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
, namereg_fail
= FALSE
, sync_volume
= FALSE
;
1621 pa_source_new_data data
;
1622 pa_alsa_profile_set
*profile_set
= NULL
;
/* Start from the daemon defaults, then let modargs override them. */
1627 ss
= m
->core
->default_sample_spec
;
1628 map
= m
->core
->default_channel_map
;
1629 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1630 pa_log("Failed to parse sample specification and channel map");
1635 frame_size
= pa_frame_size(&ss
);
/* Default buffer metrics derived from daemon defaults and the
 * DEFAULT_TSCHED_* constants defined at the top of this file. */
1637 nfrags
= m
->core
->default_n_fragments
;
1638 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1640 frag_size
= (uint32_t) frame_size
;
1641 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1642 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1644 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1645 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1646 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1647 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1648 pa_log("Failed to parse buffer metrics");
1652 buffer_size
= nfrags
* frag_size
;
/* Convert byte metrics into ALSA frame counts. */
1654 period_frames
= frag_size
/frame_size
;
1655 buffer_frames
= buffer_size
/frame_size
;
1656 tsched_frames
= tsched_size
/frame_size
;
1658 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1659 pa_log("Failed to parse mmap argument.");
1663 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1664 pa_log("Failed to parse tsched argument.");
1668 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1669 pa_log("Failed to parse ignore_dB argument.");
1673 sync_volume
= m
->core
->sync_volume
;
1674 if (pa_modargs_get_value_boolean(ma
, "sync_volume", &sync_volume
) < 0) {
1675 pa_log("Failed to parse sync_volume argument.");
1679 use_tsched
= pa_alsa_may_tsched(use_tsched
);
/* Allocate and begin filling the per-source userdata. */
1681 u
= pa_xnew0(struct userdata
, 1);
1684 u
->use_mmap
= use_mmap
;
1685 u
->use_tsched
= use_tsched
;
1687 u
->rtpoll
= pa_rtpoll_new();
1688 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
/* Clock smoother maps sound-card time to system time; its remaining
 * constructor arguments are not visible in this excerpt. */
1690 u
->smoother
= pa_smoother_new(
1691 SMOOTHER_ADJUST_USEC
,
1692 SMOOTHER_WINDOW_USEC
,
1698 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
/* Device reservation (reserve-wrap) so other processes can request the
 * device; the first argument(s) of this call are not visible. */
1700 dev_id
= pa_modargs_get_value(
1702 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
1704 if (reserve_init(u
, dev_id
) < 0)
1707 if (reserve_monitor_init(u
, dev_id
) < 0)
/* Open the PCM: three visible variants — by explicit device_id with a
 * mapping, by device_id with automatic profile selection, or by a plain
 * device string. The selecting conditions are partially missing. */
1715 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1716 pa_log("device_id= not set");
1720 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
1724 SND_PCM_STREAM_CAPTURE
,
1725 &period_frames
, &buffer_frames
, tsched_frames
,
1729 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1731 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
1734 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1738 SND_PCM_STREAM_CAPTURE
,
1739 &period_frames
, &buffer_frames
, tsched_frames
,
1740 &b
, &d
, profile_set
, &mapping
)))
1745 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1746 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1749 SND_PCM_STREAM_CAPTURE
,
1750 &period_frames
, &buffer_frames
, tsched_frames
,
1755 pa_assert(u
->device_name
);
1756 pa_log_info("Successfully opened device %s.", u
->device_name
);
1758 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
1759 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
1764 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
/* 'b' = device supports mmap, 'd' = supports tsched, as reported by
 * the open helpers above; downgrade gracefully when unsupported. */
1766 if (use_mmap
&& !b
) {
1767 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1768 u
->use_mmap
= use_mmap
= FALSE
;
1771 if (use_tsched
&& (!b
|| !d
)) {
1772 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1773 u
->use_tsched
= use_tsched
= FALSE
;
1777 pa_log_info("Successfully enabled mmap() mode.");
1780 pa_log_info("Successfully enabled timer-based scheduling mode.");
1782 /* ALSA might tweak the sample spec, so recalculate the frame size */
1783 frame_size
= pa_frame_size(&ss
);
1785 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
/* Build the pa_source_new() construction data: name, sample spec,
 * channel map and the device property list. */
1787 pa_source_new_data_init(&data
);
1788 data
.driver
= driver
;
1791 set_source_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
1793 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1794 * variable instead of using &data.namereg_fail directly, because
1795 * data.namereg_fail is a bitfield and taking the address of a bitfield
1796 * variable is impossible. */
1797 namereg_fail
= data
.namereg_fail
;
1798 if (pa_modargs_get_value_boolean(ma
, "namereg_fail", &namereg_fail
) < 0) {
1799 pa_log("Failed to parse boolean argument namereg_fail.");
1800 pa_source_new_data_done(&data
);
1803 data
.namereg_fail
= namereg_fail
;
1805 pa_source_new_data_set_sample_spec(&data
, &ss
);
1806 pa_source_new_data_set_channel_map(&data
, &map
);
1808 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
1809 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1810 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (buffer_frames
* frame_size
));
1811 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1812 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1815 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
1816 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
1819 pa_alsa_init_description(data
.proplist
);
1821 if (u
->control_device
)
1822 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
1824 if (pa_modargs_get_proplist(ma
, "source_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
1825 pa_log("Invalid properties");
1826 pa_source_new_data_done(&data
);
1830 if (u
->mixer_path_set
)
1831 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
1833 u
->source
= pa_source_new(m
->core
, &data
, PA_SOURCE_HARDWARE
|PA_SOURCE_LATENCY
|(u
->use_tsched
? PA_SOURCE_DYNAMIC_LATENCY
: 0));
1834 pa_source_new_data_done(&data
);
1837 pa_log("Failed to create source object");
1841 if (pa_modargs_get_value_u32(ma
, "sync_volume_safety_margin",
1842 &u
->source
->thread_info
.volume_change_safety_margin
) < 0) {
1843 pa_log("Failed to parse sync_volume_safety_margin parameter");
1847 if (pa_modargs_get_value_s32(ma
, "sync_volume_extra_delay",
1848 &u
->source
->thread_info
.volume_change_extra_delay
) < 0) {
1849 pa_log("Failed to parse sync_volume_extra_delay parameter");
/* Hook up the source callbacks implemented in this file. */
1853 u
->source
->parent
.process_msg
= source_process_msg
;
1855 u
->source
->update_requested_latency
= source_update_requested_latency_cb
;
1856 u
->source
->set_state
= source_set_state_cb
;
1857 u
->source
->set_port
= source_set_port_cb
;
1858 u
->source
->userdata
= u
;
1860 pa_source_set_asyncmsgq(u
->source
, u
->thread_mq
.inq
);
1861 pa_source_set_rtpoll(u
->source
, u
->rtpoll
);
/* Final byte-size bookkeeping (ALSA may have adjusted the frame
 * counts during open). */
1863 u
->frame_size
= frame_size
;
1864 u
->fragment_size
= frag_size
= (size_t) (period_frames
* frame_size
);
1865 u
->hwbuf_size
= buffer_size
= (size_t) (buffer_frames
* frame_size
);
1866 pa_cvolume_mute(&u
->hardware_volume
, u
->source
->sample_spec
.channels
);
1868 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1869 (double) u
->hwbuf_size
/ (double) u
->fragment_size
,
1870 (long unsigned) u
->fragment_size
,
1871 (double) pa_bytes_to_usec(u
->fragment_size
, &ss
) / PA_USEC_PER_MSEC
,
1872 (long unsigned) u
->hwbuf_size
,
1873 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
/* Timer-scheduling mode: translate the requested watermark into the
 * final sample spec and set a dynamic latency range; otherwise the
 * latency is fixed at the full hardware buffer. */
1875 if (u
->use_tsched
) {
1876 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->source
->sample_spec
);
1878 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->source
->sample_spec
);
1879 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->source
->sample_spec
);
1881 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->source
->sample_spec
);
1882 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->source
->sample_spec
);
1884 fix_min_sleep_wakeup(u
);
1885 fix_tsched_watermark(u
);
1887 pa_source_set_latency_range(u
->source
,
1889 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1891 pa_log_info("Time scheduling watermark is %0.2fms",
1892 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1894 pa_source_set_fixed_latency(u
->source
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1898 if (update_sw_params(u
) < 0)
1901 if (setup_mixer(u
, ignore_dB
, sync_volume
) < 0)
1904 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
1906 if (!(u
->thread
= pa_thread_new("alsa-source", thread_func
, u
))) {
1907 pa_log("Failed to create thread.");
1911 /* Get initial mixer settings */
1912 if (data
.volume_is_set
) {
1913 if (u
->source
->set_volume
)
1914 u
->source
->set_volume(u
->source
);
1916 if (u
->source
->get_volume
)
1917 u
->source
->get_volume(u
->source
);
1920 if (data
.muted_is_set
) {
1921 if (u
->source
->set_mute
)
1922 u
->source
->set_mute(u
->source
);
1924 if (u
->source
->get_mute
)
1925 u
->source
->get_mute(u
->source
);
1928 pa_source_put(u
->source
);
/* Success and failure epilogues both free the temporary profile set;
 * the intervening 'return'/'fail:' lines are not visible here. */
1931 pa_alsa_profile_set_free(profile_set
);
1941 pa_alsa_profile_set_free(profile_set
);
/* Tear down the userdata in reverse construction order: unlink the
 * source, stop and join the I/O thread, then release the message queue,
 * source reference, mixer plumbing, rtpoll items, PCM handle, smoother
 * and owned strings.
 * NOTE(review): the surrounding NULL guards (e.g. 'if (u->source)',
 * 'if (u->thread)'), reserve_done()-style calls and the final
 * pa_xfree(u) are on lines not visible in this excerpt. */
1946 static void userdata_free(struct userdata
*u
) {
1950 pa_source_unlink(u
->source
);
/* Synchronously shut down and join the I/O thread before freeing
 * anything it might still touch. */
1953 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1954 pa_thread_free(u
->thread
);
1957 pa_thread_mq_done(&u
->thread_mq
);
1960 pa_source_unref(u
->source
);
1963 pa_alsa_mixer_pdata_free(u
->mixer_pd
);
1965 if (u
->alsa_rtpoll_item
)
1966 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1969 pa_rtpoll_free(u
->rtpoll
);
/* Drop pending capture data before closing the PCM. */
1971 if (u
->pcm_handle
) {
1972 snd_pcm_drop(u
->pcm_handle
);
1973 snd_pcm_close(u
->pcm_handle
);
1977 pa_alsa_fdlist_free(u
->mixer_fdl
);
1979 if (u
->mixer_path_set
)
1980 pa_alsa_path_set_free(u
->mixer_path_set
);
1981 else if (u
->mixer_path
)
1982 pa_alsa_path_free(u
->mixer_path
);
1984 if (u
->mixer_handle
)
1985 snd_mixer_close(u
->mixer_handle
);
1988 pa_smoother_free(u
->smoother
);
1993 pa_xfree(u
->device_name
);
1994 pa_xfree(u
->control_device
);
1998 void pa_alsa_source_free(pa_source
*s
) {
2001 pa_source_assert_ref(s
);
2002 pa_assert_se(u
= s
->userdata
);