]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-source.c
alsa: Add separate sinks/sources for UCM modifiers if needed
[pulseaudio] / src / modules / alsa / alsa-source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include <modules/reserve-wrap.h>
54
55 #include "alsa-util.h"
56 #include "alsa-source.h"
57
58 /* #define DEBUG_TIMING */
59
60 #define DEFAULT_DEVICE "default"
61
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
74
75 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
76 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
82
/* Per-device instance data for one ALSA capture source. Shared between the
 * main thread and the dedicated IO thread (hand-off via thread_mq/rtpoll). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;               /* NULL while the source is suspended */

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,                /* current timer-scheduling watermark */
        tsched_watermark_ref,            /* watermark chosen at creation time; restored on unsuspend */
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    pa_usec_t watermark_dec_not_before;  /* 0 = decrease allowed immediately (see decrease_watermark()) */
    pa_usec_t min_latency_ref;           /* min latency recorded in main thread, reused from IO context */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first;                     /* TRUE until the first read after start/resume/recovery */

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Time smoother mapping system time to stream time, fed by update_smoother() */
    pa_smoother *smoother;
    uint64_t read_count;                 /* total bytes read from the device so far */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Device reservation (see modules/reserve-wrap.h) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};
144
145 static void userdata_free(struct userdata *u);
146
147 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
148 pa_assert(r);
149 pa_assert(u);
150
151 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
152 return PA_HOOK_CANCEL;
153
154 return PA_HOOK_OK;
155 }
156
157 static void reserve_done(struct userdata *u) {
158 pa_assert(u);
159
160 if (u->reserve_slot) {
161 pa_hook_slot_free(u->reserve_slot);
162 u->reserve_slot = NULL;
163 }
164
165 if (u->reserve) {
166 pa_reserve_wrapper_unref(u->reserve);
167 u->reserve = NULL;
168 }
169 }
170
171 static void reserve_update(struct userdata *u) {
172 const char *description;
173 pa_assert(u);
174
175 if (!u->source || !u->reserve)
176 return;
177
178 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
179 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
180 }
181
182 static int reserve_init(struct userdata *u, const char *dname) {
183 char *rname;
184
185 pa_assert(u);
186 pa_assert(dname);
187
188 if (u->reserve)
189 return 0;
190
191 if (pa_in_system_mode())
192 return 0;
193
194 if (!(rname = pa_alsa_get_reserve_name(dname)))
195 return 0;
196
197 /* We are resuming, try to lock the device */
198 u->reserve = pa_reserve_wrapper_get(u->core, rname);
199 pa_xfree(rname);
200
201 if (!(u->reserve))
202 return -1;
203
204 reserve_update(u);
205
206 pa_assert(!u->reserve_slot);
207 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
208
209 return 0;
210 }
211
212 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
213 pa_bool_t b;
214
215 pa_assert(w);
216 pa_assert(u);
217
218 b = PA_PTR_TO_UINT(busy) && !u->reserve;
219
220 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
221 return PA_HOOK_OK;
222 }
223
224 static void monitor_done(struct userdata *u) {
225 pa_assert(u);
226
227 if (u->monitor_slot) {
228 pa_hook_slot_free(u->monitor_slot);
229 u->monitor_slot = NULL;
230 }
231
232 if (u->monitor) {
233 pa_reserve_monitor_wrapper_unref(u->monitor);
234 u->monitor = NULL;
235 }
236 }
237
238 static int reserve_monitor_init(struct userdata *u, const char *dname) {
239 char *rname;
240
241 pa_assert(u);
242 pa_assert(dname);
243
244 if (pa_in_system_mode())
245 return 0;
246
247 if (!(rname = pa_alsa_get_reserve_name(dname)))
248 return 0;
249
250 /* We are resuming, try to lock the device */
251 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
252 pa_xfree(rname);
253
254 if (!(u->monitor))
255 return -1;
256
257 pa_assert(!u->monitor_slot);
258 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
259
260 return 0;
261 }
262
263 static void fix_min_sleep_wakeup(struct userdata *u) {
264 size_t max_use, max_use_2;
265
266 pa_assert(u);
267 pa_assert(u->use_tsched);
268
269 max_use = u->hwbuf_size - u->hwbuf_unused;
270 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
271
272 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
273 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
274
275 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
276 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
277 }
278
279 static void fix_tsched_watermark(struct userdata *u) {
280 size_t max_use;
281 pa_assert(u);
282 pa_assert(u->use_tsched);
283
284 max_use = u->hwbuf_size - u->hwbuf_unused;
285
286 if (u->tsched_watermark > max_use - u->min_sleep)
287 u->tsched_watermark = max_use - u->min_sleep;
288
289 if (u->tsched_watermark < u->min_wakeup)
290 u->tsched_watermark = u->min_wakeup;
291 }
292
/* Called from IO context after an overrun or a near-miss. First try to grow
 * the wakeup watermark; if it is already at its cap, grow the source's
 * minimum latency instead (unless a fixed latency range was configured). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Grow by at most a factor of two per step */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* When we reach this point the watermark and the latency are both
     * maxed out and there is nothing more we can do about dropouts. */
}
330
331 static void decrease_watermark(struct userdata *u) {
332 size_t old_watermark;
333 pa_usec_t now;
334
335 pa_assert(u);
336 pa_assert(u->use_tsched);
337
338 now = pa_rtclock_now();
339
340 if (u->watermark_dec_not_before <= 0)
341 goto restart;
342
343 if (u->watermark_dec_not_before > now)
344 return;
345
346 old_watermark = u->tsched_watermark;
347
348 if (u->tsched_watermark < u->watermark_dec_step)
349 u->tsched_watermark = u->tsched_watermark / 2;
350 else
351 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
352
353 fix_tsched_watermark(u);
354
355 if (old_watermark != u->tsched_watermark)
356 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
357 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
358
359 /* We don't change the latency range*/
360
361 restart:
362 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
363 }
364
/* Split the currently requested latency into how long the IO thread may
 * sleep (*sleep_usec) and how much headroom is reserved for processing
 * after wakeup (*process_usec, the watermark). Timer scheduling only. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t wm, usec;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_source_get_requested_latency_within_thread(u->source);

    /* (pa_usec_t) -1 means "no specific latency requested": use the whole buffer */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

    /* Never let the watermark eat the entire latency budget */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
394
395 static int try_recover(struct userdata *u, const char *call, int err) {
396 pa_assert(u);
397 pa_assert(call);
398 pa_assert(err < 0);
399
400 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
401
402 pa_assert(err != -EAGAIN);
403
404 if (err == -EPIPE)
405 pa_log_debug("%s: Buffer overrun!", call);
406
407 if (err == -ESTRPIPE)
408 pa_log_debug("%s: System suspended!", call);
409
410 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
411 pa_log("%s: %s", call, pa_alsa_strerror(err));
412 return -1;
413 }
414
415 u->first = TRUE;
416 return 0;
417 }
418
/* Given n_bytes available to read, compute how much room is left before the
 * hardware buffer overruns, and drive the adaptive watermark: grow it on
 * (near-)overrun, shrink it when we consistently have plenty of headroom
 * and were woken by the timer. Returns the remaining headroom in bytes. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Headroom was neither dangerously low nor comfortably high:
         * re-arm the decrease grace period from scratch. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
471
/* Called from IO context. Read all currently available audio from the device
 * using the mmap transfer path and post it to the source. 'polled' is TRUE
 * when we were woken by POLLIN, 'on_timeout' when woken by the tsched timer.
 * On return *sleep_usec holds how long the caller may sleep (tsched only).
 * Returns 1 if any data was posted, 0 if not, negative on fatal error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;    /* outer-loop iteration cap, see below */

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many frames does the device have for us? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* With timer scheduling, skip the read entirely if we still have
         * more than half the sleep budget of headroom left. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but avail says there is nothing: driver bug.
             * Complain once per lifetime of the daemon. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }


        /* Bail out after 10 passes so one noisy device cannot starve the
         * rest of the IO loop. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Drain the n_bytes we saw above, possibly in several mmap chunks */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* -EAGAIN after we already consumed something is fine:
                 * the device simply has no more data right now. */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the device memory zero-copy; pa_source_post() copies if
             * a reader holds on to it longer than we do. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the headroom shrinks to the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
633
/* Called from IO context. Same contract as mmap_read(), but uses the classic
 * snd_pcm_readi() copy path for devices that do not support mmap access. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;    /* outer-loop iteration cap, see below */

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many frames does the device have for us? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* With timer scheduling, skip the read if we still have more than
         * half the sleep budget of headroom left. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but avail says there is nothing: driver bug.
             * Complain once per lifetime of the daemon. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bail out after 10 passes so one noisy device cannot starve the
         * rest of the IO loop. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        /* Drain the n_bytes we saw above, one memblock at a time */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            /* (size_t) -1 requests the pool's default block size */
            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* -EAGAIN after we already consumed something is fine:
                 * the device simply has no more data right now. */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the headroom shrinks to the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
763
/* Feed the time smoother with a fresh (system time, stream time) sample so
 * that latency queries interpolate smoothly between device updates. Updates
 * are rate-limited by smoother_interval, which grows exponentially up to
 * SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream position = bytes read so far plus what is still sitting in
     * the hardware buffer (delay, in frames). */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
809
810 static pa_usec_t source_get_latency(struct userdata *u) {
811 int64_t delay;
812 pa_usec_t now1, now2;
813
814 pa_assert(u);
815
816 now1 = pa_rtclock_now();
817 now2 = pa_smoother_get(u->smoother, now1);
818
819 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
820
821 return delay >= 0 ? (pa_usec_t) delay : 0;
822 }
823
824 static int build_pollfd(struct userdata *u) {
825 pa_assert(u);
826 pa_assert(u->pcm_handle);
827
828 if (u->alsa_rtpoll_item)
829 pa_rtpoll_item_free(u->alsa_rtpoll_item);
830
831 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
832 return -1;
833
834 return 0;
835 }
836
/* Called from IO context */
/* Suspend the source: pause the smoother, close the PCM, disable any UCM
 * modifier associated with this source and drop the poll item.
 * Always returns 0. */
static int suspend(struct userdata *u) {
    const char *mod_name;

    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    /* NOTE(review): u->ucm_context is dereferenced without a NULL check;
     * presumably the UCM_MODIFIER property is only ever set when a UCM
     * context exists — confirm against the source-creation path. */
    if ((mod_name = pa_proplist_gets(u->source->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
        pa_log_info("Disable ucm modifier %s", mod_name);

        if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_dismod", mod_name) < 0)
            pa_log("Failed to disable ucm modifier %s", mod_name);
    }

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
866
/* Called from IO context */
/* Recompute and apply the ALSA software parameters (avail_min and the
 * unused tail of the hardware buffer) from the currently requested latency.
 * Returns 0 on success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer scheduling we only want to be woken after the
         * planned sleep time has passed, not on every period */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
919
/* Called from IO Context on unsuspend or from main thread when creating source */
/* Re-derive all watermark-related byte values from 'tsched_watermark'
 * (expressed against sample spec 'ss') and install the matching latency
 * range on the source. 'in_thread' selects the IO-context variant of the
 * latency-range setter. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    /* Round-trip through usec so the watermark is re-expressed in the
     * source's (possibly different) sample spec */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}
954
/* Called from IO context */
/* Resume from suspend: re-enable the UCM modifier (if any), reopen the PCM
 * and verify that the hardware still accepts the exact parameters we were
 * created with — if anything differs the resume fails rather than silently
 * changing the stream format. Returns 0 on success, -PA_ERR_IO on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    const char *mod_name;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((mod_name = pa_proplist_gets(u->source->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
        pa_log_info("Enable ucm modifier %s", mod_name);

        if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
            pa_log("Failed to enable ucm modifier %s", mod_name);
    }

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before the suspend */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Restart the timing machinery from scratch */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1044
/* Called from IO context */
/* Message dispatcher for the source: answers latency queries and performs
 * suspend/resume on state changes, then hands everything on to the generic
 * pa_source_process_msg(). */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM, so report zero latency */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: just register the poll fds */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    /* Nothing to do for these states */
                    ;
            }

            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
1105
1106 /* Called from main context */
1107 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1108 pa_source_state_t old_state;
1109 struct userdata *u;
1110
1111 pa_source_assert_ref(s);
1112 pa_assert_se(u = s->userdata);
1113
1114 old_state = pa_source_get_state(u->source);
1115
1116 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1117 reserve_done(u);
1118 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1119 if (reserve_init(u, u->device_name) < 0)
1120 return -PA_ERR_BUSY;
1121
1122 return 0;
1123 }
1124
1125 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1126 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1127
1128 pa_assert(u);
1129 pa_assert(u->mixer_handle);
1130
1131 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1132 return 0;
1133
1134 if (!PA_SOURCE_IS_LINKED(u->source->state))
1135 return 0;
1136
1137 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1138 pa_source_set_mixer_dirty(u->source, TRUE);
1139 return 0;
1140 }
1141
1142 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1143 pa_source_get_volume(u->source, TRUE);
1144 pa_source_get_mute(u->source, TRUE);
1145 }
1146
1147 return 0;
1148 }
1149
1150 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1151 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1152
1153 pa_assert(u);
1154 pa_assert(u->mixer_handle);
1155
1156 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1157 return 0;
1158
1159 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1160 pa_source_set_mixer_dirty(u->source, TRUE);
1161 return 0;
1162 }
1163
1164 if (mask & SND_CTL_EVENT_MASK_VALUE)
1165 pa_source_update_volume_and_mute(u->source);
1166
1167 return 0;
1168 }
1169
/* Called from main context. Reads the current hardware volume from the
 * mixer path and propagates it into the source's real_volume. */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* If the hardware query fails, leave the source's volume untouched. */
    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    /* Nothing changed since our last read: no update needed. */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
}
1202
/* Called from main context. Pushes s->real_volume into the hardware mixer
 * as far as the hardware allows, then (when a dB scale is available)
 * computes the residual software volume needed to match the request
 * exactly. Without a dB scale the achieved hardware volume is written
 * back into real_volume instead. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* With deferred volume the actual write happens later in the IO
     * thread (source_write_volume_cb); here we only probe what the
     * hardware would do. */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1260
/* Called from IO context (deferred volume mode only). Writes the pending
 * hardware volume to the mixer and logs a diagnostic if what the hardware
 * accepted deviates from the request by more than VOLUME_ACCURACY. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare the volume actually set with the one requested; the
         * ratio of the two should be close to PA_VOLUME_NORM. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            /* The two formatting buffers are only used alternately, so a
             * union keeps the stack footprint down. */
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1302
1303 static void source_get_mute_cb(pa_source *s) {
1304 struct userdata *u = s->userdata;
1305 pa_bool_t b;
1306
1307 pa_assert(u);
1308 pa_assert(u->mixer_path);
1309 pa_assert(u->mixer_handle);
1310
1311 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1312 return;
1313
1314 s->muted = b;
1315 }
1316
1317 static void source_set_mute_cb(pa_source *s) {
1318 struct userdata *u = s->userdata;
1319
1320 pa_assert(u);
1321 pa_assert(u->mixer_path);
1322 pa_assert(u->mixer_handle);
1323
1324 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1325 }
1326
/* Wires up the source's volume and mute callbacks according to the
 * capabilities of the currently selected mixer path, falling back to
 * software volume/mute where the hardware offers no control. Also
 * derives base_volume and n_volume_steps from the path's range. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred (IO-thread) volume writing additionally requires a
         * dB scale on the mixer path. */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Choose the base volume so that the path's maximum dB maps
             * to PA_VOLUME_NORM. */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            pa_source_enable_decibel_volume(u->source, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1375
1376 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1377 struct userdata *u = s->userdata;
1378
1379 pa_assert(u);
1380 pa_assert(p);
1381 pa_assert(u->ucm_context);
1382
1383 return pa_alsa_ucm_set_port(u->ucm_context, p, FALSE);
1384 }
1385
1386 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1387 struct userdata *u = s->userdata;
1388 pa_alsa_port_data *data;
1389
1390 pa_assert(u);
1391 pa_assert(p);
1392 pa_assert(u->mixer_handle);
1393
1394 data = PA_DEVICE_PORT_DATA(p);
1395
1396 pa_assert_se(u->mixer_path = data->path);
1397 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1398
1399 mixer_volume_init(u);
1400
1401 if (s->set_mute)
1402 s->set_mute(s);
1403 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1404 if (s->write_volume)
1405 s->write_volume(s);
1406 } else {
1407 if (s->set_volume)
1408 s->set_volume(s);
1409 }
1410
1411 return 0;
1412 }
1413
1414 static void source_update_requested_latency_cb(pa_source *s) {
1415 struct userdata *u = s->userdata;
1416 pa_assert(u);
1417 pa_assert(u->use_tsched); /* only when timer scheduling is used
1418 * we can dynamically adjust the
1419 * latency */
1420
1421 if (!u->pcm_handle)
1422 return;
1423
1424 update_sw_params(u);
1425 }
1426
1427 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1428 {
1429 struct userdata *u = s->userdata;
1430 int i;
1431 pa_bool_t supported = FALSE;
1432
1433 pa_assert(u);
1434
1435 for (i = 0; u->rates[i]; i++) {
1436 if (u->rates[i] == rate) {
1437 supported = TRUE;
1438 break;
1439 }
1440 }
1441
1442 if (!supported) {
1443 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1444 return FALSE;
1445 }
1446
1447 if (!PA_SOURCE_IS_OPENED(s->state)) {
1448 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1449 u->source->sample_spec.rate = rate;
1450 return TRUE;
1451 }
1452
1453 return FALSE;
1454 }
1455
/* The source's IO thread: reads audio data from the PCM device and posts
 * it to the source, services deferred volume changes, and drives the
 * rtpoll loop that multiplexes ALSA poll descriptors, timers and thread
 * messages. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration after (re)open: kick off the capture
             * stream and resume the timing smoother. */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                u->first = FALSE;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* With deferred volume, pending volume changes may impose an
         * earlier wakeup than the capture timer. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* ret == 0 signals a regular shutdown request. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLIN (e.g. POLLERR) means trouble: try
             * to recover and restart capture from scratch. */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1585
1586 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1587 const char *n;
1588 char *t;
1589
1590 pa_assert(data);
1591 pa_assert(ma);
1592 pa_assert(device_name);
1593
1594 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1595 pa_source_new_data_set_name(data, n);
1596 data->namereg_fail = TRUE;
1597 return;
1598 }
1599
1600 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1601 data->namereg_fail = TRUE;
1602 else {
1603 n = device_id ? device_id : device_name;
1604 data->namereg_fail = FALSE;
1605 }
1606
1607 if (mapping)
1608 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1609 else
1610 t = pa_sprintf_malloc("alsa_input.%s", n);
1611
1612 pa_source_new_data_set_name(data, t);
1613 pa_xfree(t);
1614 }
1615
/* Locates a working mixer device for the PCM handle and sets up either a
 * synthesized path for an explicitly requested mixer element or the
 * mapping's probed input path set. On failure everything is torn down
 * again so the source falls back to software volume/mute. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
    snd_hctl_t *hctl;

    /* Nothing to do when neither a mapping nor an explicit element was given. */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* A control= module argument overrides the mapping's path set:
         * synthesize and probe a single path for just that element. */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->input_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1654
/* Activates the initial mixer path (from the active port if there is one,
 * otherwise the single available path), initializes volume/mute handling
 * and registers mixer event callbacks when the hardware offers volume or
 * mute controls. Returns 0 on success, -1 on failure.
 * (The ignore_dB parameter is currently unused here; dB handling was
 * already decided during path probing in find_mixer().) */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* No mixer found: software volume/mute only, nothing to set up. */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            /* Deferred volume: mixer events are dispatched from the IO
             * thread via the rtpoll. */
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            /* Otherwise mixer events are dispatched from the main loop. */
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1730
/* Creates and initializes an ALSA capture source: parses module
 * arguments, opens the PCM device (by mapping, by device id with profile
 * probing, or by plain device string), sets up mixer/UCM port and volume
 * handling, and starts the IO thread. Returns the new source, or NULL on
 * failure (all partially constructed state is freed via userdata_free). */
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;
    void *state = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* --- Parse sample spec, channel map and rate arguments --- */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    /* --- Derive default buffer metrics, then let module args override --- */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    /* --- Allocate and initialize the userdata struct --- */
    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    /* use ucm */
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    /* --- Open the PCM device: by mapping, by device id with automatic
     * profile probing, or by plain device string --- */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        /* If the mapping represents a UCM modifier, enable it first. */
        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* The open helpers clear b/d if mmap or tsched turned out to be
     * unsupported by the device. */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");
        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on overrun");
    }

    u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    /* With UCM, volume is routed through the UCM context instead of a
     * directly probed mixer. */
    if (!u->ucm_context)
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* --- Fill in pa_source_new_data and create the source proper --- */
    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);
    pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context)
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, FALSE, card);
    else if (u->mixer_path_set)
        pa_alsa_add_ports(&data, u->mixer_path_set, card);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->source->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->source->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    /* --- Hook up the source callbacks --- */
    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    if (u->ucm_context)
        u->source->set_port = source_set_port_ucm_cb;
    else
        u->source->set_port = source_set_port_cb;
    if (u->source->alternate_sample_rate)
        u->source->update_rate = source_update_rate_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
    }
    else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    /* --- Activate the initial port / mixer path --- */
    if (u->ucm_context) {
        if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, FALSE) < 0)
            goto fail;
    } else if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    /* NOTE(review): data.volume_is_set/muted_is_set are read here after
     * pa_source_new_data_done(&data) above; this relies on done() not
     * clearing those bitfields — confirm against pa_source_new_data_done. */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
        u->source->write_volume(u->source);

    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
2105
2106 static void userdata_free(struct userdata *u) {
2107 pa_assert(u);
2108
2109 if (u->source)
2110 pa_source_unlink(u->source);
2111
2112 if (u->thread) {
2113 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2114 pa_thread_free(u->thread);
2115 }
2116
2117 pa_thread_mq_done(&u->thread_mq);
2118
2119 if (u->source)
2120 pa_source_unref(u->source);
2121
2122 if (u->mixer_pd)
2123 pa_alsa_mixer_pdata_free(u->mixer_pd);
2124
2125 if (u->alsa_rtpoll_item)
2126 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2127
2128 if (u->rtpoll)
2129 pa_rtpoll_free(u->rtpoll);
2130
2131 if (u->pcm_handle) {
2132 snd_pcm_drop(u->pcm_handle);
2133 snd_pcm_close(u->pcm_handle);
2134 }
2135
2136 if (u->mixer_fdl)
2137 pa_alsa_fdlist_free(u->mixer_fdl);
2138
2139 if (u->mixer_path && !u->mixer_path_set)
2140 pa_alsa_path_free(u->mixer_path);
2141
2142 if (u->mixer_handle)
2143 snd_mixer_close(u->mixer_handle);
2144
2145 if (u->smoother)
2146 pa_smoother_free(u->smoother);
2147
2148 if (u->rates)
2149 pa_xfree(u->rates);
2150
2151 reserve_done(u);
2152 monitor_done(u);
2153
2154 pa_xfree(u->device_name);
2155 pa_xfree(u->control_device);
2156 pa_xfree(u->paths_dir);
2157 pa_xfree(u);
2158 }
2159
2160 void pa_alsa_source_free(pa_source *s) {
2161 struct userdata *u;
2162
2163 pa_source_assert_ref(s);
2164 pa_assert_se(u = s->userdata);
2165
2166 userdata_free(u);
2167 }