]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-source.c
alsa: Use card description in default sink/source prefix when available
[pulseaudio] / src / modules / alsa / alsa-source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include <modules/reserve-wrap.h>
54
55 #include "alsa-util.h"
56 #include "alsa-source.h"
57
/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

/* Tunables for timer-based scheduling (tsched): buffer sizing, the wakeup
 * watermark and its adjustment steps/thresholds, minimum sleep/wakeup
 * intervals, and the latency smoother parameters. All are expressed in
 * microseconds via the PA_USEC_PER_* helpers. */

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */

#define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
#define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
82
/* Per-source instance state, shared between the main thread and the IO
 * thread via message passing / the rtpoll loop. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread and its message queue / poll machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;      /* ALSA capture PCM; NULL while suspended */

    /* Mixer (hardware volume/mute) plumbing */
    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume; /* last volume observed on the hardware mixer */

    unsigned int *rates;        /* presumably the supported sample rates — set elsewhere, verify against module setup */

    /* All of the following sizes are in bytes (converted from usec via
     * pa_usec_to_bytes() for the source's sample spec) */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    pa_usec_t watermark_dec_not_before; /* earliest time the watermark may be decreased again (0 = not armed) */
    pa_usec_t min_latency_ref;          /* min latency remembered for IO-context latency-range resets */
    pa_usec_t tsched_watermark_usec;    /* tsched_watermark converted to time */

    char *device_name;          /* name of the PCM device */
    char *control_device;       /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first;                 /* set to true on stream recovery/resume */

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Smoother-based latency estimation */
    pa_smoother *smoother;
    uint64_t read_count;        /* total bytes read from the device */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Device reservation: our own reservation and a monitor for others' */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};
145
146 static void userdata_free(struct userdata *u);
147
148 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
149 pa_assert(r);
150 pa_assert(u);
151
152 pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);
153
154 if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
155 return PA_HOOK_CANCEL;
156
157 return PA_HOOK_OK;
158 }
159
160 static void reserve_done(struct userdata *u) {
161 pa_assert(u);
162
163 if (u->reserve_slot) {
164 pa_hook_slot_free(u->reserve_slot);
165 u->reserve_slot = NULL;
166 }
167
168 if (u->reserve) {
169 pa_reserve_wrapper_unref(u->reserve);
170 u->reserve = NULL;
171 }
172 }
173
174 static void reserve_update(struct userdata *u) {
175 const char *description;
176 pa_assert(u);
177
178 if (!u->source || !u->reserve)
179 return;
180
181 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
182 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
183 }
184
185 static int reserve_init(struct userdata *u, const char *dname) {
186 char *rname;
187
188 pa_assert(u);
189 pa_assert(dname);
190
191 if (u->reserve)
192 return 0;
193
194 if (pa_in_system_mode())
195 return 0;
196
197 if (!(rname = pa_alsa_get_reserve_name(dname)))
198 return 0;
199
200 /* We are resuming, try to lock the device */
201 u->reserve = pa_reserve_wrapper_get(u->core, rname);
202 pa_xfree(rname);
203
204 if (!(u->reserve))
205 return -1;
206
207 reserve_update(u);
208
209 pa_assert(!u->reserve_slot);
210 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
211
212 return 0;
213 }
214
215 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
216 pa_assert(w);
217 pa_assert(u);
218
219 if (PA_PTR_TO_UINT(busy) && !u->reserve) {
220 pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
221 pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
222 } else {
223 pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
224 pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
225 }
226
227 return PA_HOOK_OK;
228 }
229
230 static void monitor_done(struct userdata *u) {
231 pa_assert(u);
232
233 if (u->monitor_slot) {
234 pa_hook_slot_free(u->monitor_slot);
235 u->monitor_slot = NULL;
236 }
237
238 if (u->monitor) {
239 pa_reserve_monitor_wrapper_unref(u->monitor);
240 u->monitor = NULL;
241 }
242 }
243
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
245 char *rname;
246
247 pa_assert(u);
248 pa_assert(dname);
249
250 if (pa_in_system_mode())
251 return 0;
252
253 if (!(rname = pa_alsa_get_reserve_name(dname)))
254 return 0;
255
256 /* We are resuming, try to lock the device */
257 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
258 pa_xfree(rname);
259
260 if (!(u->monitor))
261 return -1;
262
263 pa_assert(!u->monitor_slot);
264 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265
266 return 0;
267 }
268
269 static void fix_min_sleep_wakeup(struct userdata *u) {
270 size_t max_use, max_use_2;
271
272 pa_assert(u);
273 pa_assert(u->use_tsched);
274
275 max_use = u->hwbuf_size - u->hwbuf_unused;
276 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
277
278 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
279 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
280
281 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
282 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
283 }
284
285 static void fix_tsched_watermark(struct userdata *u) {
286 size_t max_use;
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 max_use = u->hwbuf_size - u->hwbuf_unused;
291
292 if (u->tsched_watermark > max_use - u->min_sleep)
293 u->tsched_watermark = max_use - u->min_sleep;
294
295 if (u->tsched_watermark < u->min_wakeup)
296 u->tsched_watermark = u->min_wakeup;
297
298 u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
299 }
300
/* Called from IO context. React to an overrun (or too little headroom) by
 * raising the wakeup watermark; if the watermark is already maxed out,
 * widen the source's minimum latency instead (unless the latency range is
 * fixed by configuration). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we reach this point both the watermark and the latency range are
     * maxed out; there is nothing more we can do about overruns. */
}
338
339 static void decrease_watermark(struct userdata *u) {
340 size_t old_watermark;
341 pa_usec_t now;
342
343 pa_assert(u);
344 pa_assert(u->use_tsched);
345
346 now = pa_rtclock_now();
347
348 if (u->watermark_dec_not_before <= 0)
349 goto restart;
350
351 if (u->watermark_dec_not_before > now)
352 return;
353
354 old_watermark = u->tsched_watermark;
355
356 if (u->tsched_watermark < u->watermark_dec_step)
357 u->tsched_watermark = u->tsched_watermark / 2;
358 else
359 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
360
361 fix_tsched_watermark(u);
362
363 if (old_watermark != u->tsched_watermark)
364 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
365 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
366
367 /* We don't change the latency range*/
368
369 restart:
370 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
371 }
372
373 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
374 pa_usec_t wm, usec;
375
376 pa_assert(sleep_usec);
377 pa_assert(process_usec);
378
379 pa_assert(u);
380 pa_assert(u->use_tsched);
381
382 usec = pa_source_get_requested_latency_within_thread(u->source);
383
384 if (usec == (pa_usec_t) -1)
385 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
386
387 wm = u->tsched_watermark_usec;
388
389 if (wm > usec)
390 wm = usec/2;
391
392 *sleep_usec = usec - wm;
393 *process_usec = wm;
394
395 #ifdef DEBUG_TIMING
396 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
397 (unsigned long) (usec / PA_USEC_PER_MSEC),
398 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
399 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
400 #endif
401 }
402
403 static int try_recover(struct userdata *u, const char *call, int err) {
404 pa_assert(u);
405 pa_assert(call);
406 pa_assert(err < 0);
407
408 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
409
410 pa_assert(err != -EAGAIN);
411
412 if (err == -EPIPE)
413 pa_log_debug("%s: Buffer overrun!", call);
414
415 if (err == -ESTRPIPE)
416 pa_log_debug("%s: System suspended!", call);
417
418 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
419 pa_log("%s: %s", call, pa_alsa_strerror(err));
420 return -1;
421 }
422
423 u->first = true;
424 return 0;
425 }
426
/* Given n_bytes available for reading, compute how much space (in bytes)
 * is still left in the hardware buffer before an overrun, and drive the
 * watermark adjustment state machine accordingly. 'on_timeout' tells us
 * whether this wakeup came from the timer (only then may the watermark be
 * decreased). */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    bool overrun = false;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = true;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        bool reset_not_before = true;

        /* Too close to an overrun (or already overran): back off */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = false;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Headroom was neither clearly good nor clearly bad: disarm the
         * decrease deadline so the grace period starts over */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
479
/* Read captured audio from the device using ALSA's mmap interface and post
 * it to the source. 'polled' indicates we were woken by POLLIN rather than
 * a timer; 'on_timeout' is forwarded to the watermark logic. On return,
 * *sleep_usec holds how long the caller may sleep (tsched mode only).
 * Returns 1 if any data was posted, 0 if not, negative on unrecoverable
 * error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* Outer loop: re-query availability until there is nothing (more) worth
     * reading or we hit the iteration cap */
    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        /* In tsched mode, skip the read if there is still plenty of room
         * in the buffer and we were not explicitly woken by poll */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but nothing is available: complain (once) about
             * the driver */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Cap the number of outer iterations so we never monopolize the
         * IO thread */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: drain the n_bytes we know are available, one mmap
         * window at a time */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed region in a fixed memblock (no copy) and
             * post it downstream */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = true;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Tell the caller how long it may sleep before the watermark would be
     * reached again */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
640
/* Read captured audio from the device with plain snd_pcm_readi() (non-mmap
 * fallback path) and post it to the source. Semantics of the parameters
 * and return value match mmap_read(). */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    int work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* Outer loop: re-query availability until nothing (more) is worth
     * reading or we hit the iteration cap */
    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        /* In tsched mode, skip the read if there is still plenty of room
         * and we were not explicitly woken by poll */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but nothing is available: complain (once) about
             * the driver */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Cap the number of outer iterations so we never monopolize the
         * IO thread */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

        /* Inner loop: copy the n_bytes we know are available, one memblock
         * at a time */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = false;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = true;

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Tell the caller how long it may sleep before the watermark would be
     * reached again */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
770
/* Feed a fresh (timestamp, stream position) sample into the time smoother
 * used for latency interpolation, rate-limited by smoother_interval which
 * grows exponentially up to SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Position in the stream = bytes read so far plus what is still
     * sitting in the hardware buffer (capture delay) */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
812
813 static pa_usec_t source_get_latency(struct userdata *u) {
814 int64_t delay;
815 pa_usec_t now1, now2;
816
817 pa_assert(u);
818
819 now1 = pa_rtclock_now();
820 now2 = pa_smoother_get(u->smoother, now1);
821
822 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
823
824 return delay >= 0 ? (pa_usec_t) delay : 0;
825 }
826
827 static int build_pollfd(struct userdata *u) {
828 pa_assert(u);
829 pa_assert(u->pcm_handle);
830
831 if (u->alsa_rtpoll_item)
832 pa_rtpoll_item_free(u->alsa_rtpoll_item);
833
834 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
835 return -1;
836
837 return 0;
838 }
839
840 /* Called from IO context */
841 static int suspend(struct userdata *u) {
842 pa_assert(u);
843 pa_assert(u->pcm_handle);
844
845 pa_smoother_pause(u->smoother, pa_rtclock_now());
846
847 /* Let's suspend */
848 snd_pcm_close(u->pcm_handle);
849 u->pcm_handle = NULL;
850
851 if (u->alsa_rtpoll_item) {
852 pa_rtpoll_item_free(u->alsa_rtpoll_item);
853 u->alsa_rtpoll_item = NULL;
854 }
855
856 pa_log_info("Device suspended...");
857
858 return 0;
859 }
860
/* Called from IO context */
/* Recompute hwbuf_unused from the requested latency (tsched mode) and push
 * the resulting avail_min software parameter to ALSA. Returns 0 on success
 * or a negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is left unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode, raise avail_min so ALSA only wakes us up when
         * the planned sleep interval worth of data has accumulated */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
913
/* Called from IO Context on unsuspend or from main thread when creating source */
/* Reset the tsched watermark (given in bytes relative to sample spec 'ss')
 * and all derived step/threshold values, then (re)establish the source's
 * latency range. 'in_thread' selects the IO-context-safe variant of the
 * latency-range call. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    /* Convert the watermark from 'ss' units into the source's sample spec */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}
947
/* Called from IO context */
/* Resume a suspended source: reopen the PCM, restore the previous hardware
 * and software parameters (failing if the device no longer supports them),
 * rebuild the poll item and reset the latency smoother and watermark.
 * Returns 0 on success, -PA_ERR_IO on failure (PCM handle stays NULL). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    bool b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before suspending */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Restart latency interpolation from scratch */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = true;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1029
/* Called from IO context */
/* Message handler for the source object: answers latency queries from our
 * smoother and performs suspend/resume transitions on state changes; all
 * other messages fall through to the generic pa_source handler. */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended (no PCM handle) the latency is reported as 0 */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: just set up polling */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
1090
1091 /* Called from main context */
1092 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1093 pa_source_state_t old_state;
1094 struct userdata *u;
1095
1096 pa_source_assert_ref(s);
1097 pa_assert_se(u = s->userdata);
1098
1099 old_state = pa_source_get_state(u->source);
1100
1101 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1102 reserve_done(u);
1103 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1104 if (reserve_init(u, u->device_name) < 0)
1105 return -PA_ERR_BUSY;
1106
1107 return 0;
1108 }
1109
1110 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1111 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1112
1113 pa_assert(u);
1114 pa_assert(u->mixer_handle);
1115
1116 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1117 return 0;
1118
1119 if (!PA_SOURCE_IS_LINKED(u->source->state))
1120 return 0;
1121
1122 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1123 pa_source_set_mixer_dirty(u->source, true);
1124 return 0;
1125 }
1126
1127 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1128 pa_source_get_volume(u->source, true);
1129 pa_source_get_mute(u->source, true);
1130 }
1131
1132 return 0;
1133 }
1134
1135 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1136 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1137
1138 pa_assert(u);
1139 pa_assert(u->mixer_handle);
1140
1141 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1142 return 0;
1143
1144 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1145 pa_source_set_mixer_dirty(u->source, true);
1146 return 0;
1147 }
1148
1149 if (mask & SND_CTL_EVENT_MASK_VALUE)
1150 pa_source_update_volume_and_mute(u->source);
1151
1152 return 0;
1153 }
1154
1155 static void source_get_volume_cb(pa_source *s) {
1156 struct userdata *u = s->userdata;
1157 pa_cvolume r;
1158 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1159
1160 pa_assert(u);
1161 pa_assert(u->mixer_path);
1162 pa_assert(u->mixer_handle);
1163
1164 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1165 return;
1166
1167 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1168 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1169
1170 pa_log_debug("Read hardware volume: %s",
1171 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
1172
1173 if (pa_cvolume_equal(&u->hardware_volume, &r))
1174 return;
1175
1176 s->real_volume = u->hardware_volume = r;
1177
1178 /* Hmm, so the hardware volume changed, let's reset our software volume */
1179 if (u->mixer_path->has_dB)
1180 pa_source_set_soft_volume(s, NULL);
1181 }
1182
/* Write s->real_volume to the hardware mixer and record what the hardware
 * actually accepted in u->hardware_volume. If the mixer has a dB scale the
 * residual difference is compensated in software via s->soft_volume;
 * otherwise s->real_volume is clamped to what the hardware provides. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    /* With deferred volume the write is postponed to the IO side, so here
     * we only "set"; without it we write and read back immediately */
    bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1239
/* Deferred-volume write path (note the PA_SOURCE_DEFERRED_VOLUME assert):
 * push thread_info.current_hw_volume to the mixer, and log if the hardware
 * did not accept the requested value within VOLUME_ACCURACY. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Per-channel ratio of written vs. requested volume; close to
         * PA_VOLUME_NORM means the hardware matched the request */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}
1279
1280 static void source_get_mute_cb(pa_source *s) {
1281 struct userdata *u = s->userdata;
1282 bool b;
1283
1284 pa_assert(u);
1285 pa_assert(u->mixer_path);
1286 pa_assert(u->mixer_handle);
1287
1288 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1289 return;
1290
1291 s->muted = b;
1292 }
1293
/* Push the current s->muted state to the hardware mixer. The return value
 * of pa_alsa_path_set_mute() is intentionally ignored; there is no way to
 * report the failure to the caller of this callback. */
static void source_set_mute_cb(pa_source *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}
1303
1304 static void mixer_volume_init(struct userdata *u) {
1305 pa_assert(u);
1306
1307 if (!u->mixer_path->has_volume) {
1308 pa_source_set_write_volume_callback(u->source, NULL);
1309 pa_source_set_get_volume_callback(u->source, NULL);
1310 pa_source_set_set_volume_callback(u->source, NULL);
1311
1312 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1313 } else {
1314 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1315 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1316
1317 if (u->mixer_path->has_dB && u->deferred_volume) {
1318 pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1319 pa_log_info("Successfully enabled deferred volume.");
1320 } else
1321 pa_source_set_write_volume_callback(u->source, NULL);
1322
1323 if (u->mixer_path->has_dB) {
1324 pa_source_enable_decibel_volume(u->source, true);
1325 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1326
1327 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1328 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1329
1330 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1331 } else {
1332 pa_source_enable_decibel_volume(u->source, false);
1333 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1334
1335 u->source->base_volume = PA_VOLUME_NORM;
1336 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1337 }
1338
1339 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1340 }
1341
1342 if (!u->mixer_path->has_mute) {
1343 pa_source_set_get_mute_callback(u->source, NULL);
1344 pa_source_set_set_mute_callback(u->source, NULL);
1345 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1346 } else {
1347 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1348 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1349 pa_log_info("Using hardware mute control.");
1350 }
1351 }
1352
/* Port-switch callback for UCM-managed devices: delegates entirely to the
 * UCM helper. NOTE(review): the trailing 'false' presumably distinguishes
 * source from sink handling inside pa_alsa_ucm_set_port — confirm against
 * its declaration in alsa-ucm.h. */
static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    return pa_alsa_ucm_set_port(u->ucm_context, p, false);
}
1362
1363 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1364 struct userdata *u = s->userdata;
1365 pa_alsa_port_data *data;
1366
1367 pa_assert(u);
1368 pa_assert(p);
1369 pa_assert(u->mixer_handle);
1370
1371 data = PA_DEVICE_PORT_DATA(p);
1372
1373 pa_assert_se(u->mixer_path = data->path);
1374 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1375
1376 mixer_volume_init(u);
1377
1378 if (s->set_mute)
1379 s->set_mute(s);
1380 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1381 if (s->write_volume)
1382 s->write_volume(s);
1383 } else {
1384 if (s->set_volume)
1385 s->set_volume(s);
1386 }
1387
1388 return 0;
1389 }
1390
1391 static void source_update_requested_latency_cb(pa_source *s) {
1392 struct userdata *u = s->userdata;
1393 pa_assert(u);
1394 pa_assert(u->use_tsched); /* only when timer scheduling is used
1395 * we can dynamically adjust the
1396 * latency */
1397
1398 if (!u->pcm_handle)
1399 return;
1400
1401 update_sw_params(u);
1402 }
1403
1404 static int source_update_rate_cb(pa_source *s, uint32_t rate) {
1405 struct userdata *u = s->userdata;
1406 int i;
1407 bool supported = false;
1408
1409 pa_assert(u);
1410
1411 for (i = 0; u->rates[i]; i++) {
1412 if (u->rates[i] == rate) {
1413 supported = true;
1414 break;
1415 }
1416 }
1417
1418 if (!supported) {
1419 pa_log_info("Source does not support sample rate of %d Hz", rate);
1420 return -1;
1421 }
1422
1423 if (!PA_SOURCE_IS_OPENED(s->state)) {
1424 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1425 u->source->sample_spec.rate = rate;
1426 return 0;
1427 }
1428
1429 return -1;
1430 }
1431
/* Entry point of the source's IO thread (started from pa_alsa_source_new()
 * via pa_thread_new()): reads audio from the PCM, schedules wakeups, and
 * reacts to poll events until it receives a shutdown request. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    /* Carries ALSA poll results from the bottom of one loop iteration
     * into the read calls at the top of the next */
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        /* real_sleep is only written and read when rtpoll_sleep > 0 */
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration after (re)start: kick off the PCM and
             * resume the clock smoother */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), true);

                u->first = false;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* A pending deferred-volume change may need an earlier wakeup
         * than the capture schedule does */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            real_sleep = pa_rtclock_now();
        }
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, true)) < 0)
            goto fail;

        /* Compare how long we actually slept against how long we asked
         * for, to detect scheduling delays */
        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                    (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* pa_rtpoll_run() returning 0 means we were asked to quit */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything beyond POLLIN (e.g. POLLERR after an overrun)
             * requires recovery and a fresh snd_pcm_start() */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = true;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1576
1577 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1578 const char *n;
1579 char *t;
1580
1581 pa_assert(data);
1582 pa_assert(ma);
1583 pa_assert(device_name);
1584
1585 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1586 pa_source_new_data_set_name(data, n);
1587 data->namereg_fail = true;
1588 return;
1589 }
1590
1591 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1592 data->namereg_fail = true;
1593 else {
1594 n = device_id ? device_id : device_name;
1595 data->namereg_fail = false;
1596 }
1597
1598 if (mapping)
1599 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1600 else
1601 t = pa_sprintf_malloc("alsa_input.%s", n);
1602
1603 pa_source_new_data_set_name(data, t);
1604 pa_xfree(t);
1605 }
1606
/* Locate the mixer for u->pcm_handle and pick either a synthesized path
 * (explicit control= element) or the mapping's input path set. On any
 * failure u->mixer_path is freed and u->mixer_handle closed/NULLed, so the
 * caller falls back to software volume control. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
    snd_hctl_t *hctl;

    /* Neither an explicit element nor a mapping: nothing to look up */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit control= element overrides the mapping's path set */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->input_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1645
/* Activate the chosen mixer path, install the volume/mute callbacks, and
 * register for mixer-change notifications. Returns 0 on success (including
 * the no-mixer case) and -1 if fd monitoring could not be set up.
 * NOTE(review): the ignore_dB parameter is unused here — dB handling
 * already happened while probing in find_mixer(); confirm before removing. */
static int setup_mixer(struct userdata *u, bool ignore_dB) {
    bool need_mixer_callback = false;

    pa_assert(u);

    /* find_mixer() found nothing usable: software volume only */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = true;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            /* Deferred volume: the mixer fds are polled from the rtpoll */
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            /* Otherwise the mixer fds are watched from the main loop */
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1721
/* Create a new ALSA capture source.
 * Reads configuration from the module arguments (sample spec, buffering,
 * mmap/tsched flags, ...), opens the PCM device (by mapping, device id, or
 * device string), sets up mixer/port handling and starts the IO thread.
 * Returns the new pa_source, or NULL on failure (all partially acquired
 * resources are released via userdata_free() on the fail path). */
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    char *thread_name = NULL;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, fixed_latency_range = false;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;
    void *state = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;

    /* Pick sample spec overrides from the mapping, if any */
    if (mapping) {
        if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
            ss.format = mapping->sample_spec.format;
        if (mapping->sample_spec.rate != 0)
            ss.rate = mapping->sample_spec.rate;
        if (mapping->sample_spec.channels != 0) {
            ss.channels = mapping->sample_spec.channels;
            if (pa_channel_map_valid(&mapping->channel_map))
                pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
        }
    }

    /* Override with modargs if provided */
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    /* Derive default buffering metrics from core defaults, then allow
     * the modargs to override each one */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = true;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            true,
            true,
            5,
            pa_rtclock_now(),
            true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    /* use ucm */
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d are in-out flags: the open helpers clear them if mmap or
     * tsched turned out to be unavailable on this device */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, false)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = false;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = false;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");
        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on overrun");
    }

    u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    if (!u->ucm_context)
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);
    pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }

    pa_alsa_init_description(data.proplist, card);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context)
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card);
    else if (u->mixer_path_set)
        pa_alsa_add_ports(&data, u->mixer_path_set, card);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->source->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->source->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    /* Hook up our implementation of the source's virtual methods */
    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    if (u->ucm_context)
        u->source->set_port = source_set_port_ucm_cb;
    else
        u->source->set_port = source_set_port_cb;
    if (u->source->alternate_sample_rate)
        u->source->update_rate = source_update_rate_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, false);
    }
    else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (u->ucm_context) {
        if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
            goto fail;
    } else if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
    if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    pa_xfree(thread_name);
    thread_name = NULL;

    /* Get initial mixer settings */
    /* NOTE(review): data.volume_is_set/muted_is_set are read after
     * pa_source_new_data_done(&data) above; the stack struct's bitfields
     * are presumably still valid after done() — confirm that done() only
     * frees the owned members. */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
        u->source->write_volume(u->source);

    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:
    pa_xfree(thread_name);

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
2116
/* Tear down and release everything owned by the per-source userdata.
 * The order below matters: the source is unlinked from the core first
 * (so no new work is routed to it), the I/O thread is shut down next,
 * and only then are the remaining resources freed. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the source from the core before stopping the thread. */
    if (u->source)
        pa_source_unlink(u->source);

    /* Ask the I/O thread to exit (synchronous message) and join it. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    /* Drop our source reference only after the thread and its message
     * queue are gone. */
    if (u->source)
        pa_source_unref(u->source);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Discard any pending captured samples, then close the PCM device. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Only free the mixer path when it is not part of a path set —
     * presumably the set owns it and frees it elsewhere; freeing it
     * here too would double-free (NOTE(review): confirm ownership). */
    if (u->mixer_path && !u->mixer_path_set)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->rates)
        pa_xfree(u->rates);

    reserve_done(u);
    monitor_done(u);

    /* Free owned strings (pa_xfree tolerates NULL) and the struct itself. */
    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}
2170
2171 void pa_alsa_source_free(pa_source *s) {
2172 struct userdata *u;
2173
2174 pa_source_assert_ref(s);
2175 pa_assert_se(u = s->userdata);
2176
2177 userdata_free(u);
2178 }