/* pulseaudio: src/modules/alsa/alsa-source.c
 * (as of commit "alsa-mixer: Add surround 2.1 profile") */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include <modules/reserve-wrap.h>
54
55 #include "alsa-util.h"
56 #include "alsa-source.h"
57
58 /* #define DEBUG_TIMING */
59
60 #define DEFAULT_DEVICE "default"
61
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
74
75 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
76 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
82
/* Per-instance state of the ALSA capture backend, shared between the main
 * thread and the source's IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO-thread plumbing */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;      /* NULL while the device is suspended */

    /* Mixer / hardware volume handling */
    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume; /* last volume read from/written to the hardware */

    unsigned int *rates;        /* presumably the list of supported sample rates — confirm against alsa-util */

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    pa_usec_t watermark_dec_not_before; /* earliest time the watermark may be lowered; 0 = no decrease scheduled */
    pa_usec_t min_latency_ref;          /* min latency recorded at creation, reused from IO context on unsuspend */
    pa_usec_t tsched_watermark_usec;    /* cached usec equivalent of tsched_watermark */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first;         /* true until the first read after open/resume */

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Latency smoother state */
    pa_smoother *smoother;
    uint64_t read_count;            /* total bytes read from the device since (un)suspend */
    pa_usec_t smoother_interval;    /* current update interval, grows up to SMOOTHER_MAX_INTERVAL */
    pa_usec_t last_smoother_update;

    /* Device reservation (session hand-over) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};
145
146 static void userdata_free(struct userdata *u);
147
148 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
149 pa_assert(r);
150 pa_assert(u);
151
152 pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);
153
154 if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
155 return PA_HOOK_CANCEL;
156
157 return PA_HOOK_OK;
158 }
159
160 static void reserve_done(struct userdata *u) {
161 pa_assert(u);
162
163 if (u->reserve_slot) {
164 pa_hook_slot_free(u->reserve_slot);
165 u->reserve_slot = NULL;
166 }
167
168 if (u->reserve) {
169 pa_reserve_wrapper_unref(u->reserve);
170 u->reserve = NULL;
171 }
172 }
173
174 static void reserve_update(struct userdata *u) {
175 const char *description;
176 pa_assert(u);
177
178 if (!u->source || !u->reserve)
179 return;
180
181 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
182 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
183 }
184
185 static int reserve_init(struct userdata *u, const char *dname) {
186 char *rname;
187
188 pa_assert(u);
189 pa_assert(dname);
190
191 if (u->reserve)
192 return 0;
193
194 if (pa_in_system_mode())
195 return 0;
196
197 if (!(rname = pa_alsa_get_reserve_name(dname)))
198 return 0;
199
200 /* We are resuming, try to lock the device */
201 u->reserve = pa_reserve_wrapper_get(u->core, rname);
202 pa_xfree(rname);
203
204 if (!(u->reserve))
205 return -1;
206
207 reserve_update(u);
208
209 pa_assert(!u->reserve_slot);
210 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
211
212 return 0;
213 }
214
215 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
216 pa_assert(w);
217 pa_assert(u);
218
219 if (PA_PTR_TO_UINT(busy) && !u->reserve) {
220 pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
221 pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
222 } else {
223 pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
224 pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
225 }
226
227 return PA_HOOK_OK;
228 }
229
230 static void monitor_done(struct userdata *u) {
231 pa_assert(u);
232
233 if (u->monitor_slot) {
234 pa_hook_slot_free(u->monitor_slot);
235 u->monitor_slot = NULL;
236 }
237
238 if (u->monitor) {
239 pa_reserve_monitor_wrapper_unref(u->monitor);
240 u->monitor = NULL;
241 }
242 }
243
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
245 char *rname;
246
247 pa_assert(u);
248 pa_assert(dname);
249
250 if (pa_in_system_mode())
251 return 0;
252
253 if (!(rname = pa_alsa_get_reserve_name(dname)))
254 return 0;
255
256 /* We are resuming, try to lock the device */
257 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
258 pa_xfree(rname);
259
260 if (!(u->monitor))
261 return -1;
262
263 pa_assert(!u->monitor_slot);
264 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265
266 return 0;
267 }
268
269 static void fix_min_sleep_wakeup(struct userdata *u) {
270 size_t max_use, max_use_2;
271
272 pa_assert(u);
273 pa_assert(u->use_tsched);
274
275 max_use = u->hwbuf_size - u->hwbuf_unused;
276 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
277
278 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
279 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
280
281 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
282 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
283 }
284
285 static void fix_tsched_watermark(struct userdata *u) {
286 size_t max_use;
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 max_use = u->hwbuf_size - u->hwbuf_unused;
291
292 if (u->tsched_watermark > max_use - u->min_sleep)
293 u->tsched_watermark = max_use - u->min_sleep;
294
295 if (u->tsched_watermark < u->min_wakeup)
296 u->tsched_watermark = u->min_wakeup;
297
298 u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
299 }
300
/* Called from IO context after an overrun (or a near-overrun): raise the
 * wakeup watermark; if it is already at its ceiling, raise the minimal
 * latency instead (unless a fixed latency range was configured). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we get here neither the watermark nor the latency can be raised
     * any further; dropouts are now unavoidable. */
}
338
339 static void decrease_watermark(struct userdata *u) {
340 size_t old_watermark;
341 pa_usec_t now;
342
343 pa_assert(u);
344 pa_assert(u->use_tsched);
345
346 now = pa_rtclock_now();
347
348 if (u->watermark_dec_not_before <= 0)
349 goto restart;
350
351 if (u->watermark_dec_not_before > now)
352 return;
353
354 old_watermark = u->tsched_watermark;
355
356 if (u->tsched_watermark < u->watermark_dec_step)
357 u->tsched_watermark = u->tsched_watermark / 2;
358 else
359 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
360
361 fix_tsched_watermark(u);
362
363 if (old_watermark != u->tsched_watermark)
364 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
365 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
366
367 /* We don't change the latency range*/
368
369 restart:
370 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
371 }
372
373 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
374 pa_usec_t wm, usec;
375
376 pa_assert(sleep_usec);
377 pa_assert(process_usec);
378
379 pa_assert(u);
380 pa_assert(u->use_tsched);
381
382 usec = pa_source_get_requested_latency_within_thread(u->source);
383
384 if (usec == (pa_usec_t) -1)
385 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
386
387 wm = u->tsched_watermark_usec;
388
389 if (wm > usec)
390 wm = usec/2;
391
392 *sleep_usec = usec - wm;
393 *process_usec = wm;
394
395 #ifdef DEBUG_TIMING
396 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
397 (unsigned long) (usec / PA_USEC_PER_MSEC),
398 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
399 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
400 #endif
401 }
402
403 static int try_recover(struct userdata *u, const char *call, int err) {
404 pa_assert(u);
405 pa_assert(call);
406 pa_assert(err < 0);
407
408 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
409
410 pa_assert(err != -EAGAIN);
411
412 if (err == -EPIPE)
413 pa_log_debug("%s: Buffer overrun!", call);
414
415 if (err == -ESTRPIPE)
416 pa_log_debug("%s: System suspended!", call);
417
418 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
419 pa_log("%s: %s", call, pa_alsa_strerror(err));
420 return -1;
421 }
422
423 u->first = true;
424 return 0;
425 }
426
/* Given that n_bytes are ready to be read, compute how much room (in bytes)
 * is left in the usable part of the hardware buffer, detect overruns, and
 * adapt the timer-scheduling watermark. on_timeout tells whether this wakeup
 * was caused by the timer (only then may the watermark be decreased).
 * Returns the remaining space in bytes. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    bool overrun = false;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = true;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        bool reset_not_before = true;

        /* Too close to an overrun (or past one): wake up earlier next time. */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = false;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Neither clearly good nor clearly bad: restart the decrease
         * grace period from scratch. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
479
/* Read all currently available audio from the device using mmap transfer
 * and post it to the source. polled indicates we were woken by POLLIN;
 * on_timeout that the timer fired. On return *sleep_usec holds how long the
 * caller may sleep (tsched only). Returns 1 if any data was posted, 0 if
 * not, negative on unrecoverable error. Called from IO context. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;  /* outer-loop iteration counter, bounded at 10 */

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* Outer loop: re-query avail until nothing (or too little) is left. */
    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;  /* true until the first successful mmap read of this batch */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;  /* only the first avail-check of a wakeup counts as timeout-driven */

        /* With timer scheduling, skip the read if we woke early and there
         * is still comfortable headroom in the buffer. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing to read indicates a driver bug; warn once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Bail out after 10 iterations so we never monopolize the thread. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: consume the n_bytes reported by avail in mmap chunks. */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* EAGAIN after we already read something just means "done". */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'd region in a read-only fixed memblock — no copy. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = true;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the buffer fills up to the watermark again. */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
640
641 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
642 int work_done = false;
643 pa_usec_t max_sleep_usec = 0, process_usec = 0;
644 size_t left_to_record;
645 unsigned j = 0;
646
647 pa_assert(u);
648 pa_source_assert_ref(u->source);
649
650 if (u->use_tsched)
651 hw_sleep_time(u, &max_sleep_usec, &process_usec);
652
653 for (;;) {
654 snd_pcm_sframes_t n;
655 size_t n_bytes;
656 int r;
657 bool after_avail = true;
658
659 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
660
661 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
662 continue;
663
664 return r;
665 }
666
667 n_bytes = (size_t) n * u->frame_size;
668 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
669 on_timeout = false;
670
671 if (u->use_tsched)
672 if (!polled &&
673 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
674 break;
675
676 if (PA_UNLIKELY(n_bytes <= 0)) {
677
678 if (polled)
679 PA_ONCE_BEGIN {
680 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
681 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
682 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
683 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
684 pa_strnull(dn));
685 pa_xfree(dn);
686 } PA_ONCE_END;
687
688 break;
689 }
690
691 if (++j > 10) {
692 #ifdef DEBUG_TIMING
693 pa_log_debug("Not filling up, because already too many iterations.");
694 #endif
695
696 break;
697 }
698
699 polled = false;
700
701 for (;;) {
702 void *p;
703 snd_pcm_sframes_t frames;
704 pa_memchunk chunk;
705
706 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
707
708 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
709
710 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
711 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
712
713 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
714
715 p = pa_memblock_acquire(chunk.memblock);
716 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
717 pa_memblock_release(chunk.memblock);
718
719 if (PA_UNLIKELY(frames < 0)) {
720 pa_memblock_unref(chunk.memblock);
721
722 if (!after_avail && (int) frames == -EAGAIN)
723 break;
724
725 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
726 continue;
727
728 return r;
729 }
730
731 if (!after_avail && frames == 0) {
732 pa_memblock_unref(chunk.memblock);
733 break;
734 }
735
736 pa_assert(frames > 0);
737 after_avail = false;
738
739 chunk.index = 0;
740 chunk.length = (size_t) frames * u->frame_size;
741
742 pa_source_post(u->source, &chunk);
743 pa_memblock_unref(chunk.memblock);
744
745 work_done = true;
746
747 u->read_count += frames * u->frame_size;
748
749 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
750
751 if ((size_t) frames * u->frame_size >= n_bytes)
752 break;
753
754 n_bytes -= (size_t) frames * u->frame_size;
755 }
756 }
757
758 if (u->use_tsched) {
759 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
760 process_usec = u->tsched_watermark_usec;
761
762 if (*sleep_usec > process_usec)
763 *sleep_usec -= process_usec;
764 else
765 *sleep_usec = 0;
766 }
767
768 return work_done ? 1 : 0;
769 }
770
/* Feed a fresh (monotonic time, stream position) sample into the latency
 * smoother, rate-limited by the exponentially growing smoother_interval.
 * Called from IO context. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream position = bytes already read + what still sits in the buffer. */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
812
813 static pa_usec_t source_get_latency(struct userdata *u) {
814 int64_t delay;
815 pa_usec_t now1, now2;
816
817 pa_assert(u);
818
819 now1 = pa_rtclock_now();
820 now2 = pa_smoother_get(u->smoother, now1);
821
822 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
823
824 return delay >= 0 ? (pa_usec_t) delay : 0;
825 }
826
827 static int build_pollfd(struct userdata *u) {
828 pa_assert(u);
829 pa_assert(u->pcm_handle);
830
831 if (u->alsa_rtpoll_item)
832 pa_rtpoll_item_free(u->alsa_rtpoll_item);
833
834 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
835 return -1;
836
837 return 0;
838 }
839
840 /* Called from IO context */
841 static int suspend(struct userdata *u) {
842 pa_assert(u);
843 pa_assert(u->pcm_handle);
844
845 pa_smoother_pause(u->smoother, pa_rtclock_now());
846
847 /* Let's suspend */
848 snd_pcm_close(u->pcm_handle);
849 u->pcm_handle = NULL;
850
851 if (u->alsa_rtpoll_item) {
852 pa_rtpoll_item_free(u->alsa_rtpoll_item);
853 u->alsa_rtpoll_item = NULL;
854 }
855
856 pa_log_info("Device suspended...");
857
858 return 0;
859 }
860
/* Called from IO context: recompute hwbuf_unused from the requested latency
 * and push the matching software parameters (avail_min, in frames) to ALSA.
 * Returns 0 on success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is marked unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* Re-clamp sleep/wakeup minimums and the watermark to the new
         * usable buffer size. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer scheduling we only want to be woken once the sleep
         * time has elapsed, so raise avail_min accordingly. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
913
/* Called from IO Context on unsuspend or from main thread when creating source.
 * Re-derive all watermark-related byte values from tsched_watermark (given in
 * bytes of sample spec ss) and reset the source's latency range. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    /* Convert via usec so the value survives a change of sample spec. */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_range_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}
947
/* Called from IO context: reopen the PCM device after a suspend and verify
 * that the exact same hardware configuration (access mode, sample spec,
 * fragment layout) could be restored; any mismatch fails the resume.
 * Returns 0 on success, -PA_ERR_IO on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    bool b, d;  /* mmap / tsched flags, compared against u after set_hw_params */
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Reset stream accounting and the latency smoother. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = true;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1029
/* Called from IO context: message handler for the source. Answers latency
 * queries directly and performs suspend/resume on state changes; everything
 * else is delegated to the generic pa_source_process_msg(). */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report 0 latency. */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    /* Suspending is only valid from an opened state. */
                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up polling. */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Coming back from suspend: reopen the device. */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    /* Nothing to do for these targets. */
                    ;
            }

            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
1090
1091 /* Called from main context */
1092 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1093 pa_source_state_t old_state;
1094 struct userdata *u;
1095
1096 pa_source_assert_ref(s);
1097 pa_assert_se(u = s->userdata);
1098
1099 old_state = pa_source_get_state(u->source);
1100
1101 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1102 reserve_done(u);
1103 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1104 if (reserve_init(u, u->device_name) < 0)
1105 return -PA_ERR_BUSY;
1106
1107 return 0;
1108 }
1109
1110 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1111 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1112
1113 pa_assert(u);
1114 pa_assert(u->mixer_handle);
1115
1116 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1117 return 0;
1118
1119 if (!PA_SOURCE_IS_LINKED(u->source->state))
1120 return 0;
1121
1122 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1123 pa_source_set_mixer_dirty(u->source, true);
1124 return 0;
1125 }
1126
1127 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1128 pa_source_get_volume(u->source, true);
1129 pa_source_get_mute(u->source, true);
1130 }
1131
1132 return 0;
1133 }
1134
1135 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1136 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1137
1138 pa_assert(u);
1139 pa_assert(u->mixer_handle);
1140
1141 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1142 return 0;
1143
1144 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1145 pa_source_set_mixer_dirty(u->source, true);
1146 return 0;
1147 }
1148
1149 if (mask & SND_CTL_EVENT_MASK_VALUE)
1150 pa_source_update_volume_and_mute(u->source);
1151
1152 return 0;
1153 }
1154
1155 static void source_get_volume_cb(pa_source *s) {
1156 struct userdata *u = s->userdata;
1157 pa_cvolume r;
1158 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1159
1160 pa_assert(u);
1161 pa_assert(u->mixer_path);
1162 pa_assert(u->mixer_handle);
1163
1164 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1165 return;
1166
1167 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1168 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1169
1170 pa_log_debug("Read hardware volume: %s",
1171 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
1172
1173 if (pa_cvolume_equal(&u->hardware_volume, &r))
1174 return;
1175
1176 s->real_volume = u->hardware_volume = r;
1177
1178 /* Hmm, so the hardware volume changed, let's reset our software volume */
1179 if (u->mixer_path->has_dB)
1180 pa_source_set_soft_volume(s, NULL);
1181 }
1182
/* Called from main context: write s->real_volume to the hardware mixer and,
 * when the mixer has a dB scale, compute the residual software volume needed
 * to hit the exact request. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* With deferred volume the write happens later from the IO thread, so
     * only "apply" here when not deferred. */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* r now holds what the hardware actually accepted (quantized). */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1239
/* Called from IO context (deferred volume only): actually write the
 * previously requested hardware volume to the mixer, and warn if the
 * hardware quantized it away from the request. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what we got (hw_vol) with what we asked for; a ratio close
         * to PA_VOLUME_NORM on every channel means the write was faithful. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}
1279
1280 static int source_get_mute_cb(pa_source *s, bool *mute) {
1281 struct userdata *u = s->userdata;
1282
1283 pa_assert(u);
1284 pa_assert(u->mixer_path);
1285 pa_assert(u->mixer_handle);
1286
1287 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
1288 return -1;
1289
1290 return 0;
1291 }
1292
1293 static void source_set_mute_cb(pa_source *s) {
1294 struct userdata *u = s->userdata;
1295
1296 pa_assert(u);
1297 pa_assert(u->mixer_path);
1298 pa_assert(u->mixer_handle);
1299
1300 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1301 }
1302
/* Wire up (or clear) the source's volume and mute callbacks according to
 * what the currently selected mixer path supports, and derive the base
 * volume / step count from the path's dB or raw range. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        /* No hardware volume on this path: clear all callbacks so the core
         * applies volume in software. */
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred volume (write from the IO thread) requires a dB scale. */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, true);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Base volume is chosen so that the path's max_dB maps to
             * PA_VOLUME_NORM (0 dB). */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            /* Only a raw integer range: expose it as discrete steps. */
            pa_source_enable_decibel_volume(u->source, false);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1351
1352 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1353 struct userdata *u = s->userdata;
1354
1355 pa_assert(u);
1356 pa_assert(p);
1357 pa_assert(u->ucm_context);
1358
1359 return pa_alsa_ucm_set_port(u->ucm_context, p, false);
1360 }
1361
/* Called from main context: activate the mixer path attached to the new
 * port and re-apply the current volume/mute settings through it. */
static int source_set_port_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    /* Every port is expected to carry a non-NULL mixer path; the assignment
     * is intentionally inside the assert (pa_assert_se keeps side effects
     * in NDEBUG builds). */
    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);

    /* The new path may have different volume/mute capabilities, so the
     * source callbacks and base volume need to be set up again. */
    mixer_volume_init(u);

    if (s->set_mute)
        s->set_mute(s);
    if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
        if (s->write_volume)
            s->write_volume(s);
    } else {
        if (s->set_volume)
            s->set_volume(s);
    }

    return 0;
}
1389
1390 static void source_update_requested_latency_cb(pa_source *s) {
1391 struct userdata *u = s->userdata;
1392 pa_assert(u);
1393 pa_assert(u->use_tsched); /* only when timer scheduling is used
1394 * we can dynamically adjust the
1395 * latency */
1396
1397 if (!u->pcm_handle)
1398 return;
1399
1400 update_sw_params(u);
1401 }
1402
1403 static int source_update_rate_cb(pa_source *s, uint32_t rate) {
1404 struct userdata *u = s->userdata;
1405 int i;
1406 bool supported = false;
1407
1408 pa_assert(u);
1409
1410 for (i = 0; u->rates[i]; i++) {
1411 if (u->rates[i] == rate) {
1412 supported = true;
1413 break;
1414 }
1415 }
1416
1417 if (!supported) {
1418 pa_log_info("Source does not support sample rate of %d Hz", rate);
1419 return -1;
1420 }
1421
1422 if (!PA_SOURCE_IS_OPENED(s->state)) {
1423 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1424 u->source->sample_spec.rate = rate;
1425 return 0;
1426 }
1427
1428 return -1;
1429 }
1430
/* The capture IO thread: reads from the PCM device, schedules wakeups, and
 * services the thread message queue until PA_MESSAGE_SHUTDOWN arrives. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration after (re)start: kick off capture and reset
             * the clock smoother. */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), true);

                u->first = false;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* With deferred volume, pending hardware volume writes may require
         * an earlier wakeup than the capture timer. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            /* real_sleep is only written and read when rtpoll_sleep > 0;
             * here it records when we went to sleep. */
            real_sleep = pa_rtclock_now();
        }
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, true)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            /* Oversleeping past the watermark usually indicates system-wide
             * scheduling latency; tell the user. */
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                    (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* pa_rtpoll_run() returns 0 on regular shutdown request. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLIN signals an error condition: try to
             * recover and restart capture from scratch. */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = true;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1575
1576 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1577 const char *n;
1578 char *t;
1579
1580 pa_assert(data);
1581 pa_assert(ma);
1582 pa_assert(device_name);
1583
1584 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1585 pa_source_new_data_set_name(data, n);
1586 data->namereg_fail = true;
1587 return;
1588 }
1589
1590 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1591 data->namereg_fail = true;
1592 else {
1593 n = device_id ? device_id : device_name;
1594 data->namereg_fail = false;
1595 }
1596
1597 if (mapping)
1598 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1599 else
1600 t = pa_sprintf_malloc("alsa_input.%s", n);
1601
1602 pa_source_new_data_set_name(data, t);
1603 pa_xfree(t);
1604 }
1605
/* Locate a mixer device for the opened PCM and set up the volume path(s).
 * On any failure everything is torn down again (mixer_handle/mixer_path
 * reset to NULL) so the source falls back to software volume. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
    snd_hctl_t *hctl;

    /* Neither a mapping nor an explicit element: nothing to look for. */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit mixer element was requested: synthesize a single
         * path around it and probe whether it actually works. */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->input_path_set))
        /* Otherwise adopt the mapping's already-probed input path set. */
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1644
/* Activate the appropriate mixer path and register change-notification
 * callbacks. Returns 0 on success (including the nothing-to-do cases),
 * -1 when fd monitoring could not be set up.
 * NOTE(review): ignore_dB is unused in this body — probing already happened
 * in find_mixer(). */
static int setup_mixer(struct userdata *u, bool ignore_dB) {
    bool need_mixer_callback = false;

    pa_assert(u);

    /* No mixer found earlier: software volume only, nothing to set up. */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = true;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* With deferred volume the mixer is monitored from the IO-thread
         * rtpoll, otherwise from the main loop's fd list. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1720
/* Create and publish an ALSA capture source.
 *
 * m/ma      - owning module and its arguments
 * driver    - driver name recorded on the source
 * card      - owning card, may be NULL
 * mapping   - ALSA profile mapping to use, may be NULL (auto/plain device)
 *
 * Returns the new pa_source, or NULL on failure (everything allocated so
 * far is cleaned up via userdata_free()). */
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    char *thread_name = NULL;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, fixed_latency_range = false;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;
    void *state = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;

    /* Pick sample spec overrides from the mapping, if any */
    if (mapping) {
        if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
            ss.format = mapping->sample_spec.format;
        if (mapping->sample_spec.rate != 0)
            ss.rate = mapping->sample_spec.rate;
        if (mapping->sample_spec.channels != 0) {
            ss.channels = mapping->sample_spec.channels;
            if (pa_channel_map_valid(&mapping->channel_map))
                pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
        }
    }

    /* Override with modargs if provided */
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    /* Buffer metrics: core defaults, overridable through modargs below. */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size; /* frag_size is unsigned, so this is effectively a == 0 check */
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = true;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            true,
            true,
            5,
            pa_rtclock_now(),
            true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    /* use ucm */
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d are in-out flags: the open helpers clear them if mmap or
     * tsched-compatible configuration turned out to be unavailable. */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            /* NOTE(review): this dereferences u->ucm_context; presumably a
             * mapping carrying the UCM modifier property always has a UCM
             * context — confirm against the profile/UCM code. */
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        /* No explicit mapping: probe one automatically from a fresh
         * profile set. */
        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        /* Plain device= string (or the default device). */
        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, false)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Fall back if the device could not satisfy the requested modes. */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = false;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = false;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");
        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on overrun");
    }

    u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    /* With UCM the mixer is managed through the UCM context instead. */
    if (!u->ucm_context)
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);
    pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        /* Copy all mapping properties onto the source. */
        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }

    pa_alsa_init_description(data.proplist, card);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context)
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card);
    else if (u->mixer_path_set)
        pa_alsa_add_ports(&data, u->mixer_path_set, card);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->source->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->source->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    /* Hook up our callbacks. */
    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    if (u->ucm_context)
        u->source->set_port = source_set_port_ucm_cb;
    else
        u->source->set_port = source_set_port_cb;
    if (u->source->alternate_sample_rate)
        u->source->update_rate = source_update_rate_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, false);
    }
    else
        /* Without timer scheduling the latency is fixed at the hw buffer size. */
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (u->ucm_context) {
        if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
            goto fail;
    } else if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
    if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    pa_xfree(thread_name);
    thread_name = NULL;

    /* Get initial mixer settings.  Note: 'data' is a stack struct, so its
     * volume_is_set/muted_is_set bitfields stay readable after
     * pa_source_new_data_done() above. */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute) {
            bool mute;

            if (u->source->get_mute(u->source, &mute) >= 0)
                pa_source_set_mute(u->source, mute, false);
        }
    }

    if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
        u->source->write_volume(u->source);

    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:
    pa_xfree(thread_name);

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
2119
2120 static void userdata_free(struct userdata *u) {
2121 pa_assert(u);
2122
2123 if (u->source)
2124 pa_source_unlink(u->source);
2125
2126 if (u->thread) {
2127 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2128 pa_thread_free(u->thread);
2129 }
2130
2131 pa_thread_mq_done(&u->thread_mq);
2132
2133 if (u->source)
2134 pa_source_unref(u->source);
2135
2136 if (u->mixer_pd)
2137 pa_alsa_mixer_pdata_free(u->mixer_pd);
2138
2139 if (u->alsa_rtpoll_item)
2140 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2141
2142 if (u->rtpoll)
2143 pa_rtpoll_free(u->rtpoll);
2144
2145 if (u->pcm_handle) {
2146 snd_pcm_drop(u->pcm_handle);
2147 snd_pcm_close(u->pcm_handle);
2148 }
2149
2150 if (u->mixer_fdl)
2151 pa_alsa_fdlist_free(u->mixer_fdl);
2152
2153 if (u->mixer_path && !u->mixer_path_set)
2154 pa_alsa_path_free(u->mixer_path);
2155
2156 if (u->mixer_handle)
2157 snd_mixer_close(u->mixer_handle);
2158
2159 if (u->smoother)
2160 pa_smoother_free(u->smoother);
2161
2162 if (u->rates)
2163 pa_xfree(u->rates);
2164
2165 reserve_done(u);
2166 monitor_done(u);
2167
2168 pa_xfree(u->device_name);
2169 pa_xfree(u->control_device);
2170 pa_xfree(u->paths_dir);
2171 pa_xfree(u);
2172 }
2173
2174 void pa_alsa_source_free(pa_source *s) {
2175 struct userdata *u;
2176
2177 pa_source_assert_ref(s);
2178 pa_assert_se(u = s->userdata);
2179
2180 userdata_free(u);
2181 }