/* code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-source.c
 * alsa: Try to support non-standard rates in alsa-sink/source
 * [pulseaudio] / src / modules / alsa / alsa-source.c */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include <modules/reserve-wrap.h>
54
55 #include "alsa-util.h"
56 #include "alsa-source.h"
57
58 /* #define DEBUG_TIMING */
59
60 #define DEFAULT_DEVICE "default"
61
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
74
75 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
76 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
82
/* Per-source instance state, shared between the main thread and the IO
 * thread. */
struct userdata {
    pa_core *core;                      /* core we are attached to */
    pa_module *module;                  /* owning module */
    pa_source *source;                  /* the source object we implement */

    pa_thread *thread;                  /* IO thread */
    pa_thread_mq thread_mq;             /* message queue between main and IO thread */
    pa_rtpoll *rtpoll;                  /* poll loop run by the IO thread */

    snd_pcm_t *pcm_handle;              /* ALSA capture handle; NULL while suspended */

    char *paths_dir;                    /* directory to load mixer path configs from */
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;         /* last volume read from/written to the hw mixer */

    unsigned int *rates;                /* alternative sample rates -- assumed set up by the module init code outside this view; TODO confirm */

    size_t
        frame_size,                     /* bytes per frame */
        fragment_size,                  /* period size in bytes */
        hwbuf_size,                     /* hardware buffer size in bytes */
        tsched_watermark,               /* current wakeup watermark, in bytes */
        tsched_watermark_ref,           /* watermark configured at creation time */
        hwbuf_unused,                   /* tail of the hw buffer left unused to honour latency requests */
        min_sleep,                      /* minimum sleep time, in bytes */
        min_wakeup,                     /* minimum wakeup margin, in bytes */
        watermark_inc_step,             /* how much to raise the watermark on trouble */
        watermark_dec_step,             /* how much to lower the watermark when idle */
        watermark_inc_threshold,        /* raise watermark when less than this is left to record */
        watermark_dec_threshold;        /* consider lowering when more than this is left */

    pa_usec_t watermark_dec_not_before; /* earliest time the watermark may be lowered again */
    pa_usec_t min_latency_ref;          /* min latency remembered for reuse from the IO thread */

    char *device_name; /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first;                    /* TRUE until the first read after open/resume */

    pa_rtpoll_item *alsa_rtpoll_item;   /* poll item wrapping the ALSA poll fds */

    pa_smoother *smoother;              /* time smoother used for latency interpolation */
    uint64_t read_count;                /* total bytes read from the device */
    pa_usec_t smoother_interval;        /* current interval between smoother updates */
    pa_usec_t last_smoother_update;     /* timestamp of the last smoother update */

    pa_reserve_wrapper *reserve;        /* session-level device reservation */
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};
144
145 static void userdata_free(struct userdata *u);
146
147 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
148 pa_assert(r);
149 pa_assert(u);
150
151 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
152 return PA_HOOK_CANCEL;
153
154 return PA_HOOK_OK;
155 }
156
157 static void reserve_done(struct userdata *u) {
158 pa_assert(u);
159
160 if (u->reserve_slot) {
161 pa_hook_slot_free(u->reserve_slot);
162 u->reserve_slot = NULL;
163 }
164
165 if (u->reserve) {
166 pa_reserve_wrapper_unref(u->reserve);
167 u->reserve = NULL;
168 }
169 }
170
171 static void reserve_update(struct userdata *u) {
172 const char *description;
173 pa_assert(u);
174
175 if (!u->source || !u->reserve)
176 return;
177
178 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
179 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
180 }
181
182 static int reserve_init(struct userdata *u, const char *dname) {
183 char *rname;
184
185 pa_assert(u);
186 pa_assert(dname);
187
188 if (u->reserve)
189 return 0;
190
191 if (pa_in_system_mode())
192 return 0;
193
194 if (!(rname = pa_alsa_get_reserve_name(dname)))
195 return 0;
196
197 /* We are resuming, try to lock the device */
198 u->reserve = pa_reserve_wrapper_get(u->core, rname);
199 pa_xfree(rname);
200
201 if (!(u->reserve))
202 return -1;
203
204 reserve_update(u);
205
206 pa_assert(!u->reserve_slot);
207 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
208
209 return 0;
210 }
211
212 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
213 pa_bool_t b;
214
215 pa_assert(w);
216 pa_assert(u);
217
218 b = PA_PTR_TO_UINT(busy) && !u->reserve;
219
220 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
221 return PA_HOOK_OK;
222 }
223
224 static void monitor_done(struct userdata *u) {
225 pa_assert(u);
226
227 if (u->monitor_slot) {
228 pa_hook_slot_free(u->monitor_slot);
229 u->monitor_slot = NULL;
230 }
231
232 if (u->monitor) {
233 pa_reserve_monitor_wrapper_unref(u->monitor);
234 u->monitor = NULL;
235 }
236 }
237
238 static int reserve_monitor_init(struct userdata *u, const char *dname) {
239 char *rname;
240
241 pa_assert(u);
242 pa_assert(dname);
243
244 if (pa_in_system_mode())
245 return 0;
246
247 if (!(rname = pa_alsa_get_reserve_name(dname)))
248 return 0;
249
250 /* We are resuming, try to lock the device */
251 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
252 pa_xfree(rname);
253
254 if (!(u->monitor))
255 return -1;
256
257 pa_assert(!u->monitor_slot);
258 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
259
260 return 0;
261 }
262
263 static void fix_min_sleep_wakeup(struct userdata *u) {
264 size_t max_use, max_use_2;
265
266 pa_assert(u);
267 pa_assert(u->use_tsched);
268
269 max_use = u->hwbuf_size - u->hwbuf_unused;
270 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
271
272 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
273 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
274
275 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
276 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
277 }
278
279 static void fix_tsched_watermark(struct userdata *u) {
280 size_t max_use;
281 pa_assert(u);
282 pa_assert(u->use_tsched);
283
284 max_use = u->hwbuf_size - u->hwbuf_unused;
285
286 if (u->tsched_watermark > max_use - u->min_sleep)
287 u->tsched_watermark = max_use - u->min_sleep;
288
289 if (u->tsched_watermark < u->min_wakeup)
290 u->tsched_watermark = u->min_wakeup;
291 }
292
/* React to an (imminent) overrun: wake up earlier by raising the
 * watermark, and if that is already maxed out, raise the minimal
 * latency instead (unless the latency range is fixed). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Grow by the configured step, capped at doubling per call. */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* When we reach this point both the watermark and the latency are
     * maxed out and overruns can no longer be avoided. */
}
330
/* Lower the wakeup watermark again after a period of smooth operation.
 * Rate-limited: at most one decrease per
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* First call after a reset: only arm the timer, don't decrease yet. */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Too soon since the last decrease. */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    /* Shrink by the configured step, but never by more than half. */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range*/

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
364
/* Split the current latency budget into the time the IO thread may
 * sleep (*sleep_usec) and the headroom reserved for processing
 * (*process_usec), derived from the wakeup watermark. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t wm, usec;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_source_get_requested_latency_within_thread(u->source);

    /* No specific latency requested: budget the full hw buffer time. */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

    /* The watermark must not exceed the budget; cap it at half. */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
394
395 static int try_recover(struct userdata *u, const char *call, int err) {
396 pa_assert(u);
397 pa_assert(call);
398 pa_assert(err < 0);
399
400 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
401
402 pa_assert(err != -EAGAIN);
403
404 if (err == -EPIPE)
405 pa_log_debug("%s: Buffer overrun!", call);
406
407 if (err == -ESTRPIPE)
408 pa_log_debug("%s: System suspended!", call);
409
410 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
411 pa_log("%s: %s", call, pa_alsa_strerror(err));
412 return -1;
413 }
414
415 u->first = TRUE;
416 return 0;
417 }
418
/* Given the number of bytes currently available to read (n_bytes),
 * return how much room is left in the usable part of the hw buffer
 * before it overruns, and adjust the watermark accordingly.
 * 'on_timeout' is TRUE when this wakeup came from our own timer rather
 * than from poll(). */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        /* Close to an overrun: wake up earlier next time. Plenty of
         * headroom: consider waking up later, but only on real timer
         * wakeups. */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
471
/* Read as much as possible from the device via mmap access and post the
 * captured data to the source without copying. Returns 1 if any data
 * was read, 0 if not, negative on unrecoverable error. With timer-based
 * scheduling, *sleep_usec is set to how long the caller may sleep. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many bytes are available for reading right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* Woke up too early (buffer still mostly empty): sleep again
         * instead of shuffling tiny chunks around. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but nothing is available: driver bug,
             * complain exactly once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Safety valve: don't spin forever if data keeps arriving. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: map the available data in possibly multiple
         * chunks and post each one to the source. */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* EAGAIN after at least one successful iteration just
                 * means we drained what avail announced. */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mapped hw buffer region in a fixed memblock so
             * we can post it without copying. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the watermark would be reached, minus the
         * processing headroom. */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
633
/* Read as much as possible from the device via snd_pcm_readi()
 * (copy-based access) and post the captured data to the source.
 * Returns 1 if any data was read, 0 if not, negative on unrecoverable
 * error. With timer-based scheduling, *sleep_usec is set to how long
 * the caller may sleep. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many bytes are available for reading right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* Woke up too early: sleep again instead of reading tiny chunks. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but nothing is available: driver bug,
             * complain exactly once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve: don't spin forever if data keeps arriving. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            /* Allocate a fresh block and read directly into it. */
            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* EAGAIN after a successful iteration just means we
                 * drained what avail announced. */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the watermark would be reached, minus the
         * processing headroom. */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
763
/* Feed a new (system time, stream time) sample into the time smoother
 * used for latency interpolation. Rate-limited by u->smoother_interval,
 * which grows exponentially up to SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream position = bytes read so far plus what is still sitting in
     * the hardware buffer. */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
805
806 static pa_usec_t source_get_latency(struct userdata *u) {
807 int64_t delay;
808 pa_usec_t now1, now2;
809
810 pa_assert(u);
811
812 now1 = pa_rtclock_now();
813 now2 = pa_smoother_get(u->smoother, now1);
814
815 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
816
817 return delay >= 0 ? (pa_usec_t) delay : 0;
818 }
819
820 static int build_pollfd(struct userdata *u) {
821 pa_assert(u);
822 pa_assert(u->pcm_handle);
823
824 if (u->alsa_rtpoll_item)
825 pa_rtpoll_item_free(u->alsa_rtpoll_item);
826
827 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
828 return -1;
829
830 return 0;
831 }
832
/* Called from IO context */
/* Close the PCM device and drop its poll item; everything needed to
 * reopen is kept in 'u' for unsuspend(). Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Freeze latency interpolation while we are closed. */
    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
853
/* Called from IO context */
/* Recompute hwbuf_unused from the currently requested latency and push
 * the matching software parameters (avail_min) to ALSA. Returns 0 on
 * success or a negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Leave everything beyond the requested latency unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer-based scheduling, ask ALSA to wake us only once
         * the planned sleep time worth of data has accumulated. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
906
/* Called from IO Context on unsuspend or from main thread when creating source */
/* Reinitialize the wakeup watermark (given in bytes of sample spec
 * 'ss') and all derived step/threshold values, then (re)establish the
 * source's latency range. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    /* Convert the watermark from 'ss' units into the source's own spec. */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thread(),
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}
941
/* Called from IO context */
/* Reopen and reconfigure the PCM device after a suspend. Fails (and
 * leaves the device closed) unless the original hardware parameters
 * can be restored exactly. Returns 0 on success, -PA_ERR_IO on
 * failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Disallow automatic conversions so that any deviation from the
     * original configuration is detected below. */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Reset read accounting and latency interpolation state. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1023
/* Called from IO context */
/* Message dispatcher for the source: handles latency queries and state
 * transitions, delegating everything else to the generic
 * implementation. */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Latency is only meaningful while the device is open. */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up poll fds. */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ; /* nothing to do for these states */
            }

            break;
    }

    /* Everything else is handled by the generic source implementation. */
    return pa_source_process_msg(o, code, data, offset, chunk);
}
1084
1085 /* Called from main context */
1086 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1087 pa_source_state_t old_state;
1088 struct userdata *u;
1089
1090 pa_source_assert_ref(s);
1091 pa_assert_se(u = s->userdata);
1092
1093 old_state = pa_source_get_state(u->source);
1094
1095 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1096 reserve_done(u);
1097 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1098 if (reserve_init(u, u->device_name) < 0)
1099 return -PA_ERR_BUSY;
1100
1101 return 0;
1102 }
1103
1104 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1105 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1106
1107 pa_assert(u);
1108 pa_assert(u->mixer_handle);
1109
1110 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1111 return 0;
1112
1113 if (!PA_SOURCE_IS_LINKED(u->source->state))
1114 return 0;
1115
1116 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1117 pa_source_set_mixer_dirty(u->source, TRUE);
1118 return 0;
1119 }
1120
1121 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1122 pa_source_get_volume(u->source, TRUE);
1123 pa_source_get_mute(u->source, TRUE);
1124 }
1125
1126 return 0;
1127 }
1128
1129 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1130 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1131
1132 pa_assert(u);
1133 pa_assert(u->mixer_handle);
1134
1135 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1136 return 0;
1137
1138 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1139 pa_source_set_mixer_dirty(u->source, TRUE);
1140 return 0;
1141 }
1142
1143 if (mask & SND_CTL_EVENT_MASK_VALUE)
1144 pa_source_update_volume_and_mute(u->source);
1145
1146 return 0;
1147 }
1148
/* Read the current hardware volume from the mixer path and mirror it
 * into the source's real volume. If the hardware volume changed and the
 * path has a dB scale, the software volume is reset since hardware then
 * fully represents the requested volume. */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Failure to read: keep the previously known state */
    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    /* No change: nothing further to update */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
}
1181
/* Write s->real_volume to the hardware. With a dB scale, the residual
 * difference between the requested volume and what the hardware could
 * actually set is compensated in the software volume; without a dB
 * scale, the actually achieved hardware volume is reported back as the
 * real volume instead. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* NOTE(review): the last two flags presumably select deferred vs.
     * immediate write-through — confirm against pa_alsa_path_set_volume() */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1239
/* IO-thread half of deferred volume handling: writes the pending
 * thread_info.current_hw_volume to the hardware, and logs when the
 * written value deviated noticeably from what was requested. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what was achieved against what was requested; a ratio
         * close to PA_VOLUME_NORM on every channel means "close enough" */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            /* The two formatting buffers are never needed at the same
             * time as both members, hence the space-saving union */
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1281
1282 static void source_get_mute_cb(pa_source *s) {
1283 struct userdata *u = s->userdata;
1284 pa_bool_t b;
1285
1286 pa_assert(u);
1287 pa_assert(u->mixer_path);
1288 pa_assert(u->mixer_handle);
1289
1290 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1291 return;
1292
1293 s->muted = b;
1294 }
1295
1296 static void source_set_mute_cb(pa_source *s) {
1297 struct userdata *u = s->userdata;
1298
1299 pa_assert(u);
1300 pa_assert(u->mixer_path);
1301 pa_assert(u->mixer_handle);
1302
1303 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1304 }
1305
/* Wire up the source's volume and mute callbacks according to the
 * capabilities of the currently selected mixer path (hardware vs.
 * software control, dB scale availability, deferred volume). Called
 * whenever a mixer path has been (re)selected. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        /* No usable hardware volume element: clear all volume callbacks
         * so the core falls back to pure software volume */
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred (IO-thread) volume writing requires a dB scale */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Base volume is chosen so that the path's maximum dB maps
             * to PA_VOLUME_NORM */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            pa_source_enable_decibel_volume(u->source, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1354
1355 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1356 struct userdata *u = s->userdata;
1357
1358 pa_assert(u);
1359 pa_assert(p);
1360 pa_assert(u->ucm_context);
1361
1362 return pa_alsa_ucm_set_port(u->ucm_context, p, FALSE);
1363 }
1364
/* Called from main context. Activates the mixer path belonging to the
 * given port, re-wires the volume/mute callbacks for that path and then
 * pushes the source's current mute and volume state to the hardware. */
static int source_set_port_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    /* Every port carries the mixer path it corresponds to */
    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);

    mixer_volume_init(u);

    /* Re-apply mute and volume on the freshly selected path; with
     * deferred volume the write goes through write_volume instead of
     * set_volume */
    if (s->set_mute)
        s->set_mute(s);
    if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
        if (s->write_volume)
            s->write_volume(s);
    } else {
        if (s->set_volume)
            s->set_volume(s);
    }

    return 0;
}
1392
1393 static void source_update_requested_latency_cb(pa_source *s) {
1394 struct userdata *u = s->userdata;
1395 pa_assert(u);
1396 pa_assert(u->use_tsched); /* only when timer scheduling is used
1397 * we can dynamically adjust the
1398 * latency */
1399
1400 if (!u->pcm_handle)
1401 return;
1402
1403 update_sw_params(u);
1404 }
1405
1406 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1407 {
1408 struct userdata *u = s->userdata;
1409 int i;
1410 pa_bool_t supported = FALSE;
1411
1412 pa_assert(u);
1413
1414 for (i = 0; u->rates[i]; i++) {
1415 if (u->rates[i] == rate) {
1416 supported = TRUE;
1417 break;
1418 }
1419 }
1420
1421 if (!supported) {
1422 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1423 return FALSE;
1424 }
1425
1426 if (!PA_SOURCE_IS_OPENED(s->state)) {
1427 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1428 u->source->sample_spec.rate = rate;
1429 return TRUE;
1430 }
1431
1432 return FALSE;
1433 }
1434
/* The IO thread's main loop: reads captured audio from ALSA, feeds the
 * timing smoother, applies deferred volume changes and then sleeps on
 * the rtpoll until either the computed wakeup timer fires or ALSA
 * signals activity on its poll descriptors. Exits on PA_MESSAGE_SHUTDOWN
 * (regular) or posts an unload request on error (fail path). */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration after (un)suspend: kick off the stream
             * and resume the clock smoother */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                u->first = FALSE;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* With deferred volume, pending hardware volume writes may
         * impose an earlier wakeup than the capture timing does */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        /* real_sleep doubles as the timestamp taken right before the
         * poll; it is only read below when rtpoll_sleep > 0 */
        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            real_sleep = pa_rtclock_now();
        }
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            /* Waking up much later than requested indicates scheduler
             * pressure that will show up as extra latency */
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark)
                pa_log_info("Scheduling delay of %0.2fms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC);
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* pa_rtpoll_run() returning 0 means: regular shutdown request */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLIN signals an error condition (e.g.
             * an overrun); try to recover and restart the stream */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1578
1579 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1580 const char *n;
1581 char *t;
1582
1583 pa_assert(data);
1584 pa_assert(ma);
1585 pa_assert(device_name);
1586
1587 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1588 pa_source_new_data_set_name(data, n);
1589 data->namereg_fail = TRUE;
1590 return;
1591 }
1592
1593 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1594 data->namereg_fail = TRUE;
1595 else {
1596 n = device_id ? device_id : device_name;
1597 data->namereg_fail = FALSE;
1598 }
1599
1600 if (mapping)
1601 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1602 else
1603 t = pa_sprintf_malloc("alsa_input.%s", n);
1604
1605 pa_source_new_data_set_name(data, t);
1606 pa_xfree(t);
1607 }
1608
/* Locate the ALSA mixer belonging to the opened PCM and decide which
 * mixer path(s) to use: a synthesized single path when an explicit
 * "control" element was given, otherwise the mapping's probed input
 * path set. On failure all partially acquired mixer resources are
 * released again; the function itself never fails hard (volume then
 * simply falls back to software). */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
    snd_hctl_t *hctl;

    /* Without a mapping or an explicit element there is nothing to find */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* Build a one-element path around the user-specified control */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->input_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1647
/* Activate the appropriate mixer path for the current port (or the only
 * available path), hook up volume/mute handling via mixer_volume_init()
 * and register mixer event callbacks so external mixer changes are
 * picked up. Returns 0 on success or when there is nothing to set up,
 * negative on failure.
 *
 * NOTE(review): the ignore_dB parameter is currently unused here —
 * probing with it happens earlier in find_mixer(); confirm whether the
 * parameter can be dropped. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* No mixer was found earlier: software volume/mute only */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            /* Deferred volume: watch the mixer fds from the IO thread */
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            /* Classic mode: watch the mixer fds from the main loop */
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1723
/* Create and fully initialize an ALSA capture source: parses module
 * arguments, opens the PCM device (by mapping, device id or device
 * string), probes the mixer, builds the pa_source object, starts the IO
 * thread and applies the initial volume/mute settings. Returns the new
 * source, or NULL on failure (all partially acquired resources are
 * released via userdata_free() on the fail path). */
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;
    void *state = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* --- Parse sample spec, channel map and rate arguments --- */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    /* --- Compute default buffer metrics, then let modargs override --- */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    /* --- Boolean module arguments --- */
    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    /* --- Allocate and pre-populate the userdata --- */
    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    /* use ucm */
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d are in-out flags: the open helpers clear them if mmap or
     * tsched turns out to be unsupported by the device */
    b = use_mmap;
    d = use_tsched;

    /* --- Open the PCM: by mapping, by device id, or by device string --- */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* --- Downgrade access mode if the device rejected mmap/tsched --- */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");
        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on overrun");
    }

    u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    if (!u->ucm_context)
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* --- Build the pa_source object --- */
    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);
    pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    /* --- Fill the property list describing the device --- */
    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context)
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, FALSE, card);
    else if (u->mixer_path_set)
        pa_alsa_add_ports(&data, u->mixer_path_set, card);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->source->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->source->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    /* --- Hook up the source callbacks --- */
    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    if (u->ucm_context)
        u->source->set_port = source_set_port_ucm_cb;
    else
        u->source->set_port = source_set_port_cb;
    if (u->source->alternate_sample_rate)
        u->source->update_rate = source_update_rate_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    /* Final buffer geometry, possibly adjusted by the open helpers */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
    }
    else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    /* --- Activate the routing: UCM port or classic mixer path --- */
    if (u->ucm_context) {
        if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, FALSE) < 0)
            goto fail;
    } else if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
        u->source->write_volume(u->source);

    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
2098
2099 static void userdata_free(struct userdata *u) {
2100 pa_assert(u);
2101
2102 if (u->source)
2103 pa_source_unlink(u->source);
2104
2105 if (u->thread) {
2106 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2107 pa_thread_free(u->thread);
2108 }
2109
2110 pa_thread_mq_done(&u->thread_mq);
2111
2112 if (u->source)
2113 pa_source_unref(u->source);
2114
2115 if (u->mixer_pd)
2116 pa_alsa_mixer_pdata_free(u->mixer_pd);
2117
2118 if (u->alsa_rtpoll_item)
2119 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2120
2121 if (u->rtpoll)
2122 pa_rtpoll_free(u->rtpoll);
2123
2124 if (u->pcm_handle) {
2125 snd_pcm_drop(u->pcm_handle);
2126 snd_pcm_close(u->pcm_handle);
2127 }
2128
2129 if (u->mixer_fdl)
2130 pa_alsa_fdlist_free(u->mixer_fdl);
2131
2132 if (u->mixer_path && !u->mixer_path_set)
2133 pa_alsa_path_free(u->mixer_path);
2134
2135 if (u->mixer_handle)
2136 snd_mixer_close(u->mixer_handle);
2137
2138 if (u->smoother)
2139 pa_smoother_free(u->smoother);
2140
2141 if (u->rates)
2142 pa_xfree(u->rates);
2143
2144 reserve_done(u);
2145 monitor_done(u);
2146
2147 pa_xfree(u->device_name);
2148 pa_xfree(u->control_device);
2149 pa_xfree(u->paths_dir);
2150 pa_xfree(u);
2151 }
2152
2153 void pa_alsa_source_free(pa_source *s) {
2154 struct userdata *u;
2155
2156 pa_source_assert_ref(s);
2157 pa_assert_se(u = s->userdata);
2158
2159 userdata_free(u);
2160 }