pulseaudio: src/modules/alsa/alsa-source.c
commit: alsa: ignore volume changes from the hw if we are not on the active console
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62
63 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
64 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
65
66 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
67 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
68 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
69 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
70 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
71 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72
73 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
74 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
75
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
78
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
80
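/* Runtime state of one ALSA capture source, shared between the main
 * thread and the IO thread that does the actual reading. */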
81 struct userdata {
82 pa_core *core;
83 pa_module *module;
84 pa_source *source;
85
86 pa_thread *thread;
87 pa_thread_mq thread_mq;
88 pa_rtpoll *rtpoll;
89
90 snd_pcm_t *pcm_handle;
91
92 pa_alsa_fdlist *mixer_fdl;
93 snd_mixer_t *mixer_handle;
94 pa_alsa_path_set *mixer_path_set;
95 pa_alsa_path *mixer_path;
96
97 pa_cvolume hardware_volume;
98
99 size_t
100 frame_size,
101 fragment_size,
102 hwbuf_size,
103 tsched_watermark,
104 hwbuf_unused,
105 min_sleep,
106 min_wakeup,
107 watermark_inc_step,
108 watermark_dec_step,
109 watermark_inc_threshold,
110 watermark_dec_threshold;
111
112 pa_usec_t watermark_dec_not_before;
113
114 char *device_name;
115 char *control_device;
116
117 pa_bool_t use_mmap:1, use_tsched:1;
118
119 pa_rtpoll_item *alsa_rtpoll_item;
120
121 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
122
123 pa_smoother *smoother;
124 uint64_t read_count;
125 pa_usec_t smoother_interval;
126 pa_usec_t last_smoother_update;
127
128 pa_reserve_wrapper *reserve;
129 pa_hook_slot *reserve_slot;
130 pa_reserve_monitor_wrapper *monitor;
131 pa_hook_slot *monitor_slot;
132 };
133
134 static void userdata_free(struct userdata *u);
135
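/* Device reservation hook: another application asked for the device, so
 * try to give it up by suspending the source; return PA_HOOK_CANCEL if
 * suspending fails. */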
136 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
137 pa_assert(r);
138 pa_assert(u);
139
140 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
141 return PA_HOOK_CANCEL;
142
143 return PA_HOOK_OK;
144 }
145
146 static void reserve_done(struct userdata *u) {
147 pa_assert(u);
148
149 if (u->reserve_slot) {
150 pa_hook_slot_free(u->reserve_slot);
151 u->reserve_slot = NULL;
152 }
153
154 if (u->reserve) {
155 pa_reserve_wrapper_unref(u->reserve);
156 u->reserve = NULL;
157 }
158 }
159
160 static void reserve_update(struct userdata *u) {
161 const char *description;
162 pa_assert(u);
163
164 if (!u->source || !u->reserve)
165 return;
166
167 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
168 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
169 }
170
171 static int reserve_init(struct userdata *u, const char *dname) {
172 char *rname;
173
174 pa_assert(u);
175 pa_assert(dname);
176
177 if (u->reserve)
178 return 0;
179
180 if (pa_in_system_mode())
181 return 0;
182
183 /* We are resuming, try to lock the device */
184 if (!(rname = pa_alsa_get_reserve_name(dname)))
185 return 0;
186
187 u->reserve = pa_reserve_wrapper_get(u->core, rname);
188 pa_xfree(rname);
189
190 if (!(u->reserve))
191 return -1;
192
193 reserve_update(u);
194
195 pa_assert(!u->reserve_slot);
196 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
197
198 return 0;
199 }
200
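/* Reservation monitor hook: suspend the source while another process
 * holds the device and resume once it is free again, unless we hold the
 * reservation ourselves. */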
201 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
202 pa_bool_t b;
203
204 pa_assert(w);
205 pa_assert(u);
206
207 b = PA_PTR_TO_UINT(busy) && !u->reserve;
208
209 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
210 return PA_HOOK_OK;
211 }
212
213 static void monitor_done(struct userdata *u) {
214 pa_assert(u);
215
216 if (u->monitor_slot) {
217 pa_hook_slot_free(u->monitor_slot);
218 u->monitor_slot = NULL;
219 }
220
221 if (u->monitor) {
222 pa_reserve_monitor_wrapper_unref(u->monitor);
223 u->monitor = NULL;
224 }
225 }
226
227 static int reserve_monitor_init(struct userdata *u, const char *dname) {
228 char *rname;
229
230 pa_assert(u);
231 pa_assert(dname);
232
233 if (pa_in_system_mode())
234 return 0;
235
236 /* Watch whether some other application has the device locked */
237 if (!(rname = pa_alsa_get_reserve_name(dname)))
238 return 0;
239
240 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
241 pa_xfree(rname);
242
243 if (!(u->monitor))
244 return -1;
245
246 pa_assert(!u->monitor_slot);
247 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
248
249 return 0;
250 }
251
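/* Clamp the minimum sleep and wakeup margins used by timer-based
 * scheduling to sane values relative to the usable hardware buffer. */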
252 static void fix_min_sleep_wakeup(struct userdata *u) {
253 size_t max_use, max_use_2;
254 pa_assert(u);
255 pa_assert(u->use_tsched);
256
257 max_use = u->hwbuf_size - u->hwbuf_unused;
258 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
259
260 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
261 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
262
263 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
264 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
265 }
266
267 static void fix_tsched_watermark(struct userdata *u) {
268 size_t max_use;
269 pa_assert(u);
270 pa_assert(u->use_tsched);
271
272 max_use = u->hwbuf_size - u->hwbuf_unused;
273
274 if (u->tsched_watermark > max_use - u->min_sleep)
275 u->tsched_watermark = max_use - u->min_sleep;
276
277 if (u->tsched_watermark < u->min_wakeup)
278 u->tsched_watermark = u->min_wakeup;
279 }
280
281 static void increase_watermark(struct userdata *u) {
282 size_t old_watermark;
283 pa_usec_t old_min_latency, new_min_latency;
284
285 pa_assert(u);
286 pa_assert(u->use_tsched);
287
288 /* First, just try to increase the watermark */
289 old_watermark = u->tsched_watermark;
290 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
291 fix_tsched_watermark(u);
292
293 if (old_watermark != u->tsched_watermark) {
294 pa_log_info("Increasing wakeup watermark to %0.2f ms",
295 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
296 return;
297 }
298
299 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
300 old_min_latency = u->source->thread_info.min_latency;
301 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
302 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
303
304 if (old_min_latency != new_min_latency) {
305 pa_log_info("Increasing minimal latency to %0.2f ms",
306 (double) new_min_latency / PA_USEC_PER_MSEC);
307
308 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
309 }
310
311 /* When we reach this we're officially fucked! */
312 }
313
314 static void decrease_watermark(struct userdata *u) {
315 size_t old_watermark;
316 pa_usec_t now;
317
318 pa_assert(u);
319 pa_assert(u->use_tsched);
320
321 now = pa_rtclock_now();
322
323 if (u->watermark_dec_not_before <= 0)
324 goto restart;
325
326 if (u->watermark_dec_not_before > now)
327 return;
328
329 old_watermark = u->tsched_watermark;
330
331 if (u->tsched_watermark < u->watermark_dec_step)
332 u->tsched_watermark = u->tsched_watermark / 2;
333 else
334 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
335
336 fix_tsched_watermark(u);
337
338 if (old_watermark != u->tsched_watermark)
339 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
340 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
341
342 /* We don't change the latency range */
343
344 restart:
345 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
346 }
347
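/* Split the requested latency (or, if none is set, the full buffer time)
 * into the time we may sleep and the time we reserve for processing,
 * based on the current watermark. Returns the total (sleep + process). */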
348 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
349 pa_usec_t wm, usec;
350
351 pa_assert(sleep_usec);
352 pa_assert(process_usec);
353
354 pa_assert(u);
355 pa_assert(u->use_tsched);
356
357 usec = pa_source_get_requested_latency_within_thread(u->source);
358
359 if (usec == (pa_usec_t) -1)
360 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
361
362 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
363
364 if (wm > usec)
365 wm = usec/2;
366
367 *sleep_usec = usec - wm;
368 *process_usec = wm;
369
370 #ifdef DEBUG_TIMING
371 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
372 (unsigned long) (usec / PA_USEC_PER_MSEC),
373 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
374 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
375 #endif
376
377 return usec;
378 }
379
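/* Try to recover from an ALSA error (overrun, system suspend, ...) and
 * restart capture. Returns 0 on success, -1 if recovery failed. */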
380 static int try_recover(struct userdata *u, const char *call, int err) {
381 pa_assert(u);
382 pa_assert(call);
383 pa_assert(err < 0);
384
385 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
386
387 pa_assert(err != -EAGAIN);
388
389 if (err == -EPIPE)
390 pa_log_debug("%s: Buffer overrun!", call);
391
392 if (err == -ESTRPIPE)
393 pa_log_debug("%s: System suspended!", call);
394
395 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
396 pa_log("%s: %s", call, pa_alsa_strerror(err));
397 return -1;
398 }
399
400 snd_pcm_start(u->pcm_handle);
401 return 0;
402 }
403
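/* Compute how much room is left in the hardware buffer and, when timer
 * scheduling is used, adapt the watermark: raise it on (near) overruns,
 * lower it again when timeout wakeups keep finding plenty of room. */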
404 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
405 size_t left_to_record;
406 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
407 pa_bool_t overrun = FALSE;
408
409 /* We use <= instead of < for this check here because an overrun
410 * only happens after the last sample was processed, not merely when
411 * it is removed from the buffer. This is particularly important
412 * when block transfer is used. */
413
414 if (n_bytes <= rec_space)
415 left_to_record = rec_space - n_bytes;
416 else {
417
418 /* We got a dropout. What a mess! */
419 left_to_record = 0;
420 overrun = TRUE;
421
422 #ifdef DEBUG_TIMING
423 PA_DEBUG_TRAP;
424 #endif
425
426 if (pa_log_ratelimit())
427 pa_log_info("Overrun!");
428 }
429
430 #ifdef DEBUG_TIMING
431 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
432 #endif
433
434 if (u->use_tsched) {
435 pa_bool_t reset_not_before = TRUE;
436
437 if (overrun || left_to_record < u->watermark_inc_threshold)
438 increase_watermark(u);
439 else if (left_to_record > u->watermark_dec_threshold) {
440 reset_not_before = FALSE;
441
442 /* We decrease the watermark only if we have actually been
443 * woken up by a timeout. If something else woke us up
444 * it's too easy to fulfill the deadlines... */
445
446 if (on_timeout)
447 decrease_watermark(u);
448 }
449
450 if (reset_not_before)
451 u->watermark_dec_not_before = 0;
452 }
453
454 return left_to_record;
455 }
456
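/* Read everything that is currently available from the device via mmap
 * transfer and post it to the source. Also computes how long we may
 * sleep before the next wakeup. */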
457 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
458 pa_bool_t work_done = FALSE;
459 pa_usec_t max_sleep_usec = 0, process_usec = 0;
460 size_t left_to_record;
461 unsigned j = 0;
462
463 pa_assert(u);
464 pa_source_assert_ref(u->source);
465
466 if (u->use_tsched)
467 hw_sleep_time(u, &max_sleep_usec, &process_usec);
468
469 for (;;) {
470 snd_pcm_sframes_t n;
471 size_t n_bytes;
472 int r;
473 pa_bool_t after_avail = TRUE;
474
475 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
476
477 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
478 continue;
479
480 return r;
481 }
482
483 n_bytes = (size_t) n * u->frame_size;
484
485 #ifdef DEBUG_TIMING
486 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
487 #endif
488
489 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
490 on_timeout = FALSE;
491
492 if (u->use_tsched)
493 if (!polled &&
494 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
495 #ifdef DEBUG_TIMING
496 pa_log_debug("Not reading, because too early.");
497 #endif
498 break;
499 }
500
501 if (PA_UNLIKELY(n_bytes <= 0)) {
502
503 if (polled)
504 PA_ONCE_BEGIN {
505 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
506 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
507 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
508 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
509 pa_strnull(dn));
510 pa_xfree(dn);
511 } PA_ONCE_END;
512
513 #ifdef DEBUG_TIMING
514 pa_log_debug("Not reading, because not necessary.");
515 #endif
516 break;
517 }
518
519 if (++j > 10) {
520 #ifdef DEBUG_TIMING
521 pa_log_debug("Not filling up, because already too many iterations.");
522 #endif
523
524 break;
525 }
526
527 polled = FALSE;
528
529 #ifdef DEBUG_TIMING
530 pa_log_debug("Reading");
531 #endif
532
533 for (;;) {
534 int err;
535 const snd_pcm_channel_area_t *areas;
536 snd_pcm_uframes_t offset, frames;
537 pa_memchunk chunk;
538 void *p;
539 snd_pcm_sframes_t sframes;
540
541 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
542
543 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
544
545 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
546
547 if (!after_avail && err == -EAGAIN)
548 break;
549
550 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
551 continue;
552
553 return r;
554 }
555
556 /* Make sure that if these memblocks need to be copied they will fit into one slot */
557 if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
558 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
559
560 if (!after_avail && frames == 0)
561 break;
562
563 pa_assert(frames > 0);
564 after_avail = FALSE;
565
566 /* Check that these are multiples of 8 bits */
567 pa_assert((areas[0].first & 7) == 0);
568 pa_assert((areas[0].step & 7) == 0);
569
570 /* We assume a single interleaved memory buffer */
571 pa_assert((areas[0].first >> 3) == 0);
572 pa_assert((areas[0].step >> 3) == u->frame_size);
573
574 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
575
576 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
577 chunk.length = pa_memblock_get_length(chunk.memblock);
578 chunk.index = 0;
579
580 pa_source_post(u->source, &chunk);
581 pa_memblock_unref_fixed(chunk.memblock);
582
583 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
584
585 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
586 continue;
587
588 return r;
589 }
590
591 work_done = TRUE;
592
593 u->read_count += frames * u->frame_size;
594
595 #ifdef DEBUG_TIMING
596 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
597 #endif
598
599 if ((size_t) frames * u->frame_size >= n_bytes)
600 break;
601
602 n_bytes -= (size_t) frames * u->frame_size;
603 }
604 }
605
606 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
607
608 if (*sleep_usec > process_usec)
609 *sleep_usec -= process_usec;
610 else
611 *sleep_usec = 0;
612
613 return work_done ? 1 : 0;
614 }
615
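/* Same as mmap_read(), but using plain snd_pcm_readi() for devices that
 * cannot do mmap transfer. */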
616 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
617 int work_done = FALSE;
618 pa_usec_t max_sleep_usec = 0, process_usec = 0;
619 size_t left_to_record;
620 unsigned j = 0;
621
622 pa_assert(u);
623 pa_source_assert_ref(u->source);
624
625 if (u->use_tsched)
626 hw_sleep_time(u, &max_sleep_usec, &process_usec);
627
628 for (;;) {
629 snd_pcm_sframes_t n;
630 size_t n_bytes;
631 int r;
632 pa_bool_t after_avail = TRUE;
633
634 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
635
636 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
637 continue;
638
639 return r;
640 }
641
642 n_bytes = (size_t) n * u->frame_size;
643 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
644 on_timeout = FALSE;
645
646 if (u->use_tsched)
647 if (!polled &&
648 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
649 break;
650
651 if (PA_UNLIKELY(n_bytes <= 0)) {
652
653 if (polled)
654 PA_ONCE_BEGIN {
655 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
656 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
657 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
658 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
659 pa_strnull(dn));
660 pa_xfree(dn);
661 } PA_ONCE_END;
662
663 break;
664 }
665
666 if (++j > 10) {
667 #ifdef DEBUG_TIMING
668 pa_log_debug("Not filling up, because already too many iterations.");
669 #endif
670
671 break;
672 }
673
674 polled = FALSE;
675
676 for (;;) {
677 void *p;
678 snd_pcm_sframes_t frames;
679 pa_memchunk chunk;
680
681 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
682
683 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
684
685 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
686 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
687
688 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
689
690 p = pa_memblock_acquire(chunk.memblock);
691 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
692 pa_memblock_release(chunk.memblock);
693
694 if (PA_UNLIKELY(frames < 0)) {
695 pa_memblock_unref(chunk.memblock);
696
697 if (!after_avail && (int) frames == -EAGAIN)
698 break;
699
700 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
701 continue;
702
703 return r;
704 }
705
706 if (!after_avail && frames == 0) {
707 pa_memblock_unref(chunk.memblock);
708 break;
709 }
710
711 pa_assert(frames > 0);
712 after_avail = FALSE;
713
714 chunk.index = 0;
715 chunk.length = (size_t) frames * u->frame_size;
716
717 pa_source_post(u->source, &chunk);
718 pa_memblock_unref(chunk.memblock);
719
720 work_done = TRUE;
721
722 u->read_count += frames * u->frame_size;
723
724 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
725
726 if ((size_t) frames * u->frame_size >= n_bytes)
727 break;
728
729 n_bytes -= (size_t) frames * u->frame_size;
730 }
731 }
732
733 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
734
735 if (*sleep_usec > process_usec)
736 *sleep_usec -= process_usec;
737 else
738 *sleep_usec = 0;
739
740 return work_done ? 1 : 0;
741 }
742
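/* Feed the current device position (read counter plus reported delay)
 * and the corresponding system time into the smoother, so that latency
 * queries stay accurate. */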
743 static void update_smoother(struct userdata *u) {
744 snd_pcm_sframes_t delay = 0;
745 uint64_t position;
746 int err;
747 pa_usec_t now1 = 0, now2;
748 snd_pcm_status_t *status;
749
750 snd_pcm_status_alloca(&status);
751
752 pa_assert(u);
753 pa_assert(u->pcm_handle);
754
755 /* Let's update the time smoother */
756
757 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
758 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
759 return;
760 }
761
762 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
763 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
764 else {
765 snd_htimestamp_t htstamp = { 0, 0 };
766 snd_pcm_status_get_htstamp(status, &htstamp);
767 now1 = pa_timespec_load(&htstamp);
768 }
769
770 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
771 if (now1 <= 0)
772 now1 = pa_rtclock_now();
773
774 /* check if the time since the last update is bigger than the interval */
775 if (u->last_smoother_update > 0)
776 if (u->last_smoother_update + u->smoother_interval > now1)
777 return;
778
779 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
780 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
781
782 pa_smoother_put(u->smoother, now1, now2);
783
784 u->last_smoother_update = now1;
785 /* exponentially increase the update interval up to the MAX limit */
786 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
787 }
788
789 static pa_usec_t source_get_latency(struct userdata *u) {
790 int64_t delay;
791 pa_usec_t now1, now2;
792
793 pa_assert(u);
794
795 now1 = pa_rtclock_now();
796 now2 = pa_smoother_get(u->smoother, now1);
797
798 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
799
800 return delay >= 0 ? (pa_usec_t) delay : 0;
801 }
802
803 static int build_pollfd(struct userdata *u) {
804 pa_assert(u);
805 pa_assert(u->pcm_handle);
806
807 if (u->alsa_rtpoll_item)
808 pa_rtpoll_item_free(u->alsa_rtpoll_item);
809
810 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
811 return -1;
812
813 return 0;
814 }
815
816 static int suspend(struct userdata *u) {
817 pa_assert(u);
818 pa_assert(u->pcm_handle);
819
820 pa_smoother_pause(u->smoother, pa_rtclock_now());
821
822 /* Let's suspend */
823 snd_pcm_close(u->pcm_handle);
824 u->pcm_handle = NULL;
825
826 if (u->alsa_rtpoll_item) {
827 pa_rtpoll_item_free(u->alsa_rtpoll_item);
828 u->alsa_rtpoll_item = NULL;
829 }
830
831 pa_log_info("Device suspended...");
832
833 return 0;
834 }
835
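/* Recalculate hwbuf_unused and avail_min from the currently requested
 * latency and push the resulting software parameters to ALSA. */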
836 static int update_sw_params(struct userdata *u) {
837 snd_pcm_uframes_t avail_min;
838 int err;
839
840 pa_assert(u);
841
842 /* Use the full buffer if no one asked us for anything specific */
843 u->hwbuf_unused = 0;
844
845 if (u->use_tsched) {
846 pa_usec_t latency;
847
848 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
849 size_t b;
850
851 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
852
853 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
854
855 /* We need at least one sample in our buffer */
856
857 if (PA_UNLIKELY(b < u->frame_size))
858 b = u->frame_size;
859
860 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
861 }
862
863 fix_min_sleep_wakeup(u);
864 fix_tsched_watermark(u);
865 }
866
867 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
868
869 avail_min = 1;
870
871 if (u->use_tsched) {
872 pa_usec_t sleep_usec, process_usec;
873
874 hw_sleep_time(u, &sleep_usec, &process_usec);
875 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
876 }
877
878 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
879
880 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
881 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
882 return err;
883 }
884
885 return 0;
886 }
887
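/* Reopen and reconfigure the PCM device after a suspend, verifying that
 * we get the original access mode, sample spec and buffer geometry back. */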
888 static int unsuspend(struct userdata *u) {
889 pa_sample_spec ss;
890 int err;
891 pa_bool_t b, d;
892 snd_pcm_uframes_t period_size, buffer_size;
893
894 pa_assert(u);
895 pa_assert(!u->pcm_handle);
896
897 pa_log_info("Trying resume...");
898
899 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
900 SND_PCM_NONBLOCK|
901 SND_PCM_NO_AUTO_RESAMPLE|
902 SND_PCM_NO_AUTO_CHANNELS|
903 SND_PCM_NO_AUTO_FORMAT)) < 0) {
904 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
905 goto fail;
906 }
907
908 ss = u->source->sample_spec;
909 period_size = u->fragment_size / u->frame_size;
910 buffer_size = u->hwbuf_size / u->frame_size;
911 b = u->use_mmap;
912 d = u->use_tsched;
913
914 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
915 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
916 goto fail;
917 }
918
919 if (b != u->use_mmap || d != u->use_tsched) {
920 pa_log_warn("Resume failed, couldn't get original access mode.");
921 goto fail;
922 }
923
924 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
925 pa_log_warn("Resume failed, couldn't restore original sample settings.");
926 goto fail;
927 }
928
929 if (period_size*u->frame_size != u->fragment_size ||
930 buffer_size*u->frame_size != u->hwbuf_size) {
931 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
932 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
933 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
934 goto fail;
935 }
936
937 if (update_sw_params(u) < 0)
938 goto fail;
939
940 if (build_pollfd(u) < 0)
941 goto fail;
942
943 /* FIXME: We need to reload the volume somehow */
944
945 snd_pcm_start(u->pcm_handle);
946
947 u->read_count = 0;
948 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
949 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
950 u->last_smoother_update = 0;
951
952 pa_log_info("Resumed successfully...");
953
954 return 0;
955
956 fail:
957 if (u->pcm_handle) {
958 snd_pcm_close(u->pcm_handle);
959 u->pcm_handle = NULL;
960 }
961
962 return -PA_ERR_IO;
963 }
964
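/* Message handler running in the IO thread: answers latency queries and
 * performs the actual suspend/resume on state changes. */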
965 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
966 struct userdata *u = PA_SOURCE(o)->userdata;
967
968 switch (code) {
969
970 case PA_SOURCE_MESSAGE_GET_LATENCY: {
971 pa_usec_t r = 0;
972
973 if (u->pcm_handle)
974 r = source_get_latency(u);
975
976 *((pa_usec_t*) data) = r;
977
978 return 0;
979 }
980
981 case PA_SOURCE_MESSAGE_SET_STATE:
982
983 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
984
985 case PA_SOURCE_SUSPENDED: {
986 int r;
987 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
988
989 if ((r = suspend(u)) < 0)
990 return r;
991
992 break;
993 }
994
995 case PA_SOURCE_IDLE:
996 case PA_SOURCE_RUNNING: {
997 int r;
998
999 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1000 if (build_pollfd(u) < 0)
1001 return -PA_ERR_IO;
1002
1003 snd_pcm_start(u->pcm_handle);
1004 }
1005
1006 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1007 if ((r = unsuspend(u)) < 0)
1008 return r;
1009 }
1010
1011 break;
1012 }
1013
1014 case PA_SOURCE_UNLINKED:
1015 case PA_SOURCE_INIT:
1016 case PA_SOURCE_INVALID_STATE:
1017 ;
1018 }
1019
1020 break;
1021 }
1022
1023 return pa_source_process_msg(o, code, data, offset, chunk);
1024 }
1025
1026 /* Called from main context */
1027 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1028 pa_source_state_t old_state;
1029 struct userdata *u;
1030
1031 pa_source_assert_ref(s);
1032 pa_assert_se(u = s->userdata);
1033
1034 old_state = pa_source_get_state(u->source);
1035
1036 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1037 reserve_done(u);
1038 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1039 if (reserve_init(u, u->device_name) < 0)
1040 return -PA_ERR_BUSY;
1041
1042 return 0;
1043 }
1044
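/* Mixer event callback: pick up volume/mute changes made directly in the
 * hardware, but ignore them while the session is inactive
 * (PA_SUSPEND_SESSION), i.e. while we are not on the active console. */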
1045 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1046 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1047
1048 pa_assert(u);
1049 pa_assert(u->mixer_handle);
1050
1051 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1052 return 0;
1053
1054 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1055 return 0;
1056
1057 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1058 pa_source_get_volume(u->source, TRUE);
1059 pa_source_get_mute(u->source, TRUE);
1060 }
1061
1062 return 0;
1063 }
1064
1065 static void source_get_volume_cb(pa_source *s) {
1066 struct userdata *u = s->userdata;
1067 pa_cvolume r;
1068 char t[PA_CVOLUME_SNPRINT_MAX];
1069
1070 pa_assert(u);
1071 pa_assert(u->mixer_path);
1072 pa_assert(u->mixer_handle);
1073
1074 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1075 return;
1076
1077 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1078 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1079
1080 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1081
1082 if (pa_cvolume_equal(&u->hardware_volume, &r))
1083 return;
1084
1085 s->volume = u->hardware_volume = r;
1086
1087 /* Hmm, so the hardware volume changed, let's reset our software volume */
1088 if (u->mixer_path->has_dB)
1089 pa_source_set_soft_volume(s, NULL);
1090 }
1091
1092 static void source_set_volume_cb(pa_source *s) {
1093 struct userdata *u = s->userdata;
1094 pa_cvolume r;
1095 char t[PA_CVOLUME_SNPRINT_MAX];
1096
1097 pa_assert(u);
1098 pa_assert(u->mixer_path);
1099 pa_assert(u->mixer_handle);
1100
1101 /* Shift up by the base volume */
1102 pa_sw_cvolume_divide_scalar(&r, &s->volume, s->base_volume);
1103
1104 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1105 return;
1106
1107 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1108 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1109
1110 u->hardware_volume = r;
1111
1112 if (u->mixer_path->has_dB) {
1113 pa_cvolume new_soft_volume;
1114 pa_bool_t accurate_enough;
1115
1116 /* Match exactly what the user requested by software */
1117 pa_sw_cvolume_divide(&new_soft_volume, &s->volume, &u->hardware_volume);
1118
1119 /* If the adjustment to do in software is only minimal we
1120 * can skip it. That saves us CPU at the expense of a bit of
1121 * accuracy */
1122 accurate_enough =
1123 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1124 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1125
1126 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
1127 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1128 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1129 pa_yes_no(accurate_enough));
1130
1131 if (!accurate_enough)
1132 s->soft_volume = new_soft_volume;
1133
1134 } else {
1135 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1136
1137 /* We can't match exactly what the user requested, hence let's
1138 * at least tell the user about it */
1139
1140 s->volume = r;
1141 }
1142 }
1143
1144 static void source_get_mute_cb(pa_source *s) {
1145 struct userdata *u = s->userdata;
1146 pa_bool_t b;
1147
1148 pa_assert(u);
1149 pa_assert(u->mixer_path);
1150 pa_assert(u->mixer_handle);
1151
1152 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1153 return;
1154
1155 s->muted = b;
1156 }
1157
1158 static void source_set_mute_cb(pa_source *s) {
1159 struct userdata *u = s->userdata;
1160
1161 pa_assert(u);
1162 pa_assert(u->mixer_path);
1163 pa_assert(u->mixer_handle);
1164
1165 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1166 }
1167
1168 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1169 struct userdata *u = s->userdata;
1170 pa_alsa_port_data *data;
1171
1172 pa_assert(u);
1173 pa_assert(p);
1174 pa_assert(u->mixer_handle);
1175
1176 data = PA_DEVICE_PORT_DATA(p);
1177
1178 pa_assert_se(u->mixer_path = data->path);
1179 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1180
1181 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1182 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1183 s->n_volume_steps = PA_VOLUME_NORM+1;
1184
1185 if (u->mixer_path->max_dB > 0.0)
1186 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1187 else
1188 pa_log_info("No particular base volume set, fixing to 0 dB");
1189 } else {
1190 s->base_volume = PA_VOLUME_NORM;
1191 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1192 }
1193
1194 if (data->setting)
1195 pa_alsa_setting_select(data->setting, u->mixer_handle);
1196
1197 if (s->set_mute)
1198 s->set_mute(s);
1199 if (s->set_volume)
1200 s->set_volume(s);
1201
1202 return 0;
1203 }
1204
1205 static void source_update_requested_latency_cb(pa_source *s) {
1206 struct userdata *u = s->userdata;
1207 pa_assert(u);
1208
1209 if (!u->pcm_handle)
1210 return;
1211
1212 update_sw_params(u);
1213 }
1214
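/* The IO thread: reads data from the device, updates the smoother,
 * programs the wakeup timer and handles ALSA poll events. */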
1215 static void thread_func(void *userdata) {
1216 struct userdata *u = userdata;
1217 unsigned short revents = 0;
1218
1219 pa_assert(u);
1220
1221 pa_log_debug("Thread starting up");
1222
1223 if (u->core->realtime_scheduling)
1224 pa_make_realtime(u->core->realtime_priority);
1225
1226 pa_thread_mq_install(&u->thread_mq);
1227
1228 for (;;) {
1229 int ret;
1230
1231 #ifdef DEBUG_TIMING
1232 pa_log_debug("Loop");
1233 #endif
1234
1235 /* Read some data and pass it to the sources */
1236 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1237 int work_done;
1238 pa_usec_t sleep_usec = 0;
1239 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1240
1241 if (u->use_mmap)
1242 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1243 else
1244 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1245
1246 if (work_done < 0)
1247 goto fail;
1248
1249 /* pa_log_debug("work_done = %i", work_done); */
1250
1251 if (work_done)
1252 update_smoother(u);
1253
1254 if (u->use_tsched) {
1255 pa_usec_t cusec;
1256
1257 /* OK, the capture buffer is now empty, let's
1258 * calculate when to wake up next */
1259
1260 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1261
1262 /* Convert from the sound card time domain to the
1263 * system time domain */
1264 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1265
1266 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1267
1268 /* We don't trust the conversion, so we wake up whatever comes first */
1269 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1270 }
1271 } else if (u->use_tsched)
1272
1273 /* OK, we're in an invalid state, let's disable our timers */
1274 pa_rtpoll_set_timer_disabled(u->rtpoll);
1275
1276 /* Hmm, nothing to do. Let's sleep */
1277 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1278 goto fail;
1279
1280 if (ret == 0)
1281 goto finish;
1282
1283 /* Tell ALSA about this and process its response */
1284 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1285 struct pollfd *pollfd;
1286 int err;
1287 unsigned n;
1288
1289 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1290
1291 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1292 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1293 goto fail;
1294 }
1295
1296 if (revents & ~POLLIN) {
1297 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1298 goto fail;
1299
1300 snd_pcm_start(u->pcm_handle);
1301 } else if (revents && u->use_tsched && pa_log_ratelimit())
1302 pa_log_debug("Wakeup from ALSA!");
1303
1304 } else
1305 revents = 0;
1306 }
1307
1308 fail:
1309 /* If this was not a regular exit from the loop we have to continue
1310 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1311 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1312 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1313
1314 finish:
1315 pa_log_debug("Thread shutting down");
1316 }
1317
1318 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1319 const char *n;
1320 char *t;
1321
1322 pa_assert(data);
1323 pa_assert(ma);
1324 pa_assert(device_name);
1325
1326 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1327 pa_source_new_data_set_name(data, n);
1328 data->namereg_fail = TRUE;
1329 return;
1330 }
1331
1332 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1333 data->namereg_fail = TRUE;
1334 else {
1335 n = device_id ? device_id : device_name;
1336 data->namereg_fail = FALSE;
1337 }
1338
1339 if (mapping)
1340 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1341 else
1342 t = pa_sprintf_malloc("alsa_input.%s", n);
1343
1344 pa_source_new_data_set_name(data, t);
1345 pa_xfree(t);
1346 }
1347
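/* Open the mixer belonging to the PCM and probe either the single element
 * requested via the control= argument or the full path set of the mapping. */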
1348 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1349
1350 if (!mapping && !element)
1351 return;
1352
1353 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1354 pa_log_info("Failed to find a working mixer device.");
1355 return;
1356 }
1357
1358 if (element) {
1359
1360 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1361 goto fail;
1362
1363 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1364 goto fail;
1365
1366 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1367 pa_alsa_path_dump(u->mixer_path);
1368 } else {
1369
1370 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1371 goto fail;
1372
1373 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1374
1375 pa_log_debug("Probed mixer paths:");
1376 pa_alsa_path_set_dump(u->mixer_path_set);
1377 }
1378
1379 return;
1380
1381 fail:
1382
1383 if (u->mixer_path_set) {
1384 pa_alsa_path_set_free(u->mixer_path_set);
1385 u->mixer_path_set = NULL;
1386 } else if (u->mixer_path) {
1387 pa_alsa_path_free(u->mixer_path);
1388 u->mixer_path = NULL;
1389 }
1390
1391 if (u->mixer_handle) {
1392 snd_mixer_close(u->mixer_handle);
1393 u->mixer_handle = NULL;
1394 }
1395 }
1396
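/* Select the active mixer path, hook up hardware volume/mute handling if
 * the path supports it, and start watching the mixer for events. */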
1397 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1398 pa_assert(u);
1399
1400 if (!u->mixer_handle)
1401 return 0;
1402
1403 if (u->source->active_port) {
1404 pa_alsa_port_data *data;
1405
1406 /* We have a list of supported paths, so let's activate the
1407 * one that has been chosen as active */
1408
1409 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1410 u->mixer_path = data->path;
1411
1412 pa_alsa_path_select(data->path, u->mixer_handle);
1413
1414 if (data->setting)
1415 pa_alsa_setting_select(data->setting, u->mixer_handle);
1416
1417 } else {
1418
1419 if (!u->mixer_path && u->mixer_path_set)
1420 u->mixer_path = u->mixer_path_set->paths;
1421
1422 if (u->mixer_path) {
1423 /* Hmm, we have only a single path, so let's activate it */
1424
1425 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1426
1427 if (u->mixer_path->settings)
1428 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1429 } else
1430 return 0;
1431 }
1432
1433 if (!u->mixer_path->has_volume)
1434 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1435 else {
1436
1437 if (u->mixer_path->has_dB) {
1438 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1439
1440 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1441 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1442
1443 if (u->mixer_path->max_dB > 0.0)
1444 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1445 else
1446 pa_log_info("No particular base volume set, fixing to 0 dB");
1447
1448 } else {
1449 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1450 u->source->base_volume = PA_VOLUME_NORM;
1451 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1452 }
1453
1454 u->source->get_volume = source_get_volume_cb;
1455 u->source->set_volume = source_set_volume_cb;
1456
1457 u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
1458 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1459 }
1460
1461 if (!u->mixer_path->has_mute) {
1462 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1463 } else {
1464 u->source->get_mute = source_get_mute_cb;
1465 u->source->set_mute = source_set_mute_cb;
1466 u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1467 pa_log_info("Using hardware mute control.");
1468 }
1469
1470 u->mixer_fdl = pa_alsa_fdlist_new();
1471
1472 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1473 pa_log("Failed to initialize file descriptor monitoring");
1474 return -1;
1475 }
1476
1477 if (u->mixer_path_set)
1478 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1479 else
1480 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1481
1482 return 0;
1483 }
1484
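/* Create a new ALSA capture source. A minimal sketch of a typical call
 * site (roughly what module-alsa-source's pa__init() does; shown only as
 * an illustration, the exact module code may differ):
 *
 *     pa_modargs *ma = pa_modargs_new(m->argument, valid_modargs);
 *     pa_source *s;
 *     if (!ma || !(s = pa_alsa_source_new(m, ma, __FILE__, NULL, NULL)))
 *         goto fail;
 *
 * e.g. after "pactl load-module module-alsa-source device=hw:0". */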
1485 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1486
1487 struct userdata *u = NULL;
1488 const char *dev_id = NULL;
1489 pa_sample_spec ss, requested_ss;
1490 pa_channel_map map;
1491 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1492 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1493 size_t frame_size;
1494 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1495 pa_source_new_data data;
1496 pa_alsa_profile_set *profile_set = NULL;
1497
1498 pa_assert(m);
1499 pa_assert(ma);
1500
1501 ss = m->core->default_sample_spec;
1502 map = m->core->default_channel_map;
1503 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1504 pa_log("Failed to parse sample specification");
1505 goto fail;
1506 }
1507
1508 requested_ss = ss;
1509 frame_size = pa_frame_size(&ss);
1510
1511 nfrags = m->core->default_n_fragments;
1512 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1513 if (frag_size <= 0)
1514 frag_size = (uint32_t) frame_size;
1515 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1516 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1517
1518 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1519 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1520 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1521 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1522 pa_log("Failed to parse buffer metrics");
1523 goto fail;
1524 }
1525
1526 buffer_size = nfrags * frag_size;
1527
1528 period_frames = frag_size/frame_size;
1529 buffer_frames = buffer_size/frame_size;
1530 tsched_frames = tsched_size/frame_size;
1531
1532 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1533 pa_log("Failed to parse mmap argument.");
1534 goto fail;
1535 }
1536
1537 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1538 pa_log("Failed to parse timer_scheduling argument.");
1539 goto fail;
1540 }
1541
1542 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1543 pa_log("Failed to parse ignore_dB argument.");
1544 goto fail;
1545 }
1546
1547 use_tsched = pa_alsa_may_tsched(use_tsched);
1548
1549 u = pa_xnew0(struct userdata, 1);
1550 u->core = m->core;
1551 u->module = m;
1552 u->use_mmap = use_mmap;
1553 u->use_tsched = use_tsched;
1554 u->rtpoll = pa_rtpoll_new();
1555 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1556
1557 u->smoother = pa_smoother_new(
1558 DEFAULT_TSCHED_WATERMARK_USEC*2,
1559 DEFAULT_TSCHED_WATERMARK_USEC*2,
1560 TRUE,
1561 TRUE,
1562 5,
1563 pa_rtclock_now(),
1564 FALSE);
1565 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1566
1567 dev_id = pa_modargs_get_value(
1568 ma, "device_id",
1569 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1570
1571 if (reserve_init(u, dev_id) < 0)
1572 goto fail;
1573
1574 if (reserve_monitor_init(u, dev_id) < 0)
1575 goto fail;
1576
1577 b = use_mmap;
1578 d = use_tsched;
1579
1580 if (mapping) {
1581
1582 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1583 pa_log("device_id= not set");
1584 goto fail;
1585 }
1586
1587 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1588 dev_id,
1589 &u->device_name,
1590 &ss, &map,
1591 SND_PCM_STREAM_CAPTURE,
1592 &period_frames, &buffer_frames, tsched_frames,
1593 &b, &d, mapping)))
1594 goto fail;
1595
1596 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1597
1598 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1599 goto fail;
1600
1601 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1602 dev_id,
1603 &u->device_name,
1604 &ss, &map,
1605 SND_PCM_STREAM_CAPTURE,
1606 &period_frames, &buffer_frames, tsched_frames,
1607 &b, &d, profile_set, &mapping)))
1608 goto fail;
1609
1610 } else {
1611
1612 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1613 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1614 &u->device_name,
1615 &ss, &map,
1616 SND_PCM_STREAM_CAPTURE,
1617 &period_frames, &buffer_frames, tsched_frames,
1618 &b, &d, FALSE)))
1619 goto fail;
1620 }
1621
1622 pa_assert(u->device_name);
1623 pa_log_info("Successfully opened device %s.", u->device_name);
1624
1625 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1626 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1627 goto fail;
1628 }
1629
1630 if (mapping)
1631 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1632
1633 if (use_mmap && !b) {
1634 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1635 u->use_mmap = use_mmap = FALSE;
1636 }
1637
1638 if (use_tsched && (!b || !d)) {
1639 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1640 u->use_tsched = use_tsched = FALSE;
1641 }
1642
1643 if (u->use_mmap)
1644 pa_log_info("Successfully enabled mmap() mode.");
1645
1646 if (u->use_tsched)
1647 pa_log_info("Successfully enabled timer-based scheduling mode.");
1648
1649 /* ALSA might tweak the sample spec, so recalculate the frame size */
1650 frame_size = pa_frame_size(&ss);
1651
1652 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1653
1654 pa_source_new_data_init(&data);
1655 data.driver = driver;
1656 data.module = m;
1657 data.card = card;
1658 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1659 pa_source_new_data_set_sample_spec(&data, &ss);
1660 pa_source_new_data_set_channel_map(&data, &map);
1661
1662 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1663 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1664 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1665 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1666 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1667
1668 if (mapping) {
1669 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1670 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1671 }
1672
1673 pa_alsa_init_description(data.proplist);
1674
1675 if (u->control_device)
1676 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1677
1678 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1679 pa_log("Invalid properties");
1680 pa_source_new_data_done(&data);
1681 goto fail;
1682 }
1683
1684 if (u->mixer_path_set)
1685 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1686
1687 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1688 pa_source_new_data_done(&data);
1689
1690 if (!u->source) {
1691 pa_log("Failed to create source object");
1692 goto fail;
1693 }
1694
1695 u->source->parent.process_msg = source_process_msg;
1696 u->source->update_requested_latency = source_update_requested_latency_cb;
1697 u->source->set_state = source_set_state_cb;
1698 u->source->set_port = source_set_port_cb;
1699 u->source->userdata = u;
1700
1701 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1702 pa_source_set_rtpoll(u->source, u->rtpoll);
1703
1704 u->frame_size = frame_size;
1705 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1706 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1707 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1708
1709 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1710 (double) u->hwbuf_size / (double) u->fragment_size,
1711 (long unsigned) u->fragment_size,
1712 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1713 (long unsigned) u->hwbuf_size,
1714 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1715
1716 if (u->use_tsched) {
1717 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1718
1719 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1720 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1721
1722 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1723 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1724
1725 fix_min_sleep_wakeup(u);
1726 fix_tsched_watermark(u);
1727
1728 pa_source_set_latency_range(u->source,
1729 0,
1730 pa_bytes_to_usec(u->hwbuf_size, &ss));
1731
1732 pa_log_info("Time scheduling watermark is %0.2fms",
1733 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1734 } else
1735 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1736
1737 reserve_update(u);
1738
1739 if (update_sw_params(u) < 0)
1740 goto fail;
1741
1742 if (setup_mixer(u, ignore_dB) < 0)
1743 goto fail;
1744
1745 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1746
1747 if (!(u->thread = pa_thread_new(thread_func, u))) {
1748 pa_log("Failed to create thread.");
1749 goto fail;
1750 }
1751 /* Get initial mixer settings */
1752 if (data.volume_is_set) {
1753 if (u->source->set_volume)
1754 u->source->set_volume(u->source);
1755 } else {
1756 if (u->source->get_volume)
1757 u->source->get_volume(u->source);
1758 }
1759
1760 if (data.muted_is_set) {
1761 if (u->source->set_mute)
1762 u->source->set_mute(u->source);
1763 } else {
1764 if (u->source->get_mute)
1765 u->source->get_mute(u->source);
1766 }
1767
1768 pa_source_put(u->source);
1769
1770 if (profile_set)
1771 pa_alsa_profile_set_free(profile_set);
1772
1773 return u->source;
1774
1775 fail:
1776
1777 if (u)
1778 userdata_free(u);
1779
1780 if (profile_set)
1781 pa_alsa_profile_set_free(profile_set);
1782
1783 return NULL;
1784 }
1785
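/* Tear everything down in reverse order of construction: stop the IO
 * thread, free the source, close the PCM and mixer, drop the reservation. */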
1786 static void userdata_free(struct userdata *u) {
1787 pa_assert(u);
1788
1789 if (u->source)
1790 pa_source_unlink(u->source);
1791
1792 if (u->thread) {
1793 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1794 pa_thread_free(u->thread);
1795 }
1796
1797 pa_thread_mq_done(&u->thread_mq);
1798
1799 if (u->source)
1800 pa_source_unref(u->source);
1801
1802 if (u->alsa_rtpoll_item)
1803 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1804
1805 if (u->rtpoll)
1806 pa_rtpoll_free(u->rtpoll);
1807
1808 if (u->pcm_handle) {
1809 snd_pcm_drop(u->pcm_handle);
1810 snd_pcm_close(u->pcm_handle);
1811 }
1812
1813 if (u->mixer_fdl)
1814 pa_alsa_fdlist_free(u->mixer_fdl);
1815
1816 if (u->mixer_path_set)
1817 pa_alsa_path_set_free(u->mixer_path_set);
1818 else if (u->mixer_path)
1819 pa_alsa_path_free(u->mixer_path);
1820
1821 if (u->mixer_handle)
1822 snd_mixer_close(u->mixer_handle);
1823
1824 if (u->smoother)
1825 pa_smoother_free(u->smoother);
1826
1827 reserve_done(u);
1828 monitor_done(u);
1829
1830 pa_xfree(u->device_name);
1831 pa_xfree(u->control_device);
1832 pa_xfree(u);
1833 }
1834
1835 void pa_alsa_source_free(pa_source *s) {
1836 struct userdata *u;
1837
1838 pa_source_assert_ref(s);
1839 pa_assert_se(u = s->userdata);
1840
1841 userdata_free(u);
1842 }