]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-source.c
alsa: resume smoother after unsuspend
[pulseaudio] / src / modules / alsa / alsa-source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62
63 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
64 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
65
66 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
67 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
68 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
69 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
70 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
71 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72
73 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
74 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
75
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
78
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
80
/* Private per-instance state of the ALSA capture source.
 * Allocated at module load time and released by userdata_free(). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread that does the actual capturing, plus its message queue
     * and realtime poll loop */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    /* ALSA device handle; NULL while the source is suspended (see
     * suspend()/unsuspend()) */
    snd_pcm_t *pcm_handle;

    /* Mixer state used for hardware volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    /* Last volume written to/read from the hardware mixer */
    pa_cvolume hardware_volume;

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,       /* wakeup threshold for timer-based scheduling */
        hwbuf_unused,           /* part of the hw buffer kept unused to honour latency requests */
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    /* Earliest time decrease_watermark() may act again; 0 = timer not armed */
    pa_usec_t watermark_dec_not_before;

    char *device_name;          /* ALSA PCM device string */
    char *control_device;       /* ALSA mixer control device string */

    pa_bool_t use_mmap:1, use_tsched:1;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Timing state: smoother interpolating sound-card time vs. system time,
     * total bytes read since last (un)suspend, and the adaptive interval
     * used by update_smoother() */
    pa_smoother *smoother;
    uint64_t read_count;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Session device-reservation integration */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
133
134 static void userdata_free(struct userdata *u);
135
136 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
137 pa_assert(r);
138 pa_assert(u);
139
140 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
141 return PA_HOOK_CANCEL;
142
143 return PA_HOOK_OK;
144 }
145
146 static void reserve_done(struct userdata *u) {
147 pa_assert(u);
148
149 if (u->reserve_slot) {
150 pa_hook_slot_free(u->reserve_slot);
151 u->reserve_slot = NULL;
152 }
153
154 if (u->reserve) {
155 pa_reserve_wrapper_unref(u->reserve);
156 u->reserve = NULL;
157 }
158 }
159
160 static void reserve_update(struct userdata *u) {
161 const char *description;
162 pa_assert(u);
163
164 if (!u->source || !u->reserve)
165 return;
166
167 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
168 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
169 }
170
171 static int reserve_init(struct userdata *u, const char *dname) {
172 char *rname;
173
174 pa_assert(u);
175 pa_assert(dname);
176
177 if (u->reserve)
178 return 0;
179
180 if (pa_in_system_mode())
181 return 0;
182
183 /* We are resuming, try to lock the device */
184 if (!(rname = pa_alsa_get_reserve_name(dname)))
185 return 0;
186
187 u->reserve = pa_reserve_wrapper_get(u->core, rname);
188 pa_xfree(rname);
189
190 if (!(u->reserve))
191 return -1;
192
193 reserve_update(u);
194
195 pa_assert(!u->reserve_slot);
196 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
197
198 return 0;
199 }
200
201 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
202 pa_bool_t b;
203
204 pa_assert(w);
205 pa_assert(u);
206
207 b = PA_PTR_TO_UINT(busy) && !u->reserve;
208
209 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
210 return PA_HOOK_OK;
211 }
212
213 static void monitor_done(struct userdata *u) {
214 pa_assert(u);
215
216 if (u->monitor_slot) {
217 pa_hook_slot_free(u->monitor_slot);
218 u->monitor_slot = NULL;
219 }
220
221 if (u->monitor) {
222 pa_reserve_monitor_wrapper_unref(u->monitor);
223 u->monitor = NULL;
224 }
225 }
226
227 static int reserve_monitor_init(struct userdata *u, const char *dname) {
228 char *rname;
229
230 pa_assert(u);
231 pa_assert(dname);
232
233 if (pa_in_system_mode())
234 return 0;
235
236 /* We are resuming, try to lock the device */
237 if (!(rname = pa_alsa_get_reserve_name(dname)))
238 return 0;
239
240 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
241 pa_xfree(rname);
242
243 if (!(u->monitor))
244 return -1;
245
246 pa_assert(!u->monitor_slot);
247 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
248
249 return 0;
250 }
251
252 static void fix_min_sleep_wakeup(struct userdata *u) {
253 size_t max_use, max_use_2;
254 pa_assert(u);
255 pa_assert(u->use_tsched);
256
257 max_use = u->hwbuf_size - u->hwbuf_unused;
258 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
259
260 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
261 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
262
263 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
264 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
265 }
266
267 static void fix_tsched_watermark(struct userdata *u) {
268 size_t max_use;
269 pa_assert(u);
270 pa_assert(u->use_tsched);
271
272 max_use = u->hwbuf_size - u->hwbuf_unused;
273
274 if (u->tsched_watermark > max_use - u->min_sleep)
275 u->tsched_watermark = max_use - u->min_sleep;
276
277 if (u->tsched_watermark < u->min_wakeup)
278 u->tsched_watermark = u->min_wakeup;
279 }
280
281 static void increase_watermark(struct userdata *u) {
282 size_t old_watermark;
283 pa_usec_t old_min_latency, new_min_latency;
284
285 pa_assert(u);
286 pa_assert(u->use_tsched);
287
288 /* First, just try to increase the watermark */
289 old_watermark = u->tsched_watermark;
290 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
291 fix_tsched_watermark(u);
292
293 if (old_watermark != u->tsched_watermark) {
294 pa_log_info("Increasing wakeup watermark to %0.2f ms",
295 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
296 return;
297 }
298
299 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
300 old_min_latency = u->source->thread_info.min_latency;
301 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
302 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
303
304 if (old_min_latency != new_min_latency) {
305 pa_log_info("Increasing minimal latency to %0.2f ms",
306 (double) new_min_latency / PA_USEC_PER_MSEC);
307
308 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
309 }
310
311 /* When we reach this we're officialy fucked! */
312 }
313
314 static void decrease_watermark(struct userdata *u) {
315 size_t old_watermark;
316 pa_usec_t now;
317
318 pa_assert(u);
319 pa_assert(u->use_tsched);
320
321 now = pa_rtclock_now();
322
323 if (u->watermark_dec_not_before <= 0)
324 goto restart;
325
326 if (u->watermark_dec_not_before > now)
327 return;
328
329 old_watermark = u->tsched_watermark;
330
331 if (u->tsched_watermark < u->watermark_dec_step)
332 u->tsched_watermark = u->tsched_watermark / 2;
333 else
334 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
335
336 fix_tsched_watermark(u);
337
338 if (old_watermark != u->tsched_watermark)
339 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
340 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
341
342 /* We don't change the latency range*/
343
344 restart:
345 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
346 }
347
348 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
349 pa_usec_t wm, usec;
350
351 pa_assert(sleep_usec);
352 pa_assert(process_usec);
353
354 pa_assert(u);
355 pa_assert(u->use_tsched);
356
357 usec = pa_source_get_requested_latency_within_thread(u->source);
358
359 if (usec == (pa_usec_t) -1)
360 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
361
362 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
363
364 if (wm > usec)
365 wm = usec/2;
366
367 *sleep_usec = usec - wm;
368 *process_usec = wm;
369
370 #ifdef DEBUG_TIMING
371 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
372 (unsigned long) (usec / PA_USEC_PER_MSEC),
373 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
374 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
375 #endif
376
377 return usec;
378 }
379
380 static int try_recover(struct userdata *u, const char *call, int err) {
381 pa_assert(u);
382 pa_assert(call);
383 pa_assert(err < 0);
384
385 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
386
387 pa_assert(err != -EAGAIN);
388
389 if (err == -EPIPE)
390 pa_log_debug("%s: Buffer overrun!", call);
391
392 if (err == -ESTRPIPE)
393 pa_log_debug("%s: System suspended!", call);
394
395 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
396 pa_log("%s: %s", call, pa_alsa_strerror(err));
397 return -1;
398 }
399
400 snd_pcm_start(u->pcm_handle);
401 return 0;
402 }
403
404 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
405 size_t left_to_record;
406 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
407 pa_bool_t overrun = FALSE;
408
409 /* We use <= instead of < for this check here because an overrun
410 * only happens after the last sample was processed, not already when
411 * it is removed from the buffer. This is particularly important
412 * when block transfer is used. */
413
414 if (n_bytes <= rec_space)
415 left_to_record = rec_space - n_bytes;
416 else {
417
418 /* We got a dropout. What a mess! */
419 left_to_record = 0;
420 overrun = TRUE;
421
422 #ifdef DEBUG_TIMING
423 PA_DEBUG_TRAP;
424 #endif
425
426 if (pa_log_ratelimit())
427 pa_log_info("Overrun!");
428 }
429
430 #ifdef DEBUG_TIMING
431 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
432 #endif
433
434 if (u->use_tsched) {
435 pa_bool_t reset_not_before = TRUE;
436
437 if (overrun || left_to_record < u->watermark_inc_threshold)
438 increase_watermark(u);
439 else if (left_to_record > u->watermark_dec_threshold) {
440 reset_not_before = FALSE;
441
442 /* We decrease the watermark only if have actually been
443 * woken up by a timeout. If something else woke us up
444 * it's too easy to fulfill the deadlines... */
445
446 if (on_timeout)
447 decrease_watermark(u);
448 }
449
450 if (reset_not_before)
451 u->watermark_dec_not_before = 0;
452 }
453
454 return left_to_record;
455 }
456
/* Read as much captured audio as is available using the mmap transfer API
 * and post it to the source. 'polled' is TRUE when we were woken by POLLIN
 * rather than a timer; 'on_timeout' is TRUE for timer-driven wakeups and
 * gates watermark decreases. With tsched enabled, *sleep_usec is set to
 * how long the caller may sleep before the next wakeup.
 * Returns 1 if data was posted, 0 if not, or a negative error code. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* Ask ALSA how many frames are ready; try to recover on error */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* With timer scheduling, skip the read entirely if there is still
         * plenty of room in the buffer -- unless poll() woke us up */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing available is a driver bug; complain once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Cap the number of outer iterations so we never monopolize the thread */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: drain the n_bytes reported above in mmap chunks */
        for (;;) {
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            pa_memchunk chunk;
            void *p;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* -EAGAIN after we already read something just means "done" */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed hardware memory in a fixed (read-only)
             * memblock and hand it to the source without copying */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Tell the caller how long it may sleep before data processing is due */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
617
/* Read available captured audio using the classic read (snd_pcm_readi)
 * transfer API and post it to the source. Mirrors mmap_read(): 'polled'
 * marks POLLIN wakeups, 'on_timeout' marks timer wakeups, and with tsched
 * enabled *sleep_usec receives the time the caller may sleep.
 * Returns 1 if data was posted, 0 if not, or a negative error code. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* Ask ALSA how many frames are ready; try to recover on error */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* With timer scheduling, skip the read if there is still plenty of
         * room in the buffer -- unless poll() woke us up */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing available is a driver bug; complain once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Cap the number of outer iterations so we never monopolize the thread */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        /* Inner loop: drain the n_bytes reported above, one memblock at a time */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* -EAGAIN after we already read something just means "done" */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Tell the caller how long it may sleep before data processing is due */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
746
/* Feed a new (system time, sound-card time) pair into the time smoother so
 * that source_get_latency() can interpolate between updates. Called from
 * the IO thread; rate-limited by the adaptive smoother_interval. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver-provided timestamp for "now" on the system clock */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Sound-card time: bytes read so far plus what still sits in the buffer */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
792
793 static pa_usec_t source_get_latency(struct userdata *u) {
794 int64_t delay;
795 pa_usec_t now1, now2;
796
797 pa_assert(u);
798
799 now1 = pa_rtclock_now();
800 now2 = pa_smoother_get(u->smoother, now1);
801
802 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
803
804 return delay >= 0 ? (pa_usec_t) delay : 0;
805 }
806
807 static int build_pollfd(struct userdata *u) {
808 pa_assert(u);
809 pa_assert(u->pcm_handle);
810
811 if (u->alsa_rtpoll_item)
812 pa_rtpoll_item_free(u->alsa_rtpoll_item);
813
814 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
815 return -1;
816
817 return 0;
818 }
819
820 static int suspend(struct userdata *u) {
821 pa_assert(u);
822 pa_assert(u->pcm_handle);
823
824 pa_smoother_pause(u->smoother, pa_rtclock_now());
825
826 /* Let's suspend */
827 snd_pcm_close(u->pcm_handle);
828 u->pcm_handle = NULL;
829
830 if (u->alsa_rtpoll_item) {
831 pa_rtpoll_item_free(u->alsa_rtpoll_item);
832 u->alsa_rtpoll_item = NULL;
833 }
834
835 pa_log_info("Device suspended...");
836
837 return 0;
838 }
839
840 static int update_sw_params(struct userdata *u) {
841 snd_pcm_uframes_t avail_min;
842 int err;
843
844 pa_assert(u);
845
846 /* Use the full buffer if noone asked us for anything specific */
847 u->hwbuf_unused = 0;
848
849 if (u->use_tsched) {
850 pa_usec_t latency;
851
852 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
853 size_t b;
854
855 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
856
857 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
858
859 /* We need at least one sample in our buffer */
860
861 if (PA_UNLIKELY(b < u->frame_size))
862 b = u->frame_size;
863
864 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
865 }
866
867 fix_min_sleep_wakeup(u);
868 fix_tsched_watermark(u);
869 }
870
871 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
872
873 avail_min = 1;
874
875 if (u->use_tsched) {
876 pa_usec_t sleep_usec, process_usec;
877
878 hw_sleep_time(u, &sleep_usec, &process_usec);
879 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
880 }
881
882 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
883
884 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
885 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
886 return err;
887 }
888
889 return 0;
890 }
891
/* Reopen and reconfigure the PCM device after a suspend, insisting on the
 * exact hw parameters we had before. On success the capture stream is
 * restarted and the timing smoother is reset (it was paused in suspend()).
 * Returns 0 on success, -PA_ERR_IO on any failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request exactly the configuration we had before the suspend */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* Verify the device actually gave us back the same configuration;
     * anything else would break the rest of our bookkeeping */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    snd_pcm_start(u->pcm_handle);

    /* Restart timing from scratch: the smoother was paused in suspend() */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), FALSE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
968
/* Message handler for the source, run in the IO thread. Handles latency
 * queries and state transitions (suspend/resume/start); everything else
 * is delegated to the generic pa_source_process_msg(). */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report zero latency */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;
                    /* We may only be asked to suspend from an opened state */
                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up polling and start capture */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;

                        snd_pcm_start(u->pcm_handle);
                    }

                    /* Coming back from suspend: reopen and reconfigure the device */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ; /* nothing to do for these states */
            }

            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
1029
1030 /* Called from main context */
1031 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1032 pa_source_state_t old_state;
1033 struct userdata *u;
1034
1035 pa_source_assert_ref(s);
1036 pa_assert_se(u = s->userdata);
1037
1038 old_state = pa_source_get_state(u->source);
1039
1040 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1041 reserve_done(u);
1042 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1043 if (reserve_init(u, u->device_name) < 0)
1044 return -PA_ERR_BUSY;
1045
1046 return 0;
1047 }
1048
1049 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1050 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1051
1052 pa_assert(u);
1053 pa_assert(u->mixer_handle);
1054
1055 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1056 return 0;
1057
1058 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1059 return 0;
1060
1061 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1062 pa_source_get_volume(u->source, TRUE);
1063 pa_source_get_mute(u->source, TRUE);
1064 }
1065
1066 return 0;
1067 }
1068
1069 static void source_get_volume_cb(pa_source *s) {
1070 struct userdata *u = s->userdata;
1071 pa_cvolume r;
1072 char t[PA_CVOLUME_SNPRINT_MAX];
1073
1074 pa_assert(u);
1075 pa_assert(u->mixer_path);
1076 pa_assert(u->mixer_handle);
1077
1078 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1079 return;
1080
1081 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1082 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1083
1084 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1085
1086 if (pa_cvolume_equal(&u->hardware_volume, &r))
1087 return;
1088
1089 s->volume = u->hardware_volume = r;
1090
1091 /* Hmm, so the hardware volume changed, let's reset our software volume */
1092 if (u->mixer_path->has_dB)
1093 pa_source_set_soft_volume(s, NULL);
1094 }
1095
/* Write the requested volume to the hardware mixer and, where the path has
 * dB information, compute the residual software volume needed to match the
 * request exactly (skipped when the residual is within VOLUME_ACCURACY). */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->volume, s->base_volume);

    /* pa_alsa_path_set_volume() rounds to what the hardware can do and
     * writes back the actually applied volume into r */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->volume = r;
    }
}
1147
1148 static void source_get_mute_cb(pa_source *s) {
1149 struct userdata *u = s->userdata;
1150 pa_bool_t b;
1151
1152 pa_assert(u);
1153 pa_assert(u->mixer_path);
1154 pa_assert(u->mixer_handle);
1155
1156 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1157 return;
1158
1159 s->muted = b;
1160 }
1161
1162 static void source_set_mute_cb(pa_source *s) {
1163 struct userdata *u = s->userdata;
1164
1165 pa_assert(u);
1166 pa_assert(u->mixer_path);
1167 pa_assert(u->mixer_handle);
1168
1169 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1170 }
1171
1172 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1173 struct userdata *u = s->userdata;
1174 pa_alsa_port_data *data;
1175
1176 pa_assert(u);
1177 pa_assert(p);
1178 pa_assert(u->mixer_handle);
1179
1180 data = PA_DEVICE_PORT_DATA(p);
1181
1182 pa_assert_se(u->mixer_path = data->path);
1183 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1184
1185 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1186 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1187 s->n_volume_steps = PA_VOLUME_NORM+1;
1188
1189 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1190 } else {
1191 s->base_volume = PA_VOLUME_NORM;
1192 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1193 }
1194
1195 if (data->setting)
1196 pa_alsa_setting_select(data->setting, u->mixer_handle);
1197
1198 if (s->set_mute)
1199 s->set_mute(s);
1200 if (s->set_volume)
1201 s->set_volume(s);
1202
1203 return 0;
1204 }
1205
1206 static void source_update_requested_latency_cb(pa_source *s) {
1207 struct userdata *u = s->userdata;
1208 pa_assert(u);
1209 pa_assert(u->use_tsched);
1210
1211 if (!u->pcm_handle)
1212 return;
1213
1214 update_sw_params(u);
1215 }
1216
/* The capture I/O thread: reads audio data from ALSA and posts it to the
 * source, alternating between doing work while the source is opened and
 * sleeping in the rtpoll loop. Runs until it receives PA_MESSAGE_SHUTDOWN
 * (normal exit) or an unrecoverable error occurs (module unload). */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    /* Elevate to realtime priority if the daemon was configured for it */
    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    /* Make the thread message queue usable from this thread */
    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* revents tells the read routine whether the fd actually
             * woke us (POLLIN) or we woke for some other reason */
            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            /* Negative means an unrecoverable ALSA error */
            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            /* Only feed the smoother when we actually moved data,
             * otherwise the timing estimate would be polluted */
            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }
        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* 0 means the rtpoll was asked to quit: regular shutdown */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            /* Let ALSA demangle the raw poll revents into PCM events */
            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLIN (POLLERR, POLLNVAL, ...) means the
             * stream needs recovery; restart capture afterwards */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                snd_pcm_start(u->pcm_handle);
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1319
1320 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1321 const char *n;
1322 char *t;
1323
1324 pa_assert(data);
1325 pa_assert(ma);
1326 pa_assert(device_name);
1327
1328 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1329 pa_source_new_data_set_name(data, n);
1330 data->namereg_fail = TRUE;
1331 return;
1332 }
1333
1334 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1335 data->namereg_fail = TRUE;
1336 else {
1337 n = device_id ? device_id : device_name;
1338 data->namereg_fail = FALSE;
1339 }
1340
1341 if (mapping)
1342 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1343 else
1344 t = pa_sprintf_malloc("alsa_input.%s", n);
1345
1346 pa_source_new_data_set_name(data, t);
1347 pa_xfree(t);
1348 }
1349
/* Open the mixer device belonging to the PCM and probe either a single
 * synthesized path (when an explicit control element= was given) or the
 * full path set of the mapping. On any probe failure everything acquired
 * here is torn down again, leaving u->mixer_* all NULL. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Without a mapping or an explicit element there is nothing to probe */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* Explicit control element requested: build a one-element path */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Otherwise probe all candidate paths defined by the mapping */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:

    /* Exactly one of path_set/path can have been allocated above;
     * free whichever it was, then close the mixer handle */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1398
/* Activate a mixer path for the source and wire up the hardware
 * volume/mute callbacks and capability flags accordingly. Also starts
 * mainloop-based monitoring of mixer events so external changes get
 * picked up. Returns 0 on success (including "no mixer available"),
 * negative only if fd monitoring could not be initialized. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer found by find_mixer(): software volume/mute only */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No ports: fall back to the first probed path, if any */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Rebase so that the path's maximum dB maps to 0 dB */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->source->get_volume = source_get_volume_cb;
        u->source->set_volume = source_set_volume_cb;

        /* Advertise dB-scale volume only when the path supports it */
        u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->source->get_mute = source_get_mute_cb;
        u->source->set_mute = source_set_mute_cb;
        u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch mixer fds on the main loop so we notice external changes */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1486
/* Create and fully initialize an ALSA capture source from module
 * arguments: parse buffer metrics, open the PCM device (via mapping,
 * device id, or device string), probe the mixer, create the pa_source
 * object, configure latency/watermarks, and start the I/O thread.
 * Returns the new source, or NULL on failure (all partially acquired
 * resources are released via userdata_free()). */
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the daemon defaults; module arguments may override */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification");
        goto fail;
    }

    /* Keep what was requested; ALSA may tweak ss during open */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse timer_scheduling argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* tsched may be globally disallowed (e.g. no high-res timers) */
    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    /* Smoother that maps the sound card clock to the system clock */
    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_WATERMARK_USEC*2,
            DEFAULT_TSCHED_WATERMARK_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            FALSE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Acquire the device reservation before touching the hardware */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d report back whether mmap/tsched could actually be enabled */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        /* No mapping given: auto-detect one from the generic profile set */
        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        /* Plain device= string (or the default device) */
        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Fill in the pa_source_new() construction data */
    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);
    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    u->source->set_port = source_set_port_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        /* The watermark was computed against the requested spec; rescale
         * it to the spec ALSA actually gave us */
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

    /* Make the source visible to clients */
    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1788
/* Tear down everything pa_alsa_source_new() built, in a safe order:
 * unlink the source first (no new work arrives), then stop the I/O
 * thread, then release the remaining resources. Tolerates partially
 * initialized state, so it doubles as the constructor's failure path. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach from clients before shutting down the thread */
    if (u->source)
        pa_source_unlink(u->source);

    /* Synchronously terminate the I/O thread */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->source)
        pa_source_unref(u->source);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop capture before closing the device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Path set and single path are mutually exclusive owners */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    /* Release the device reservation last */
    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1837
1838 void pa_alsa_source_free(pa_source *s) {
1839 struct userdata *u;
1840
1841 pa_source_assert_ref(s);
1842 pa_assert_se(u = s->userdata);
1843
1844 userdata_free(u);
1845 }