1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62
63 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
64 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
65
66 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
67 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
68 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
69 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
70 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
71 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72
73 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
74 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
75
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
78
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
80
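/* Per-source state for the ALSA capture backend: the PCM and mixer handles,
 * the IO-thread plumbing (thread, message queue, rtpoll), the buffer and
 * watermark geometry in bytes, and the timing-smoother bookkeeping. */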
81 struct userdata {
82 pa_core *core;
83 pa_module *module;
84 pa_source *source;
85
86 pa_thread *thread;
87 pa_thread_mq thread_mq;
88 pa_rtpoll *rtpoll;
89
90 snd_pcm_t *pcm_handle;
91
92 pa_alsa_fdlist *mixer_fdl;
93 snd_mixer_t *mixer_handle;
94 pa_alsa_path_set *mixer_path_set;
95 pa_alsa_path *mixer_path;
96
97 pa_cvolume hardware_volume;
98
99 size_t
100 frame_size,
101 fragment_size,
102 hwbuf_size,
103 tsched_watermark,
104 hwbuf_unused,
105 min_sleep,
106 min_wakeup,
107 watermark_inc_step,
108 watermark_dec_step,
109 watermark_inc_threshold,
110 watermark_dec_threshold;
111
112 pa_usec_t watermark_dec_not_before;
113
114 char *device_name;
115 char *control_device;
116
117 pa_bool_t use_mmap:1, use_tsched:1;
118
119 pa_bool_t first;
120
121 pa_rtpoll_item *alsa_rtpoll_item;
122
123 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
124
125 pa_smoother *smoother;
126 uint64_t read_count;
127 pa_usec_t smoother_interval;
128 pa_usec_t last_smoother_update;
129
130 pa_reserve_wrapper *reserve;
131 pa_hook_slot *reserve_slot;
132 pa_reserve_monitor_wrapper *monitor;
133 pa_hook_slot *monitor_slot;
134 };
135
136 static void userdata_free(struct userdata *u);
137
138 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
139 pa_assert(r);
140 pa_assert(u);
141
142 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
143 return PA_HOOK_CANCEL;
144
145 return PA_HOOK_OK;
146 }
147
148 static void reserve_done(struct userdata *u) {
149 pa_assert(u);
150
151 if (u->reserve_slot) {
152 pa_hook_slot_free(u->reserve_slot);
153 u->reserve_slot = NULL;
154 }
155
156 if (u->reserve) {
157 pa_reserve_wrapper_unref(u->reserve);
158 u->reserve = NULL;
159 }
160 }
161
162 static void reserve_update(struct userdata *u) {
163 const char *description;
164 pa_assert(u);
165
166 if (!u->source || !u->reserve)
167 return;
168
169 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
170 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
171 }
172
173 static int reserve_init(struct userdata *u, const char *dname) {
174 char *rname;
175
176 pa_assert(u);
177 pa_assert(dname);
178
179 if (u->reserve)
180 return 0;
181
182 if (pa_in_system_mode())
183 return 0;
184
185 /* We are resuming, try to lock the device */
186 if (!(rname = pa_alsa_get_reserve_name(dname)))
187 return 0;
188
189 u->reserve = pa_reserve_wrapper_get(u->core, rname);
190 pa_xfree(rname);
191
192 if (!(u->reserve))
193 return -1;
194
195 reserve_update(u);
196
197 pa_assert(!u->reserve_slot);
198 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
199
200 return 0;
201 }
202
203 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
204 pa_bool_t b;
205
206 pa_assert(w);
207 pa_assert(u);
208
209 b = PA_PTR_TO_UINT(busy) && !u->reserve;
210
211 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
212 return PA_HOOK_OK;
213 }
214
215 static void monitor_done(struct userdata *u) {
216 pa_assert(u);
217
218 if (u->monitor_slot) {
219 pa_hook_slot_free(u->monitor_slot);
220 u->monitor_slot = NULL;
221 }
222
223 if (u->monitor) {
224 pa_reserve_monitor_wrapper_unref(u->monitor);
225 u->monitor = NULL;
226 }
227 }
228
229 static int reserve_monitor_init(struct userdata *u, const char *dname) {
230 char *rname;
231
232 pa_assert(u);
233 pa_assert(dname);
234
235 if (pa_in_system_mode())
236 return 0;
237
238 /* We are resuming, try to lock the device */
239 if (!(rname = pa_alsa_get_reserve_name(dname)))
240 return 0;
241
242 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
243 pa_xfree(rname);
244
245 if (!(u->monitor))
246 return -1;
247
248 pa_assert(!u->monitor_slot);
249 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
250
251 return 0;
252 }
253
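/* Derive the minimum sleep and wakeup thresholds in bytes from their usec
 * defaults, clamped to [one frame, half of the usable hardware buffer]. */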
254 static void fix_min_sleep_wakeup(struct userdata *u) {
255 size_t max_use, max_use_2;
256 pa_assert(u);
257 pa_assert(u->use_tsched);
258
259 max_use = u->hwbuf_size - u->hwbuf_unused;
260 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
261
262 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
263 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
264
265 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
266 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
267 }
268
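/* Keep the wakeup watermark inside [min_wakeup, usable buffer - min_sleep]. */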
269 static void fix_tsched_watermark(struct userdata *u) {
270 size_t max_use;
271 pa_assert(u);
272 pa_assert(u->use_tsched);
273
274 max_use = u->hwbuf_size - u->hwbuf_unused;
275
276 if (u->tsched_watermark > max_use - u->min_sleep)
277 u->tsched_watermark = max_use - u->min_sleep;
278
279 if (u->tsched_watermark < u->min_wakeup)
280 u->tsched_watermark = u->min_wakeup;
281 }
282
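/* Grow the wakeup watermark after an overrun or near-overrun: either double
 * it or add watermark_inc_step, whichever is smaller, then clamp it with
 * fix_tsched_watermark(). If the watermark cannot grow any further, raise
 * the source's minimum latency instead. */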
283 static void increase_watermark(struct userdata *u) {
284 size_t old_watermark;
285 pa_usec_t old_min_latency, new_min_latency;
286
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 /* First, just try to increase the watermark */
291 old_watermark = u->tsched_watermark;
292 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
293 fix_tsched_watermark(u);
294
295 if (old_watermark != u->tsched_watermark) {
296 pa_log_info("Increasing wakeup watermark to %0.2f ms",
297 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
298 return;
299 }
300
301 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
302 old_min_latency = u->source->thread_info.min_latency;
303 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
304 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
305
306 if (old_min_latency != new_min_latency) {
307 pa_log_info("Increasing minimal latency to %0.2f ms",
308 (double) new_min_latency / PA_USEC_PER_MSEC);
309
310 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
311 }
312
313     /* When we reach this we're officially fucked! */
314 }
315
316 static void decrease_watermark(struct userdata *u) {
317 size_t old_watermark;
318 pa_usec_t now;
319
320 pa_assert(u);
321 pa_assert(u->use_tsched);
322
323 now = pa_rtclock_now();
324
325 if (u->watermark_dec_not_before <= 0)
326 goto restart;
327
328 if (u->watermark_dec_not_before > now)
329 return;
330
331 old_watermark = u->tsched_watermark;
332
333 if (u->tsched_watermark < u->watermark_dec_step)
334 u->tsched_watermark = u->tsched_watermark / 2;
335 else
336 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
337
338 fix_tsched_watermark(u);
339
340 if (old_watermark != u->tsched_watermark)
341 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
342 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
343
344     /* We don't change the latency range */
345
346 restart:
347 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
348 }
349
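/* Split the requested latency (or, if none was requested, the full buffer
 * time) into a sleep budget and a processing budget: the watermark is the
 * processing budget, the remainder is how long we may sleep. */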
350 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
351 pa_usec_t wm, usec;
352
353 pa_assert(sleep_usec);
354 pa_assert(process_usec);
355
356 pa_assert(u);
357 pa_assert(u->use_tsched);
358
359 usec = pa_source_get_requested_latency_within_thread(u->source);
360
361 if (usec == (pa_usec_t) -1)
362 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
363
364 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
365
366 if (wm > usec)
367 wm = usec/2;
368
369 *sleep_usec = usec - wm;
370 *process_usec = wm;
371
372 #ifdef DEBUG_TIMING
373 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
374 (unsigned long) (usec / PA_USEC_PER_MSEC),
375 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
376 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
377 #endif
378
379 return usec;
380 }
381
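/* Log and classify an ALSA error (-EPIPE = overrun, -ESTRPIPE = system
 * suspend), then let snd_pcm_recover() try to revive the PCM; on success the
 * capture stream is restarted from scratch via u->first. */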
382 static int try_recover(struct userdata *u, const char *call, int err) {
383 pa_assert(u);
384 pa_assert(call);
385 pa_assert(err < 0);
386
387 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
388
389 pa_assert(err != -EAGAIN);
390
391 if (err == -EPIPE)
392 pa_log_debug("%s: Buffer overrun!", call);
393
394 if (err == -ESTRPIPE)
395 pa_log_debug("%s: System suspended!", call);
396
397 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
398 pa_log("%s: %s", call, pa_alsa_strerror(err));
399 return -1;
400 }
401
402 u->first = TRUE;
403 return 0;
404 }
405
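/* Given the number of bytes ALSA reports as available, compute how much room
 * is left before the hardware buffer overruns, flag overruns, and adapt the
 * timer-scheduling watermark: widen it on (near-)overruns, narrow it only
 * when a timer wakeup found comfortable headroom. */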
406 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
407 size_t left_to_record;
408 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
409 pa_bool_t overrun = FALSE;
410
411 /* We use <= instead of < for this check here because an overrun
412 * only happens after the last sample was processed, not already when
413 * it is removed from the buffer. This is particularly important
414 * when block transfer is used. */
415
416 if (n_bytes <= rec_space)
417 left_to_record = rec_space - n_bytes;
418 else {
419
420 /* We got a dropout. What a mess! */
421 left_to_record = 0;
422 overrun = TRUE;
423
424 #ifdef DEBUG_TIMING
425 PA_DEBUG_TRAP;
426 #endif
427
428 if (pa_log_ratelimit())
429 pa_log_info("Overrun!");
430 }
431
432 #ifdef DEBUG_TIMING
433 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
434 #endif
435
436 if (u->use_tsched) {
437 pa_bool_t reset_not_before = TRUE;
438
439 if (overrun || left_to_record < u->watermark_inc_threshold)
440 increase_watermark(u);
441 else if (left_to_record > u->watermark_dec_threshold) {
442 reset_not_before = FALSE;
443
444             /* We decrease the watermark only if we have actually been
445 * woken up by a timeout. If something else woke us up
446 * it's too easy to fulfill the deadlines... */
447
448 if (on_timeout)
449 decrease_watermark(u);
450 }
451
452 if (reset_not_before)
453 u->watermark_dec_not_before = 0;
454 }
455
456 return left_to_record;
457 }
458
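/* Capture path for mmap mode: query the available frames, then loop over
 * snd_pcm_mmap_begin()/snd_pcm_mmap_commit(), posting the mapped areas
 * downstream as fixed (zero-copy) memblocks. Returns 1 if any data was read,
 * 0 if not, and a negative value on unrecoverable errors. */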
459 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
460 pa_bool_t work_done = FALSE;
461 pa_usec_t max_sleep_usec = 0, process_usec = 0;
462 size_t left_to_record;
463 unsigned j = 0;
464
465 pa_assert(u);
466 pa_source_assert_ref(u->source);
467
468 if (u->use_tsched)
469 hw_sleep_time(u, &max_sleep_usec, &process_usec);
470
471 for (;;) {
472 snd_pcm_sframes_t n;
473 size_t n_bytes;
474 int r;
475 pa_bool_t after_avail = TRUE;
476
477 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
478
479 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
480 continue;
481
482 return r;
483 }
484
485 n_bytes = (size_t) n * u->frame_size;
486
487 #ifdef DEBUG_TIMING
488 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
489 #endif
490
491 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
492 on_timeout = FALSE;
493
494 if (u->use_tsched)
495 if (!polled &&
496 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
497 #ifdef DEBUG_TIMING
498 pa_log_debug("Not reading, because too early.");
499 #endif
500 break;
501 }
502
503 if (PA_UNLIKELY(n_bytes <= 0)) {
504
505 if (polled)
506 PA_ONCE_BEGIN {
507 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
508 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
509 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
510 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
511 pa_strnull(dn));
512 pa_xfree(dn);
513 } PA_ONCE_END;
514
515 #ifdef DEBUG_TIMING
516 pa_log_debug("Not reading, because not necessary.");
517 #endif
518 break;
519 }
520
521 if (++j > 10) {
522 #ifdef DEBUG_TIMING
523 pa_log_debug("Not filling up, because already too many iterations.");
524 #endif
525
526 break;
527 }
528
529 polled = FALSE;
530
531 #ifdef DEBUG_TIMING
532 pa_log_debug("Reading");
533 #endif
534
535 for (;;) {
536 int err;
537 const snd_pcm_channel_area_t *areas;
538 snd_pcm_uframes_t offset, frames;
539 pa_memchunk chunk;
540 void *p;
541 snd_pcm_sframes_t sframes;
542
543 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
544
545 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
546
547 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
548
549 if (!after_avail && err == -EAGAIN)
550 break;
551
552 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
553 continue;
554
555 return r;
556 }
557
558 /* Make sure that if these memblocks need to be copied they will fit into one slot */
559 if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
560 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
561
562 if (!after_avail && frames == 0)
563 break;
564
565 pa_assert(frames > 0);
566 after_avail = FALSE;
567
568 /* Check these are multiples of 8 bit */
569 pa_assert((areas[0].first & 7) == 0);
570             pa_assert((areas[0].step & 7) == 0);
571
572 /* We assume a single interleaved memory buffer */
573 pa_assert((areas[0].first >> 3) == 0);
574 pa_assert((areas[0].step >> 3) == u->frame_size);
575
576 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
577
578 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
579 chunk.length = pa_memblock_get_length(chunk.memblock);
580 chunk.index = 0;
581
582 pa_source_post(u->source, &chunk);
583 pa_memblock_unref_fixed(chunk.memblock);
584
585 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
586
587 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
588 continue;
589
590 return r;
591 }
592
593 work_done = TRUE;
594
595 u->read_count += frames * u->frame_size;
596
597 #ifdef DEBUG_TIMING
598 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
599 #endif
600
601 if ((size_t) frames * u->frame_size >= n_bytes)
602 break;
603
604 n_bytes -= (size_t) frames * u->frame_size;
605 }
606 }
607
608 if (u->use_tsched) {
609 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
610
611 if (*sleep_usec > process_usec)
612 *sleep_usec -= process_usec;
613 else
614 *sleep_usec = 0;
615 }
616
617 return work_done ? 1 : 0;
618 }
619
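/* Capture path for plain read mode: same structure as mmap_read(), but the
 * data is copied out of the device with snd_pcm_readi() into freshly
 * allocated memblocks. */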
620 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
621 int work_done = FALSE;
622 pa_usec_t max_sleep_usec = 0, process_usec = 0;
623 size_t left_to_record;
624 unsigned j = 0;
625
626 pa_assert(u);
627 pa_source_assert_ref(u->source);
628
629 if (u->use_tsched)
630 hw_sleep_time(u, &max_sleep_usec, &process_usec);
631
632 for (;;) {
633 snd_pcm_sframes_t n;
634 size_t n_bytes;
635 int r;
636 pa_bool_t after_avail = TRUE;
637
638 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
639
640 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
641 continue;
642
643 return r;
644 }
645
646 n_bytes = (size_t) n * u->frame_size;
647 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
648 on_timeout = FALSE;
649
650 if (u->use_tsched)
651 if (!polled &&
652 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
653 break;
654
655 if (PA_UNLIKELY(n_bytes <= 0)) {
656
657 if (polled)
658 PA_ONCE_BEGIN {
659 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
660 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
661 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
662 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
663 pa_strnull(dn));
664 pa_xfree(dn);
665 } PA_ONCE_END;
666
667 break;
668 }
669
670 if (++j > 10) {
671 #ifdef DEBUG_TIMING
672 pa_log_debug("Not filling up, because already too many iterations.");
673 #endif
674
675 break;
676 }
677
678 polled = FALSE;
679
680 for (;;) {
681 void *p;
682 snd_pcm_sframes_t frames;
683 pa_memchunk chunk;
684
685 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
686
687 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
688
689 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
690 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
691
692 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
693
694 p = pa_memblock_acquire(chunk.memblock);
695 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
696 pa_memblock_release(chunk.memblock);
697
698 if (PA_UNLIKELY(frames < 0)) {
699 pa_memblock_unref(chunk.memblock);
700
701 if (!after_avail && (int) frames == -EAGAIN)
702 break;
703
704 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
705 continue;
706
707 return r;
708 }
709
710 if (!after_avail && frames == 0) {
711 pa_memblock_unref(chunk.memblock);
712 break;
713 }
714
715 pa_assert(frames > 0);
716 after_avail = FALSE;
717
718 chunk.index = 0;
719 chunk.length = (size_t) frames * u->frame_size;
720
721 pa_source_post(u->source, &chunk);
722 pa_memblock_unref(chunk.memblock);
723
724 work_done = TRUE;
725
726 u->read_count += frames * u->frame_size;
727
728 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
729
730 if ((size_t) frames * u->frame_size >= n_bytes)
731 break;
732
733 n_bytes -= (size_t) frames * u->frame_size;
734 }
735 }
736
737 if (u->use_tsched) {
738 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
739
740 if (*sleep_usec > process_usec)
741 *sleep_usec -= process_usec;
742 else
743 *sleep_usec = 0;
744 }
745
746 return work_done ? 1 : 0;
747 }
748
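/* Feed a new (system time, sound card time) sample into the smoother. The
 * sound card position is reconstructed as read_count plus the current
 * capture delay; updates are rate-limited by smoother_interval, which backs
 * off exponentially up to SMOOTHER_MAX_INTERVAL. */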
749 static void update_smoother(struct userdata *u) {
750 snd_pcm_sframes_t delay = 0;
751 uint64_t position;
752 int err;
753 pa_usec_t now1 = 0, now2;
754 snd_pcm_status_t *status;
755
756 snd_pcm_status_alloca(&status);
757
758 pa_assert(u);
759 pa_assert(u->pcm_handle);
760
761 /* Let's update the time smoother */
762
763 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
764 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
765 return;
766 }
767
768 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
769 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
770 else {
771 snd_htimestamp_t htstamp = { 0, 0 };
772 snd_pcm_status_get_htstamp(status, &htstamp);
773 now1 = pa_timespec_load(&htstamp);
774 }
775
776 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
777 if (now1 <= 0)
778 now1 = pa_rtclock_now();
779
780 /* check if the time since the last update is bigger than the interval */
781 if (u->last_smoother_update > 0)
782 if (u->last_smoother_update + u->smoother_interval > now1)
783 return;
784
785 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
786 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
787
788 pa_smoother_put(u->smoother, now1, now2);
789
790 u->last_smoother_update = now1;
791 /* exponentially increase the update interval up to the MAX limit */
792 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
793 }
794
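/* Estimate the capture latency as the smoothed sound card time minus the
 * amount of data we have already posted downstream. */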
795 static pa_usec_t source_get_latency(struct userdata *u) {
796 int64_t delay;
797 pa_usec_t now1, now2;
798
799 pa_assert(u);
800
801 now1 = pa_rtclock_now();
802 now2 = pa_smoother_get(u->smoother, now1);
803
804 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
805
806 return delay >= 0 ? (pa_usec_t) delay : 0;
807 }
808
809 static int build_pollfd(struct userdata *u) {
810 pa_assert(u);
811 pa_assert(u->pcm_handle);
812
813 if (u->alsa_rtpoll_item)
814 pa_rtpoll_item_free(u->alsa_rtpoll_item);
815
816 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
817 return -1;
818
819 return 0;
820 }
821
822 static int suspend(struct userdata *u) {
823 pa_assert(u);
824 pa_assert(u->pcm_handle);
825
826 pa_smoother_pause(u->smoother, pa_rtclock_now());
827
828 /* Let's suspend */
829 snd_pcm_close(u->pcm_handle);
830 u->pcm_handle = NULL;
831
832 if (u->alsa_rtpoll_item) {
833 pa_rtpoll_item_free(u->alsa_rtpoll_item);
834 u->alsa_rtpoll_item = NULL;
835 }
836
837 pa_log_info("Device suspended...");
838
839 return 0;
840 }
841
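/* Recompute hwbuf_unused and avail_min from the currently requested latency
 * and push the resulting software parameters to ALSA. In timer-scheduling
 * mode avail_min is sized so that poll() only wakes us once roughly a sleep
 * period worth of data has accumulated. */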
842 static int update_sw_params(struct userdata *u) {
843 snd_pcm_uframes_t avail_min;
844 int err;
845
846 pa_assert(u);
847
848     /* Use the full buffer if no one asked us for anything specific */
849 u->hwbuf_unused = 0;
850
851 if (u->use_tsched) {
852 pa_usec_t latency;
853
854 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
855 size_t b;
856
857 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
858
859 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
860
861 /* We need at least one sample in our buffer */
862
863 if (PA_UNLIKELY(b < u->frame_size))
864 b = u->frame_size;
865
866 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
867 }
868
869 fix_min_sleep_wakeup(u);
870 fix_tsched_watermark(u);
871 }
872
873 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
874
875 avail_min = 1;
876
877 if (u->use_tsched) {
878 pa_usec_t sleep_usec, process_usec;
879
880 hw_sleep_time(u, &sleep_usec, &process_usec);
881 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
882 }
883
884 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
885
886 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
887 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
888 return err;
889 }
890
891 return 0;
892 }
893
894 static int unsuspend(struct userdata *u) {
895 pa_sample_spec ss;
896 int err;
897 pa_bool_t b, d;
898 snd_pcm_uframes_t period_size, buffer_size;
899
900 pa_assert(u);
901 pa_assert(!u->pcm_handle);
902
903 pa_log_info("Trying resume...");
904
905 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
906 SND_PCM_NONBLOCK|
907 SND_PCM_NO_AUTO_RESAMPLE|
908 SND_PCM_NO_AUTO_CHANNELS|
909 SND_PCM_NO_AUTO_FORMAT)) < 0) {
910 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
911 goto fail;
912 }
913
914 ss = u->source->sample_spec;
915 period_size = u->fragment_size / u->frame_size;
916 buffer_size = u->hwbuf_size / u->frame_size;
917 b = u->use_mmap;
918 d = u->use_tsched;
919
920 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
921 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
922 goto fail;
923 }
924
925 if (b != u->use_mmap || d != u->use_tsched) {
926 pa_log_warn("Resume failed, couldn't get original access mode.");
927 goto fail;
928 }
929
930 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
931 pa_log_warn("Resume failed, couldn't restore original sample settings.");
932 goto fail;
933 }
934
935 if (period_size*u->frame_size != u->fragment_size ||
936 buffer_size*u->frame_size != u->hwbuf_size) {
937 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
938 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
939 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
940 goto fail;
941 }
942
943 if (update_sw_params(u) < 0)
944 goto fail;
945
946 if (build_pollfd(u) < 0)
947 goto fail;
948
949 /* FIXME: We need to reload the volume somehow */
950
951 u->read_count = 0;
952 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
953 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
954 u->last_smoother_update = 0;
955
956 u->first = TRUE;
957
958 pa_log_info("Resumed successfully...");
959
960 return 0;
961
962 fail:
963 if (u->pcm_handle) {
964 snd_pcm_close(u->pcm_handle);
965 u->pcm_handle = NULL;
966 }
967
968 return -PA_ERR_IO;
969 }
970
971 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
972 struct userdata *u = PA_SOURCE(o)->userdata;
973
974 switch (code) {
975
976 case PA_SOURCE_MESSAGE_GET_LATENCY: {
977 pa_usec_t r = 0;
978
979 if (u->pcm_handle)
980 r = source_get_latency(u);
981
982 *((pa_usec_t*) data) = r;
983
984 return 0;
985 }
986
987 case PA_SOURCE_MESSAGE_SET_STATE:
988
989 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
990
991 case PA_SOURCE_SUSPENDED: {
992 int r;
993 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
994
995 if ((r = suspend(u)) < 0)
996 return r;
997
998 break;
999 }
1000
1001 case PA_SOURCE_IDLE:
1002 case PA_SOURCE_RUNNING: {
1003 int r;
1004
1005 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1006 if (build_pollfd(u) < 0)
1007 return -PA_ERR_IO;
1008 }
1009
1010 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1011 if ((r = unsuspend(u)) < 0)
1012 return r;
1013 }
1014
1015 break;
1016 }
1017
1018 case PA_SOURCE_UNLINKED:
1019 case PA_SOURCE_INIT:
1020 case PA_SOURCE_INVALID_STATE:
1021 ;
1022 }
1023
1024 break;
1025 }
1026
1027 return pa_source_process_msg(o, code, data, offset, chunk);
1028 }
1029
1030 /* Called from main context */
1031 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1032 pa_source_state_t old_state;
1033 struct userdata *u;
1034
1035 pa_source_assert_ref(s);
1036 pa_assert_se(u = s->userdata);
1037
1038 old_state = pa_source_get_state(u->source);
1039
1040     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1041         reserve_done(u);
1042     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1043 if (reserve_init(u, u->device_name) < 0)
1044 return -PA_ERR_BUSY;
1045
1046 return 0;
1047 }
1048
1049 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1050 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1051
1052 pa_assert(u);
1053 pa_assert(u->mixer_handle);
1054
1055 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1056 return 0;
1057
1058 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1059 return 0;
1060
1061 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1062 pa_source_get_volume(u->source, TRUE);
1063 pa_source_get_mute(u->source, TRUE);
1064 }
1065
1066 return 0;
1067 }
1068
1069 static void source_get_volume_cb(pa_source *s) {
1070 struct userdata *u = s->userdata;
1071 pa_cvolume r;
1072 char t[PA_CVOLUME_SNPRINT_MAX];
1073
1074 pa_assert(u);
1075 pa_assert(u->mixer_path);
1076 pa_assert(u->mixer_handle);
1077
1078 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1079 return;
1080
1081 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1082 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1083
1084 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1085
1086 if (pa_cvolume_equal(&u->hardware_volume, &r))
1087 return;
1088
1089 s->volume = u->hardware_volume = r;
1090
1091 /* Hmm, so the hardware volume changed, let's reset our software volume */
1092 if (u->mixer_path->has_dB)
1093 pa_source_set_soft_volume(s, NULL);
1094 }
1095
1096 static void source_set_volume_cb(pa_source *s) {
1097 struct userdata *u = s->userdata;
1098 pa_cvolume r;
1099 char t[PA_CVOLUME_SNPRINT_MAX];
1100
1101 pa_assert(u);
1102 pa_assert(u->mixer_path);
1103 pa_assert(u->mixer_handle);
1104
1105 /* Shift up by the base volume */
1106 pa_sw_cvolume_divide_scalar(&r, &s->volume, s->base_volume);
1107
1108 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1109 return;
1110
1111 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1112 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1113
1114 u->hardware_volume = r;
1115
1116 if (u->mixer_path->has_dB) {
1117 pa_cvolume new_soft_volume;
1118 pa_bool_t accurate_enough;
1119
1120 /* Match exactly what the user requested by software */
1121 pa_sw_cvolume_divide(&new_soft_volume, &s->volume, &u->hardware_volume);
1122
1123 /* If the adjustment to do in software is only minimal we
1124 * can skip it. That saves us CPU at the expense of a bit of
1125 * accuracy */
1126 accurate_enough =
1127 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1128 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1129
1130 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
1131 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1132 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1133 pa_yes_no(accurate_enough));
1134
1135 if (!accurate_enough)
1136 s->soft_volume = new_soft_volume;
1137
1138 } else {
1139 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1140
1141 /* We can't match exactly what the user requested, hence let's
1142 * at least tell the user about it */
1143
1144 s->volume = r;
1145 }
1146 }
1147
1148 static void source_get_mute_cb(pa_source *s) {
1149 struct userdata *u = s->userdata;
1150 pa_bool_t b;
1151
1152 pa_assert(u);
1153 pa_assert(u->mixer_path);
1154 pa_assert(u->mixer_handle);
1155
1156 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1157 return;
1158
1159 s->muted = b;
1160 }
1161
1162 static void source_set_mute_cb(pa_source *s) {
1163 struct userdata *u = s->userdata;
1164
1165 pa_assert(u);
1166 pa_assert(u->mixer_path);
1167 pa_assert(u->mixer_handle);
1168
1169 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1170 }
1171
1172 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1173 struct userdata *u = s->userdata;
1174 pa_alsa_port_data *data;
1175
1176 pa_assert(u);
1177 pa_assert(p);
1178 pa_assert(u->mixer_handle);
1179
1180 data = PA_DEVICE_PORT_DATA(p);
1181
1182 pa_assert_se(u->mixer_path = data->path);
1183 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1184
1185 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1186 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1187 s->n_volume_steps = PA_VOLUME_NORM+1;
1188
1189 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1190 } else {
1191 s->base_volume = PA_VOLUME_NORM;
1192 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1193 }
1194
1195 if (data->setting)
1196 pa_alsa_setting_select(data->setting, u->mixer_handle);
1197
1198 if (s->set_mute)
1199 s->set_mute(s);
1200 if (s->set_volume)
1201 s->set_volume(s);
1202
1203 return 0;
1204 }
1205
1206 static void source_update_requested_latency_cb(pa_source *s) {
1207 struct userdata *u = s->userdata;
1208 pa_assert(u);
1209 pa_assert(u->use_tsched);
1210
1211 if (!u->pcm_handle)
1212 return;
1213
1214 update_sw_params(u);
1215 }
1216
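/* The IO thread: read data whenever the source is opened, keep the smoother
 * updated, program the wakeup timer in tsched mode, and dispatch poll events
 * and error recovery reported by ALSA. */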
1217 static void thread_func(void *userdata) {
1218 struct userdata *u = userdata;
1219 unsigned short revents = 0;
1220
1221 pa_assert(u);
1222
1223 pa_log_debug("Thread starting up");
1224
1225 if (u->core->realtime_scheduling)
1226 pa_make_realtime(u->core->realtime_priority);
1227
1228 pa_thread_mq_install(&u->thread_mq);
1229
1230 for (;;) {
1231 int ret;
1232
1233 #ifdef DEBUG_TIMING
1234 pa_log_debug("Loop");
1235 #endif
1236
1237 /* Read some data and pass it to the sources */
1238 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1239 int work_done;
1240 pa_usec_t sleep_usec = 0;
1241 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1242
1243 if (u->first) {
1244 pa_log_info("Starting capture.");
1245 snd_pcm_start(u->pcm_handle);
1246
1247 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1248
1249 u->first = FALSE;
1250 }
1251
1252 if (u->use_mmap)
1253 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1254 else
1255 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1256
1257 if (work_done < 0)
1258 goto fail;
1259
1260 /* pa_log_debug("work_done = %i", work_done); */
1261
1262 if (work_done)
1263 update_smoother(u);
1264
1265 if (u->use_tsched) {
1266 pa_usec_t cusec;
1267
1268 /* OK, the capture buffer is now empty, let's
1269 * calculate when to wake up next */
1270
1271 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1272
1273 /* Convert from the sound card time domain to the
1274 * system time domain */
1275 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1276
1277 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1278
1279 /* We don't trust the conversion, so we wake up whatever comes first */
1280 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1281 }
1282 } else if (u->use_tsched)
1283
1284 /* OK, we're in an invalid state, let's disable our timers */
1285 pa_rtpoll_set_timer_disabled(u->rtpoll);
1286
1287 /* Hmm, nothing to do. Let's sleep */
1288 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1289 goto fail;
1290
1291 if (ret == 0)
1292 goto finish;
1293
1294 /* Tell ALSA about this and process its response */
1295 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1296 struct pollfd *pollfd;
1297 int err;
1298 unsigned n;
1299
1300 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1301
1302 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1303 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1304 goto fail;
1305 }
1306
1307 if (revents & ~POLLIN) {
1308 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1309 goto fail;
1310
1311 u->first = TRUE;
1312 } else if (revents && u->use_tsched && pa_log_ratelimit())
1313 pa_log_debug("Wakeup from ALSA!");
1314
1315 } else
1316 revents = 0;
1317 }
1318
1319 fail:
1320     /* If this was not a regular exit from the loop we have to continue
1321      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1322 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1323 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1324
1325 finish:
1326 pa_log_debug("Thread shutting down");
1327 }
1328
1329 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1330 const char *n;
1331 char *t;
1332
1333 pa_assert(data);
1334 pa_assert(ma);
1335 pa_assert(device_name);
1336
1337 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1338 pa_source_new_data_set_name(data, n);
1339 data->namereg_fail = TRUE;
1340 return;
1341 }
1342
1343 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1344 data->namereg_fail = TRUE;
1345 else {
1346 n = device_id ? device_id : device_name;
1347 data->namereg_fail = FALSE;
1348 }
1349
1350 if (mapping)
1351 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1352 else
1353 t = pa_sprintf_malloc("alsa_input.%s", n);
1354
1355 pa_source_new_data_set_name(data, t);
1356 pa_xfree(t);
1357 }
1358
1359 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1360
1361 if (!mapping && !element)
1362 return;
1363
1364 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1365 pa_log_info("Failed to find a working mixer device.");
1366 return;
1367 }
1368
1369 if (element) {
1370
1371 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1372 goto fail;
1373
1374 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1375 goto fail;
1376
1377 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1378 pa_alsa_path_dump(u->mixer_path);
1379 } else {
1380
1381 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1382 goto fail;
1383
1384 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1385
1386 pa_log_debug("Probed mixer paths:");
1387 pa_alsa_path_set_dump(u->mixer_path_set);
1388 }
1389
1390 return;
1391
1392 fail:
1393
1394 if (u->mixer_path_set) {
1395 pa_alsa_path_set_free(u->mixer_path_set);
1396 u->mixer_path_set = NULL;
1397 } else if (u->mixer_path) {
1398 pa_alsa_path_free(u->mixer_path);
1399 u->mixer_path = NULL;
1400 }
1401
1402 if (u->mixer_handle) {
1403 snd_mixer_close(u->mixer_handle);
1404 u->mixer_handle = NULL;
1405 }
1406 }
1407
1408 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1409 pa_assert(u);
1410
1411 if (!u->mixer_handle)
1412 return 0;
1413
1414 if (u->source->active_port) {
1415 pa_alsa_port_data *data;
1416
1417 /* We have a list of supported paths, so let's activate the
1418 * one that has been chosen as active */
1419
1420 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1421 u->mixer_path = data->path;
1422
1423 pa_alsa_path_select(data->path, u->mixer_handle);
1424
1425 if (data->setting)
1426 pa_alsa_setting_select(data->setting, u->mixer_handle);
1427
1428 } else {
1429
1430 if (!u->mixer_path && u->mixer_path_set)
1431 u->mixer_path = u->mixer_path_set->paths;
1432
1433 if (u->mixer_path) {
1434 /* Hmm, we have only a single path, then let's activate it */
1435
1436 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1437
1438 if (u->mixer_path->settings)
1439 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1440 } else
1441 return 0;
1442 }
1443
1444 if (!u->mixer_path->has_volume)
1445 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1446 else {
1447
1448 if (u->mixer_path->has_dB) {
1449 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1450
1451 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1452 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1453
1454 if (u->mixer_path->max_dB > 0.0)
1455 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1456 else
1457 pa_log_info("No particular base volume set, fixing to 0 dB");
1458
1459 } else {
1460 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1461 u->source->base_volume = PA_VOLUME_NORM;
1462 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1463 }
1464
1465 u->source->get_volume = source_get_volume_cb;
1466 u->source->set_volume = source_set_volume_cb;
1467
1468 u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
1469 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1470 }
1471
1472 if (!u->mixer_path->has_mute) {
1473 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1474 } else {
1475 u->source->get_mute = source_get_mute_cb;
1476 u->source->set_mute = source_set_mute_cb;
1477 u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1478 pa_log_info("Using hardware mute control.");
1479 }
1480
1481 u->mixer_fdl = pa_alsa_fdlist_new();
1482
1483 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1484 pa_log("Failed to initialize file descriptor monitoring");
1485 return -1;
1486 }
1487
1488 if (u->mixer_path_set)
1489 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1490 else
1491 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1492
1493 return 0;
1494 }
1495
1496 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1497
1498 struct userdata *u = NULL;
1499 const char *dev_id = NULL;
1500 pa_sample_spec ss, requested_ss;
1501 pa_channel_map map;
1502 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1503 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1504 size_t frame_size;
1505 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE;
1506 pa_source_new_data data;
1507 pa_alsa_profile_set *profile_set = NULL;
1508
1509 pa_assert(m);
1510 pa_assert(ma);
1511
1512 ss = m->core->default_sample_spec;
1513 map = m->core->default_channel_map;
1514 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1515 pa_log("Failed to parse sample specification");
1516 goto fail;
1517 }
1518
1519 requested_ss = ss;
1520 frame_size = pa_frame_size(&ss);
1521
1522 nfrags = m->core->default_n_fragments;
1523 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1524 if (frag_size <= 0)
1525 frag_size = (uint32_t) frame_size;
1526 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1527 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1528
1529 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1530 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1531 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1532 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1533 pa_log("Failed to parse buffer metrics");
1534 goto fail;
1535 }
1536
1537 buffer_size = nfrags * frag_size;
1538
1539 period_frames = frag_size/frame_size;
1540 buffer_frames = buffer_size/frame_size;
1541 tsched_frames = tsched_size/frame_size;
1542
1543 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1544 pa_log("Failed to parse mmap argument.");
1545 goto fail;
1546 }
1547
1548 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1549 pa_log("Failed to parse timer_scheduling argument.");
1550 goto fail;
1551 }
1552
1553 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1554 pa_log("Failed to parse ignore_dB argument.");
1555 goto fail;
1556 }
1557
1558 use_tsched = pa_alsa_may_tsched(use_tsched);
1559
1560 u = pa_xnew0(struct userdata, 1);
1561 u->core = m->core;
1562 u->module = m;
1563 u->use_mmap = use_mmap;
1564 u->use_tsched = use_tsched;
1565 u->first = TRUE;
1566 u->rtpoll = pa_rtpoll_new();
1567 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1568
1569 u->smoother = pa_smoother_new(
1570 DEFAULT_TSCHED_BUFFER_USEC*2,
1571 DEFAULT_TSCHED_BUFFER_USEC*2,
1572 TRUE,
1573 TRUE,
1574 5,
1575 pa_rtclock_now(),
1576 TRUE);
1577 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1578
1579 dev_id = pa_modargs_get_value(
1580 ma, "device_id",
1581 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1582
1583 if (reserve_init(u, dev_id) < 0)
1584 goto fail;
1585
1586 if (reserve_monitor_init(u, dev_id) < 0)
1587 goto fail;
1588
1589 b = use_mmap;
1590 d = use_tsched;
1591
1592 if (mapping) {
1593
1594 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1595 pa_log("device_id= not set");
1596 goto fail;
1597 }
1598
1599 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1600 dev_id,
1601 &u->device_name,
1602 &ss, &map,
1603 SND_PCM_STREAM_CAPTURE,
1604 &period_frames, &buffer_frames, tsched_frames,
1605 &b, &d, mapping)))
1606 goto fail;
1607
1608 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1609
1610 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1611 goto fail;
1612
1613 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1614 dev_id,
1615 &u->device_name,
1616 &ss, &map,
1617 SND_PCM_STREAM_CAPTURE,
1618 &period_frames, &buffer_frames, tsched_frames,
1619 &b, &d, profile_set, &mapping)))
1620 goto fail;
1621
1622 } else {
1623
1624 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1625 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1626 &u->device_name,
1627 &ss, &map,
1628 SND_PCM_STREAM_CAPTURE,
1629 &period_frames, &buffer_frames, tsched_frames,
1630 &b, &d, FALSE)))
1631 goto fail;
1632 }
1633
1634 pa_assert(u->device_name);
1635 pa_log_info("Successfully opened device %s.", u->device_name);
1636
1637 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1638 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1639 goto fail;
1640 }
1641
1642 if (mapping)
1643 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1644
1645 if (use_mmap && !b) {
1646 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1647 u->use_mmap = use_mmap = FALSE;
1648 }
1649
1650 if (use_tsched && (!b || !d)) {
1651 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1652 u->use_tsched = use_tsched = FALSE;
1653 }
1654
1655 if (u->use_mmap)
1656 pa_log_info("Successfully enabled mmap() mode.");
1657
1658 if (u->use_tsched)
1659 pa_log_info("Successfully enabled timer-based scheduling mode.");
1660
1661 /* ALSA might tweak the sample spec, so recalculate the frame size */
1662 frame_size = pa_frame_size(&ss);
1663
1664 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1665
1666 pa_source_new_data_init(&data);
1667 data.driver = driver;
1668 data.module = m;
1669 data.card = card;
1670 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1671
1672 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1673 * variable instead of using &data.namereg_fail directly, because
1674 * data.namereg_fail is a bitfield and taking the address of a bitfield
1675 * variable is impossible. */
1676 namereg_fail = data.namereg_fail;
1677 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1678 pa_log("Failed to parse boolean argument namereg_fail.");
1679 pa_source_new_data_done(&data);
1680 goto fail;
1681 }
1682 data.namereg_fail = namereg_fail;
1683
1684 pa_source_new_data_set_sample_spec(&data, &ss);
1685 pa_source_new_data_set_channel_map(&data, &map);
1686
1687 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1688 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1689 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1690 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1691 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1692
1693 if (mapping) {
1694 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1695 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1696 }
1697
1698 pa_alsa_init_description(data.proplist);
1699
1700 if (u->control_device)
1701 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1702
1703 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1704 pa_log("Invalid properties");
1705 pa_source_new_data_done(&data);
1706 goto fail;
1707 }
1708
1709 if (u->mixer_path_set)
1710 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1711
1712 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1713 pa_source_new_data_done(&data);
1714
1715 if (!u->source) {
1716 pa_log("Failed to create source object");
1717 goto fail;
1718 }
1719
1720 u->source->parent.process_msg = source_process_msg;
1721 if (u->use_tsched)
1722 u->source->update_requested_latency = source_update_requested_latency_cb;
1723 u->source->set_state = source_set_state_cb;
1724 u->source->set_port = source_set_port_cb;
1725 u->source->userdata = u;
1726
1727 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1728 pa_source_set_rtpoll(u->source, u->rtpoll);
1729
1730 u->frame_size = frame_size;
1731 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1732 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1733 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1734
1735 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1736 (double) u->hwbuf_size / (double) u->fragment_size,
1737 (long unsigned) u->fragment_size,
1738 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1739 (long unsigned) u->hwbuf_size,
1740 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1741
1742 if (u->use_tsched) {
1743 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1744
1745 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1746 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1747
1748 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1749 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1750
1751 fix_min_sleep_wakeup(u);
1752 fix_tsched_watermark(u);
1753
1754 pa_source_set_latency_range(u->source,
1755 0,
1756 pa_bytes_to_usec(u->hwbuf_size, &ss));
1757
1758 pa_log_info("Time scheduling watermark is %0.2fms",
1759 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1760 } else
1761 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1762
1763 reserve_update(u);
1764
1765 if (update_sw_params(u) < 0)
1766 goto fail;
1767
1768 if (setup_mixer(u, ignore_dB) < 0)
1769 goto fail;
1770
1771 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1772
1773 if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1774 pa_log("Failed to create thread.");
1775 goto fail;
1776 }
1777 /* Get initial mixer settings */
1778 if (data.volume_is_set) {
1779 if (u->source->set_volume)
1780 u->source->set_volume(u->source);
1781 } else {
1782 if (u->source->get_volume)
1783 u->source->get_volume(u->source);
1784 }
1785
1786 if (data.muted_is_set) {
1787 if (u->source->set_mute)
1788 u->source->set_mute(u->source);
1789 } else {
1790 if (u->source->get_mute)
1791 u->source->get_mute(u->source);
1792 }
1793
1794 pa_source_put(u->source);
1795
1796 if (profile_set)
1797 pa_alsa_profile_set_free(profile_set);
1798
1799 return u->source;
1800
1801 fail:
1802
1803 if (u)
1804 userdata_free(u);
1805
1806 if (profile_set)
1807 pa_alsa_profile_set_free(profile_set);
1808
1809 return NULL;
1810 }
1811
1812 static void userdata_free(struct userdata *u) {
1813 pa_assert(u);
1814
1815 if (u->source)
1816 pa_source_unlink(u->source);
1817
1818 if (u->thread) {
1819 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1820 pa_thread_free(u->thread);
1821 }
1822
1823 pa_thread_mq_done(&u->thread_mq);
1824
1825 if (u->source)
1826 pa_source_unref(u->source);
1827
1828 if (u->alsa_rtpoll_item)
1829 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1830
1831 if (u->rtpoll)
1832 pa_rtpoll_free(u->rtpoll);
1833
1834 if (u->pcm_handle) {
1835 snd_pcm_drop(u->pcm_handle);
1836 snd_pcm_close(u->pcm_handle);
1837 }
1838
1839 if (u->mixer_fdl)
1840 pa_alsa_fdlist_free(u->mixer_fdl);
1841
1842 if (u->mixer_path_set)
1843 pa_alsa_path_set_free(u->mixer_path_set);
1844 else if (u->mixer_path)
1845 pa_alsa_path_free(u->mixer_path);
1846
1847 if (u->mixer_handle)
1848 snd_mixer_close(u->mixer_handle);
1849
1850 if (u->smoother)
1851 pa_smoother_free(u->smoother);
1852
1853 reserve_done(u);
1854 monitor_done(u);
1855
1856 pa_xfree(u->device_name);
1857 pa_xfree(u->control_device);
1858 pa_xfree(u);
1859 }
1860
1861 void pa_alsa_source_free(pa_source *s) {
1862 struct userdata *u;
1863
1864 pa_source_assert_ref(s);
1865 pa_assert_se(u = s->userdata);
1866
1867 userdata_free(u);
1868 }