]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-source.c
Merge commit 'origin/master-tx'
[pulseaudio] / src / modules / alsa / alsa-source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62
63 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
64 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
65
66 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
67 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
68 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
69 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
70 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
71 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72
73 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
74 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
75
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
78
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
80
/* Per-module state for one ALSA capture source. Shared between the main
 * thread and the IO thread; fields below are grouped by concern. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread and its message/poll plumbing */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    /* The ALSA capture PCM; NULL while the source is suspended */
    snd_pcm_t *pcm_handle;

    /* Mixer state used for hardware volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    /* Last volume we read from / wrote to the hardware mixer */
    pa_cvolume hardware_volume;

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    /* Earliest time the watermark may be decreased again; 0 means the
     * hold-off timer is not armed yet (see decrease_watermark()) */
    pa_usec_t watermark_dec_not_before;

    unsigned nfragments;

    char *device_name;    /* ALSA PCM device string */
    char *control_device; /* ALSA control/mixer device string */

    /* Whether mmap access and/or timer-based scheduling are in use */
    pa_bool_t use_mmap:1, use_tsched:1;

    pa_rtpoll_item *alsa_rtpoll_item;

    /* NOTE(review): ALSA mixer channels are indexed 0..SND_MIXER_SCHN_LAST
     * inclusive, so this array looks one element short — confirm against
     * the code that fills mixer_map (not visible in this chunk). */
    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Time smoother mapping wall-clock time to stream time for latency
     * interpolation between hardware updates */
    pa_smoother *smoother;
    uint64_t read_count;         /* total bytes read since last (un)suspend */
    pa_usec_t smoother_interval; /* current smoother update interval */
    pa_usec_t last_smoother_update;

    /* Device reservation (session ownership) and busy monitoring */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
135
136 static void userdata_free(struct userdata *u);
137
138 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
139 pa_assert(r);
140 pa_assert(u);
141
142 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
143 return PA_HOOK_CANCEL;
144
145 return PA_HOOK_OK;
146 }
147
148 static void reserve_done(struct userdata *u) {
149 pa_assert(u);
150
151 if (u->reserve_slot) {
152 pa_hook_slot_free(u->reserve_slot);
153 u->reserve_slot = NULL;
154 }
155
156 if (u->reserve) {
157 pa_reserve_wrapper_unref(u->reserve);
158 u->reserve = NULL;
159 }
160 }
161
162 static void reserve_update(struct userdata *u) {
163 const char *description;
164 pa_assert(u);
165
166 if (!u->source || !u->reserve)
167 return;
168
169 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
170 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
171 }
172
173 static int reserve_init(struct userdata *u, const char *dname) {
174 char *rname;
175
176 pa_assert(u);
177 pa_assert(dname);
178
179 if (u->reserve)
180 return 0;
181
182 if (pa_in_system_mode())
183 return 0;
184
185 /* We are resuming, try to lock the device */
186 if (!(rname = pa_alsa_get_reserve_name(dname)))
187 return 0;
188
189 u->reserve = pa_reserve_wrapper_get(u->core, rname);
190 pa_xfree(rname);
191
192 if (!(u->reserve))
193 return -1;
194
195 reserve_update(u);
196
197 pa_assert(!u->reserve_slot);
198 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
199
200 return 0;
201 }
202
203 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
204 pa_bool_t b;
205
206 pa_assert(w);
207 pa_assert(u);
208
209 b = PA_PTR_TO_UINT(busy) && !u->reserve;
210
211 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
212 return PA_HOOK_OK;
213 }
214
215 static void monitor_done(struct userdata *u) {
216 pa_assert(u);
217
218 if (u->monitor_slot) {
219 pa_hook_slot_free(u->monitor_slot);
220 u->monitor_slot = NULL;
221 }
222
223 if (u->monitor) {
224 pa_reserve_monitor_wrapper_unref(u->monitor);
225 u->monitor = NULL;
226 }
227 }
228
229 static int reserve_monitor_init(struct userdata *u, const char *dname) {
230 char *rname;
231
232 pa_assert(u);
233 pa_assert(dname);
234
235 if (pa_in_system_mode())
236 return 0;
237
238 /* We are resuming, try to lock the device */
239 if (!(rname = pa_alsa_get_reserve_name(dname)))
240 return 0;
241
242 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
243 pa_xfree(rname);
244
245 if (!(u->monitor))
246 return -1;
247
248 pa_assert(!u->monitor_slot);
249 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
250
251 return 0;
252 }
253
254 static void fix_min_sleep_wakeup(struct userdata *u) {
255 size_t max_use, max_use_2;
256 pa_assert(u);
257 pa_assert(u->use_tsched);
258
259 max_use = u->hwbuf_size - u->hwbuf_unused;
260 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
261
262 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
263 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
264
265 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
266 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
267 }
268
269 static void fix_tsched_watermark(struct userdata *u) {
270 size_t max_use;
271 pa_assert(u);
272 pa_assert(u->use_tsched);
273
274 max_use = u->hwbuf_size - u->hwbuf_unused;
275
276 if (u->tsched_watermark > max_use - u->min_sleep)
277 u->tsched_watermark = max_use - u->min_sleep;
278
279 if (u->tsched_watermark < u->min_wakeup)
280 u->tsched_watermark = u->min_wakeup;
281 }
282
283 static void increase_watermark(struct userdata *u) {
284 size_t old_watermark;
285 pa_usec_t old_min_latency, new_min_latency;
286
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 /* First, just try to increase the watermark */
291 old_watermark = u->tsched_watermark;
292 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
293 fix_tsched_watermark(u);
294
295 if (old_watermark != u->tsched_watermark) {
296 pa_log_info("Increasing wakeup watermark to %0.2f ms",
297 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
298 return;
299 }
300
301 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
302 old_min_latency = u->source->thread_info.min_latency;
303 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
304 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
305
306 if (old_min_latency != new_min_latency) {
307 pa_log_info("Increasing minimal latency to %0.2f ms",
308 (double) new_min_latency / PA_USEC_PER_MSEC);
309
310 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
311 }
312
313 /* When we reach this we're officialy fucked! */
314 }
315
316 static void decrease_watermark(struct userdata *u) {
317 size_t old_watermark;
318 pa_usec_t now;
319
320 pa_assert(u);
321 pa_assert(u->use_tsched);
322
323 now = pa_rtclock_now();
324
325 if (u->watermark_dec_not_before <= 0)
326 goto restart;
327
328 if (u->watermark_dec_not_before > now)
329 return;
330
331 old_watermark = u->tsched_watermark;
332
333 if (u->tsched_watermark < u->watermark_dec_step)
334 u->tsched_watermark = u->tsched_watermark / 2;
335 else
336 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
337
338 fix_tsched_watermark(u);
339
340 if (old_watermark != u->tsched_watermark)
341 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
342 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
343
344 /* We don't change the latency range*/
345
346 restart:
347 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
348 }
349
350 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
351 pa_usec_t wm, usec;
352
353 pa_assert(sleep_usec);
354 pa_assert(process_usec);
355
356 pa_assert(u);
357 pa_assert(u->use_tsched);
358
359 usec = pa_source_get_requested_latency_within_thread(u->source);
360
361 if (usec == (pa_usec_t) -1)
362 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
363
364 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
365
366 if (wm > usec)
367 wm = usec/2;
368
369 *sleep_usec = usec - wm;
370 *process_usec = wm;
371
372 #ifdef DEBUG_TIMING
373 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
374 (unsigned long) (usec / PA_USEC_PER_MSEC),
375 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
376 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
377 #endif
378
379 return usec;
380 }
381
382 static int try_recover(struct userdata *u, const char *call, int err) {
383 pa_assert(u);
384 pa_assert(call);
385 pa_assert(err < 0);
386
387 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
388
389 pa_assert(err != -EAGAIN);
390
391 if (err == -EPIPE)
392 pa_log_debug("%s: Buffer overrun!", call);
393
394 if (err == -ESTRPIPE)
395 pa_log_debug("%s: System suspended!", call);
396
397 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
398 pa_log("%s: %s", call, pa_alsa_strerror(err));
399 return -1;
400 }
401
402 snd_pcm_start(u->pcm_handle);
403 return 0;
404 }
405
/* Given how many bytes are ready to be read (n_bytes), compute how much
 * room remains in the hardware buffer before it overruns, and adapt the
 * tsched watermark accordingly. Returns the number of free bytes left.
 * 'on_timeout' tells us whether this wakeup came from our own timer. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit())
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        /* Overran, or dangerously close to it: wake up earlier next time. */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if have actually been
             * woken up by a timeout. If something else woke us up
             * it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Neither clearly idle nor overrunning: disarm the hold-off timer
         * so decrease_watermark() restarts its measurement window. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
458
/* Read as much data as available from the device using mmap access and
 * post it to the source. On return, *sleep_usec holds how long the IO
 * thread may sleep before the next read. 'polled' is TRUE when we were
 * woken by POLLIN rather than our timer; 'on_timeout' when the wakeup
 * came from the tsched timer. Returns 1 if any data was read, 0 if not,
 * negative on unrecoverable error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many frames does the device have for us? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* With timer scheduling, don't bother reading if we are still far
         * from the watermark — unless poll() explicitly woke us. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but there is nothing to read: driver bug.
             * Complain once, loudly. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Bound the number of outer iterations so we cannot starve the
         * rest of the IO thread. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: consume the n_bytes reported above, possibly in
         * several mmap_begin/commit chunks. */
        for (;;) {
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            pa_memchunk chunk;
            void *p;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* EAGAIN after we already read something is fine; the
                 * avail estimate was just optimistic. */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed region in a fixed memblock so it is posted
             * without copying (copied lazily only if a client holds on to
             * it past the commit below). */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Tell the caller how long it may sleep: time until the watermark is
     * reached, minus the processing budget. */
    *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
617
618 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
619 int work_done = FALSE;
620 pa_usec_t max_sleep_usec = 0, process_usec = 0;
621 size_t left_to_record;
622 unsigned j = 0;
623
624 pa_assert(u);
625 pa_source_assert_ref(u->source);
626
627 if (u->use_tsched)
628 hw_sleep_time(u, &max_sleep_usec, &process_usec);
629
630 for (;;) {
631 snd_pcm_sframes_t n;
632 size_t n_bytes;
633 int r;
634 pa_bool_t after_avail = TRUE;
635
636 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
637
638 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
639 continue;
640
641 return r;
642 }
643
644 n_bytes = (size_t) n * u->frame_size;
645 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
646 on_timeout = FALSE;
647
648 if (u->use_tsched)
649 if (!polled &&
650 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
651 break;
652
653 if (PA_UNLIKELY(n_bytes <= 0)) {
654
655 if (polled)
656 PA_ONCE_BEGIN {
657 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
658 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
659 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
660 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
661 pa_strnull(dn));
662 pa_xfree(dn);
663 } PA_ONCE_END;
664
665 break;
666 }
667
668 if (++j > 10) {
669 #ifdef DEBUG_TIMING
670 pa_log_debug("Not filling up, because already too many iterations.");
671 #endif
672
673 break;
674 }
675
676 polled = FALSE;
677
678 for (;;) {
679 void *p;
680 snd_pcm_sframes_t frames;
681 pa_memchunk chunk;
682
683 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
684
685 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
686
687 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
688 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
689
690 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
691
692 p = pa_memblock_acquire(chunk.memblock);
693 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
694 pa_memblock_release(chunk.memblock);
695
696 if (PA_UNLIKELY(frames < 0)) {
697 pa_memblock_unref(chunk.memblock);
698
699 if (!after_avail && (int) frames == -EAGAIN)
700 break;
701
702 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
703 continue;
704
705 return r;
706 }
707
708 if (!after_avail && frames == 0) {
709 pa_memblock_unref(chunk.memblock);
710 break;
711 }
712
713 pa_assert(frames > 0);
714 after_avail = FALSE;
715
716 chunk.index = 0;
717 chunk.length = (size_t) frames * u->frame_size;
718
719 pa_source_post(u->source, &chunk);
720 pa_memblock_unref(chunk.memblock);
721
722 work_done = TRUE;
723
724 u->read_count += frames * u->frame_size;
725
726 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
727
728 if ((size_t) frames * u->frame_size >= n_bytes)
729 break;
730
731 n_bytes -= (size_t) frames * u->frame_size;
732 }
733 }
734
735 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
736
737 if (*sleep_usec > process_usec)
738 *sleep_usec -= process_usec;
739 else
740 *sleep_usec = 0;
741
742 return work_done ? 1 : 0;
743 }
744
/* Feed the time smoother a fresh (system time, stream time) sample pair
 * so latency queries can interpolate between hardware updates. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver's own timestamp for when the delay snapshot was
     * taken; fall back to the current time below if it is unavailable. */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream position = bytes already posted plus what still sits in the
     * hardware buffer (delay frames). */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
790
791 static pa_usec_t source_get_latency(struct userdata *u) {
792 int64_t delay;
793 pa_usec_t now1, now2;
794
795 pa_assert(u);
796
797 now1 = pa_rtclock_now();
798 now2 = pa_smoother_get(u->smoother, now1);
799
800 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
801
802 return delay >= 0 ? (pa_usec_t) delay : 0;
803 }
804
805 static int build_pollfd(struct userdata *u) {
806 pa_assert(u);
807 pa_assert(u->pcm_handle);
808
809 if (u->alsa_rtpoll_item)
810 pa_rtpoll_item_free(u->alsa_rtpoll_item);
811
812 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
813 return -1;
814
815 return 0;
816 }
817
818 static int suspend(struct userdata *u) {
819 pa_assert(u);
820 pa_assert(u->pcm_handle);
821
822 pa_smoother_pause(u->smoother, pa_rtclock_now());
823
824 /* Let's suspend */
825 snd_pcm_close(u->pcm_handle);
826 u->pcm_handle = NULL;
827
828 if (u->alsa_rtpoll_item) {
829 pa_rtpoll_item_free(u->alsa_rtpoll_item);
830 u->alsa_rtpoll_item = NULL;
831 }
832
833 pa_log_info("Device suspended...");
834
835 return 0;
836 }
837
/* Recompute hwbuf_unused and avail_min from the currently requested
 * latency and push the resulting software parameters to ALSA. Returns 0
 * on success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is left unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* hwbuf_unused changed, so the derived limits must be refreshed
         * (in this order: sleep/wakeup bounds first, then the watermark). */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer scheduling, only let poll() wake us once a full
         * sleep period of data has accumulated. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
889
/* Resume from suspend: reopen the PCM, restore the exact hardware
 * configuration we had before, and restart capture. Returns 0 on
 * success, -1 on failure (PCM left closed). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before the suspend. */
    ss = u->source->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* The driver may have negotiated something different; since clients
     * depend on the old configuration, any deviation aborts the resume. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    snd_pcm_start(u->pcm_handle);

    /* Reset the progress counter and the smoother; latency estimates
     * start over from scratch. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
966
/* Message handler run in the IO thread: answers latency queries and
 * drives suspend/resume on state changes, then delegates everything
 * else to the generic source handler. */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM; report zero latency. */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED:
                    /* Can only suspend from an opened state. */
                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING:

                    /* First transition out of INIT: register with the
                     * poll loop and start capturing. */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;

                        snd_pcm_start(u->pcm_handle);
                    }

                    /* Coming back from suspend: reopen the device. */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    /* No device action needed for these transitions. */
                    ;
            }

            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
1023
1024 /* Called from main context */
1025 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1026 pa_source_state_t old_state;
1027 struct userdata *u;
1028
1029 pa_source_assert_ref(s);
1030 pa_assert_se(u = s->userdata);
1031
1032 old_state = pa_source_get_state(u->source);
1033
1034 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1035 reserve_done(u);
1036 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1037 if (reserve_init(u, u->device_name) < 0)
1038 return -1;
1039
1040 return 0;
1041 }
1042
1043 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1044 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1045
1046 pa_assert(u);
1047 pa_assert(u->mixer_handle);
1048
1049 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1050 return 0;
1051
1052 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1053 pa_source_get_volume(u->source, TRUE);
1054 pa_source_get_mute(u->source, TRUE);
1055 }
1056
1057 return 0;
1058 }
1059
1060 static void source_get_volume_cb(pa_source *s) {
1061 struct userdata *u = s->userdata;
1062 pa_cvolume r;
1063 char t[PA_CVOLUME_SNPRINT_MAX];
1064
1065 pa_assert(u);
1066 pa_assert(u->mixer_path);
1067 pa_assert(u->mixer_handle);
1068
1069 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1070 return;
1071
1072 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1073 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1074
1075 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1076
1077 if (pa_cvolume_equal(&u->hardware_volume, &r))
1078 return;
1079
1080 s->volume = u->hardware_volume = r;
1081
1082 /* Hmm, so the hardware volume changed, let's reset our software volume */
1083 if (u->mixer_path->has_dB)
1084 pa_source_set_soft_volume(s, NULL);
1085 }
1086
/* Write the requested source volume to the hardware mixer; on dB-capable
 * paths, compensate the rounding error in software. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* r now holds what the hardware actually accepted (after rounding). */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->volume = r;
    }
}
1138
1139 static void source_get_mute_cb(pa_source *s) {
1140 struct userdata *u = s->userdata;
1141 pa_bool_t b;
1142
1143 pa_assert(u);
1144 pa_assert(u->mixer_path);
1145 pa_assert(u->mixer_handle);
1146
1147 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1148 return;
1149
1150 s->muted = b;
1151 }
1152
1153 static void source_set_mute_cb(pa_source *s) {
1154 struct userdata *u = s->userdata;
1155
1156 pa_assert(u);
1157 pa_assert(u->mixer_path);
1158 pa_assert(u->mixer_handle);
1159
1160 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1161 }
1162
1163 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1164 struct userdata *u = s->userdata;
1165 pa_alsa_port_data *data;
1166
1167 pa_assert(u);
1168 pa_assert(p);
1169 pa_assert(u->mixer_handle);
1170
1171 data = PA_DEVICE_PORT_DATA(p);
1172
1173 pa_assert_se(u->mixer_path = data->path);
1174 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1175
1176 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1177 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1178 s->n_volume_steps = PA_VOLUME_NORM+1;
1179
1180 if (u->mixer_path->max_dB > 0.0)
1181 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1182 else
1183 pa_log_info("No particular base volume set, fixing to 0 dB");
1184 } else {
1185 s->base_volume = PA_VOLUME_NORM;
1186 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1187 }
1188
1189 if (data->setting)
1190 pa_alsa_setting_select(data->setting, u->mixer_handle);
1191
1192 if (s->set_mute)
1193 s->set_mute(s);
1194 if (s->set_volume)
1195 s->set_volume(s);
1196
1197 return 0;
1198 }
1199
1200 static void source_update_requested_latency_cb(pa_source *s) {
1201 struct userdata *u = s->userdata;
1202 pa_assert(u);
1203
1204 if (!u->pcm_handle)
1205 return;
1206
1207 update_sw_params(u);
1208 }
1209
/* Body of the IO thread: loops reading captured audio from ALSA while
 * the source is open, sleeping in the rtpoll in between, until either a
 * regular shutdown is requested via the rtpoll (ret == 0) or an
 * unrecoverable error makes us ask the main thread to unload the
 * module. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    /* revents of the ALSA pollfds from the previous rtpoll iteration */
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    /* Acquire realtime priority if the daemon is configured for it */
    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    /* Make the thread's message queue the one pa_rtpoll_run() services */
    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            /* A negative return is an unrecoverable read error */
            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            /* Feed the clock smoother only when samples actually moved */
            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }
        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 signals a regular quit request */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLIN (POLLERR etc.) means the PCM needs
             * recovery, after which capture must be restarted */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                snd_pcm_start(u->pcm_handle);
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            /* Not polling ALSA while suspended: forget stale revents */
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1312
1313 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1314 const char *n;
1315 char *t;
1316
1317 pa_assert(data);
1318 pa_assert(ma);
1319 pa_assert(device_name);
1320
1321 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1322 pa_source_new_data_set_name(data, n);
1323 data->namereg_fail = TRUE;
1324 return;
1325 }
1326
1327 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1328 data->namereg_fail = TRUE;
1329 else {
1330 n = device_id ? device_id : device_name;
1331 data->namereg_fail = FALSE;
1332 }
1333
1334 if (mapping)
1335 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1336 else
1337 t = pa_sprintf_malloc("alsa_input.%s", n);
1338
1339 pa_source_new_data_set_name(data, t);
1340 pa_xfree(t);
1341 }
1342
1343 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1344
1345 if (!mapping && !element)
1346 return;
1347
1348 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1349 pa_log_info("Failed to find a working mixer device.");
1350 return;
1351 }
1352
1353 if (element) {
1354
1355 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1356 goto fail;
1357
1358 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1359 goto fail;
1360
1361 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1362 pa_alsa_path_dump(u->mixer_path);
1363 } else {
1364
1365 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1366 goto fail;
1367
1368 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1369
1370 pa_log_debug("Probed mixer paths:");
1371 pa_alsa_path_set_dump(u->mixer_path_set);
1372 }
1373
1374 return;
1375
1376 fail:
1377
1378 if (u->mixer_path_set) {
1379 pa_alsa_path_set_free(u->mixer_path_set);
1380 u->mixer_path_set = NULL;
1381 } else if (u->mixer_path) {
1382 pa_alsa_path_free(u->mixer_path);
1383 u->mixer_path = NULL;
1384 }
1385
1386 if (u->mixer_handle) {
1387 snd_mixer_close(u->mixer_handle);
1388 u->mixer_handle = NULL;
1389 }
1390 }
1391
/* Wire the previously probed mixer (see find_mixer()) into the source:
 * select the active path, install hardware volume/mute callbacks where
 * the path supports them, and start watching the mixer fds for
 * external changes. Returns 0 on success — including the benign cases
 * of no mixer or no usable path — and -1 only if fd monitoring cannot
 * be set up.
 *
 * NOTE(review): the ignore_dB parameter is not referenced in this
 * function body — probing already happened in find_mixer(); confirm it
 * is intentionally unused here. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer was found: software volume/mute will be used */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No port chosen: fall back to the first path of the set */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Choose the base volume so that the path's 0 dB point maps
             * to PA_VOLUME_NORM */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->source->get_volume = source_get_volume_cb;
        u->source->set_volume = source_set_volume_cb;

        u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->source->get_mute = source_get_mute_cb;
        u->source->set_mute = source_set_mute_cb;
        u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds from the main loop so that changes made by
     * other applications are picked up via mixer_callback */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1479
/* Create and publish a new ALSA capture source.
 *
 * Parses module arguments (sample spec, buffer metrics, mmap/tsched
 * toggles), opens the PCM device (by mapping, by device id with
 * auto-profile, or by plain device string), probes the mixer, builds
 * the pa_source object, configures latency/timer scheduling, spawns
 * the IO thread and finally calls pa_source_put().
 *
 * Returns the new source, or NULL on failure (all partially acquired
 * resources are released via userdata_free()). Ownership of the
 * returned source rests with the caller/module. */
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the daemon defaults, then let module arguments
     * override them */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification");
        goto fail;
    }

    /* Keep a copy before ALSA possibly tweaks ss during open */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    /* NOTE(review): frag_size is unsigned, so "<= 0" only guards 0 */
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    /* NOTE(review): this local hwbuf_size is not read again below —
     * u->hwbuf_size is recomputed later from the negotiated fragment
     * size; confirm the local is intentionally unused after this */
    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse timer_scheduling argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs hrtimers to be worthwhile */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_WATERMARK_USEC*2,
            DEFAULT_TSCHED_WATERMARK_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            FALSE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Claim the device via the reservation protocol before opening */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d are in-out flags: the open helpers clear them if mmap/tsched
     * turn out to be unsupported by the device */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        /* No explicit mapping: probe a fresh profile set and let the
         * open helper pick one (it fills in `mapping`) */
        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Downgrade the access/scheduling modes if the device refused them */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Assemble the pa_source construction data */
    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);
    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    /* Expose one pa_device_port per probed mixer path */
    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    u->source->parent.process_msg = source_process_msg;
    u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    u->source->set_port = source_set_port_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    /* Record the buffer metrics actually negotiated with ALSA */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        /* Translate the watermark from the requested spec into the
         * spec the source actually got */
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    /* Get initial mixer settings */
    /* NOTE(review): data.volume_is_set/muted_is_set are read after
     * pa_source_new_data_done(&data) above — presumably done() leaves
     * these plain fields intact; verify against pa_source_new_data */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

    /* Make the source visible to clients */
    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:

    /* userdata_free() releases everything acquired so far */
    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1784
/* Tear down the userdata and everything it owns. Safe on a partially
 * initialized userdata (every member is checked before release), which
 * is why pa_alsa_source_new()'s fail path can call it at any point. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the source from the core first so no new work arrives */
    if (u->source)
        pa_source_unlink(u->source);

    /* Synchronously stop the IO thread before freeing its resources */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->source)
        pa_source_unref(u->source);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        /* Stop capture and discard pending samples before closing */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* mixer_path_set and a standalone mixer_path are alternatives (see
     * find_mixer()): free whichever one was created, never both */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1833
1834 void pa_alsa_source_free(pa_source *s) {
1835 struct userdata *u;
1836
1837 pa_source_assert_ref(s);
1838 pa_assert_se(u = s->userdata);
1839
1840 userdata_free(u);
1841 }