]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-source.c
Merge remote branch 'mkbosmans/rate-adjustment'
[pulseaudio] / src / modules / alsa / alsa-source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

/* Defaults for timer-based scheduling (tsched): a large hardware buffer
 * with a small wakeup watermark. */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */

/* Parameters for the automatic watermark adaptation done in
 * increase_watermark()/decrease_watermark(). */
#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
/* NOTE(review): TSCHED_WATERMARK_STEP_USEC appears unused in this file —
 * confirm before removing. */
#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */

/* Lower bounds for sleep/wakeup margins, see fix_min_sleep_wakeup() */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */

/* Configuration of the latency time smoother */
#define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
#define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */

/* Bounds for the exponentially growing smoother update interval */
#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */

/* Software-volume corrections smaller than 1% are skipped */
#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
83
/* Per-module instance state, shared between the main thread and the IO
 * thread (see the message handlers for which side owns what). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread and its communication channels */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;      /* capture PCM; NULL while suspended */

    /* Mixer state used for hardware volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume; /* last volume read from/written to the hw mixer */

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    /* Earliest time at which decrease_watermark() may act again; 0 means
     * the verification period has not started yet. */
    pa_usec_t watermark_dec_not_before;

    char *device_name;          /* ALSA PCM device string */
    char *control_device;       /* ALSA mixer control device string */

    pa_bool_t use_mmap:1, use_tsched:1;

    pa_bool_t first;            /* TRUE until the first read after (re)start */

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency estimation: smoother maps system time to stream time */
    pa_smoother *smoother;
    uint64_t read_count;        /* total bytes read from the device so far */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Device reservation, for cooperating with other audio daemons */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
138
139 static void userdata_free(struct userdata *u);
140
141 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
142 pa_assert(r);
143 pa_assert(u);
144
145 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
146 return PA_HOOK_CANCEL;
147
148 return PA_HOOK_OK;
149 }
150
151 static void reserve_done(struct userdata *u) {
152 pa_assert(u);
153
154 if (u->reserve_slot) {
155 pa_hook_slot_free(u->reserve_slot);
156 u->reserve_slot = NULL;
157 }
158
159 if (u->reserve) {
160 pa_reserve_wrapper_unref(u->reserve);
161 u->reserve = NULL;
162 }
163 }
164
165 static void reserve_update(struct userdata *u) {
166 const char *description;
167 pa_assert(u);
168
169 if (!u->source || !u->reserve)
170 return;
171
172 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
173 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
174 }
175
176 static int reserve_init(struct userdata *u, const char *dname) {
177 char *rname;
178
179 pa_assert(u);
180 pa_assert(dname);
181
182 if (u->reserve)
183 return 0;
184
185 if (pa_in_system_mode())
186 return 0;
187
188 /* We are resuming, try to lock the device */
189 if (!(rname = pa_alsa_get_reserve_name(dname)))
190 return 0;
191
192 u->reserve = pa_reserve_wrapper_get(u->core, rname);
193 pa_xfree(rname);
194
195 if (!(u->reserve))
196 return -1;
197
198 reserve_update(u);
199
200 pa_assert(!u->reserve_slot);
201 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
202
203 return 0;
204 }
205
206 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
207 pa_bool_t b;
208
209 pa_assert(w);
210 pa_assert(u);
211
212 b = PA_PTR_TO_UINT(busy) && !u->reserve;
213
214 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
215 return PA_HOOK_OK;
216 }
217
218 static void monitor_done(struct userdata *u) {
219 pa_assert(u);
220
221 if (u->monitor_slot) {
222 pa_hook_slot_free(u->monitor_slot);
223 u->monitor_slot = NULL;
224 }
225
226 if (u->monitor) {
227 pa_reserve_monitor_wrapper_unref(u->monitor);
228 u->monitor = NULL;
229 }
230 }
231
232 static int reserve_monitor_init(struct userdata *u, const char *dname) {
233 char *rname;
234
235 pa_assert(u);
236 pa_assert(dname);
237
238 if (pa_in_system_mode())
239 return 0;
240
241 /* We are resuming, try to lock the device */
242 if (!(rname = pa_alsa_get_reserve_name(dname)))
243 return 0;
244
245 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
246 pa_xfree(rname);
247
248 if (!(u->monitor))
249 return -1;
250
251 pa_assert(!u->monitor_slot);
252 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
253
254 return 0;
255 }
256
257 static void fix_min_sleep_wakeup(struct userdata *u) {
258 size_t max_use, max_use_2;
259 pa_assert(u);
260 pa_assert(u->use_tsched);
261
262 max_use = u->hwbuf_size - u->hwbuf_unused;
263 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
264
265 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
266 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
267
268 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
269 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
270 }
271
272 static void fix_tsched_watermark(struct userdata *u) {
273 size_t max_use;
274 pa_assert(u);
275 pa_assert(u->use_tsched);
276
277 max_use = u->hwbuf_size - u->hwbuf_unused;
278
279 if (u->tsched_watermark > max_use - u->min_sleep)
280 u->tsched_watermark = max_use - u->min_sleep;
281
282 if (u->tsched_watermark < u->min_wakeup)
283 u->tsched_watermark = u->min_wakeup;
284 }
285
/* React to an (imminent) overrun: raise the wakeup watermark, and if it
 * is already maxed out, raise the source's minimal latency instead. */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we reach this point both the watermark and the latency range are
     * already at their maximum -- nothing more we can do about overruns. */
}
318
319 static void decrease_watermark(struct userdata *u) {
320 size_t old_watermark;
321 pa_usec_t now;
322
323 pa_assert(u);
324 pa_assert(u->use_tsched);
325
326 now = pa_rtclock_now();
327
328 if (u->watermark_dec_not_before <= 0)
329 goto restart;
330
331 if (u->watermark_dec_not_before > now)
332 return;
333
334 old_watermark = u->tsched_watermark;
335
336 if (u->tsched_watermark < u->watermark_dec_step)
337 u->tsched_watermark = u->tsched_watermark / 2;
338 else
339 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
340
341 fix_tsched_watermark(u);
342
343 if (old_watermark != u->tsched_watermark)
344 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
345 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
346
347 /* We don't change the latency range*/
348
349 restart:
350 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
351 }
352
353 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
354 pa_usec_t wm, usec;
355
356 pa_assert(sleep_usec);
357 pa_assert(process_usec);
358
359 pa_assert(u);
360 pa_assert(u->use_tsched);
361
362 usec = pa_source_get_requested_latency_within_thread(u->source);
363
364 if (usec == (pa_usec_t) -1)
365 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
366
367 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
368
369 if (wm > usec)
370 wm = usec/2;
371
372 *sleep_usec = usec - wm;
373 *process_usec = wm;
374
375 #ifdef DEBUG_TIMING
376 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
377 (unsigned long) (usec / PA_USEC_PER_MSEC),
378 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
379 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
380 #endif
381
382 return usec;
383 }
384
385 static int try_recover(struct userdata *u, const char *call, int err) {
386 pa_assert(u);
387 pa_assert(call);
388 pa_assert(err < 0);
389
390 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
391
392 pa_assert(err != -EAGAIN);
393
394 if (err == -EPIPE)
395 pa_log_debug("%s: Buffer overrun!", call);
396
397 if (err == -ESTRPIPE)
398 pa_log_debug("%s: System suspended!", call);
399
400 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
401 pa_log("%s: %s", call, pa_alsa_strerror(err));
402 return -1;
403 }
404
405 u->first = TRUE;
406 return 0;
407 }
408
/* Given how many bytes are ready to be read (n_bytes), compute how much
 * room is left in the hardware buffer before an overrun would happen, and
 * adapt the tsched watermark accordingly. on_timeout says whether this
 * wakeup came from the timer -- only then may the watermark be lowered. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        /* Too close to an overrun: wake up earlier next time */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if we have actually been
             * woken up by a timeout. If something else woke us up
             * it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Restart the decrease-verification period whenever we were not
         * comfortably above the decrease threshold */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
461
/* Read as much data as available from the device via the mmap'ed hardware
 * buffer and post it downstream, zero-copy where possible. On return,
 * *sleep_usec holds how long we may sleep (tsched mode only). 'polled'
 * is TRUE when poll() reported POLLIN, 'on_timeout' when the wakeup came
 * from the timer. Returns 1 if work was done, 0 if not, <0 on error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many bytes does the device have for us? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* In tsched mode, skip reading when we woke up too early and
         * there is still ample room in the buffer -- unless poll() told
         * us data is pending */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing to read indicates a driver bug; warn once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Bound the number of outer iterations per wakeup */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        for (;;) {
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            pa_memchunk chunk;
            void *p;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* EAGAIN after the first chunk just means we drained
                 * everything that was available */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Zero-copy: wrap the DMA area in a fixed memblock and post it */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep for what is left in the buffer, minus the processing margin */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
622
/* Non-mmap read path: copy data from the device with snd_pcm_readi() into
 * freshly allocated memblocks and post them downstream. Same contract as
 * mmap_read(): returns 1 if work was done, 0 if not, <0 on error; fills
 * *sleep_usec in tsched mode. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many bytes does the device have for us? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* In tsched mode, skip reading when we woke up too early, unless
         * poll() told us data is pending */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing to read indicates a driver bug; warn once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bound the number of outer iterations per wakeup */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            /* Read at most one memblock worth, and no more than available */
            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* EAGAIN after the first chunk just means we drained
                 * everything that was available */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep for what is left in the buffer, minus the processing margin */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
751
/* Feed a fresh (system time, stream time) sample into the latency
 * smoother. Rate limited by u->smoother_interval, which grows
 * exponentially up to SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver-provided timestamp for the delay measurement */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream time: everything we read so far plus what is still queued
     * in the hardware buffer */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
797
798 static pa_usec_t source_get_latency(struct userdata *u) {
799 int64_t delay;
800 pa_usec_t now1, now2;
801
802 pa_assert(u);
803
804 now1 = pa_rtclock_now();
805 now2 = pa_smoother_get(u->smoother, now1);
806
807 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
808
809 return delay >= 0 ? (pa_usec_t) delay : 0;
810 }
811
812 static int build_pollfd(struct userdata *u) {
813 pa_assert(u);
814 pa_assert(u->pcm_handle);
815
816 if (u->alsa_rtpoll_item)
817 pa_rtpoll_item_free(u->alsa_rtpoll_item);
818
819 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
820 return -1;
821
822 return 0;
823 }
824
/* Suspend the source: pause the smoother, close the PCM and drop the
 * poll item. unsuspend() reopens and reconfigures the device. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Freeze the latency smoother while no data is flowing */
    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
844
/* Recompute hwbuf_unused and avail_min from the currently requested
 * latency and push the resulting software parameters to ALSA. Returns 0
 * on success or a negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is left unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* The usable buffer size changed, so re-derive the limits */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* Ask ALSA to wake us only once enough data for a full sleep
         * period has accumulated */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
896
/* Resume from suspend: reopen the PCM and restore the exact previous
 * configuration (access mode, sample spec, fragment sizes). Fails with
 * -PA_ERR_IO if any of these cannot be restored. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request exactly the configuration we had before the suspend */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Restart latency bookkeeping from scratch */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
973
/* IO-thread message handler: answers latency queries and performs the
 * suspend/resume transitions on state changes. Everything else is
 * delegated to the generic pa_source_process_msg(). */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Report 0 latency while suspended (no PCM handle) */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;
                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up polling */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Waking up from suspend: reopen the device */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
1032
1033 /* Called from main context */
1034 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1035 pa_source_state_t old_state;
1036 struct userdata *u;
1037
1038 pa_source_assert_ref(s);
1039 pa_assert_se(u = s->userdata);
1040
1041 old_state = pa_source_get_state(u->source);
1042
1043 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1044 reserve_done(u);
1045 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1046 if (reserve_init(u, u->device_name) < 0)
1047 return -PA_ERR_BUSY;
1048
1049 return 0;
1050 }
1051
1052 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1053 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1054
1055 pa_assert(u);
1056 pa_assert(u->mixer_handle);
1057
1058 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1059 return 0;
1060
1061 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1062 return 0;
1063
1064 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1065 pa_source_get_volume(u->source, TRUE);
1066 pa_source_get_mute(u->source, TRUE);
1067 }
1068
1069 return 0;
1070 }
1071
/* Read the current hardware volume from the mixer and mirror it into the
 * source. If it changed, the software volume is reset (dB paths only) so
 * the two stay consistent. */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug("    in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    /* Nothing changed, nothing to do */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
}
1104
/* Apply the source's requested volume to the hardware mixer. On dB-capable
 * paths the difference between what was requested and what the hardware
 * could actually do is made up with software volume (unless it is within
 * VOLUME_ACCURACY); otherwise the actual hardware volume is reported back. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, TRUE) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->volume));
        pa_log_debug("   in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("      in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->volume = r;
    }
}
1161
1162 static void source_get_mute_cb(pa_source *s) {
1163 struct userdata *u = s->userdata;
1164 pa_bool_t b;
1165
1166 pa_assert(u);
1167 pa_assert(u->mixer_path);
1168 pa_assert(u->mixer_handle);
1169
1170 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1171 return;
1172
1173 s->muted = b;
1174 }
1175
1176 static void source_set_mute_cb(pa_source *s) {
1177 struct userdata *u = s->userdata;
1178
1179 pa_assert(u);
1180 pa_assert(u->mixer_path);
1181 pa_assert(u->mixer_handle);
1182
1183 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1184 }
1185
1186 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1187 struct userdata *u = s->userdata;
1188 pa_alsa_port_data *data;
1189
1190 pa_assert(u);
1191 pa_assert(p);
1192 pa_assert(u->mixer_handle);
1193
1194 data = PA_DEVICE_PORT_DATA(p);
1195
1196 pa_assert_se(u->mixer_path = data->path);
1197 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1198
1199 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1200 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1201 s->n_volume_steps = PA_VOLUME_NORM+1;
1202
1203 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1204 } else {
1205 s->base_volume = PA_VOLUME_NORM;
1206 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1207 }
1208
1209 if (data->setting)
1210 pa_alsa_setting_select(data->setting, u->mixer_handle);
1211
1212 if (s->set_mute)
1213 s->set_mute(s);
1214 if (s->set_volume)
1215 s->set_volume(s);
1216
1217 return 0;
1218 }
1219
1220 static void source_update_requested_latency_cb(pa_source *s) {
1221 struct userdata *u = s->userdata;
1222 pa_assert(u);
1223 pa_assert(u->use_tsched);
1224
1225 if (!u->pcm_handle)
1226 return;
1227
1228 update_sw_params(u);
1229 }
1230
/* Body of the capture IO thread.
 *
 * Loops forever: reads audio from the PCM device while the source is
 * opened, programs the rtpoll timer in timer-scheduling mode, sleeps
 * in pa_rtpoll_run(), and translates ALSA poll events back into
 * `revents` for the next iteration. Exits via `finish` on a regular
 * shutdown (pa_rtpoll_run() returning 0) or via `fail` on any error,
 * in which case it asks the main thread to unload the module. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    /* Carries the poll result from the bottom of one loop iteration to
     * the read calls at the top of the next one. */
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration after (re)start or recovery: kick off
             * the PCM device and reset the timing smoother. */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                u->first = FALSE;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }
        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* pa_rtpoll_run() returns 0 when a shutdown was requested. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLIN (POLLERR/POLLHUP/...) means the
             * stream needs recovery; after that we must restart the
             * device, hence u->first is raised again. */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1342
1343 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1344 const char *n;
1345 char *t;
1346
1347 pa_assert(data);
1348 pa_assert(ma);
1349 pa_assert(device_name);
1350
1351 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1352 pa_source_new_data_set_name(data, n);
1353 data->namereg_fail = TRUE;
1354 return;
1355 }
1356
1357 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1358 data->namereg_fail = TRUE;
1359 else {
1360 n = device_id ? device_id : device_name;
1361 data->namereg_fail = FALSE;
1362 }
1363
1364 if (mapping)
1365 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1366 else
1367 t = pa_sprintf_malloc("alsa_input.%s", n);
1368
1369 pa_source_new_data_set_name(data, t);
1370 pa_xfree(t);
1371 }
1372
1373 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1374
1375 if (!mapping && !element)
1376 return;
1377
1378 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1379 pa_log_info("Failed to find a working mixer device.");
1380 return;
1381 }
1382
1383 if (element) {
1384
1385 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1386 goto fail;
1387
1388 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1389 goto fail;
1390
1391 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1392 pa_alsa_path_dump(u->mixer_path);
1393 } else {
1394
1395 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1396 goto fail;
1397
1398 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1399
1400 pa_log_debug("Probed mixer paths:");
1401 pa_alsa_path_set_dump(u->mixer_path_set);
1402 }
1403
1404 return;
1405
1406 fail:
1407
1408 if (u->mixer_path_set) {
1409 pa_alsa_path_set_free(u->mixer_path_set);
1410 u->mixer_path_set = NULL;
1411 } else if (u->mixer_path) {
1412 pa_alsa_path_free(u->mixer_path);
1413 u->mixer_path = NULL;
1414 }
1415
1416 if (u->mixer_handle) {
1417 snd_mixer_close(u->mixer_handle);
1418 u->mixer_handle = NULL;
1419 }
1420 }
1421
/* Activate a mixer path (the active port's path, or the single/first
 * probed path) and wire up hardware volume/mute callbacks plus mixer
 * event monitoring on the source. Returns 0 on success or when there
 * is nothing to set up, -1 when fd monitoring cannot be initialized. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* find_mixer() found no usable mixer: stay on software volume/mute. */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No port was chosen: fall back to the first probed path, if any. */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Anchor the scale so 0dB maps to maximum volume. */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* Without dB data expose the raw mixer step range instead. */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->source->get_volume = source_get_volume_cb;
        u->source->set_volume = source_set_volume_cb;

        u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->source->get_mute = source_get_mute_cb;
        u->source->set_mute = source_set_mute_cb;
        u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds on the main loop so external volume/mute
     * changes are picked up via mixer_callback. */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1509
/* Create a new ALSA capture source.
 *
 * Parses the module arguments, opens the PCM device (by mapping, by
 * device id, or by device string), probes the mixer, constructs the
 * pa_source, configures latency/watermarks, applies initial volume and
 * mute, and finally starts the IO thread.
 *
 * Returns the new pa_source, or NULL on failure (all partially
 * acquired resources are released via userdata_free()). */
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the daemon defaults; module arguments may override them. */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification");
        goto fail;
    }

    /* Keep the requested spec around: ALSA may tweak ss later, and the
     * tsched watermark is converted relative to what was requested. */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    /* NOTE(review): frag_size is unsigned, so "<= 0" is effectively
     * "== 0" -- this only guards against a zero-byte fragment. */
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    /* Convert the byte metrics into frame counts for the ALSA setup. */
    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse timer_scheduling argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer scheduling may be vetoed globally (e.g. unsuitable environment). */
    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    /* Smoother that maps sound-card time to system time for latency
     * reporting and tsched wakeup calculation. */
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Acquire the device reservation before touching the hardware. */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d are in-out flags: they come back cleared if the device cannot
     * do mmap (b) or tsched-compatible setup (d). */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        /* Caller supplied a mapping (card profile): device_id is mandatory. */
        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        /* device_id given but no mapping: probe a profile set and pick
         * a suitable mapping automatically. */
        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        /* Plain device string (or the default device). */
        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Downgrade gracefully if the device rejected mmap and/or tsched. */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse boolean argument namereg_fail.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);

    /* Fill the property list with device/buffer metadata. */
    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    /* Expose the probed mixer paths as selectable ports. */
    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    u->source->parent.process_msg = source_process_msg;
    /* The dynamic-latency callback only makes sense in tsched mode. */
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    u->source->set_port = source_set_port_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    /* Record the metrics that the device actually accepted. */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        /* The watermark was computed against the requested spec; rescale
         * it to the sample spec the device actually gave us. */
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        /* Without tsched the latency is fixed at one full hardware buffer. */
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:

    /* userdata_free() releases everything acquired so far. */
    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1825
/* Release everything owned by the userdata, in dependency order:
 * unlink the source, stop the IO thread, then free the individual
 * resources. Safe to call on a partially initialized userdata (every
 * member is checked before it is freed). */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the source from the core before shutting anything down. */
    if (u->source)
        pa_source_unlink(u->source);

    /* Stop the IO thread before freeing the structures it uses. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->source)
        pa_source_unref(u->source);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        /* Drop any pending captured data, then close the device. */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* A path set owns its paths, so free either the set or the lone path. */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    /* pa_xfree(NULL) is a no-op, so unset names are fine here. */
    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1874
/* Public destructor: tear down the ALSA source and all resources held
 * in its userdata. Thin wrapper around userdata_free(). */
void pa_alsa_source_free(pa_source *s) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}