]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-source.c
alsa: Update process_usec before going to sleep
[pulseaudio] / src / modules / alsa / alsa-source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
57 /* #define DEBUG_TIMING */
58
59 #define DEFAULT_DEVICE "default"
60
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
63
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
70
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
73
74 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
75 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
76
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
79
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
/* Per-instance state of one ALSA capture source. One of these exists per
 * loaded module instance; it is shared between the main thread and the IO
 * thread (thread ownership of individual fields follows the usual
 * PulseAudio main/IO-context split). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread and the machinery used to talk to it */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;   /* NULL while the device is suspended (see suspend()/unsuspend()) */

    /* Hardware mixer handling (volume/mute via alsa-util paths) */
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;   /* last volume read from/written to the hardware */

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    /* Earliest time at which the watermark may be decreased again; 0 means
     * the grace period has not been started yet (see decrease_watermark()) */
    pa_usec_t watermark_dec_not_before;

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, sync_volume:1;

    /* Set to TRUE by unsuspend() and after stream recovery in try_recover();
     * presumably cleared by the IO thread loop (not in view) -- TODO confirm */
    pa_bool_t first;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation state, fed by update_smoother() */
    pa_smoother *smoother;
    uint64_t read_count;          /* total bytes read from the device since resume */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Session device-reservation integration */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
137
138 static void userdata_free(struct userdata *u);
139
140 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
141 pa_assert(r);
142 pa_assert(u);
143
144 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
145 return PA_HOOK_CANCEL;
146
147 return PA_HOOK_OK;
148 }
149
150 static void reserve_done(struct userdata *u) {
151 pa_assert(u);
152
153 if (u->reserve_slot) {
154 pa_hook_slot_free(u->reserve_slot);
155 u->reserve_slot = NULL;
156 }
157
158 if (u->reserve) {
159 pa_reserve_wrapper_unref(u->reserve);
160 u->reserve = NULL;
161 }
162 }
163
164 static void reserve_update(struct userdata *u) {
165 const char *description;
166 pa_assert(u);
167
168 if (!u->source || !u->reserve)
169 return;
170
171 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
172 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
173 }
174
175 static int reserve_init(struct userdata *u, const char *dname) {
176 char *rname;
177
178 pa_assert(u);
179 pa_assert(dname);
180
181 if (u->reserve)
182 return 0;
183
184 if (pa_in_system_mode())
185 return 0;
186
187 if (!(rname = pa_alsa_get_reserve_name(dname)))
188 return 0;
189
190 /* We are resuming, try to lock the device */
191 u->reserve = pa_reserve_wrapper_get(u->core, rname);
192 pa_xfree(rname);
193
194 if (!(u->reserve))
195 return -1;
196
197 reserve_update(u);
198
199 pa_assert(!u->reserve_slot);
200 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
201
202 return 0;
203 }
204
205 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
206 pa_bool_t b;
207
208 pa_assert(w);
209 pa_assert(u);
210
211 b = PA_PTR_TO_UINT(busy) && !u->reserve;
212
213 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
214 return PA_HOOK_OK;
215 }
216
217 static void monitor_done(struct userdata *u) {
218 pa_assert(u);
219
220 if (u->monitor_slot) {
221 pa_hook_slot_free(u->monitor_slot);
222 u->monitor_slot = NULL;
223 }
224
225 if (u->monitor) {
226 pa_reserve_monitor_wrapper_unref(u->monitor);
227 u->monitor = NULL;
228 }
229 }
230
231 static int reserve_monitor_init(struct userdata *u, const char *dname) {
232 char *rname;
233
234 pa_assert(u);
235 pa_assert(dname);
236
237 if (pa_in_system_mode())
238 return 0;
239
240 if (!(rname = pa_alsa_get_reserve_name(dname)))
241 return 0;
242
243 /* We are resuming, try to lock the device */
244 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
245 pa_xfree(rname);
246
247 if (!(u->monitor))
248 return -1;
249
250 pa_assert(!u->monitor_slot);
251 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
252
253 return 0;
254 }
255
256 static void fix_min_sleep_wakeup(struct userdata *u) {
257 size_t max_use, max_use_2;
258
259 pa_assert(u);
260 pa_assert(u->use_tsched);
261
262 max_use = u->hwbuf_size - u->hwbuf_unused;
263 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
264
265 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
266 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
267
268 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
269 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
270 }
271
272 static void fix_tsched_watermark(struct userdata *u) {
273 size_t max_use;
274 pa_assert(u);
275 pa_assert(u->use_tsched);
276
277 max_use = u->hwbuf_size - u->hwbuf_unused;
278
279 if (u->tsched_watermark > max_use - u->min_sleep)
280 u->tsched_watermark = max_use - u->min_sleep;
281
282 if (u->tsched_watermark < u->min_wakeup)
283 u->tsched_watermark = u->min_wakeup;
284 }
285
/* React to an overrun (or near-overrun) by making wakeups earlier. First
 * try to grow the watermark; if it is already maxed out, raise the source's
 * minimal latency instead so more buffer becomes usable. */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we get here both the watermark and the latency are maxed out:
     * overruns can no longer be avoided. */
}
318
/* Opportunistically shrink the wakeup watermark after a sustained period of
 * comfortable headroom. A grace period (watermark_dec_not_before) makes
 * sure we only decrease when things have been stable for a while. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* 0 means the grace period has not been started yet: start it now
     * without touching the watermark. */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Still inside the grace period -- too early to decrease. */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    /* Shrink by the configured step, but never by more than half at once. */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range*/

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
352
353 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
354 pa_usec_t wm, usec;
355
356 pa_assert(sleep_usec);
357 pa_assert(process_usec);
358
359 pa_assert(u);
360 pa_assert(u->use_tsched);
361
362 usec = pa_source_get_requested_latency_within_thread(u->source);
363
364 if (usec == (pa_usec_t) -1)
365 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
366
367 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
368
369 if (wm > usec)
370 wm = usec/2;
371
372 *sleep_usec = usec - wm;
373 *process_usec = wm;
374
375 #ifdef DEBUG_TIMING
376 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
377 (unsigned long) (usec / PA_USEC_PER_MSEC),
378 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
379 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
380 #endif
381 }
382
383 static int try_recover(struct userdata *u, const char *call, int err) {
384 pa_assert(u);
385 pa_assert(call);
386 pa_assert(err < 0);
387
388 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
389
390 pa_assert(err != -EAGAIN);
391
392 if (err == -EPIPE)
393 pa_log_debug("%s: Buffer overrun!", call);
394
395 if (err == -ESTRPIPE)
396 pa_log_debug("%s: System suspended!", call);
397
398 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
399 pa_log("%s: %s", call, pa_alsa_strerror(err));
400 return -1;
401 }
402
403 u->first = TRUE;
404 return 0;
405 }
406
/* Given that n_bytes are currently available for reading, compute how much
 * space is left in the hardware buffer before it overruns, and adapt the
 * tsched watermark accordingly (grow on overrun/low headroom, shrink on
 * persistent high headroom). on_timeout tells us whether this wakeup was
 * caused by the timer rather than e.g. POLLIN. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Headroom was neither high nor low: abort any pending
         * decrease grace period. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
459
/* Read as much data as possible from the device using the mmap transfer
 * path and post it to the source. On return *sleep_usec holds how long the
 * IO thread may sleep (tsched mode only). `polled` indicates we were woken
 * by POLLIN, `on_timeout` that the timer fired. Returns 1 if any data was
 * read, 0 if none, negative on unrecoverable error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many frames does the device have for us? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* In tsched mode, don't bother reading if we still have plenty of
         * headroom -- unless ALSA explicitly woke us via poll. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing available is a driver bug; warn once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }


        /* Cap the number of outer iterations so we cannot starve the
         * rest of the IO thread. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: map, post and commit chunks until the n_bytes we
         * saw available have all been consumed. */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* EAGAIN after we already read something just means the
                 * device ran dry -- not an error. */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed hardware buffer in a fixed (read-only)
             * memblock and hand it to the source without copying. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the remaining space shrinks to the watermark,
         * recomputed here because check_left_to_record() may have
         * changed it above. */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
621
/* Read data from the device using plain snd_pcm_readi() (non-mmap fallback
 * path) and post it to the source. Semantics mirror mmap_read(): returns 1
 * if any data was read, 0 if none, negative on unrecoverable error;
 * *sleep_usec is filled in for tsched mode. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* Plenty of headroom and not woken by poll: skip reading. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing available is a driver bug; warn once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Cap iterations so we don't starve the rest of the IO thread. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        /* Inner loop: copy the available bytes into freshly allocated
         * memblocks, one readi() call per block. */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* EAGAIN after a successful read just means we drained
                 * the device -- not an error. */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the remaining space shrinks to the watermark,
         * recomputed here because check_left_to_record() may have
         * changed it above. */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
751
/* Feed a fresh (system time, sound-card time) sample pair into the latency
 * smoother. Sound-card time is derived from the total bytes read plus the
 * current device delay; system time comes from the driver's htimestamp when
 * available, the rtclock otherwise. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
797
798 static pa_usec_t source_get_latency(struct userdata *u) {
799 int64_t delay;
800 pa_usec_t now1, now2;
801
802 pa_assert(u);
803
804 now1 = pa_rtclock_now();
805 now2 = pa_smoother_get(u->smoother, now1);
806
807 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
808
809 return delay >= 0 ? (pa_usec_t) delay : 0;
810 }
811
812 static int build_pollfd(struct userdata *u) {
813 pa_assert(u);
814 pa_assert(u->pcm_handle);
815
816 if (u->alsa_rtpoll_item)
817 pa_rtpoll_item_free(u->alsa_rtpoll_item);
818
819 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
820 return -1;
821
822 return 0;
823 }
824
825 /* Called from IO context */
826 static int suspend(struct userdata *u) {
827 pa_assert(u);
828 pa_assert(u->pcm_handle);
829
830 pa_smoother_pause(u->smoother, pa_rtclock_now());
831
832 /* Let's suspend */
833 snd_pcm_close(u->pcm_handle);
834 u->pcm_handle = NULL;
835
836 if (u->alsa_rtpoll_item) {
837 pa_rtpoll_item_free(u->alsa_rtpoll_item);
838 u->alsa_rtpoll_item = NULL;
839 }
840
841 pa_log_info("Device suspended...");
842
843 return 0;
844 }
845
846 /* Called from IO context */
/* Called from IO context */
/* Recompute the software parameters (hwbuf_unused, watermark, avail_min)
 * from the currently requested latency and push them to ALSA. Returns 0 on
 * success, a negative ALSA error code otherwise. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is left unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* The usable buffer size changed, so re-clamp the derived values. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode let ALSA only wake us once the planned sleep
         * time worth of data has accumulated. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
898
899 /* Called from IO context */
/* Called from IO context */
/* Resume from suspend: reopen the PCM device and restore the exact
 * configuration (sample spec, period/buffer sizes, access mode) we had
 * before suspending. Fails (-PA_ERR_IO) if the hardware can no longer
 * provide the original settings. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we were using before. */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* pa_alsa_set_hw_params() may have negotiated something different;
     * anything other than the original setup is a resume failure. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Reset the latency-interpolation state for the fresh stream. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
976
977 /* Called from IO context */
/* Called from IO context */
/* Message handler for the source object: answers latency queries and drives
 * the suspend/resume state machine; everything else is forwarded to the
 * generic pa_source_process_msg(). */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report zero latency. */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: just set up polling. */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Waking up from suspend: reopen the device. */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
1037
1038 /* Called from main context */
1039 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1040 pa_source_state_t old_state;
1041 struct userdata *u;
1042
1043 pa_source_assert_ref(s);
1044 pa_assert_se(u = s->userdata);
1045
1046 old_state = pa_source_get_state(u->source);
1047
1048 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1049 reserve_done(u);
1050 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1051 if (reserve_init(u, u->device_name) < 0)
1052 return -PA_ERR_BUSY;
1053
1054 return 0;
1055 }
1056
1057 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1058 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1059
1060 pa_assert(u);
1061 pa_assert(u->mixer_handle);
1062
1063 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1064 return 0;
1065
1066 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1067 return 0;
1068
1069 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1070 pa_source_get_volume(u->source, TRUE);
1071 pa_source_get_mute(u->source, TRUE);
1072 }
1073
1074 return 0;
1075 }
1076
1077 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1078 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1079
1080 pa_assert(u);
1081 pa_assert(u->mixer_handle);
1082
1083 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1084 return 0;
1085
1086 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1087 return 0;
1088
1089 if (mask & SND_CTL_EVENT_MASK_VALUE)
1090 pa_source_update_volume_and_mute(u->source);
1091
1092 return 0;
1093 }
1094
/* Read the current hardware volume from the mixer and propagate it to the
 * source if it changed. When the mixer path supports dB we also clear any
 * leftover software volume, since the hardware change supersedes it. */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug("        in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    /* No change -- nothing to propagate. */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
}
1127
/* pa_source set_volume callback (main thread): pushes s->real_volume into
 * the hardware mixer. With a dB scale the hardware usually cannot hit the
 * requested value exactly, so the residual is applied as software volume
 * (unless it is within VOLUME_ACCURACY of unity, in which case we skip it
 * to save CPU). Without dB info we simply report back what the hardware
 * actually accepted. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    /* With deferred ("sync") volume the actual mixer write happens later
     * from the IO thread; here we only compute the rounding. */
    pa_bool_t sync_volume = !!(s->flags & PA_SOURCE_SYNC_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1185
/* Deferred-volume write callback (IO thread): writes the previously
 * scheduled hardware volume (thread_info.current_hw_volume) to the mixer.
 * Only used when PA_SOURCE_SYNC_VOLUME is set. On success it verifies the
 * value the hardware actually accepted and logs if it deviates by more
 * than VOLUME_ACCURACY from the request. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_SYNC_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Ratio between what was written and what was requested */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            /* The union keeps the two scratch buffers overlapping; the
             * %-string and dB-string prints never run concurrently. */
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1227
1228 static void source_get_mute_cb(pa_source *s) {
1229 struct userdata *u = s->userdata;
1230 pa_bool_t b;
1231
1232 pa_assert(u);
1233 pa_assert(u->mixer_path);
1234 pa_assert(u->mixer_handle);
1235
1236 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1237 return;
1238
1239 s->muted = b;
1240 }
1241
/* pa_source set_mute callback (main thread): pushes s->muted into the
 * hardware mute switch of the currently selected mixer path. Errors from
 * the mixer write are deliberately ignored here. */
static void source_set_mute_cb(pa_source *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}
1251
/* Wires up the source's volume/mute callbacks according to what the
 * currently selected mixer path supports, and initializes base_volume and
 * n_volume_steps. Falls back to pure software volume/mute when the path
 * offers no hardware control. Called from setup_mixer() and whenever the
 * active port (and thus the mixer path) changes. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        /* No hardware volume on this path: unhook all hw-volume callbacks */
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred ("sync") volume needs a dB scale to be useful */
        if (u->mixer_path->has_dB && u->sync_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled synchronous volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* base_volume maps 0dB to PA_VOLUME_NORM (maximum) */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            pa_source_enable_decibel_volume(u->source, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1300
/* pa_source set_port callback (main thread): activates the mixer path
 * attached to the new port, re-initializes the volume/mute plumbing for
 * it, and re-applies the current mute/volume so the new path reflects the
 * source's state. Always succeeds (returns 0). */
static int source_set_port_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    /* pa_assert_se: the assignment must happen even with NDEBUG */
    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    /* Callback set differs per path (hw volume/mute support may change) */
    mixer_volume_init(u);

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply current state on the newly selected path */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1326
1327 static void source_update_requested_latency_cb(pa_source *s) {
1328 struct userdata *u = s->userdata;
1329 pa_assert(u);
1330 pa_assert(u->use_tsched); /* only when timer scheduling is used
1331 * we can dynamically adjust the
1332 * latency */
1333
1334 if (!u->pcm_handle)
1335 return;
1336
1337 update_sw_params(u);
1338 }
1339
1340 static void thread_func(void *userdata) {
1341 struct userdata *u = userdata;
1342 unsigned short revents = 0;
1343
1344 pa_assert(u);
1345
1346 pa_log_debug("Thread starting up");
1347
1348 if (u->core->realtime_scheduling)
1349 pa_make_realtime(u->core->realtime_priority);
1350
1351 pa_thread_mq_install(&u->thread_mq);
1352
1353 for (;;) {
1354 int ret;
1355 pa_usec_t rtpoll_sleep = 0;
1356
1357 #ifdef DEBUG_TIMING
1358 pa_log_debug("Loop");
1359 #endif
1360
1361 /* Read some data and pass it to the sources */
1362 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1363 int work_done;
1364 pa_usec_t sleep_usec = 0;
1365 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1366
1367 if (u->first) {
1368 pa_log_info("Starting capture.");
1369 snd_pcm_start(u->pcm_handle);
1370
1371 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1372
1373 u->first = FALSE;
1374 }
1375
1376 if (u->use_mmap)
1377 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1378 else
1379 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1380
1381 if (work_done < 0)
1382 goto fail;
1383
1384 /* pa_log_debug("work_done = %i", work_done); */
1385
1386 if (work_done)
1387 update_smoother(u);
1388
1389 if (u->use_tsched) {
1390 pa_usec_t cusec;
1391
1392 /* OK, the capture buffer is now empty, let's
1393 * calculate when to wake up next */
1394
1395 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1396
1397 /* Convert from the sound card time domain to the
1398 * system time domain */
1399 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1400
1401 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1402
1403 /* We don't trust the conversion, so we wake up whatever comes first */
1404 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1405 }
1406 }
1407
1408 if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1409 pa_usec_t volume_sleep;
1410 pa_source_volume_change_apply(u->source, &volume_sleep);
1411 if (volume_sleep > 0)
1412 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1413 }
1414
1415 if (rtpoll_sleep > 0)
1416 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1417 else
1418 pa_rtpoll_set_timer_disabled(u->rtpoll);
1419
1420 /* Hmm, nothing to do. Let's sleep */
1421 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1422 goto fail;
1423
1424 if (u->source->flags & PA_SOURCE_SYNC_VOLUME)
1425 pa_source_volume_change_apply(u->source, NULL);
1426
1427 if (ret == 0)
1428 goto finish;
1429
1430 /* Tell ALSA about this and process its response */
1431 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1432 struct pollfd *pollfd;
1433 int err;
1434 unsigned n;
1435
1436 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1437
1438 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1439 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1440 goto fail;
1441 }
1442
1443 if (revents & ~POLLIN) {
1444 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1445 goto fail;
1446
1447 u->first = TRUE;
1448 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1449 pa_log_debug("Wakeup from ALSA!");
1450
1451 } else
1452 revents = 0;
1453 }
1454
1455 fail:
1456 /* If this was no regular exit from the loop we have to continue
1457 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1458 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1459 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1460
1461 finish:
1462 pa_log_debug("Thread shutting down");
1463 }
1464
1465 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1466 const char *n;
1467 char *t;
1468
1469 pa_assert(data);
1470 pa_assert(ma);
1471 pa_assert(device_name);
1472
1473 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1474 pa_source_new_data_set_name(data, n);
1475 data->namereg_fail = TRUE;
1476 return;
1477 }
1478
1479 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1480 data->namereg_fail = TRUE;
1481 else {
1482 n = device_id ? device_id : device_name;
1483 data->namereg_fail = FALSE;
1484 }
1485
1486 if (mapping)
1487 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1488 else
1489 t = pa_sprintf_malloc("alsa_input.%s", n);
1490
1491 pa_source_new_data_set_name(data, t);
1492 pa_xfree(t);
1493 }
1494
/* Opens the mixer device belonging to the PCM and probes either a single
 * synthesized path (when an explicit control element= was given) or the
 * full path set of the mapping. On any probe failure everything that was
 * set up here is torn down again, leaving u->mixer_* all NULL — mixer
 * support is optional, so this function cannot fail hard. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Nothing to look for: neither a mapping nor an explicit element */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* Explicit element: build a single-element path around it */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* No explicit element: probe all paths of the mapping */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
    }

    return;

fail:

    /* Tear down in reverse order of setup; only one of path_set/path can
     * have been created above */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1540
/* Activates the mixer path for the current port (or the single probed
 * path), wires up volume/mute callbacks via mixer_volume_init(), and
 * registers mixer event monitoring — in the IO thread when deferred
 * volume is on, otherwise in the main loop. Returns 0 on success (also
 * when there simply is no mixer), negative on monitoring setup failure. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* No mixer found earlier: nothing to set up, not an error */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;

        PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
            /* Deferred volume: monitor the mixer from the IO thread */
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            /* Classic mode: monitor the mixer from the main loop */
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1621
/* Creates and initializes an ALSA capture source from module arguments.
 *
 * Main steps: parse sample spec / buffer metrics / feature flags from ma,
 * open the PCM (by mapping, by device_id with auto-probing, or by plain
 * device string), probe the mixer, create the pa_source object, configure
 * latency (dynamic with timer scheduling, fixed otherwise), start the IO
 * thread, apply/read initial volume and mute, and publish the source.
 *
 * Returns the new pa_source, or NULL on failure (everything allocated so
 * far is released via userdata_free()). Ownership of the returned source
 * stays with this module; callers free it with pa_alsa_source_free(). */
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Sample spec / channel map: core defaults, overridable via ma */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Keep the pre-negotiation spec: ALSA may tweak ss later, but the
     * tsched watermark is interpreted relative to what was requested */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    /* frag_size is unsigned, so this effectively guards against 0 */
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    sync_volume = m->core->sync_volume;
    if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
        pa_log("Failed to parse sync_volume argument.");
        goto fail;
    }

    /* tsched may be globally vetoed (e.g. when running without rt clock) */
    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->sync_volume = sync_volume;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Grab the device-reservation locks before touching the hardware */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d come back cleared if mmap/tsched turned out to be unsupported */
    b = use_mmap;
    d = use_tsched;

    /* Three ways to open the PCM: explicit mapping, device_id with
     * auto-probing, or a literal device string */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Downgrade gracefully if the device rejected mmap and/or tsched */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse boolean argument namereg_fail.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);

    /* Fill in device properties for clients and the rest of the daemon */
    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
                                 &u->source->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse sync_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
                                 &u->source->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse sync_volume_extra_delay parameter");
        goto fail;
    }

    /* Hook up the source callbacks that this module implements */
    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    u->source->set_port = source_set_port_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        /* Watermark was expressed relative to the requested spec; convert
         * it to the spec ALSA actually gave us */
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
        u->source->write_volume(u->source);

    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1960
/* Tears down everything pa_alsa_source_new() built up, in a safe order:
 * unlink the source first (no new work arrives), then stop the IO thread,
 * then release ALSA and bookkeeping resources. Tolerates partially
 * constructed userdata (every field is checked before being freed), so it
 * also serves as the failure path of the constructor. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->source)
        pa_source_unlink(u->source);

    /* Synchronously ask the IO thread to shut down and reap it */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->source)
        pa_source_unref(u->source);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Drop pending capture data before closing the PCM */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* A path owned by a path set is freed with the set, not separately */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
2012
2013 void pa_alsa_source_free(pa_source *s) {
2014 struct userdata *u;
2015
2016 pa_source_assert_ref(s);
2017 pa_assert_se(u = s->userdata);
2018
2019 userdata_free(u);
2020 }