/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <signal.h>
#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to recheck whether things are good again */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
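
/* Illustrative arithmetic for these defaults (not from the original source;
 * assumes a hypothetical stereo S16LE stream, i.e. 4 bytes per frame): at
 * 44.1 kHz the 2s tsched buffer is 2 * 44100 * 4 = 352800 bytes and the
 * 20ms watermark is 0.020 * 44100 * 4 = 3528 bytes, so we keep roughly
 * 345 KiB queued and refill once less than ~3.5 KiB is left. Likewise the
 * 256 byte rewind safeguard is 256 / (48000 * 4) ~= 1.33 ms at 48 kHz,
 * matching the comment above. */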

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;

    pa_memchunk memchunk;

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static pa_bool_t is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static pa_bool_t is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_bool_t b;

    pa_assert(w);
    pa_assert(u);

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}
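
/* Net effect of the two fixup helpers above (a sketch, assuming both clamps
 * have been applied): with max_use = hwbuf_size - hwbuf_unused we end up
 * with
 *
 *     min_wakeup <= tsched_watermark <= max_use - min_sleep
 *
 * so the timer wakeup fires before the buffer can drain, yet we never
 * commit to sleeping less than TSCHED_MIN_SLEEP_USEC per iteration. */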

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency, unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
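
/* Worked example (hypothetical numbers): if a client requested 100 ms of
 * latency and the watermark currently sits at 20 ms, this yields
 * *sleep_usec = 80 ms and *process_usec = 20 ms -- we sleep for the buffer
 * time minus the watermark and keep the watermark as headroom for rendering
 * and scheduling jitter. */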

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not when it is
     * merely removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#if 0
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
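
/* Worked example (hypothetical numbers, 44.1 kHz S16LE stereo): with
 * hwbuf_size = 352800 bytes, an avail of n_bytes = 350000 leaves
 * left_to_play = 2800 bytes (~16 ms), which would trigger a watermark
 * increase if it fell below watermark_inc_threshold. An avail larger than
 * the buffer itself can only mean the device played samples we never
 * wrote, i.e. an underrun. */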

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over, because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }


        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over, because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
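
/* The interval therefore doubles on every smoother update (sketch):
 * 2 ms -> 4 ms -> 8 ms -> ... -> 128 ms -> capped at 200 ms, so after about
 * seven updates the smoother settles down to one update per
 * SMOOTHER_MAX_INTERVAL. */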

static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
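
/* In other words (sketch of the formula above): latency is the amount of
 * audio we have handed to ALSA, in time units, minus the smoothed playback
 * position, plus whatever still sits in u->memchunk. E.g. if we have
 * written 1.000 s worth of samples and the smoother estimates 0.950 s as
 * played, the reported latency is ~50 ms. */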

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that, while we
     * are suspended, the old max_request/max_rewind values set before
     * the suspend cannot influence the per-stream buffers of newly
     * created streams, and the requirements of those new streams in
     * turn have no influence on the suspended device. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
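
/* Worked example (hypothetical numbers): a requested latency of 50 ms on a
 * 2 s, 44.1 kHz S16LE stereo buffer gives b = 8820 bytes, hence
 * hwbuf_unused = 352800 - 8820 = 343980 bytes and a base avail_min of
 * 343980 / 4 + 1 = 85996 frames before the tsched sleep allowance is added
 * on top. */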

/* Called from IO Context on unsuspend or from main thread when creating sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work around the assert in pa_sink_set_latency_range_within_thread():
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    /* reset the watermark to the value defined when sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
        pa_sink_set_mixer_dirty(u->sink, TRUE);
        return 0;
    }

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
        pa_sink_set_mixer_dirty(u->sink, TRUE);
        return 0;
    }

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
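
/* Sketch of the dB mapping above, with a hypothetical mixer path that
 * reaches max_dB = +3 dB: base_volume = pa_sw_volume_from_dB(-3) lies below
 * PA_VOLUME_NORM, and since read-back volumes are scaled by base_volume,
 * the hardware maximum (+3 dB) is what ends up displayed as 100%, while
 * hardware 0 dB shows up as base_volume. */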

static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    return pa_alsa_ucm_set_port(u->ucm_context, p, TRUE);
}

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);

    mixer_volume_init(u);

    if (s->set_mute)
        s->set_mute(s);
    if (s->flags & PA_SINK_DEFERRED_VOLUME) {
        if (s->write_volume)
            s->write_volume(s);
    } else {
        if (s->set_volume)
            s->set_volume(s);
    }

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * we can dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f, *g;
    uint32_t idx, n;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* Count how many sample rates we support */
    for (idx = 0, n = 0; u->rates[idx]; idx++)
        n++;

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f)) {
            g = pa_format_info_copy(f);
            pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
            pa_idxset_put(u->formats, g, NULL);
        }
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f)) {
            /* We don't set rates here since we'll just tack on a resampler for
             * unsupported rates */
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
        }
    }

    return TRUE;
}

static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
{
    struct userdata *u = s->userdata;
    int i;
    pa_bool_t supported = FALSE;

    pa_assert(u);

    for (i = 0; u->rates[i]; i++) {
        if (u->rates[i] == rate) {
            supported = TRUE;
            break;
        }
    }

    if (!supported) {
        pa_log_info("Sink does not support sample rate of %d Hz", rate);
        return FALSE;
    }

    if (!PA_SINK_IS_OPENED(s->state)) {
        pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
        u->sink->sample_spec.rate = rate;
        return TRUE;
    }

    return FALSE;
}
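
/* u->rates is a zero-terminated array, so for a hypothetical device
 * advertising { 44100, 48000, 0 } a call like sink_update_rate_cb(s, 48000)
 * succeeds only while the sink is not opened -- rate switches on a running
 * device are refused above. */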

static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
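
/* Worked example (hypothetical numbers): with hwbuf_size = 352800 bytes and
 * snd_pcm_avail() reporting 52800 free bytes, 300000 bytes are still
 * queued; with rewind_safeguard = 256 the rewind is capped at
 * 352800 - (52800 + 256) = 299744 bytes, however much the sink requested. */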

static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate that, we
                     * artificially decrease the sleep time until we
                     * have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
#endif

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
#endif

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;

        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            real_sleep = pa_rtclock_now();
        }
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                         (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                         (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark)
                pa_log_info("Scheduling delay of %0.2fms, you might want to investigate this to improve latency...",
                            (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC);
        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was not a regular exit from the loop, we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1838 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1839 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1840
1841 finish:
1842 pa_log_debug("Thread shutting down");
1843 }

static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = TRUE;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = FALSE;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}
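
/* For illustration (hypothetical values): device_id "pci-0000_00_1b.0"
 * combined with the "analog-stereo" mapping yields the sink name
 * "alsa_output.pci-0000_00_1b.0.analog-stereo"; without a mapping it
 * would be plain "alsa_output.pci-0000_00_1b.0". */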

static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
    snd_hctl_t *hctl;

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->output_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
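
/* Roughly: an explicit control= element yields a single synthesized and
 * probed mixer path, while a mapping normally contributes a whole probed
 * path set; setup_mixer() below copes with either shape. */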

static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);

        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    } else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
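
/* Design note: with deferred ("synchronized") volume the mixer descriptors
 * are polled from the IO thread via the rtpoll, presumably so that volume
 * updates can be timed against playback; otherwise mixer events arrive
 * through a pa_alsa_fdlist hooked into the main loop. */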

pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;
    void *state = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;
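
    /* Worked example, assuming the stock daemon defaults of 4 fragments
     * of 25ms each at S16LE stereo 44.1kHz (4 bytes per frame): one
     * fragment is roughly 4410 bytes (about 1102 frames), giving a
     * buffer_size of about 17640 bytes, i.e. 100ms in total. */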

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
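
    /* The pa_smoother_new() arguments are, in the order declared in
     * pulsecore/time-smoother.h: the adjustment time (1s), the history
     * window (10s), whether the clock is monotonic, whether to apply
     * smoothing, the minimum number of history points, the current time,
     * and whether to start out paused. */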

    /* use ucm */
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

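    /* b and d are passed by reference into the open calls below; on
     * return they report whether mmap and timer-based scheduling,
     * respectively, could actually be granted for this device. */
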
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }
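
    /* To recap: three mutually exclusive ways of opening the device, by
     * device_id= with an explicit mapping, by device_id= with auto-probed
     * profiles, or by a raw ALSA device string (device=, defaulting to
     * "default"). */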

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");

        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on underrun");
    }
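
    /* Timer-based scheduling needs mmap access on top of its own
     * capability flag, which is why tsched is dropped when either b
     * (mmap) or d (tsched) came back false. */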

    if (is_iec958(u) || is_hdmi(u))
        set_formats = TRUE;

    u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    if (!u->ucm_context)
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);
    pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context)
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, TRUE, card);
    else if (u->mixer_path_set)
        pa_alsa_add_ports(&data, u->mixer_path_set, card);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
                          (set_formats ? PA_SINK_SET_FORMATS : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    if (u->ucm_context)
        u->sink->set_port = sink_set_port_ucm_cb;
    else
        u->sink->set_port = sink_set_port_cb;
    if (u->sink->alternate_sample_rate)
        u->sink->update_rate = sink_update_rate_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }
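
    /* Rewinding rewrites not-yet-played samples in the hardware buffer
     * in place, which is only trusted on real "hw" devices here; for
     * ALSA plugin devices max_rewind is therefore forced to 0. */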

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (u->ucm_context) {
        if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, TRUE) < 0)
            goto fail;
    } else if (setup_mixer(u, ignore_dB) < 0)
        goto fail;
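
    /* With timer-based scheduling the sink's latency is dynamic and the
     * watermark machinery adjusts it at runtime; without it, the latency
     * is pinned to the duration of one full hardware buffer. */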

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);

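    /* In short: a user-supplied initial volume/mute is pushed out to the
     * hardware, otherwise the hardware's current state is read back;
     * with deferred volume, write_volume() then flushes the pending
     * hardware write. */
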
    if (set_formats) {
        /* For S/PDIF and HDMI, allow getting/setting custom formats */
        pa_format_info *format;

        /* To start with, we only support PCM formats. Other formats may be added
         * with pa_sink_set_formats(). */
        format = pa_format_info_new();
        format->encoding = PA_ENCODING_PCM;
        u->formats = pa_idxset_new(NULL, NULL);
        pa_idxset_put(u->formats, format, NULL);

        u->sink->get_formats = sink_get_formats;
        u->sink->set_formats = sink_set_formats;
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }
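
    /* PA_MESSAGE_SHUTDOWN makes thread_func() fall out of its loop
     * through the "finish" label, so pa_thread_free() can join the IO
     * thread before the resources it uses are torn down below. */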

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path && !u->mixer_path_set)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->formats)
        pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);

    if (u->rates)
        pa_xfree(u->rates);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}