/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <signal.h>
#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s    -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms  -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms   -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s   -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms   -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold during the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms  -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms   -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s   -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s    -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms   -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;
    pa_usec_t tsched_watermark_usec;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static bool is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static bool is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);

    if (pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_assert(w);
    pa_assert(u);

    if (PA_PTR_TO_UINT(busy) && !u->reserve) {
        pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
        pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
    } else {
        pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
        pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
    }

    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

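/* Derive the minimum sleep and wakeup distances (in bytes) from
 * TSCHED_MIN_SLEEP_USEC/TSCHED_MIN_WAKEUP_USEC, clamped to between one
 * frame and half of the usable buffer. */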
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

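/* Keep the timer-scheduling watermark inside the usable buffer: leave at
 * least min_sleep of headroom above it and never let it drop below the
 * minimum wakeup distance. */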
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;

    u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
}

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency, unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

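/* Split the effective latency into the time we may sleep and the time
 * reserved for processing (the watermark). */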
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = u->tsched_watermark_usec;

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}

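/* Handle a PCM error (-EPIPE underrun, -ESTRPIPE suspend, ...) via
 * snd_pcm_recover(); on success, playback is restarted from scratch. */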
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = true;
    u->since_start = 0;
    return 0;
}

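/* From the number of writable bytes ALSA reported, work out how much audio
 * is still queued in the hardware buffer, detect underruns, and grow or
 * shrink the watermark accordingly when timer scheduling is active. */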
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_play;
    bool underrun = false;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = true;

#if 0
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        bool reset_not_before = true;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = false;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}

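/* Fill the playback buffer via the mmap interface: render sink data
 * directly into the DMA area between snd_pcm_mmap_begin() and
 * snd_pcm_mmap_commit(). Returns 1 if any data was written, 0 if not,
 * negative on error. */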
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = false;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = false;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;
            size_t written;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            written = frames * u->frame_size;
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = true;

            u->write_count += written;
            u->since_start += written;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
#endif

            if (written >= n_bytes)
                break;

            n_bytes -= written;
        }
    }

    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

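/* Same job as mmap_write(), but for devices without mmap support: render
 * into a memchunk and push it to the device with snd_pcm_writei(). */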
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = false;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = false;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;
            size_t written;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            written = frames * u->frame_size;
            u->memchunk.index += written;
            u->memchunk.length -= written;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = true;

            u->write_count += written;
            u->since_start += written;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (written >= n_bytes)
                break;

            n_bytes -= written;
        }
    }

    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

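/* Feed the current playback position (bytes written minus the hardware
 * delay) into the time smoother that interpolates between the sound card
 * clock and the system clock. */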
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, false)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}

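/* Estimate the sink latency from the smoother, plus whatever still sits in
 * our partially written memchunk. */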
static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}

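/* (Re)create the rtpoll item that wraps the PCM's poll descriptors. */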
static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}

/* Called from IO context on unsuspend or from main thread when creating sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_sink_set_latency_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    bool b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = true;
    u->since_start = 0;

    /* reset the watermark to the value defined when sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, true);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
        pa_sink_set_mixer_dirty(u->sink, true);
        return 0;
    }

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, true);
        pa_sink_get_mute(u->sink, true);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
        pa_sink_set_mixer_dirty(u->sink, true);
        return 0;
    }

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s",
                 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    bool deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    bool b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, true);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, false);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}

static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    return pa_alsa_ucm_set_port(u->ucm_context, p, true);
}

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);

    mixer_volume_init(u);

    if (s->set_mute)
        s->set_mute(s);
    if (s->flags & PA_SINK_DEFERRED_VOLUME) {
        if (s->write_volume)
            s->write_volume(s);
    } else {
        if (s->set_volume)
            s->set_volume(s);
    }

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

static bool sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f, *g;
    uint32_t idx, n;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return false;
    }

    pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* Count how many sample rates we support */
    for (idx = 0, n = 0; u->rates[idx]; idx++)
        n++;

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f)) {
            g = pa_format_info_copy(f);
            pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
            pa_idxset_put(u->formats, g, NULL);
        }
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f)) {
            /* We don't set rates here since we'll just tack on a resampler for
             * unsupported rates */
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
        }
    }

    return true;
}

static int sink_update_rate_cb(pa_sink *s, uint32_t rate) {
    struct userdata *u = s->userdata;
    int i;
    bool supported = false;

    pa_assert(u);

    for (i = 0; u->rates[i]; i++) {
        if (u->rates[i] == rate) {
            supported = true;
            break;
        }
    }

    if (!supported) {
        pa_log_info("Sink does not support sample rate of %d Hz", rate);
        return -1;
    }

    if (!PA_SINK_IS_OPENED(s->state)) {
        pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
        u->sink->sample_spec.rate = rate;
        return 0;
    }

    return -1;
}

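/* Called from IO context. Rewind the hardware buffer as far as the sink
 * requested, but never into the region ALSA has already consumed, keeping
 * rewind_safeguard bytes of extra margin for DMA. */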
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
        pa_sink_process_rewind(u->sink, 0);
        return 0;
    }

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = true;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}

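/* The IO thread: processes rewind requests, renders and writes data,
 * programs the wakeup timer and dispatches ALSA poll events. */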
1692 static void thread_func(void *userdata) {
1693 struct userdata *u = userdata;
1694 unsigned short revents = 0;
1695
1696 pa_assert(u);
1697
1698 pa_log_debug("Thread starting up");
1699
1700 if (u->core->realtime_scheduling)
1701 pa_make_realtime(u->core->realtime_priority);
1702
1703 pa_thread_mq_install(&u->thread_mq);
1704
1705 for (;;) {
1706 int ret;
1707 pa_usec_t rtpoll_sleep = 0, real_sleep;
1708
1709 #ifdef DEBUG_TIMING
1710 pa_log_debug("Loop");
1711 #endif
1712
1713 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
1714 if (process_rewind(u) < 0)
1715 goto fail;
1716 }
1717
1718 /* Render some data and write it to the dsp */
1719 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1720 int work_done;
1721 pa_usec_t sleep_usec = 0;
1722 bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1723
1724 if (u->use_mmap)
1725 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1726 else
1727 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1728
1729 if (work_done < 0)
1730 goto fail;
1731
1732 /* pa_log_debug("work_done = %i", work_done); */
1733
1734 if (work_done) {
1735
1736 if (u->first) {
1737 pa_log_info("Starting playback.");
1738 snd_pcm_start(u->pcm_handle);
1739
1740 pa_smoother_resume(u->smoother, pa_rtclock_now(), true);
1741
1742 u->first = false;
1743 }
1744
1745 update_smoother(u);
1746 }
1747
1748 if (u->use_tsched) {
1749 pa_usec_t cusec;
1750
1751 if (u->since_start <= u->hwbuf_size) {
1752
1753 /* USB devices on ALSA seem to hit a buffer
1754 * underrun during the first iterations much
1755 * quicker then we calculate here, probably due to
1756 * the transport latency. To accommodate for that
1757 * we artificially decrease the sleep time until
1758 * we have filled the buffer at least once
1759 * completely.*/
1760
1761 if (pa_log_ratelimit(PA_LOG_DEBUG))
1762 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1763 sleep_usec /= 2;
1764 }
1765
1766 /* OK, the playback buffer is now full, let's
1767 * calculate when to wake up next */
1768 #ifdef DEBUG_TIMING
1769 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1770 #endif
1771
1772 /* Convert from the sound card time domain to the
1773 * system time domain */
1774 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1775
1776 #ifdef DEBUG_TIMING
1777 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1778 #endif
1779
1780 /* We don't trust the conversion, so we wake up whatever comes first */
1781 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1782 }
1783
1784 u->after_rewind = false;
1785
1786 }
1787
1788 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1789 pa_usec_t volume_sleep;
1790 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1791 if (volume_sleep > 0) {
1792 if (rtpoll_sleep > 0)
1793 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1794 else
1795 rtpoll_sleep = volume_sleep;
1796 }
1797 }
1798
1799 if (rtpoll_sleep > 0) {
1800 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1801 real_sleep = pa_rtclock_now();
1802 }
1803 else
1804 pa_rtpoll_set_timer_disabled(u->rtpoll);
1805
1806 /* Hmm, nothing to do. Let's sleep */
1807 if ((ret = pa_rtpoll_run(u->rtpoll, true)) < 0)
1808 goto fail;
1809
1810 if (rtpoll_sleep > 0) {
1811 real_sleep = pa_rtclock_now() - real_sleep;
1812 #ifdef DEBUG_TIMING
1813 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1814 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1815 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
1816 #endif
1817 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
1818 pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
1819 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
1820 (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
1821 }
1822
1823 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1824 pa_sink_volume_change_apply(u->sink, NULL);
1825
1826 if (ret == 0)
1827 goto finish;
1828
1829 /* Tell ALSA about this and process its response */
1830 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1831 struct pollfd *pollfd;
1832 int err;
1833 unsigned n;
1834
1835 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1836
1837 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1838 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1839 goto fail;
1840 }
1841
1842 if (revents & ~POLLOUT) {
1843 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1844 goto fail;
1845
1846 u->first = true;
1847 u->since_start = 0;
1848 revents = 0;
1849 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1850 pa_log_debug("Wakeup from ALSA!");
1851
1852 } else
1853 revents = 0;
1854 }
1855
1856 fail:
1857     /* If this was not a regular exit from the loop, we have to continue
1858      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1859 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1860 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1861
1862 finish:
1863 pa_log_debug("Thread shutting down");
1864 }
1865
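/* Pick the sink name: an explicit sink_name= argument is used verbatim (and
 * makes name registration collisions fatal); otherwise the name= argument,
 * the device id or the device name is wrapped into
 * "alsa_output.<n>[.<mapping>]". */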
1866 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1867 const char *n;
1868 char *t;
1869
1870 pa_assert(data);
1871 pa_assert(ma);
1872 pa_assert(device_name);
1873
1874 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1875 pa_sink_new_data_set_name(data, n);
1876 data->namereg_fail = true;
1877 return;
1878 }
1879
1880 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1881 data->namereg_fail = true;
1882 else {
1883 n = device_id ? device_id : device_name;
1884 data->namereg_fail = false;
1885 }
1886
1887 if (mapping)
1888 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1889 else
1890 t = pa_sprintf_malloc("alsa_output.%s", n);
1891
1892 pa_sink_new_data_set_name(data, t);
1893 pa_xfree(t);
1894 }
1895
1896 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
1897 snd_hctl_t *hctl;
1898
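    /* Nothing to probe if neither a mapping nor an explicit mixer element
     * was given (e.g. for a bare sink); bail out before the mapping is
     * dereferenced below. */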
1899 if (!mapping && !element)
1900 return;
1901
1902 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1903 pa_log_info("Failed to find a working mixer device.");
1904 return;
1905 }
1906
1907 if (element) {
1908
1909 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1910 goto fail;
1911
1912 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1913 goto fail;
1914
1915 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1916 pa_alsa_path_dump(u->mixer_path);
1917 } else if (!(u->mixer_path_set = mapping->output_path_set))
1918 goto fail;
1919
1920 return;
1921
1922 fail:
1923
1924 if (u->mixer_path) {
1925 pa_alsa_path_free(u->mixer_path);
1926 u->mixer_path = NULL;
1927 }
1928
1929 if (u->mixer_handle) {
1930 snd_mixer_close(u->mixer_handle);
1931 u->mixer_handle = NULL;
1932 }
1933 }
1934
1935 static int setup_mixer(struct userdata *u, bool ignore_dB) {
1936 bool need_mixer_callback = false;
1937
1938 pa_assert(u);
1939
1940 if (!u->mixer_handle)
1941 return 0;
1942
1943 if (u->sink->active_port) {
1944 pa_alsa_port_data *data;
1945
1946 /* We have a list of supported paths, so let's activate the
1947 * one that has been chosen as active */
1948
1949 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1950 u->mixer_path = data->path;
1951
1952 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
1953
1954 } else {
1955
1956 if (!u->mixer_path && u->mixer_path_set)
1957 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1958
1959 if (u->mixer_path) {
1960             /* Hmm, we have only a single path, so let's activate it */
1961
1962 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
1963
1964 } else
1965 return 0;
1966 }
1967
1968 mixer_volume_init(u);
1969
1970 /* Will we need to register callbacks? */
1971 if (u->mixer_path_set && u->mixer_path_set->paths) {
1972 pa_alsa_path *p;
1973 void *state;
1974
1975 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1976 if (p->has_volume || p->has_mute)
1977 need_mixer_callback = true;
1978 }
1979 }
1980 else if (u->mixer_path)
1981 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1982
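    /* With deferred volume the mixer is watched from the I/O thread via the
     * rtpoll; otherwise mixer events are dispatched from the main loop
     * through an fd list. */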
1983 if (need_mixer_callback) {
1984 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1985 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1986 u->mixer_pd = pa_alsa_mixer_pdata_new();
1987 mixer_callback = io_mixer_callback;
1988
1989 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1990 pa_log("Failed to initialize file descriptor monitoring");
1991 return -1;
1992 }
1993 } else {
1994 u->mixer_fdl = pa_alsa_fdlist_new();
1995 mixer_callback = ctl_mixer_callback;
1996
1997 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1998 pa_log("Failed to initialize file descriptor monitoring");
1999 return -1;
2000 }
2001 }
2002
2003 if (u->mixer_path_set)
2004 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
2005 else
2006 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
2007 }
2008
2009 return 0;
2010 }
2011
2012 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
2013
2014 struct userdata *u = NULL;
2015 const char *dev_id = NULL, *key, *mod_name;
2016 pa_sample_spec ss;
2017 char *thread_name = NULL;
2018 uint32_t alternate_sample_rate;
2019 pa_channel_map map;
2020 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2021 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2022 size_t frame_size;
2023 bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, set_formats = false, fixed_latency_range = false;
2024 pa_sink_new_data data;
2025 pa_alsa_profile_set *profile_set = NULL;
2026 void *state = NULL;
2027
2028 pa_assert(m);
2029 pa_assert(ma);
2030
2031 ss = m->core->default_sample_spec;
2032 map = m->core->default_channel_map;
2033
2034 /* Pick sample spec overrides from the mapping, if any */
2035 if (mapping) {
2036 if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
2037 ss.format = mapping->sample_spec.format;
2038 if (mapping->sample_spec.rate != 0)
2039 ss.rate = mapping->sample_spec.rate;
2040 if (mapping->sample_spec.channels != 0) {
2041 ss.channels = mapping->sample_spec.channels;
2042 if (pa_channel_map_valid(&mapping->channel_map))
2043 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
2044 }
2045 }
2046
2047 /* Override with modargs if provided */
2048 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2049 pa_log("Failed to parse sample specification and channel map");
2050 goto fail;
2051 }
2052
2053 alternate_sample_rate = m->core->alternate_sample_rate;
2054 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2055 pa_log("Failed to parse alternate sample rate");
2056 goto fail;
2057 }
2058
2059 frame_size = pa_frame_size(&ss);
2060
2061 nfrags = m->core->default_n_fragments;
2062 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2063     if (frag_size == 0)
2064 frag_size = (uint32_t) frame_size;
2065 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2066 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
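    /* Example: for a 44.1 kHz S16 stereo spec (4 bytes per frame) the 2 s
     * tsched buffer comes out at 352800 bytes and the 20 ms watermark at
     * 3528 bytes. */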
2067
2068 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2069 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2070 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2071 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2072 pa_log("Failed to parse buffer metrics");
2073 goto fail;
2074 }
2075
2076 buffer_size = nfrags * frag_size;
2077
2078 period_frames = frag_size/frame_size;
2079 buffer_frames = buffer_size/frame_size;
2080 tsched_frames = tsched_size/frame_size;
2081
2082 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2083 pa_log("Failed to parse mmap argument.");
2084 goto fail;
2085 }
2086
2087 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2088 pa_log("Failed to parse tsched argument.");
2089 goto fail;
2090 }
2091
2092 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2093 pa_log("Failed to parse ignore_dB argument.");
2094 goto fail;
2095 }
2096
2097 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2098 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2099 pa_log("Failed to parse rewind_safeguard argument");
2100 goto fail;
2101 }
2102
2103 deferred_volume = m->core->deferred_volume;
2104 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2105 pa_log("Failed to parse deferred_volume argument.");
2106 goto fail;
2107 }
2108
2109 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2110 pa_log("Failed to parse fixed_latency_range argument.");
2111 goto fail;
2112 }
2113
2114 use_tsched = pa_alsa_may_tsched(use_tsched);
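    /* pa_alsa_may_tsched() can veto timer-based scheduling, e.g. when the
     * system lacks high-resolution timers. */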
2115
2116 u = pa_xnew0(struct userdata, 1);
2117 u->core = m->core;
2118 u->module = m;
2119 u->use_mmap = use_mmap;
2120 u->use_tsched = use_tsched;
2121 u->deferred_volume = deferred_volume;
2122 u->fixed_latency_range = fixed_latency_range;
2123 u->first = true;
2124 u->rewind_safeguard = rewind_safeguard;
2125 u->rtpoll = pa_rtpoll_new();
2126 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2127
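    /* The smoother maps the sound card time domain to the system time
     * domain; the arguments are the adjust time, the history window, the
     * monotonic and smoothing flags, the minimum number of history entries,
     * the epoch, and whether to start paused. */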
2128 u->smoother = pa_smoother_new(
2129 SMOOTHER_ADJUST_USEC,
2130 SMOOTHER_WINDOW_USEC,
2131 true,
2132 true,
2133 5,
2134 pa_rtclock_now(),
2135 true);
2136 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2137
2138     /* Use the UCM context from the mapping, if the mapping provides one */
2139 if (mapping && mapping->ucm_context.ucm)
2140 u->ucm_context = &mapping->ucm_context;
2141
2142 dev_id = pa_modargs_get_value(
2143 ma, "device_id",
2144 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2145
2146 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2147
2148 if (reserve_init(u, dev_id) < 0)
2149 goto fail;
2150
2151 if (reserve_monitor_init(u, dev_id) < 0)
2152 goto fail;
2153
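    /* b and d are in/out flags: the open helpers below clear them if the
     * device cannot do mmap or timer-based scheduling; we re-check them
     * once the device is open. */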
2154 b = use_mmap;
2155 d = use_tsched;
2156
2157 if (mapping) {
2158
2159 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2160 pa_log("device_id= not set");
2161 goto fail;
2162 }
2163
2164 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2165 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2166 pa_log("Failed to enable ucm modifier %s", mod_name);
2167 else
2168 pa_log_debug("Enabled ucm modifier %s", mod_name);
2169 }
2170
2171 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2172 dev_id,
2173 &u->device_name,
2174 &ss, &map,
2175 SND_PCM_STREAM_PLAYBACK,
2176 &period_frames, &buffer_frames, tsched_frames,
2177 &b, &d, mapping)))
2178 goto fail;
2179
2180 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2181
2182 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2183 goto fail;
2184
2185 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2186 dev_id,
2187 &u->device_name,
2188 &ss, &map,
2189 SND_PCM_STREAM_PLAYBACK,
2190 &period_frames, &buffer_frames, tsched_frames,
2191 &b, &d, profile_set, &mapping)))
2192 goto fail;
2193
2194 } else {
2195
2196 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2197 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2198 &u->device_name,
2199 &ss, &map,
2200 SND_PCM_STREAM_PLAYBACK,
2201 &period_frames, &buffer_frames, tsched_frames,
2202 &b, &d, false)))
2203 goto fail;
2204 }
2205
2206 pa_assert(u->device_name);
2207 pa_log_info("Successfully opened device %s.", u->device_name);
2208
2209 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2210         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2211 goto fail;
2212 }
2213
2214 if (mapping)
2215 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2216
2217 if (use_mmap && !b) {
2218 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2219 u->use_mmap = use_mmap = false;
2220 }
2221
2222 if (use_tsched && (!b || !d)) {
2223 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2224 u->use_tsched = use_tsched = false;
2225 }
2226
2227 if (u->use_mmap)
2228 pa_log_info("Successfully enabled mmap() mode.");
2229
2230 if (u->use_tsched) {
2231 pa_log_info("Successfully enabled timer-based scheduling mode.");
2232
2233 if (u->fixed_latency_range)
2234 pa_log_info("Disabling latency range changes on underrun");
2235 }
2236
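    /* Digital outputs (S/PDIF, HDMI) may carry encoded, non-PCM streams, so
     * enable the format negotiation hooks for them further below. */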
2237 if (is_iec958(u) || is_hdmi(u))
2238 set_formats = true;
2239
2240 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
2241 if (!u->rates) {
2242 pa_log_error("Failed to find any supported sample rates.");
2243 goto fail;
2244 }
2245
2246 /* ALSA might tweak the sample spec, so recalculate the frame size */
2247 frame_size = pa_frame_size(&ss);
2248
2249 if (!u->ucm_context)
2250 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2251
2252 pa_sink_new_data_init(&data);
2253 data.driver = driver;
2254 data.module = m;
2255 data.card = card;
2256 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2257
2258 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2259 * variable instead of using &data.namereg_fail directly, because
2260 * data.namereg_fail is a bitfield and taking the address of a bitfield
2261  * is not allowed in C. */
2262 namereg_fail = data.namereg_fail;
2263 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2264 pa_log("Failed to parse namereg_fail argument.");
2265 pa_sink_new_data_done(&data);
2266 goto fail;
2267 }
2268 data.namereg_fail = namereg_fail;
2269
2270 pa_sink_new_data_set_sample_spec(&data, &ss);
2271 pa_sink_new_data_set_channel_map(&data, &map);
2272 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2273
2274 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2275 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2276 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2277 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2278 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2279
2280 if (mapping) {
2281 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2282 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2283
2284 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2285 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2286 }
2287
2288 pa_alsa_init_description(data.proplist);
2289
2290 if (u->control_device)
2291 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2292
2293 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2294 pa_log("Invalid properties");
2295 pa_sink_new_data_done(&data);
2296 goto fail;
2297 }
2298
2299 if (u->ucm_context)
2300 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, true, card);
2301 else if (u->mixer_path_set)
2302 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2303
2304 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2305 (set_formats ? PA_SINK_SET_FORMATS : 0));
2306 pa_sink_new_data_done(&data);
2307
2308 if (!u->sink) {
2309 pa_log("Failed to create sink object");
2310 goto fail;
2311 }
2312
2313 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2314 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2315 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2316 goto fail;
2317 }
2318
2319 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2320 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2321 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2322 goto fail;
2323 }
2324
2325 u->sink->parent.process_msg = sink_process_msg;
2326 if (u->use_tsched)
2327 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2328 u->sink->set_state = sink_set_state_cb;
2329 if (u->ucm_context)
2330 u->sink->set_port = sink_set_port_ucm_cb;
2331 else
2332 u->sink->set_port = sink_set_port_cb;
2333 if (u->sink->alternate_sample_rate)
2334 u->sink->update_rate = sink_update_rate_cb;
2335 u->sink->userdata = u;
2336
2337 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2338 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2339
2340 u->frame_size = frame_size;
2341 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2342 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2343 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2344
2345 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2346 (double) u->hwbuf_size / (double) u->fragment_size,
2347 (long unsigned) u->fragment_size,
2348 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2349 (long unsigned) u->hwbuf_size,
2350 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2351
2352 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2353 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2354 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2355 else {
2356 pa_log_info("Disabling rewind for device %s", u->device_name);
2357 pa_sink_set_max_rewind(u->sink, 0);
2358 }
2359
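    /* With timer-based scheduling the latency is dynamic and governed by the
     * watermark; otherwise it is fixed at the duration of the full hardware
     * buffer. */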
2360 if (u->use_tsched) {
2361 u->tsched_watermark_ref = tsched_watermark;
2362 reset_watermark(u, u->tsched_watermark_ref, &ss, false);
2363 } else
2364 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2365
2366 reserve_update(u);
2367
2368 if (update_sw_params(u) < 0)
2369 goto fail;
2370
2371 if (u->ucm_context) {
2372 if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, true) < 0)
2373 goto fail;
2374 } else if (setup_mixer(u, ignore_dB) < 0)
2375 goto fail;
2376
2377 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2378
2379 thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
2380 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2381 pa_log("Failed to create thread.");
2382 goto fail;
2383 }
2384 pa_xfree(thread_name);
2385 thread_name = NULL;
2386
2387 /* Get initial mixer settings */
2388 if (data.volume_is_set) {
2389 if (u->sink->set_volume)
2390 u->sink->set_volume(u->sink);
2391 } else {
2392 if (u->sink->get_volume)
2393 u->sink->get_volume(u->sink);
2394 }
2395
2396 if (data.muted_is_set) {
2397 if (u->sink->set_mute)
2398 u->sink->set_mute(u->sink);
2399 } else {
2400 if (u->sink->get_mute)
2401 u->sink->get_mute(u->sink);
2402 }
2403
2404 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2405 u->sink->write_volume(u->sink);
2406
2407 if (set_formats) {
2408 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2409 pa_format_info *format;
2410
2411 /* To start with, we only support PCM formats. Other formats may be added
2412          * with pa_sink_set_formats(). */
2413 format = pa_format_info_new();
2414 format->encoding = PA_ENCODING_PCM;
2415 u->formats = pa_idxset_new(NULL, NULL);
2416 pa_idxset_put(u->formats, format, NULL);
2417
2418 u->sink->get_formats = sink_get_formats;
2419 u->sink->set_formats = sink_set_formats;
2420 }
2421
2422 pa_sink_put(u->sink);
2423
2424 if (profile_set)
2425 pa_alsa_profile_set_free(profile_set);
2426
2427 return u->sink;
2428
2429 fail:
2430 pa_xfree(thread_name);
2431
2432 if (u)
2433 userdata_free(u);
2434
2435 if (profile_set)
2436 pa_alsa_profile_set_free(profile_set);
2437
2438 return NULL;
2439 }
2440
2441 static void userdata_free(struct userdata *u) {
2442 pa_assert(u);
2443
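    /* Unlink the sink first so that no new data is routed to it while the
     * I/O thread is shut down below. */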
2444 if (u->sink)
2445 pa_sink_unlink(u->sink);
2446
2447 if (u->thread) {
2448 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2449 pa_thread_free(u->thread);
2450 }
2451
2452 pa_thread_mq_done(&u->thread_mq);
2453
2454 if (u->sink)
2455 pa_sink_unref(u->sink);
2456
2457 if (u->memchunk.memblock)
2458 pa_memblock_unref(u->memchunk.memblock);
2459
2460 if (u->mixer_pd)
2461 pa_alsa_mixer_pdata_free(u->mixer_pd);
2462
2463 if (u->alsa_rtpoll_item)
2464 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2465
2466 if (u->rtpoll)
2467 pa_rtpoll_free(u->rtpoll);
2468
2469 if (u->pcm_handle) {
2470 snd_pcm_drop(u->pcm_handle);
2471 snd_pcm_close(u->pcm_handle);
2472 }
2473
2474 if (u->mixer_fdl)
2475 pa_alsa_fdlist_free(u->mixer_fdl);
2476
2477 if (u->mixer_path && !u->mixer_path_set)
2478 pa_alsa_path_free(u->mixer_path);
2479
2480 if (u->mixer_handle)
2481 snd_mixer_close(u->mixer_handle);
2482
2483 if (u->smoother)
2484 pa_smoother_free(u->smoother);
2485
2486 if (u->formats)
2487 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
2488
2489 if (u->rates)
2490 pa_xfree(u->rates);
2491
2492 reserve_done(u);
2493 monitor_done(u);
2494
2495 pa_xfree(u->device_name);
2496 pa_xfree(u->control_device);
2497 pa_xfree(u->paths_dir);
2498 pa_xfree(u);
2499 }
2500
2501 void pa_alsa_sink_free(pa_sink *s) {
2502 struct userdata *u;
2503
2504 pa_sink_assert_ref(s);
2505 pa_assert_se(u = s->userdata);
2506
2507 userdata_free(u);
2508 }