1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
34 #endif
35
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/volume.h>
39 #include <pulse/xmalloc.h>
40 #include <pulse/internal.h>
41
42 #include <pulsecore/core.h>
43 #include <pulsecore/i18n.h>
44 #include <pulsecore/module.h>
45 #include <pulsecore/memchunk.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/modargs.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/sample-util.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/macro.h>
53 #include <pulsecore/thread.h>
54 #include <pulsecore/thread-mq.h>
55 #include <pulsecore/rtpoll.h>
56 #include <pulsecore/time-smoother.h>
57
58 #include <modules/reserve-wrap.h>
59
60 #include "alsa-util.h"
61 #include "alsa-sink.h"
62
63 /* #define DEBUG_TIMING */
64
65 #define DEFAULT_DEVICE "default"
66
67 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
68 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69
70 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
71 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
72 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout we recheck whether things are good again */
73 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level is ever below this threshold, increase the watermark */
74 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75
76 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
77 * will increase the watermark only if we hit a real underrun. */
78
79 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
80 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
81
82 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
83 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84
85 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
86 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
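/* The smoother update interval starts at SMOOTHER_MIN_INTERVAL and is
* doubled on every update (see update_smoother() below), so it ramps
* 2ms -> 4ms -> 8ms -> ... until it is capped at 200ms. */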
87
88 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
89
90 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
91 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample format we may rewind more than the 256 bytes above */
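/* Sanity check on the two defaults above: 1330 usec at 48kHz is about
* 64 frames, i.e. roughly 256 bytes for 16-bit stereo (4 bytes per
* frame), so the byte-based and usec-based safeguards describe the same
* margin for the common case. */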
92
93 struct userdata {
94 pa_core *core;
95 pa_module *module;
96 pa_sink *sink;
97
98 pa_thread *thread;
99 pa_thread_mq thread_mq;
100 pa_rtpoll *rtpoll;
101
102 snd_pcm_t *pcm_handle;
103
104 char *paths_dir;
105 pa_alsa_fdlist *mixer_fdl;
106 pa_alsa_mixer_pdata *mixer_pd;
107 snd_mixer_t *mixer_handle;
108 pa_alsa_path_set *mixer_path_set;
109 pa_alsa_path *mixer_path;
110
111 pa_cvolume hardware_volume;
112
113 unsigned int *rates;
114
115 size_t
116 frame_size,
117 fragment_size,
118 hwbuf_size,
119 tsched_watermark,
120 tsched_watermark_ref,
121 hwbuf_unused,
122 min_sleep,
123 min_wakeup,
124 watermark_inc_step,
125 watermark_dec_step,
126 watermark_inc_threshold,
127 watermark_dec_threshold,
128 rewind_safeguard;
129
130 pa_usec_t watermark_dec_not_before;
131 pa_usec_t min_latency_ref;
132 pa_usec_t tsched_watermark_usec;
133
134 pa_memchunk memchunk;
135
136 char *device_name; /* name of the PCM device */
137 char *control_device; /* name of the control device */
138
139 bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
140
141 bool first, after_rewind;
142
143 pa_rtpoll_item *alsa_rtpoll_item;
144
145 pa_smoother *smoother;
146 uint64_t write_count;
147 uint64_t since_start;
148 pa_usec_t smoother_interval;
149 pa_usec_t last_smoother_update;
150
151 pa_idxset *formats;
152
153 pa_reserve_wrapper *reserve;
154 pa_hook_slot *reserve_slot;
155 pa_reserve_monitor_wrapper *monitor;
156 pa_hook_slot *monitor_slot;
157
158 /* ucm context */
159 pa_alsa_ucm_mapping_context *ucm_context;
160 };
161
162 static void userdata_free(struct userdata *u);
163
164 /* FIXME: Is there a better way to do this than device names? */
165 static bool is_iec958(struct userdata *u) {
166 return (strncmp("iec958", u->device_name, 6) == 0);
167 }
168
169 static bool is_hdmi(struct userdata *u) {
170 return (strncmp("hdmi", u->device_name, 4) == 0);
171 }
172
173 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
174 pa_assert(r);
175 pa_assert(u);
176
177 pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);
178
179 if (pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION) < 0)
180 return PA_HOOK_CANCEL;
181
182 return PA_HOOK_OK;
183 }
184
185 static void reserve_done(struct userdata *u) {
186 pa_assert(u);
187
188 if (u->reserve_slot) {
189 pa_hook_slot_free(u->reserve_slot);
190 u->reserve_slot = NULL;
191 }
192
193 if (u->reserve) {
194 pa_reserve_wrapper_unref(u->reserve);
195 u->reserve = NULL;
196 }
197 }
198
199 static void reserve_update(struct userdata *u) {
200 const char *description;
201 pa_assert(u);
202
203 if (!u->sink || !u->reserve)
204 return;
205
206 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
207 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
208 }
209
210 static int reserve_init(struct userdata *u, const char *dname) {
211 char *rname;
212
213 pa_assert(u);
214 pa_assert(dname);
215
216 if (u->reserve)
217 return 0;
218
219 if (pa_in_system_mode())
220 return 0;
221
222 if (!(rname = pa_alsa_get_reserve_name(dname)))
223 return 0;
224
225 /* We are resuming, try to lock the device */
226 u->reserve = pa_reserve_wrapper_get(u->core, rname);
227 pa_xfree(rname);
228
229 if (!(u->reserve))
230 return -1;
231
232 reserve_update(u);
233
234 pa_assert(!u->reserve_slot);
235 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
236
237 return 0;
238 }
239
240 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
241 pa_assert(w);
242 pa_assert(u);
243
244 if (PA_PTR_TO_UINT(busy) && !u->reserve) {
245 pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
246 pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
247 } else {
248 pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
249 pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
250 }
251
252 return PA_HOOK_OK;
253 }
254
255 static void monitor_done(struct userdata *u) {
256 pa_assert(u);
257
258 if (u->monitor_slot) {
259 pa_hook_slot_free(u->monitor_slot);
260 u->monitor_slot = NULL;
261 }
262
263 if (u->monitor) {
264 pa_reserve_monitor_wrapper_unref(u->monitor);
265 u->monitor = NULL;
266 }
267 }
268
269 static int reserve_monitor_init(struct userdata *u, const char *dname) {
270 char *rname;
271
272 pa_assert(u);
273 pa_assert(dname);
274
275 if (pa_in_system_mode())
276 return 0;
277
278 if (!(rname = pa_alsa_get_reserve_name(dname)))
279 return 0;
280
281 /* Watch whether some other application is blocking access to the device */
282 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
283 pa_xfree(rname);
284
285 if (!(u->monitor))
286 return -1;
287
288 pa_assert(!u->monitor_slot);
289 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
290
291 return 0;
292 }
293
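/* Clamp min_sleep and min_wakeup into [one frame, half of the usable
* buffer]: this guarantees that their sum never exceeds the part of the
* hw buffer we are actually allowed to use. */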
294 static void fix_min_sleep_wakeup(struct userdata *u) {
295 size_t max_use, max_use_2;
296
297 pa_assert(u);
298 pa_assert(u->use_tsched);
299
300 max_use = u->hwbuf_size - u->hwbuf_unused;
301 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
302
303 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
304 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
305
306 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
307 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
308 }
309
310 static void fix_tsched_watermark(struct userdata *u) {
311 size_t max_use;
312 pa_assert(u);
313 pa_assert(u->use_tsched);
314
315 max_use = u->hwbuf_size - u->hwbuf_unused;
316
317 if (u->tsched_watermark > max_use - u->min_sleep)
318 u->tsched_watermark = max_use - u->min_sleep;
319
320 if (u->tsched_watermark < u->min_wakeup)
321 u->tsched_watermark = u->min_wakeup;
322
323 u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
324 }
325
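/* PA_MIN(w * 2, w + inc_step) grows a small watermark geometrically and
* a large one only linearly: with the default 10ms step, 2ms becomes
* 4ms, but 50ms only becomes 60ms. decrease_watermark() below shrinks it
* again with the mirrored PA_MAX(w / 2, w - dec_step) rule. */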
326 static void increase_watermark(struct userdata *u) {
327 size_t old_watermark;
328 pa_usec_t old_min_latency, new_min_latency;
329
330 pa_assert(u);
331 pa_assert(u->use_tsched);
332
333 /* First, just try to increase the watermark */
334 old_watermark = u->tsched_watermark;
335 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
336 fix_tsched_watermark(u);
337
338 if (old_watermark != u->tsched_watermark) {
339 pa_log_info("Increasing wakeup watermark to %0.2f ms",
340 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
341 return;
342 }
343
344 /* Hmm, we cannot increase the watermark any further, hence let's
345 raise the latency, unless doing so was disabled in
346 configuration */
347 if (u->fixed_latency_range)
348 return;
349
350 old_min_latency = u->sink->thread_info.min_latency;
351 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
352 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
353
354 if (old_min_latency != new_min_latency) {
355 pa_log_info("Increasing minimal latency to %0.2f ms",
356 (double) new_min_latency / PA_USEC_PER_MSEC);
357
358 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
359 }
360
361 /* When we reach this we're officially fucked! */
362 }
363
364 static void decrease_watermark(struct userdata *u) {
365 size_t old_watermark;
366 pa_usec_t now;
367
368 pa_assert(u);
369 pa_assert(u->use_tsched);
370
371 now = pa_rtclock_now();
372
373 if (u->watermark_dec_not_before <= 0)
374 goto restart;
375
376 if (u->watermark_dec_not_before > now)
377 return;
378
379 old_watermark = u->tsched_watermark;
380
381 if (u->tsched_watermark < u->watermark_dec_step)
382 u->tsched_watermark = u->tsched_watermark / 2;
383 else
384 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
385
386 fix_tsched_watermark(u);
387
388 if (old_watermark != u->tsched_watermark)
389 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
390 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
391
392 /* We don't change the latency range */
393
394 restart:
395 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
396 }
397
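/* Split the effective latency into a sleep period and a processing
* budget: with e.g. a 2s buffer and a 20ms watermark we sleep for 1.98s
* and reserve 20ms for rendering. If the requested latency is smaller
* than the watermark, half of it becomes the budget instead. */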
398 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
399 pa_usec_t usec, wm;
400
401 pa_assert(sleep_usec);
402 pa_assert(process_usec);
403
404 pa_assert(u);
405 pa_assert(u->use_tsched);
406
407 usec = pa_sink_get_requested_latency_within_thread(u->sink);
408
409 if (usec == (pa_usec_t) -1)
410 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
411
412 wm = u->tsched_watermark_usec;
413
414 if (wm > usec)
415 wm = usec/2;
416
417 *sleep_usec = usec - wm;
418 *process_usec = wm;
419
420 #ifdef DEBUG_TIMING
421 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
422 (unsigned long) (usec / PA_USEC_PER_MSEC),
423 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
424 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
425 #endif
426 }
427
428 static int try_recover(struct userdata *u, const char *call, int err) {
429 pa_assert(u);
430 pa_assert(call);
431 pa_assert(err < 0);
432
433 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
434
435 pa_assert(err != -EAGAIN);
436
437 if (err == -EPIPE)
438 pa_log_debug("%s: Buffer underrun!", call);
439
440 if (err == -ESTRPIPE)
441 pa_log_debug("%s: System suspended!", call);
442
443 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
444 pa_log("%s: %s", call, pa_alsa_strerror(err));
445 return -1;
446 }
447
448 u->first = true;
449 u->since_start = 0;
450 return 0;
451 }
452
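/* Given how much of the hw buffer ALSA reports as writable, compute how
* much is still queued for playback, detect underruns, and nudge the
* watermark up or down according to the inc/dec thresholds. */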
453 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, bool on_timeout) {
454 size_t left_to_play;
455 bool underrun = false;
456
457 /* We use <= instead of < for this check here because an underrun
458 * only happens after the last sample was processed, not when it
459 * is merely removed from the buffer. This is particularly important
460 * when block transfer is used. */
461
462 if (n_bytes <= u->hwbuf_size)
463 left_to_play = u->hwbuf_size - n_bytes;
464 else {
465
466 /* We got a dropout. What a mess! */
467 left_to_play = 0;
468 underrun = true;
469
470 #if 0
471 PA_DEBUG_TRAP;
472 #endif
473
474 if (!u->first && !u->after_rewind)
475 if (pa_log_ratelimit(PA_LOG_INFO))
476 pa_log_info("Underrun!");
477 }
478
479 #ifdef DEBUG_TIMING
480 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
481 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
482 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
483 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
484 #endif
485
486 if (u->use_tsched) {
487 bool reset_not_before = true;
488
489 if (!u->first && !u->after_rewind) {
490 if (underrun || left_to_play < u->watermark_inc_threshold)
491 increase_watermark(u);
492 else if (left_to_play > u->watermark_dec_threshold) {
493 reset_not_before = false;
494
495 /* We decrease the watermark only if we have actually
496 * been woken up by a timeout. If something else woke
497 * us up it's too easy to fulfill the deadlines... */
498
499 if (on_timeout)
500 decrease_watermark(u);
501 }
502 }
503
504 if (reset_not_before)
505 u->watermark_dec_not_before = 0;
506 }
507
508 return left_to_play;
509 }
510
511 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
512 bool work_done = false;
513 pa_usec_t max_sleep_usec = 0, process_usec = 0;
514 size_t left_to_play, input_underrun;
515 unsigned j = 0;
516
517 pa_assert(u);
518 pa_sink_assert_ref(u->sink);
519
520 if (u->use_tsched)
521 hw_sleep_time(u, &max_sleep_usec, &process_usec);
522
523 for (;;) {
524 snd_pcm_sframes_t n;
525 size_t n_bytes;
526 int r;
527 bool after_avail = true;
528
529 /* First we determine how many samples are missing to fill the
530 * buffer up to 100% */
531
532 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
533
534 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
535 continue;
536
537 return r;
538 }
539
540 n_bytes = (size_t) n * u->frame_size;
541
542 #ifdef DEBUG_TIMING
543 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
544 #endif
545
546 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
547 on_timeout = false;
548
549 if (u->use_tsched)
550
551 /* We won't fill up the playback buffer before at least
552 * half the sleep time is over because otherwise we might
553 * ask for more data from the clients than they expect. We
554 * need to guarantee that clients only have to keep around
555 * a single hw buffer length. */
556
557 if (!polled &&
558 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
559 #ifdef DEBUG_TIMING
560 pa_log_debug("Not filling up, because too early.");
561 #endif
562 break;
563 }
564
565 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
566
567 if (polled)
568 PA_ONCE_BEGIN {
569 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
570 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
571 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
572 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
573 pa_strnull(dn));
574 pa_xfree(dn);
575 } PA_ONCE_END;
576
577 #ifdef DEBUG_TIMING
578 pa_log_debug("Not filling up, because not necessary.");
579 #endif
580 break;
581 }
582
583 if (++j > 10) {
584 #ifdef DEBUG_TIMING
585 pa_log_debug("Not filling up, because already too many iterations.");
586 #endif
587
588 break;
589 }
590
591 n_bytes -= u->hwbuf_unused;
592 polled = false;
593
594 #ifdef DEBUG_TIMING
595 pa_log_debug("Filling up");
596 #endif
597
598 for (;;) {
599 pa_memchunk chunk;
600 void *p;
601 int err;
602 const snd_pcm_channel_area_t *areas;
603 snd_pcm_uframes_t offset, frames;
604 snd_pcm_sframes_t sframes;
605 size_t written;
606
607 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
608 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
609
610 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
611
612 if (!after_avail && err == -EAGAIN)
613 break;
614
615 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
616 continue;
617
618 return r;
619 }
620
621 /* Make sure that if these memblocks need to be copied they will fit into one slot */
622 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
623 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
624
625 if (!after_avail && frames == 0)
626 break;
627
628 pa_assert(frames > 0);
629 after_avail = false;
630
631 /* Check these are multiples of 8 bit */
632 pa_assert((areas[0].first & 7) == 0);
633 pa_assert((areas[0].step & 7) == 0);
634
635 /* We assume a single interleaved memory buffer */
636 pa_assert((areas[0].first >> 3) == 0);
637 pa_assert((areas[0].step >> 3) == u->frame_size);
638
639 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
640
641 written = frames * u->frame_size;
642 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, true);
643 chunk.length = pa_memblock_get_length(chunk.memblock);
644 chunk.index = 0;
645
646 pa_sink_render_into_full(u->sink, &chunk);
647 pa_memblock_unref_fixed(chunk.memblock);
648
649 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
650
651 if (!after_avail && (int) sframes == -EAGAIN)
652 break;
653
654 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
655 continue;
656
657 return r;
658 }
659
660 work_done = true;
661
662 u->write_count += written;
663 u->since_start += written;
664
665 #ifdef DEBUG_TIMING
666 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
667 #endif
668
669 if (written >= n_bytes)
670 break;
671
672 n_bytes -= written;
673 }
674 }
675
676 input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
677
678 if (u->use_tsched) {
679 pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
680
681 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
682 process_usec = u->tsched_watermark_usec;
683
684 if (*sleep_usec > process_usec)
685 *sleep_usec -= process_usec;
686 else
687 *sleep_usec = 0;
688
689 *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
690 } else
691 *sleep_usec = 0;
692
693 return work_done ? 1 : 0;
694 }
695
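/* Non-mmap variant of the fill loop above: instead of rendering
* directly into the mapped hw buffer we render into our own memchunk
* and hand it to ALSA with snd_pcm_writei(). The control flow otherwise
* mirrors mmap_write(). */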
696 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
697 bool work_done = false;
698 pa_usec_t max_sleep_usec = 0, process_usec = 0;
699 size_t left_to_play, input_underrun;
700 unsigned j = 0;
701
702 pa_assert(u);
703 pa_sink_assert_ref(u->sink);
704
705 if (u->use_tsched)
706 hw_sleep_time(u, &max_sleep_usec, &process_usec);
707
708 for (;;) {
709 snd_pcm_sframes_t n;
710 size_t n_bytes;
711 int r;
712 bool after_avail = true;
713
714 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
715
716 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
717 continue;
718
719 return r;
720 }
721
722 n_bytes = (size_t) n * u->frame_size;
723
724 #ifdef DEBUG_TIMING
725 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
726 #endif
727
728 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
729 on_timeout = false;
730
731 if (u->use_tsched)
732
733 /* We won't fill up the playback buffer before at least
734 * half the sleep time is over because otherwise we might
735 * ask for more data from the clients than they expect. We
736 * need to guarantee that clients only have to keep around
737 * a single hw buffer length. */
738
739 if (!polled &&
740 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
741 break;
742
743 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
744
745 if (polled)
746 PA_ONCE_BEGIN {
747 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
748 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
749 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
750 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
751 pa_strnull(dn));
752 pa_xfree(dn);
753 } PA_ONCE_END;
754
755 break;
756 }
757
758 if (++j > 10) {
759 #ifdef DEBUG_TIMING
760 pa_log_debug("Not filling up, because already too many iterations.");
761 #endif
762
763 break;
764 }
765
766 n_bytes -= u->hwbuf_unused;
767 polled = false;
768
769 for (;;) {
770 snd_pcm_sframes_t frames;
771 void *p;
772 size_t written;
773
774 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
775
776 if (u->memchunk.length <= 0)
777 pa_sink_render(u->sink, n_bytes, &u->memchunk);
778
779 pa_assert(u->memchunk.length > 0);
780
781 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
782
783 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
784 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
785
786 p = pa_memblock_acquire(u->memchunk.memblock);
787 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
788 pa_memblock_release(u->memchunk.memblock);
789
790 if (PA_UNLIKELY(frames < 0)) {
791
792 if (!after_avail && (int) frames == -EAGAIN)
793 break;
794
795 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
796 continue;
797
798 return r;
799 }
800
801 if (!after_avail && frames == 0)
802 break;
803
804 pa_assert(frames > 0);
805 after_avail = false;
806
807 written = frames * u->frame_size;
808 u->memchunk.index += written;
809 u->memchunk.length -= written;
810
811 if (u->memchunk.length <= 0) {
812 pa_memblock_unref(u->memchunk.memblock);
813 pa_memchunk_reset(&u->memchunk);
814 }
815
816 work_done = true;
817
818 u->write_count += written;
819 u->since_start += written;
820
821 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
822
823 if (written >= n_bytes)
824 break;
825
826 n_bytes -= written;
827 }
828 }
829
830 input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
831
832 if (u->use_tsched) {
833 pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
834
835 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
836 process_usec = u->tsched_watermark_usec;
837
838 if (*sleep_usec > process_usec)
839 *sleep_usec -= process_usec;
840 else
841 *sleep_usec = 0;
842
843 *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
844 } else
845 *sleep_usec = 0;
846
847 return work_done ? 1 : 0;
848 }
849
850 static void update_smoother(struct userdata *u) {
851 snd_pcm_sframes_t delay = 0;
852 int64_t position;
853 int err;
854 pa_usec_t now1 = 0, now2;
855 snd_pcm_status_t *status;
856 snd_htimestamp_t htstamp = { 0, 0 };
857
858 snd_pcm_status_alloca(&status);
859
860 pa_assert(u);
861 pa_assert(u->pcm_handle);
862
863 /* Let's update the time smoother */
864
865 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, false)) < 0)) {
866 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
867 return;
868 }
869
870 snd_pcm_status_get_htstamp(status, &htstamp);
871 now1 = pa_timespec_load(&htstamp);
872
873 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
874 if (now1 <= 0)
875 now1 = pa_rtclock_now();
876
877 /* check if the time since the last update is bigger than the interval */
878 if (u->last_smoother_update > 0)
879 if (u->last_smoother_update + u->smoother_interval > now1)
880 return;
881
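/* write_count counts bytes handed to ALSA, and the reported delay is
* what is still queued, so the difference approximates the byte
* position that has actually been played back so far. */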
882 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
883
884 if (PA_UNLIKELY(position < 0))
885 position = 0;
886
887 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
888
889 pa_smoother_put(u->smoother, now1, now2);
890
891 u->last_smoother_update = now1;
892 /* exponentially increase the update interval up to the MAX limit */
893 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
894 }
895
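/* Latency as seen from the sink: bytes written minus the smoothed
* playback position, plus whatever still sits in the partially written
* memchunk. */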
896 static pa_usec_t sink_get_latency(struct userdata *u) {
897 pa_usec_t r;
898 int64_t delay;
899 pa_usec_t now1, now2;
900
901 pa_assert(u);
902
903 now1 = pa_rtclock_now();
904 now2 = pa_smoother_get(u->smoother, now1);
905
906 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
907
908 r = delay >= 0 ? (pa_usec_t) delay : 0;
909
910 if (u->memchunk.memblock)
911 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
912
913 return r;
914 }
915
916 static int build_pollfd(struct userdata *u) {
917 pa_assert(u);
918 pa_assert(u->pcm_handle);
919
920 if (u->alsa_rtpoll_item)
921 pa_rtpoll_item_free(u->alsa_rtpoll_item);
922
923 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
924 return -1;
925
926 return 0;
927 }
928
929 /* Called from IO context */
930 static int suspend(struct userdata *u) {
931 pa_assert(u);
932 pa_assert(u->pcm_handle);
933
934 pa_smoother_pause(u->smoother, pa_rtclock_now());
935
936 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
937 * take an awfully long time with our long buffer sizes today. */
938 snd_pcm_close(u->pcm_handle);
939 u->pcm_handle = NULL;
940
941 if (u->alsa_rtpoll_item) {
942 pa_rtpoll_item_free(u->alsa_rtpoll_item);
943 u->alsa_rtpoll_item = NULL;
944 }
945
946 /* We reset max_rewind/max_request here to make sure that, while we
947 * are suspended, the old max_request/max_rewind values set before the
948 * suspend cannot influence the per-stream buffers of newly created
949 * streams, and that those streams' requirements don't influence us
950 * either. */
951 pa_sink_set_max_rewind_within_thread(u->sink, 0);
952 pa_sink_set_max_request_within_thread(u->sink, 0);
953
954 pa_log_info("Device suspended...");
955
956 return 0;
957 }
958
959 /* Called from IO context */
960 static int update_sw_params(struct userdata *u) {
961 snd_pcm_uframes_t avail_min;
962 int err;
963
964 pa_assert(u);
965
966 /* Use the full buffer if no one asked us for anything specific */
967 u->hwbuf_unused = 0;
968
969 if (u->use_tsched) {
970 pa_usec_t latency;
971
972 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
973 size_t b;
974
975 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
976
977 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
978
979 /* We need at least one sample in our buffer */
980
981 if (PA_UNLIKELY(b < u->frame_size))
982 b = u->frame_size;
983
984 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
985 }
986
987 fix_min_sleep_wakeup(u);
988 fix_tsched_watermark(u);
989 }
990
991 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
992
993 /* We need at least one frame in the used part of the buffer */
994 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
995
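/* In tsched mode avail_min is additionally inflated by the whole sleep
* period, presumably so that poll() effectively never wakes us up and
* our own timers do the scheduling instead. */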
996 if (u->use_tsched) {
997 pa_usec_t sleep_usec, process_usec;
998
999 hw_sleep_time(u, &sleep_usec, &process_usec);
1000 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
1001 }
1002
1003 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
1004
1005 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
1006 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
1007 return err;
1008 }
1009
1010 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
1011 if (pa_alsa_pcm_is_hw(u->pcm_handle))
1012 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
1013 else {
1014 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
1015 pa_sink_set_max_rewind_within_thread(u->sink, 0);
1016 }
1017
1018 return 0;
1019 }
1020
1021 /* Called from IO Context on unsuspend or from main thread when creating sink */
1022 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
1023 bool in_thread) {
1024 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
1025 &u->sink->sample_spec);
1026
1027 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1028 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1029
1030 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1031 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1032
1033 fix_min_sleep_wakeup(u);
1034 fix_tsched_watermark(u);
1035
1036 if (in_thread)
1037 pa_sink_set_latency_range_within_thread(u->sink,
1038 u->min_latency_ref,
1039 pa_bytes_to_usec(u->hwbuf_size, ss));
1040 else {
1041 pa_sink_set_latency_range(u->sink,
1042 0,
1043 pa_bytes_to_usec(u->hwbuf_size, ss));
1044
1045 /* Work around the assert in pa_sink_set_latency_range_within_thread():
1046 keep track of min_latency and reuse it when this
1047 routine is called from IO context */
1048 u->min_latency_ref = u->sink->thread_info.min_latency;
1049 }
1050
1051 pa_log_info("Time scheduling watermark is %0.2fms",
1052 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
1053 }
1054
1055 /* Called from IO context */
1056 static int unsuspend(struct userdata *u) {
1057 pa_sample_spec ss;
1058 int err;
1059 bool b, d;
1060 snd_pcm_uframes_t period_size, buffer_size;
1061 char *device_name = NULL;
1062
1063 pa_assert(u);
1064 pa_assert(!u->pcm_handle);
1065
1066 pa_log_info("Trying resume...");
1067
1068 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1069 /* Need to open device in NONAUDIO mode */
1070 int len = strlen(u->device_name) + 8;
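/* The 8 extra bytes cover the ",AES0=6" suffix (7 characters) plus the
* terminating NUL. AES0=6 presumably sets the non-audio and
* not-copyrighted IEC958 channel-status bits so that receivers don't
* try to interpret the compressed stream as PCM. */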
1071
1072 device_name = pa_xmalloc(len);
1073 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1074 }
1075
1076 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1077 SND_PCM_NONBLOCK|
1078 SND_PCM_NO_AUTO_RESAMPLE|
1079 SND_PCM_NO_AUTO_CHANNELS|
1080 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1081 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1082 goto fail;
1083 }
1084
1085 ss = u->sink->sample_spec;
1086 period_size = u->fragment_size / u->frame_size;
1087 buffer_size = u->hwbuf_size / u->frame_size;
1088 b = u->use_mmap;
1089 d = u->use_tsched;
1090
1091 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, true)) < 0) {
1092 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1093 goto fail;
1094 }
1095
1096 if (b != u->use_mmap || d != u->use_tsched) {
1097 pa_log_warn("Resume failed, couldn't get original access mode.");
1098 goto fail;
1099 }
1100
1101 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1102 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1103 goto fail;
1104 }
1105
1106 if (period_size*u->frame_size != u->fragment_size ||
1107 buffer_size*u->frame_size != u->hwbuf_size) {
1108 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New: %lu/%lu)",
1109 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1110 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1111 goto fail;
1112 }
1113
1114 if (update_sw_params(u) < 0)
1115 goto fail;
1116
1117 if (build_pollfd(u) < 0)
1118 goto fail;
1119
1120 u->write_count = 0;
1121 pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
1122 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1123 u->last_smoother_update = 0;
1124
1125 u->first = true;
1126 u->since_start = 0;
1127
1128 /* reset the watermark to the value defined when the sink was created */
1129 if (u->use_tsched)
1130 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, true);
1131
1132 pa_log_info("Resumed successfully...");
1133
1134 pa_xfree(device_name);
1135 return 0;
1136
1137 fail:
1138 if (u->pcm_handle) {
1139 snd_pcm_close(u->pcm_handle);
1140 u->pcm_handle = NULL;
1141 }
1142
1143 pa_xfree(device_name);
1144
1145 return -PA_ERR_IO;
1146 }
1147
1148 /* Called from IO context */
1149 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1150 struct userdata *u = PA_SINK(o)->userdata;
1151
1152 switch (code) {
1153
1154 case PA_SINK_MESSAGE_GET_LATENCY: {
1155 pa_usec_t r = 0;
1156
1157 if (u->pcm_handle)
1158 r = sink_get_latency(u);
1159
1160 *((pa_usec_t*) data) = r;
1161
1162 return 0;
1163 }
1164
1165 case PA_SINK_MESSAGE_SET_STATE:
1166
1167 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1168
1169 case PA_SINK_SUSPENDED: {
1170 int r;
1171
1172 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1173
1174 if ((r = suspend(u)) < 0)
1175 return r;
1176
1177 break;
1178 }
1179
1180 case PA_SINK_IDLE:
1181 case PA_SINK_RUNNING: {
1182 int r;
1183
1184 if (u->sink->thread_info.state == PA_SINK_INIT) {
1185 if (build_pollfd(u) < 0)
1186 return -PA_ERR_IO;
1187 }
1188
1189 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1190 if ((r = unsuspend(u)) < 0)
1191 return r;
1192 }
1193
1194 break;
1195 }
1196
1197 case PA_SINK_UNLINKED:
1198 case PA_SINK_INIT:
1199 case PA_SINK_INVALID_STATE:
1200 ;
1201 }
1202
1203 break;
1204 }
1205
1206 return pa_sink_process_msg(o, code, data, offset, chunk);
1207 }
1208
1209 /* Called from main context */
1210 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1211 pa_sink_state_t old_state;
1212 struct userdata *u;
1213
1214 pa_sink_assert_ref(s);
1215 pa_assert_se(u = s->userdata);
1216
1217 old_state = pa_sink_get_state(u->sink);
1218
1219 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1220 reserve_done(u);
1221 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1222 if (reserve_init(u, u->device_name) < 0)
1223 return -PA_ERR_BUSY;
1224
1225 return 0;
1226 }
1227
1228 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1229 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1230
1231 pa_assert(u);
1232 pa_assert(u->mixer_handle);
1233
1234 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1235 return 0;
1236
1237 if (!PA_SINK_IS_LINKED(u->sink->state))
1238 return 0;
1239
1240 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1241 pa_sink_set_mixer_dirty(u->sink, true);
1242 return 0;
1243 }
1244
1245 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1246 pa_sink_get_volume(u->sink, true);
1247 pa_sink_get_mute(u->sink, true);
1248 }
1249
1250 return 0;
1251 }
1252
1253 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1254 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1255
1256 pa_assert(u);
1257 pa_assert(u->mixer_handle);
1258
1259 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1260 return 0;
1261
1262 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1263 pa_sink_set_mixer_dirty(u->sink, true);
1264 return 0;
1265 }
1266
1267 if (mask & SND_CTL_EVENT_MASK_VALUE)
1268 pa_sink_update_volume_and_mute(u->sink);
1269
1270 return 0;
1271 }
1272
1273 static void sink_get_volume_cb(pa_sink *s) {
1274 struct userdata *u = s->userdata;
1275 pa_cvolume r;
1276 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1277
1278 pa_assert(u);
1279 pa_assert(u->mixer_path);
1280 pa_assert(u->mixer_handle);
1281
1282 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1283 return;
1284
1285 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1286 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1287
1288 pa_log_debug("Read hardware volume: %s",
1289 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
1290
1291 if (pa_cvolume_equal(&u->hardware_volume, &r))
1292 return;
1293
1294 s->real_volume = u->hardware_volume = r;
1295
1296 /* Hmm, so the hardware volume changed, let's reset our software volume */
1297 if (u->mixer_path->has_dB)
1298 pa_sink_set_soft_volume(s, NULL);
1299 }
1300
1301 static void sink_set_volume_cb(pa_sink *s) {
1302 struct userdata *u = s->userdata;
1303 pa_cvolume r;
1304 char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1305 bool deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1306
1307 pa_assert(u);
1308 pa_assert(u->mixer_path);
1309 pa_assert(u->mixer_handle);
1310
1311 /* Shift up by the base volume */
1312 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1313
1314 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1315 return;
1316
1317 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1318 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1319
1320 u->hardware_volume = r;
1321
1322 if (u->mixer_path->has_dB) {
1323 pa_cvolume new_soft_volume;
1324 bool accurate_enough;
1325
1326 /* Match exactly what the user requested by software */
1327 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1328
1329 /* If the adjustment to do in software is only minimal we
1330 * can skip it. That saves us CPU at the expense of a bit of
1331 * accuracy */
1332 accurate_enough =
1333 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1334 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1335
1336 pa_log_debug("Requested volume: %s",
1337 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
1338 pa_log_debug("Got hardware volume: %s",
1339 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
1340 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1341 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
1342 pa_yes_no(accurate_enough));
1343
1344 if (!accurate_enough)
1345 s->soft_volume = new_soft_volume;
1346
1347 } else {
1348 pa_log_debug("Wrote hardware volume: %s",
1349 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));
1350
1351 /* We can't match exactly what the user requested, hence let's
1352 * at least tell the user about it */
1353
1354 s->real_volume = r;
1355 }
1356 }
1357
1358 static void sink_write_volume_cb(pa_sink *s) {
1359 struct userdata *u = s->userdata;
1360 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1361
1362 pa_assert(u);
1363 pa_assert(u->mixer_path);
1364 pa_assert(u->mixer_handle);
1365 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1366
1367 /* Shift up by the base volume */
1368 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1369
1370 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
1371 pa_log_error("Writing HW volume failed");
1372 else {
1373 pa_cvolume tmp_vol;
1374 bool accurate_enough;
1375
1376 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1377 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1378
1379 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1380 accurate_enough =
1381 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1382 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1383
1384 if (!accurate_enough) {
1385 char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1386
1387 pa_log_debug("Written HW volume did not match the request: %s (request) != %s",
1388 pa_cvolume_snprint_verbose(volume_buf[0],
1389 sizeof(volume_buf[0]),
1390 &s->thread_info.current_hw_volume,
1391 &s->channel_map,
1392 true),
1393 pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
1394 }
1395 }
1396 }
1397
1398 static int sink_get_mute_cb(pa_sink *s, bool *mute) {
1399 struct userdata *u = s->userdata;
1400
1401 pa_assert(u);
1402 pa_assert(u->mixer_path);
1403 pa_assert(u->mixer_handle);
1404
1405 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
1406 return -1;
1407
1408 return 0;
1409 }
1410
1411 static void sink_set_mute_cb(pa_sink *s) {
1412 struct userdata *u = s->userdata;
1413
1414 pa_assert(u);
1415 pa_assert(u->mixer_path);
1416 pa_assert(u->mixer_handle);
1417
1418 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1419 }
1420
1421 static void mixer_volume_init(struct userdata *u) {
1422 pa_assert(u);
1423
1424 if (!u->mixer_path->has_volume) {
1425 pa_sink_set_write_volume_callback(u->sink, NULL);
1426 pa_sink_set_get_volume_callback(u->sink, NULL);
1427 pa_sink_set_set_volume_callback(u->sink, NULL);
1428
1429 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1430 } else {
1431 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1432 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1433
1434 if (u->mixer_path->has_dB && u->deferred_volume) {
1435 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1436 pa_log_info("Successfully enabled deferred volume.");
1437 } else
1438 pa_sink_set_write_volume_callback(u->sink, NULL);
1439
1440 if (u->mixer_path->has_dB) {
1441 pa_sink_enable_decibel_volume(u->sink, true);
1442 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1443
1444 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1445 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1446
1447 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1448 } else {
1449 pa_sink_enable_decibel_volume(u->sink, false);
1450 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1451
1452 u->sink->base_volume = PA_VOLUME_NORM;
1453 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1454 }
1455
1456 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1457 }
1458
1459 if (!u->mixer_path->has_mute) {
1460 pa_sink_set_get_mute_callback(u->sink, NULL);
1461 pa_sink_set_set_mute_callback(u->sink, NULL);
1462 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1463 } else {
1464 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1465 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1466 pa_log_info("Using hardware mute control.");
1467 }
1468 }
1469
1470 static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
1471 struct userdata *u = s->userdata;
1472
1473 pa_assert(u);
1474 pa_assert(p);
1475 pa_assert(u->ucm_context);
1476
1477 return pa_alsa_ucm_set_port(u->ucm_context, p, true);
1478 }
1479
1480 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1481 struct userdata *u = s->userdata;
1482 pa_alsa_port_data *data;
1483
1484 pa_assert(u);
1485 pa_assert(p);
1486 pa_assert(u->mixer_handle);
1487
1488 data = PA_DEVICE_PORT_DATA(p);
1489
1490 pa_assert_se(u->mixer_path = data->path);
1491 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1492
1493 mixer_volume_init(u);
1494
1495 if (s->set_mute)
1496 s->set_mute(s);
1497 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
1498 if (s->write_volume)
1499 s->write_volume(s);
1500 } else {
1501 if (s->set_volume)
1502 s->set_volume(s);
1503 }
1504
1505 return 0;
1506 }
1507
1508 static void sink_update_requested_latency_cb(pa_sink *s) {
1509 struct userdata *u = s->userdata;
1510 size_t before;
1511 pa_assert(u);
1512 pa_assert(u->use_tsched); /* only when timer scheduling is used
1513 * can we dynamically adjust
1514 * the latency */
1515
1516 if (!u->pcm_handle)
1517 return;
1518
1519 before = u->hwbuf_unused;
1520 update_sw_params(u);
1521
1522 /* Let's check whether we now use only a smaller part of the
1523 buffer than before. If so, we need to make sure that subsequent
1524 rewinds are relative to the new maximum fill level and not to the
1525 current fill level. Thus, let's do a full rewind once, to clear
1526 things up. */
1527
1528 if (u->hwbuf_unused > before) {
1529 pa_log_debug("Requesting rewind due to latency change.");
1530 pa_sink_request_rewind(s, (size_t) -1);
1531 }
1532 }
1533
1534 static pa_idxset* sink_get_formats(pa_sink *s) {
1535 struct userdata *u = s->userdata;
1536
1537 pa_assert(u);
1538
1539 return pa_idxset_copy(u->formats, (pa_copy_func_t) pa_format_info_copy);
1540 }
1541
1542 static bool sink_set_formats(pa_sink *s, pa_idxset *formats) {
1543 struct userdata *u = s->userdata;
1544 pa_format_info *f, *g;
1545 uint32_t idx, n;
1546
1547 pa_assert(u);
1548
1549 /* FIXME: also validate sample rates against what the device supports */
1550 PA_IDXSET_FOREACH(f, formats, idx) {
1551 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1552 /* EAC3 cannot be sent over S/PDIF */
1553 return false;
1554 }
1555
1556 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
1557 u->formats = pa_idxset_new(NULL, NULL);
1558
1559 /* Note: the logic below won't apply if we're using software encoding.
1560 * This is fine for now since we don't support that via the passthrough
1561 * framework, but this must be changed if we do. */
1562
1563 /* Count how many sample rates we support */
1564 for (idx = 0, n = 0; u->rates[idx]; idx++)
1565 n++;
1566
1567 /* First insert non-PCM formats since we prefer those. */
1568 PA_IDXSET_FOREACH(f, formats, idx) {
1569 if (!pa_format_info_is_pcm(f)) {
1570 g = pa_format_info_copy(f);
1571 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1572 pa_idxset_put(u->formats, g, NULL);
1573 }
1574 }
1575
1576 /* Now add any PCM formats */
1577 PA_IDXSET_FOREACH(f, formats, idx) {
1578 if (pa_format_info_is_pcm(f)) {
1579 /* We don't set rates here since we'll just tack on a resampler for
1580 * unsupported rates */
1581 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1582 }
1583 }
1584
1585 return true;
1586 }
1587
1588 static int sink_update_rate_cb(pa_sink *s, uint32_t rate) {
1589 struct userdata *u = s->userdata;
1590 int i;
1591 bool supported = false;
1592
1593 pa_assert(u);
1594
1595 for (i = 0; u->rates[i]; i++) {
1596 if (u->rates[i] == rate) {
1597 supported = true;
1598 break;
1599 }
1600 }
1601
1602 if (!supported) {
1603 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1604 return -1;
1605 }
1606
1607 if (!PA_SINK_IS_OPENED(s->state)) {
1608 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1609 u->sink->sample_spec.rate = rate;
1610 return 0;
1611 }
1612
1613 return -1;
1614 }
1615
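/* Take back data we have already handed to ALSA but that has not been
* played yet. The rewind is capped so that it never reaches into the
* region the hardware may already be fetching (rewind_safeguard) nor
* exceeds what is actually still queued. */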
1616 static int process_rewind(struct userdata *u) {
1617 snd_pcm_sframes_t unused;
1618 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1619 pa_assert(u);
1620
1621 if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1622 pa_sink_process_rewind(u->sink, 0);
1623 return 0;
1624 }
1625
1626 /* Figure out how much we shall rewind and reset the counter */
1627 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1628
1629 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1630
1631 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1632 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1633 return -1;
1634 }
1635
1636 unused_nbytes = (size_t) unused * u->frame_size;
1637
1638 /* make sure the rewind doesn't go too far; rewinding into data the DMA engine may already be fetching can cause issues */
1639 unused_nbytes += u->rewind_safeguard;
1640
1641 if (u->hwbuf_size > unused_nbytes)
1642 limit_nbytes = u->hwbuf_size - unused_nbytes;
1643 else
1644 limit_nbytes = 0;
1645
1646 if (rewind_nbytes > limit_nbytes)
1647 rewind_nbytes = limit_nbytes;
1648
1649 if (rewind_nbytes > 0) {
1650 snd_pcm_sframes_t in_frames, out_frames;
1651
1652 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1653
1654 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1655 pa_log_debug("before: %lu", (unsigned long) in_frames);
1656 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1657 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1658 if (try_recover(u, "process_rewind", out_frames) < 0)
1659 return -1;
1660 out_frames = 0;
1661 }
1662
1663 pa_log_debug("after: %lu", (unsigned long) out_frames);
1664
1665 rewind_nbytes = (size_t) out_frames * u->frame_size;
1666
1667 if (rewind_nbytes <= 0)
1668 pa_log_info("Tried rewind, but was apparently not possible.");
1669 else {
1670 u->write_count -= rewind_nbytes;
1671 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1672 pa_sink_process_rewind(u->sink, rewind_nbytes);
1673
1674 u->after_rewind = true;
1675 return 0;
1676 }
1677 } else
1678 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1679
1680 pa_sink_process_rewind(u->sink, 0);
1681 return 0;
1682 }
1683
1684 static void thread_func(void *userdata) {
1685 struct userdata *u = userdata;
1686 unsigned short revents = 0;
1687
1688 pa_assert(u);
1689
1690 pa_log_debug("Thread starting up");
1691
1692 if (u->core->realtime_scheduling)
1693 pa_make_realtime(u->core->realtime_priority);
1694
1695 pa_thread_mq_install(&u->thread_mq);
1696
1697 for (;;) {
1698 int ret;
1699 pa_usec_t rtpoll_sleep = 0, real_sleep;
1700
1701 #ifdef DEBUG_TIMING
1702 pa_log_debug("Loop");
1703 #endif
1704
1705 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
1706 if (process_rewind(u) < 0)
1707 goto fail;
1708 }
1709
1710 /* Render some data and write it to the dsp */
1711 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1712 int work_done;
1713 pa_usec_t sleep_usec = 0;
1714 bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1715
1716 if (u->use_mmap)
1717 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1718 else
1719 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1720
1721 if (work_done < 0)
1722 goto fail;
1723
1724 /* pa_log_debug("work_done = %i", work_done); */
1725
1726 if (work_done) {
1727
1728 if (u->first) {
1729 pa_log_info("Starting playback.");
1730 snd_pcm_start(u->pcm_handle);
1731
1732 pa_smoother_resume(u->smoother, pa_rtclock_now(), true);
1733
1734 u->first = false;
1735 }
1736
1737 update_smoother(u);
1738 }
1739
1740 if (u->use_tsched) {
1741 pa_usec_t cusec;
1742
1743 if (u->since_start <= u->hwbuf_size) {
1744
1745 /* USB devices on ALSA seem to hit a buffer
1746 * underrun during the first iterations much
1747 * quicker than we calculate here, probably due to
1748 * the transport latency. To compensate for that
1749 * we artificially decrease the sleep time until
1750 * we have filled the buffer at least once
1751 * completely. */
1752
1753 if (pa_log_ratelimit(PA_LOG_DEBUG))
1754 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1755 sleep_usec /= 2;
1756 }
1757
1758 /* OK, the playback buffer is now full, let's
1759 * calculate when to wake up next */
1760 #ifdef DEBUG_TIMING
1761 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1762 #endif
1763
1764 /* Convert from the sound card time domain to the
1765 * system time domain */
1766 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1767
1768 #ifdef DEBUG_TIMING
1769 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1770 #endif
1771
1772 /* We don't trust the conversion, so we wake up whatever comes first */
1773 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1774 }
1775
1776 u->after_rewind = false;
1777
1778 }
1779
1780 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1781 pa_usec_t volume_sleep;
1782 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1783 if (volume_sleep > 0) {
1784 if (rtpoll_sleep > 0)
1785 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1786 else
1787 rtpoll_sleep = volume_sleep;
1788 }
1789 }
1790
1791 if (rtpoll_sleep > 0) {
1792 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1793 real_sleep = pa_rtclock_now();
1794 }
1795 else
1796 pa_rtpoll_set_timer_disabled(u->rtpoll);
1797
1798 /* Hmm, nothing to do. Let's sleep */
1799 if ((ret = pa_rtpoll_run(u->rtpoll, true)) < 0)
1800 goto fail;
1801
1802 if (rtpoll_sleep > 0) {
1803 real_sleep = pa_rtclock_now() - real_sleep;
1804 #ifdef DEBUG_TIMING
1805 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1806 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1807 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
1808 #endif
1809 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
1810 pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
1811 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
1812 (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
1813 }
1814
1815 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1816 pa_sink_volume_change_apply(u->sink, NULL);
1817
1818 if (ret == 0)
1819 goto finish;
1820
1821 /* Tell ALSA about this and process its response */
1822 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1823 struct pollfd *pollfd;
1824 int err;
1825 unsigned n;
1826
1827 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1828
1829 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1830 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1831 goto fail;
1832 }
1833
1834 if (revents & ~POLLOUT) {
1835 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1836 goto fail;
1837
1838 u->first = true;
1839 u->since_start = 0;
1840 revents = 0;
1841 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1842 pa_log_debug("Wakeup from ALSA!");
1843
1844 } else
1845 revents = 0;
1846 }
1847
1848 fail:
1849 /* If this wasn't a regular exit from the loop we have to continue
1850 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1851 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1852 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1853
1854 finish:
1855 pa_log_debug("Thread shutting down");
1856 }
1857
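/* Naming precedence, illustrated with hypothetical values: an explicit
 * sink_name=foo is used verbatim; otherwise name=0 combined with mapping
 * "analog-stereo" yields "alsa_output.0.analog-stereo", and without a
 * mapping just "alsa_output.0". Only explicitly requested names set
 * namereg_fail, i.e. refuse to be auto-uniquified on collision. */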
1858 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1859 const char *n;
1860 char *t;
1861
1862 pa_assert(data);
1863 pa_assert(ma);
1864 pa_assert(device_name);
1865
1866 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1867 pa_sink_new_data_set_name(data, n);
1868 data->namereg_fail = true;
1869 return;
1870 }
1871
1872 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1873 data->namereg_fail = true;
1874 else {
1875 n = device_id ? device_id : device_name;
1876 data->namereg_fail = false;
1877 }
1878
1879 if (mapping)
1880 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1881 else
1882 t = pa_sprintf_malloc("alsa_output.%s", n);
1883
1884 pa_sink_new_data_set_name(data, t);
1885 pa_xfree(t);
1886 }
1887
1888 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
1889 snd_hctl_t *hctl;
1890
1891 if (!mapping && !element)
1892 return;
1893
1894 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1895 pa_log_info("Failed to find a working mixer device.");
1896 return;
1897 }
1898
1899 if (element) {
1900
1901 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1902 goto fail;
1903
1904 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1905 goto fail;
1906
1907 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1908 pa_alsa_path_dump(u->mixer_path);
1909 } else if (!(u->mixer_path_set = mapping->output_path_set))
1910 goto fail;
1911
1912 return;
1913
1914 fail:
1915
1916 if (u->mixer_path) {
1917 pa_alsa_path_free(u->mixer_path);
1918 u->mixer_path = NULL;
1919 }
1920
1921 if (u->mixer_handle) {
1922 snd_mixer_close(u->mixer_handle);
1923 u->mixer_handle = NULL;
1924 }
1925 }
1926
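/* Activates the mixer path selected above: the active port's path if there
 * is one, otherwise the single available path. If any path can report
 * volume or mute changes, mixer events are hooked into either the
 * IO-thread rtpoll (deferred volume) or the main loop. */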
1927 static int setup_mixer(struct userdata *u, bool ignore_dB) {
1928 bool need_mixer_callback = false;
1929
1930 pa_assert(u);
1931
1932 if (!u->mixer_handle)
1933 return 0;
1934
1935 if (u->sink->active_port) {
1936 pa_alsa_port_data *data;
1937
1938 /* We have a list of supported paths, so let's activate the
1939 * one that has been chosen as active */
1940
1941 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1942 u->mixer_path = data->path;
1943
1944 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
1945
1946 } else {
1947
1948 if (!u->mixer_path && u->mixer_path_set)
1949 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1950
1951 if (u->mixer_path) {
1952 /* Hmm, we have only a single path, so let's activate it */
1953
1954 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
1955
1956 } else
1957 return 0;
1958 }
1959
1960 mixer_volume_init(u);
1961
1962 /* Will we need to register callbacks? */
1963 if (u->mixer_path_set && u->mixer_path_set->paths) {
1964 pa_alsa_path *p;
1965 void *state;
1966
1967 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1968 if (p->has_volume || p->has_mute)
1969 need_mixer_callback = true;
1970 }
1971 } else if (u->mixer_path)
1972 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1973
1974
1975 if (need_mixer_callback) {
1976 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1977 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1978 u->mixer_pd = pa_alsa_mixer_pdata_new();
1979 mixer_callback = io_mixer_callback;
1980
1981 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1982 pa_log("Failed to initialize file descriptor monitoring");
1983 return -1;
1984 }
1985 } else {
1986 u->mixer_fdl = pa_alsa_fdlist_new();
1987 mixer_callback = ctl_mixer_callback;
1988
1989 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1990 pa_log("Failed to initialize file descriptor monitoring");
1991 return -1;
1992 }
1993 }
1994
1995 if (u->mixer_path_set)
1996 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1997 else
1998 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1999 }
2000
2001 return 0;
2002 }
2003
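/* Entry point used by module-alsa-sink and module-alsa-card. A minimal
 * caller sketch (hypothetical, error handling elided):
 *
 *     pa_modargs *ma = pa_modargs_new(m->argument, valid_modargs);
 *     pa_sink *s = pa_alsa_sink_new(m, ma, __FILE__, NULL, NULL);
 *
 * Returns NULL on failure, after releasing any partially set up state. */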
2004 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
2005
2006 struct userdata *u = NULL;
2007 const char *dev_id = NULL, *key, *mod_name;
2008 pa_sample_spec ss;
2009 char *thread_name = NULL;
2010 uint32_t alternate_sample_rate;
2011 pa_channel_map map;
2012 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2013 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2014 size_t frame_size;
2015 bool use_mmap = true, b, use_tsched = true, d, ignore_dB = false, namereg_fail = false, deferred_volume = false, set_formats = false, fixed_latency_range = false;
2016 pa_sink_new_data data;
2017 pa_alsa_profile_set *profile_set = NULL;
2018 void *state = NULL;
2019
2020 pa_assert(m);
2021 pa_assert(ma);
2022
2023 ss = m->core->default_sample_spec;
2024 map = m->core->default_channel_map;
2025
2026 /* Pick sample spec overrides from the mapping, if any */
2027 if (mapping) {
2028 if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
2029 ss.format = mapping->sample_spec.format;
2030 if (mapping->sample_spec.rate != 0)
2031 ss.rate = mapping->sample_spec.rate;
2032 if (mapping->sample_spec.channels != 0) {
2033 ss.channels = mapping->sample_spec.channels;
2034 if (pa_channel_map_valid(&mapping->channel_map))
2035 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
2036 }
2037 }
2038
2039 /* Override with modargs if provided */
2040 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2041 pa_log("Failed to parse sample specification and channel map");
2042 goto fail;
2043 }
2044
2045 alternate_sample_rate = m->core->alternate_sample_rate;
2046 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2047 pa_log("Failed to parse alternate sample rate");
2048 goto fail;
2049 }
2050
2051 frame_size = pa_frame_size(&ss);
2052
2053 nfrags = m->core->default_n_fragments;
2054 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2055 if (frag_size <= 0)
2056 frag_size = (uint32_t) frame_size;
2057 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2058 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2059
2060 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2061 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2062 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2063 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2064 pa_log("Failed to parse buffer metrics");
2065 goto fail;
2066 }
2067
2068 buffer_size = nfrags * frag_size;
2069
2070 period_frames = frag_size/frame_size;
2071 buffer_frames = buffer_size/frame_size;
2072 tsched_frames = tsched_size/frame_size;
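/* Worked example, assuming the common daemon defaults of 4 fragments of
 * 25 ms each and S16LE stereo at 48 kHz: frame_size = 4 bytes, frag_size =
 * 4800 bytes, buffer_size = 19200 bytes (100 ms of audio), period_frames =
 * 1200, buffer_frames = 4800. */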
2073
2074 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2075 pa_log("Failed to parse mmap argument.");
2076 goto fail;
2077 }
2078
2079 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2080 pa_log("Failed to parse tsched argument.");
2081 goto fail;
2082 }
2083
2084 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2085 pa_log("Failed to parse ignore_dB argument.");
2086 goto fail;
2087 }
2088
2089 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2090 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2091 pa_log("Failed to parse rewind_safeguard argument");
2092 goto fail;
2093 }
2094
2095 deferred_volume = m->core->deferred_volume;
2096 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2097 pa_log("Failed to parse deferred_volume argument.");
2098 goto fail;
2099 }
2100
2101 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2102 pa_log("Failed to parse fixed_latency_range argument.");
2103 goto fail;
2104 }
2105
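/* Timer-based scheduling is refused here if the kernel lacks
 * high-resolution timers, since tsched depends on precise wakeups. */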
2106 use_tsched = pa_alsa_may_tsched(use_tsched);
2107
2108 u = pa_xnew0(struct userdata, 1);
2109 u->core = m->core;
2110 u->module = m;
2111 u->use_mmap = use_mmap;
2112 u->use_tsched = use_tsched;
2113 u->deferred_volume = deferred_volume;
2114 u->fixed_latency_range = fixed_latency_range;
2115 u->first = true;
2116 u->rewind_safeguard = rewind_safeguard;
2117 u->rtpoll = pa_rtpoll_new();
2118 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2119
2120 u->smoother = pa_smoother_new(
2121 SMOOTHER_ADJUST_USEC,
2122 SMOOTHER_WINDOW_USEC,
2123 true,
2124 true,
2125 5,
2126 pa_rtclock_now(),
2127 true);
2128 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2129
2130 /* Use the UCM context for this sink if the mapping provides one */
2131 if (mapping && mapping->ucm_context.ucm)
2132 u->ucm_context = &mapping->ucm_context;
2133
2134 dev_id = pa_modargs_get_value(
2135 ma, "device_id",
2136 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2137
2138 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2139
2140 if (reserve_init(u, dev_id) < 0)
2141 goto fail;
2142
2143 if (reserve_monitor_init(u, dev_id) < 0)
2144 goto fail;
2145
2146 b = use_mmap;
2147 d = use_tsched;
2148
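/* Open the PCM device. Three cases: a fully specified mapping, a bare
 * device_id= for which we probe a suitable mapping ourselves, or a raw
 * ALSA device string. On return, b and d tell us whether mmap and tsched
 * were actually available. */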
2149 if (mapping) {
2150
2151 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2152 pa_log("device_id= not set");
2153 goto fail;
2154 }
2155
2156 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2157 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2158 pa_log("Failed to enable ucm modifier %s", mod_name);
2159 else
2160 pa_log_debug("Enabled ucm modifier %s", mod_name);
2161 }
2162
2163 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2164 dev_id,
2165 &u->device_name,
2166 &ss, &map,
2167 SND_PCM_STREAM_PLAYBACK,
2168 &period_frames, &buffer_frames, tsched_frames,
2169 &b, &d, mapping)))
2170 goto fail;
2171
2172 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2173
2174 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2175 goto fail;
2176
2177 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2178 dev_id,
2179 &u->device_name,
2180 &ss, &map,
2181 SND_PCM_STREAM_PLAYBACK,
2182 &period_frames, &buffer_frames, tsched_frames,
2183 &b, &d, profile_set, &mapping)))
2184 goto fail;
2185
2186 } else {
2187
2188 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2189 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2190 &u->device_name,
2191 &ss, &map,
2192 SND_PCM_STREAM_PLAYBACK,
2193 &period_frames, &buffer_frames, tsched_frames,
2194 &b, &d, false)))
2195 goto fail;
2196 }
2197
2198 pa_assert(u->device_name);
2199 pa_log_info("Successfully opened device %s.", u->device_name);
2200
2201 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2202 pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2203 goto fail;
2204 }
2205
2206 if (mapping)
2207 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2208
2209 if (use_mmap && !b) {
2210 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2211 u->use_mmap = use_mmap = false;
2212 }
2213
2214 if (use_tsched && (!b || !d)) {
2215 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2216 u->use_tsched = use_tsched = false;
2217 }
2218
2219 if (u->use_mmap)
2220 pa_log_info("Successfully enabled mmap() mode.");
2221
2222 if (u->use_tsched) {
2223 pa_log_info("Successfully enabled timer-based scheduling mode.");
2224
2225 if (u->fixed_latency_range)
2226 pa_log_info("Disabling latency range changes on underrun");
2227 }
2228
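/* IEC958 (S/PDIF) and HDMI can transport encoded, non-PCM formats, so
 * flag the sink for custom format negotiation (set up further below). */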
2229 if (is_iec958(u) || is_hdmi(u))
2230 set_formats = true;
2231
2232 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
2233 if (!u->rates) {
2234 pa_log_error("Failed to find any supported sample rates.");
2235 goto fail;
2236 }
2237
2238 /* ALSA might tweak the sample spec, so recalculate the frame size */
2239 frame_size = pa_frame_size(&ss);
2240
2241 if (!u->ucm_context)
2242 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2243
2244 pa_sink_new_data_init(&data);
2245 data.driver = driver;
2246 data.module = m;
2247 data.card = card;
2248 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2249
2250 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2251 * variable instead of using &data.namereg_fail directly, because
2252 * data.namereg_fail is a bitfield and taking the address of a bitfield
2253 * variable is impossible. */
2254 namereg_fail = data.namereg_fail;
2255 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2256 pa_log("Failed to parse namereg_fail argument.");
2257 pa_sink_new_data_done(&data);
2258 goto fail;
2259 }
2260 data.namereg_fail = namereg_fail;
2261
2262 pa_sink_new_data_set_sample_spec(&data, &ss);
2263 pa_sink_new_data_set_channel_map(&data, &map);
2264 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2265
2266 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2267 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2268 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2269 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2270 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2271
2272 if (mapping) {
2273 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2274 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2275
2276 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2277 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2278 }
2279
2280 pa_alsa_init_description(data.proplist, card);
2281
2282 if (u->control_device)
2283 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2284
2285 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2286 pa_log("Invalid properties");
2287 pa_sink_new_data_done(&data);
2288 goto fail;
2289 }
2290
2291 if (u->ucm_context)
2292 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, true, card);
2293 else if (u->mixer_path_set)
2294 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2295
2296 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2297 (set_formats ? PA_SINK_SET_FORMATS : 0));
2298 pa_sink_new_data_done(&data);
2299
2300 if (!u->sink) {
2301 pa_log("Failed to create sink object");
2302 goto fail;
2303 }
2304
2305 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2306 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2307 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2308 goto fail;
2309 }
2310
2311 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2312 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2313 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2314 goto fail;
2315 }
2316
2317 u->sink->parent.process_msg = sink_process_msg;
2318 if (u->use_tsched)
2319 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2320 u->sink->set_state = sink_set_state_cb;
2321 if (u->ucm_context)
2322 u->sink->set_port = sink_set_port_ucm_cb;
2323 else
2324 u->sink->set_port = sink_set_port_cb;
2325 if (u->sink->alternate_sample_rate)
2326 u->sink->update_rate = sink_update_rate_cb;
2327 u->sink->userdata = u;
2328
2329 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2330 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2331
2332 u->frame_size = frame_size;
2333 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2334 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2335 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2336
2337 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2338 (double) u->hwbuf_size / (double) u->fragment_size,
2339 (long unsigned) u->fragment_size,
2340 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2341 (long unsigned) u->hwbuf_size,
2342 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2343
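/* The sink may ask us for up to a full hardware buffer per request.
 * Rewinds are only enabled on raw hw devices, since rewinding through
 * ALSA plugin layers (dmix, plug, ...) is unreliable. */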
2344 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2345 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2346 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2347 else {
2348 pa_log_info("Disabling rewind for device %s", u->device_name);
2349 pa_sink_set_max_rewind(u->sink, 0);
2350 }
2351
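/* With timer-based scheduling the latency is dynamic and governed by the
 * watermark; otherwise the full hardware buffer shows up as fixed
 * latency. */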
2352 if (u->use_tsched) {
2353 u->tsched_watermark_ref = tsched_watermark;
2354 reset_watermark(u, u->tsched_watermark_ref, &ss, false);
2355 } else
2356 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2357
2358 reserve_update(u);
2359
2360 if (update_sw_params(u) < 0)
2361 goto fail;
2362
2363 if (u->ucm_context) {
2364 if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, true) < 0)
2365 goto fail;
2366 } else if (setup_mixer(u, ignore_dB) < 0)
2367 goto fail;
2368
2369 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2370
2371 thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
2372 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2373 pa_log("Failed to create thread.");
2374 goto fail;
2375 }
2376 pa_xfree(thread_name);
2377 thread_name = NULL;
2378
2379 /* Get initial mixer settings */
2380 if (data.volume_is_set) {
2381 if (u->sink->set_volume)
2382 u->sink->set_volume(u->sink);
2383 } else {
2384 if (u->sink->get_volume)
2385 u->sink->get_volume(u->sink);
2386 }
2387
2388 if (data.muted_is_set) {
2389 if (u->sink->set_mute)
2390 u->sink->set_mute(u->sink);
2391 } else {
2392 if (u->sink->get_mute) {
2393 bool mute;
2394
2395 if (u->sink->get_mute(u->sink, &mute) >= 0)
2396 pa_sink_set_mute(u->sink, mute, false);
2397 }
2398 }
2399
2400 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2401 u->sink->write_volume(u->sink);
2402
2403 if (set_formats) {
2404 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2405 pa_format_info *format;
2406
2407 /* To start with, we only support PCM formats. Other formats may be added
2408 * with pa_sink_set_formats(). */
2409 format = pa_format_info_new();
2410 format->encoding = PA_ENCODING_PCM;
2411 u->formats = pa_idxset_new(NULL, NULL);
2412 pa_idxset_put(u->formats, format, NULL);
2413
2414 u->sink->get_formats = sink_get_formats;
2415 u->sink->set_formats = sink_set_formats;
2416 }
2417
2418 pa_sink_put(u->sink);
2419
2420 if (profile_set)
2421 pa_alsa_profile_set_free(profile_set);
2422
2423 return u->sink;
2424
2425 fail:
2426 pa_xfree(thread_name);
2427
2428 if (u)
2429 userdata_free(u);
2430
2431 if (profile_set)
2432 pa_alsa_profile_set_free(profile_set);
2433
2434 return NULL;
2435 }
2436
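/* Tear-down happens roughly in reverse order of setup: unlink the sink
 * first so no new work arrives, stop the IO thread, and only then release
 * the ALSA handles and remaining state. */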
2437 static void userdata_free(struct userdata *u) {
2438 pa_assert(u);
2439
2440 if (u->sink)
2441 pa_sink_unlink(u->sink);
2442
2443 if (u->thread) {
2444 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2445 pa_thread_free(u->thread);
2446 }
2447
2448 pa_thread_mq_done(&u->thread_mq);
2449
2450 if (u->sink)
2451 pa_sink_unref(u->sink);
2452
2453 if (u->memchunk.memblock)
2454 pa_memblock_unref(u->memchunk.memblock);
2455
2456 if (u->mixer_pd)
2457 pa_alsa_mixer_pdata_free(u->mixer_pd);
2458
2459 if (u->alsa_rtpoll_item)
2460 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2461
2462 if (u->rtpoll)
2463 pa_rtpoll_free(u->rtpoll);
2464
2465 if (u->pcm_handle) {
2466 snd_pcm_drop(u->pcm_handle);
2467 snd_pcm_close(u->pcm_handle);
2468 }
2469
2470 if (u->mixer_fdl)
2471 pa_alsa_fdlist_free(u->mixer_fdl);
2472
2473 if (u->mixer_path && !u->mixer_path_set)
2474 pa_alsa_path_free(u->mixer_path);
2475
2476 if (u->mixer_handle)
2477 snd_mixer_close(u->mixer_handle);
2478
2479 if (u->smoother)
2480 pa_smoother_free(u->smoother);
2481
2482 if (u->formats)
2483 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
2484
2485 if (u->rates)
2486 pa_xfree(u->rates);
2487
2488 reserve_done(u);
2489 monitor_done(u);
2490
2491 pa_xfree(u->device_name);
2492 pa_xfree(u->control_device);
2493 pa_xfree(u->paths_dir);
2494 pa_xfree(u);
2495 }
2496
2497 void pa_alsa_sink_free(pa_sink *s) {
2498 struct userdata *u;
2499
2500 pa_sink_assert_ref(s);
2501 pa_assert_se(u = s->userdata);
2502
2503 userdata_free(u);
2504 }