1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
34 #endif
35
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/volume.h>
39 #include <pulse/xmalloc.h>
40 #include <pulse/internal.h>
41
42 #include <pulsecore/core.h>
43 #include <pulsecore/i18n.h>
44 #include <pulsecore/module.h>
45 #include <pulsecore/memchunk.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/modargs.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/sample-util.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/macro.h>
53 #include <pulsecore/thread.h>
54 #include <pulsecore/thread-mq.h>
55 #include <pulsecore/rtpoll.h>
56 #include <pulsecore/time-smoother.h>
57
58 #include <modules/reserve-wrap.h>
59
60 #include "alsa-util.h"
61 #include "alsa-sink.h"
62
63 /* #define DEBUG_TIMING */
64
65 #define DEFAULT_DEVICE "default"
66
67 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
68 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69
70 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
71 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
72 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
73 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level is ever below this threshold, increase the watermark */
74 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75
76 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
77 * will increase the watermark only if we hit a real underrun. */
78
79 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
80 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
81
82 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
83 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84
85 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
86 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
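/* For illustration: update_smoother() below doubles the interval on every
 * accepted update, so with the defaults it grows 2ms -> 4ms -> 8ms -> ...
 * -> 128ms and is then capped at 200ms. */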
87
88 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the difference gets greater than this level */
89
90 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
91 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample format we may rewind more than the 256 bytes above */
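/* Example: at 48kHz, 2ch, S16 the frame size is 4 bytes, so 256 bytes are
 * 64 frames = 64/48000 s = 1.33ms, matching the usec value above. With
 * larger frame sizes the usec-based safeguard translates to more than 256
 * bytes. */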
92
93 struct userdata {
94 pa_core *core;
95 pa_module *module;
96 pa_sink *sink;
97
98 pa_thread *thread;
99 pa_thread_mq thread_mq;
100 pa_rtpoll *rtpoll;
101
102 snd_pcm_t *pcm_handle;
103
104 char *paths_dir;
105 pa_alsa_fdlist *mixer_fdl;
106 pa_alsa_mixer_pdata *mixer_pd;
107 snd_mixer_t *mixer_handle;
108 pa_alsa_path_set *mixer_path_set;
109 pa_alsa_path *mixer_path;
110
111 pa_cvolume hardware_volume;
112
113 unsigned int *rates;
114
115 size_t
116 frame_size,
117 fragment_size,
118 hwbuf_size,
119 tsched_watermark,
120 tsched_watermark_ref,
121 hwbuf_unused,
122 min_sleep,
123 min_wakeup,
124 watermark_inc_step,
125 watermark_dec_step,
126 watermark_inc_threshold,
127 watermark_dec_threshold,
128 rewind_safeguard;
129
130 pa_usec_t watermark_dec_not_before;
131 pa_usec_t min_latency_ref;
132
133 pa_memchunk memchunk;
134
135 char *device_name; /* name of the PCM device */
136 char *control_device; /* name of the control device */
137
138 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
139
140 pa_bool_t first, after_rewind;
141
142 pa_rtpoll_item *alsa_rtpoll_item;
143
144 pa_smoother *smoother;
145 uint64_t write_count;
146 uint64_t since_start;
147 pa_usec_t smoother_interval;
148 pa_usec_t last_smoother_update;
149
150 pa_idxset *formats;
151
152 pa_reserve_wrapper *reserve;
153 pa_hook_slot *reserve_slot;
154 pa_reserve_monitor_wrapper *monitor;
155 pa_hook_slot *monitor_slot;
156
157 /* ucm context */
158 pa_alsa_ucm_mapping_context *ucm_context;
159 };
160
161 static void userdata_free(struct userdata *u);
162
163 /* FIXME: Is there a better way to do this than device names? */
164 static pa_bool_t is_iec958(struct userdata *u) {
165 return (strncmp("iec958", u->device_name, 6) == 0);
166 }
167
168 static pa_bool_t is_hdmi(struct userdata *u) {
169 return (strncmp("hdmi", u->device_name, 4) == 0);
170 }
171
172 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
173 pa_assert(r);
174 pa_assert(u);
175
176 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
177 return PA_HOOK_CANCEL;
178
179 return PA_HOOK_OK;
180 }
181
182 static void reserve_done(struct userdata *u) {
183 pa_assert(u);
184
185 if (u->reserve_slot) {
186 pa_hook_slot_free(u->reserve_slot);
187 u->reserve_slot = NULL;
188 }
189
190 if (u->reserve) {
191 pa_reserve_wrapper_unref(u->reserve);
192 u->reserve = NULL;
193 }
194 }
195
196 static void reserve_update(struct userdata *u) {
197 const char *description;
198 pa_assert(u);
199
200 if (!u->sink || !u->reserve)
201 return;
202
203 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
204 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
205 }
206
207 static int reserve_init(struct userdata *u, const char *dname) {
208 char *rname;
209
210 pa_assert(u);
211 pa_assert(dname);
212
213 if (u->reserve)
214 return 0;
215
216 if (pa_in_system_mode())
217 return 0;
218
219 if (!(rname = pa_alsa_get_reserve_name(dname)))
220 return 0;
221
222 /* We are resuming, try to lock the device */
223 u->reserve = pa_reserve_wrapper_get(u->core, rname);
224 pa_xfree(rname);
225
226 if (!(u->reserve))
227 return -1;
228
229 reserve_update(u);
230
231 pa_assert(!u->reserve_slot);
232 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
233
234 return 0;
235 }
236
237 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
238 pa_bool_t b;
239
240 pa_assert(w);
241 pa_assert(u);
242
243 b = PA_PTR_TO_UINT(busy) && !u->reserve;
244
245 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
246 return PA_HOOK_OK;
247 }
248
249 static void monitor_done(struct userdata *u) {
250 pa_assert(u);
251
252 if (u->monitor_slot) {
253 pa_hook_slot_free(u->monitor_slot);
254 u->monitor_slot = NULL;
255 }
256
257 if (u->monitor) {
258 pa_reserve_monitor_wrapper_unref(u->monitor);
259 u->monitor = NULL;
260 }
261 }
262
263 static int reserve_monitor_init(struct userdata *u, const char *dname) {
264 char *rname;
265
266 pa_assert(u);
267 pa_assert(dname);
268
269 if (pa_in_system_mode())
270 return 0;
271
272 if (!(rname = pa_alsa_get_reserve_name(dname)))
273 return 0;
274
275 /* We are resuming, try to lock the device */
276 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
277 pa_xfree(rname);
278
279 if (!(u->monitor))
280 return -1;
281
282 pa_assert(!u->monitor_slot);
283 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
284
285 return 0;
286 }
287
288 static void fix_min_sleep_wakeup(struct userdata *u) {
289 size_t max_use, max_use_2;
290
291 pa_assert(u);
292 pa_assert(u->use_tsched);
293
294 max_use = u->hwbuf_size - u->hwbuf_unused;
295 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
296
297 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
298 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
299
300 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
301 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
302 }
303
304 static void fix_tsched_watermark(struct userdata *u) {
305 size_t max_use;
306 pa_assert(u);
307 pa_assert(u->use_tsched);
308
309 max_use = u->hwbuf_size - u->hwbuf_unused;
310
311 if (u->tsched_watermark > max_use - u->min_sleep)
312 u->tsched_watermark = max_use - u->min_sleep;
313
314 if (u->tsched_watermark < u->min_wakeup)
315 u->tsched_watermark = u->min_wakeup;
316 }
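/* Together with fix_min_sleep_wakeup() above this keeps the watermark
 * roughly within [min_wakeup, max_use - min_sleep], where max_use is the
 * hardware buffer minus the part we deliberately leave unused. Note the
 * lower bound is applied last, so it wins if the two ever conflict. */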
317
318 static void increase_watermark(struct userdata *u) {
319 size_t old_watermark;
320 pa_usec_t old_min_latency, new_min_latency;
321
322 pa_assert(u);
323 pa_assert(u->use_tsched);
324
325 /* First, just try to increase the watermark */
326 old_watermark = u->tsched_watermark;
327 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
328 fix_tsched_watermark(u);
329
330 if (old_watermark != u->tsched_watermark) {
331 pa_log_info("Increasing wakeup watermark to %0.2f ms",
332 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
333 return;
334 }
335
336 /* Hmm, we cannot increase the watermark any further, hence let's
337 raise the latency, unless doing so was disabled in
338 configuration */
339 if (u->fixed_latency_range)
340 return;
341
342 old_min_latency = u->sink->thread_info.min_latency;
343 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
344 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
345
346 if (old_min_latency != new_min_latency) {
347 pa_log_info("Increasing minimal latency to %0.2f ms",
348 (double) new_min_latency / PA_USEC_PER_MSEC);
349
350 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
351 }
352
353 /* When we reach this we're officially fucked! */
354 }
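/* Growth sketch for increase_watermark() with the default 10ms increase
 * step: new = MIN(old * 2, old + inc_step), so the watermark doubles while
 * it is small (5ms -> 10ms -> 20ms) and then grows linearly
 * (20ms -> 30ms -> 40ms). Once it is pinned at the top, the sink's minimum
 * latency is raised by the same scheme, unless the latency range is fixed. */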
355
356 static void decrease_watermark(struct userdata *u) {
357 size_t old_watermark;
358 pa_usec_t now;
359
360 pa_assert(u);
361 pa_assert(u->use_tsched);
362
363 now = pa_rtclock_now();
364
365 if (u->watermark_dec_not_before <= 0)
366 goto restart;
367
368 if (u->watermark_dec_not_before > now)
369 return;
370
371 old_watermark = u->tsched_watermark;
372
373 if (u->tsched_watermark < u->watermark_dec_step)
374 u->tsched_watermark = u->tsched_watermark / 2;
375 else
376 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
377
378 fix_tsched_watermark(u);
379
380 if (old_watermark != u->tsched_watermark)
381 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
382 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
383
384 /* We don't change the latency range */
385
386 restart:
387 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
388 }
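/* In other words: the watermark only shrinks if the buffer level stayed
 * above the decrease threshold for the whole 20s verification window (see
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC); any reset of the not-before
 * timestamp in check_left_to_play() restarts that window. */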
389
390 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
391 pa_usec_t usec, wm;
392
393 pa_assert(sleep_usec);
394 pa_assert(process_usec);
395
396 pa_assert(u);
397 pa_assert(u->use_tsched);
398
399 usec = pa_sink_get_requested_latency_within_thread(u->sink);
400
401 if (usec == (pa_usec_t) -1)
402 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
403
404 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
405
406 if (wm > usec)
407 wm = usec/2;
408
409 *sleep_usec = usec - wm;
410 *process_usec = wm;
411
412 #ifdef DEBUG_TIMING
413 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
414 (unsigned long) (usec / PA_USEC_PER_MSEC),
415 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
416 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
417 #endif
418 }
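/* Worked example: with a requested latency of 100ms and a 20ms watermark
 * this yields *sleep_usec = 80ms and *process_usec = 20ms; if the watermark
 * would exceed the requested latency, it is first cut down to half of it. */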
419
420 static int try_recover(struct userdata *u, const char *call, int err) {
421 pa_assert(u);
422 pa_assert(call);
423 pa_assert(err < 0);
424
425 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
426
427 pa_assert(err != -EAGAIN);
428
429 if (err == -EPIPE)
430 pa_log_debug("%s: Buffer underrun!", call);
431
432 if (err == -ESTRPIPE)
433 pa_log_debug("%s: System suspended!", call);
434
435 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
436 pa_log("%s: %s", call, pa_alsa_strerror(err));
437 return -1;
438 }
439
440 u->first = TRUE;
441 u->since_start = 0;
442 return 0;
443 }
444
445 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
446 size_t left_to_play;
447 pa_bool_t underrun = FALSE;
448
449 /* We use <= instead of < for this check here because an underrun
450 * only happens after the last sample was processed, not as soon as
451 * it is removed from the buffer. This is particularly important
452 * when block transfer is used. */
453
454 if (n_bytes <= u->hwbuf_size)
455 left_to_play = u->hwbuf_size - n_bytes;
456 else {
457
458 /* We got a dropout. What a mess! */
459 left_to_play = 0;
460 underrun = TRUE;
461
462 #if 0
463 PA_DEBUG_TRAP;
464 #endif
465
466 if (!u->first && !u->after_rewind)
467 if (pa_log_ratelimit(PA_LOG_INFO))
468 pa_log_info("Underrun!");
469 }
470
471 #ifdef DEBUG_TIMING
472 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
473 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
474 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
475 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
476 #endif
477
478 if (u->use_tsched) {
479 pa_bool_t reset_not_before = TRUE;
480
481 if (!u->first && !u->after_rewind) {
482 if (underrun || left_to_play < u->watermark_inc_threshold)
483 increase_watermark(u);
484 else if (left_to_play > u->watermark_dec_threshold) {
485 reset_not_before = FALSE;
486
487 /* We decrease the watermark only if we have actually
488 * been woken up by a timeout. If something else woke
489 * us up it's too easy to fulfill the deadlines... */
490
491 if (on_timeout)
492 decrease_watermark(u);
493 }
494 }
495
496 if (reset_not_before)
497 u->watermark_dec_not_before = 0;
498 }
499
500 return left_to_play;
501 }
502
503 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
504 pa_bool_t work_done = FALSE;
505 pa_usec_t max_sleep_usec = 0, process_usec = 0;
506 size_t left_to_play;
507 unsigned j = 0;
508
509 pa_assert(u);
510 pa_sink_assert_ref(u->sink);
511
512 if (u->use_tsched)
513 hw_sleep_time(u, &max_sleep_usec, &process_usec);
514
515 for (;;) {
516 snd_pcm_sframes_t n;
517 size_t n_bytes;
518 int r;
519 pa_bool_t after_avail = TRUE;
520
521 /* First we determine how many samples are missing to fill the
522 * buffer up to 100% */
523
524 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
525
526 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
527 continue;
528
529 return r;
530 }
531
532 n_bytes = (size_t) n * u->frame_size;
533
534 #ifdef DEBUG_TIMING
535 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
536 #endif
537
538 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
539 on_timeout = FALSE;
540
541 if (u->use_tsched)
542
543 /* We won't fill up the playback buffer before at least
544 * half the sleep time is over because otherwise we might
545 * ask for more data from the clients than they expect. We
546 * need to guarantee that clients only have to keep around
547 * a single hw buffer length. */
548
549 if (!polled &&
550 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
551 #ifdef DEBUG_TIMING
552 pa_log_debug("Not filling up, because too early.");
553 #endif
554 break;
555 }
556
557 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
558
559 if (polled)
560 PA_ONCE_BEGIN {
561 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
562 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
563 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
564 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
565 pa_strnull(dn));
566 pa_xfree(dn);
567 } PA_ONCE_END;
568
569 #ifdef DEBUG_TIMING
570 pa_log_debug("Not filling up, because not necessary.");
571 #endif
572 break;
573 }
574
575
576 if (++j > 10) {
577 #ifdef DEBUG_TIMING
578 pa_log_debug("Not filling up, because already too many iterations.");
579 #endif
580
581 break;
582 }
583
584 n_bytes -= u->hwbuf_unused;
585 polled = FALSE;
586
587 #ifdef DEBUG_TIMING
588 pa_log_debug("Filling up");
589 #endif
590
591 for (;;) {
592 pa_memchunk chunk;
593 void *p;
594 int err;
595 const snd_pcm_channel_area_t *areas;
596 snd_pcm_uframes_t offset, frames;
597 snd_pcm_sframes_t sframes;
598
599 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
600 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
601
602 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
603
604 if (!after_avail && err == -EAGAIN)
605 break;
606
607 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
608 continue;
609
610 return r;
611 }
612
613 /* Make sure that if these memblocks need to be copied they will fit into one slot */
614 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
615 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
616
617 if (!after_avail && frames == 0)
618 break;
619
620 pa_assert(frames > 0);
621 after_avail = FALSE;
622
623 /* Check that these are multiples of 8 bits */
624 pa_assert((areas[0].first & 7) == 0);
625 pa_assert((areas[0].step & 7) == 0);
626
627 /* We assume a single interleaved memory buffer */
628 pa_assert((areas[0].first >> 3) == 0);
629 pa_assert((areas[0].step >> 3) == u->frame_size);
630
631 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
632
633 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
634 chunk.length = pa_memblock_get_length(chunk.memblock);
635 chunk.index = 0;
636
637 pa_sink_render_into_full(u->sink, &chunk);
638 pa_memblock_unref_fixed(chunk.memblock);
639
640 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
641
642 if (!after_avail && (int) sframes == -EAGAIN)
643 break;
644
645 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
646 continue;
647
648 return r;
649 }
650
651 work_done = TRUE;
652
653 u->write_count += frames * u->frame_size;
654 u->since_start += frames * u->frame_size;
655
656 #ifdef DEBUG_TIMING
657 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
658 #endif
659
660 if ((size_t) frames * u->frame_size >= n_bytes)
661 break;
662
663 n_bytes -= (size_t) frames * u->frame_size;
664 }
665 }
666
667 if (u->use_tsched) {
668 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
669 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
670
671 if (*sleep_usec > process_usec)
672 *sleep_usec -= process_usec;
673 else
674 *sleep_usec = 0;
675 } else
676 *sleep_usec = 0;
677
678 return work_done ? 1 : 0;
679 }
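/* The mmap path above renders straight into the device's mapped buffer
 * (snd_pcm_mmap_begin()/snd_pcm_mmap_commit(), i.e. no extra copy), while
 * unix_write() below renders into a local memchunk first and pushes it out
 * with snd_pcm_writei(). Apart from that, the two loops follow the same
 * scheme: check avail, adapt the watermark, fill, compute the next sleep. */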
680
681 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
682 pa_bool_t work_done = FALSE;
683 pa_usec_t max_sleep_usec = 0, process_usec = 0;
684 size_t left_to_play;
685 unsigned j = 0;
686
687 pa_assert(u);
688 pa_sink_assert_ref(u->sink);
689
690 if (u->use_tsched)
691 hw_sleep_time(u, &max_sleep_usec, &process_usec);
692
693 for (;;) {
694 snd_pcm_sframes_t n;
695 size_t n_bytes;
696 int r;
697 pa_bool_t after_avail = TRUE;
698
699 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
700
701 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
702 continue;
703
704 return r;
705 }
706
707 n_bytes = (size_t) n * u->frame_size;
708 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
709 on_timeout = FALSE;
710
711 if (u->use_tsched)
712
713 /* We won't fill up the playback buffer before at least
714 * half the sleep time is over because otherwise we might
715 * ask for more data from the clients than they expect. We
716 * need to guarantee that clients only have to keep around
717 * a single hw buffer length. */
718
719 if (!polled &&
720 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
721 break;
722
723 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
724
725 if (polled)
726 PA_ONCE_BEGIN {
727 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
728 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
729 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
730 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
731 pa_strnull(dn));
732 pa_xfree(dn);
733 } PA_ONCE_END;
734
735 break;
736 }
737
738 if (++j > 10) {
739 #ifdef DEBUG_TIMING
740 pa_log_debug("Not filling up, because already too many iterations.");
741 #endif
742
743 break;
744 }
745
746 n_bytes -= u->hwbuf_unused;
747 polled = FALSE;
748
749 for (;;) {
750 snd_pcm_sframes_t frames;
751 void *p;
752
753 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
754
755 if (u->memchunk.length <= 0)
756 pa_sink_render(u->sink, n_bytes, &u->memchunk);
757
758 pa_assert(u->memchunk.length > 0);
759
760 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
761
762 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
763 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
764
765 p = pa_memblock_acquire(u->memchunk.memblock);
766 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
767 pa_memblock_release(u->memchunk.memblock);
768
769 if (PA_UNLIKELY(frames < 0)) {
770
771 if (!after_avail && (int) frames == -EAGAIN)
772 break;
773
774 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
775 continue;
776
777 return r;
778 }
779
780 if (!after_avail && frames == 0)
781 break;
782
783 pa_assert(frames > 0);
784 after_avail = FALSE;
785
786 u->memchunk.index += (size_t) frames * u->frame_size;
787 u->memchunk.length -= (size_t) frames * u->frame_size;
788
789 if (u->memchunk.length <= 0) {
790 pa_memblock_unref(u->memchunk.memblock);
791 pa_memchunk_reset(&u->memchunk);
792 }
793
794 work_done = TRUE;
795
796 u->write_count += frames * u->frame_size;
797 u->since_start += frames * u->frame_size;
798
799 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
800
801 if ((size_t) frames * u->frame_size >= n_bytes)
802 break;
803
804 n_bytes -= (size_t) frames * u->frame_size;
805 }
806 }
807
808 if (u->use_tsched) {
809 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
810 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
811
812 if (*sleep_usec > process_usec)
813 *sleep_usec -= process_usec;
814 else
815 *sleep_usec = 0;
816 } else
817 *sleep_usec = 0;
818
819 return work_done ? 1 : 0;
820 }
821
822 static void update_smoother(struct userdata *u) {
823 snd_pcm_sframes_t delay = 0;
824 int64_t position;
825 int err;
826 pa_usec_t now1 = 0, now2;
827 snd_pcm_status_t *status;
828
829 snd_pcm_status_alloca(&status);
830
831 pa_assert(u);
832 pa_assert(u->pcm_handle);
833
834 /* Let's update the time smoother */
835
836 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
837 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
838 return;
839 }
840
841 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
842 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
843 else {
844 snd_htimestamp_t htstamp = { 0, 0 };
845 snd_pcm_status_get_htstamp(status, &htstamp);
846 now1 = pa_timespec_load(&htstamp);
847 }
848
849 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
850 if (now1 <= 0)
851 now1 = pa_rtclock_now();
852
853 /* check if the time since the last update is bigger than the interval */
854 if (u->last_smoother_update > 0)
855 if (u->last_smoother_update + u->smoother_interval > now1)
856 return;
857
858 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
859
860 if (PA_UNLIKELY(position < 0))
861 position = 0;
862
863 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
864
865 pa_smoother_put(u->smoother, now1, now2);
866
867 u->last_smoother_update = now1;
868 /* exponentially increase the update interval up to the MAX limit */
869 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
870 }
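/* The smoother is thus fed pairs of (system time, device time), where the
 * device time is the usec position of the sample being played right now,
 * i.e. write_count minus the driver-reported delay, converted to usec. */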
871
872 static pa_usec_t sink_get_latency(struct userdata *u) {
873 pa_usec_t r;
874 int64_t delay;
875 pa_usec_t now1, now2;
876
877 pa_assert(u);
878
879 now1 = pa_rtclock_now();
880 now2 = pa_smoother_get(u->smoother, now1);
881
882 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
883
884 r = delay >= 0 ? (pa_usec_t) delay : 0;
885
886 if (u->memchunk.memblock)
887 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
888
889 return r;
890 }
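/* Sketch of the formula used above:
 *
 *   latency = usec(write_count) - smoothed playback position
 *             + usec(pending memchunk)
 *
 * i.e. everything handed to ALSA that hasn't been heard yet, plus whatever
 * we have rendered but not yet written. */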
891
892 static int build_pollfd(struct userdata *u) {
893 pa_assert(u);
894 pa_assert(u->pcm_handle);
895
896 if (u->alsa_rtpoll_item)
897 pa_rtpoll_item_free(u->alsa_rtpoll_item);
898
899 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
900 return -1;
901
902 return 0;
903 }
904
905 /* Called from IO context */
906 static int suspend(struct userdata *u) {
907 const char *mod_name;
908
909 pa_assert(u);
910 pa_assert(u->pcm_handle);
911
912 pa_smoother_pause(u->smoother, pa_rtclock_now());
913
914 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
915 * take awfully long with our long buffer sizes today. */
916 snd_pcm_close(u->pcm_handle);
917 u->pcm_handle = NULL;
918
919 if ((mod_name = pa_proplist_gets(u->sink->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
920 pa_log_info("Disable ucm modifier %s", mod_name);
921
922 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_dismod", mod_name) < 0)
923 pa_log("Failed to disable ucm modifier %s", mod_name);
924 }
925
926 if (u->alsa_rtpoll_item) {
927 pa_rtpoll_item_free(u->alsa_rtpoll_item);
928 u->alsa_rtpoll_item = NULL;
929 }
930
931 /* We reset max_rewind/max_request here to make sure that, while we
932 * are suspended, the old max_request/max_rewind values set before
933 * the suspend don't influence the per-stream buffers of newly
934 * created streams, and that those streams' requirements don't
935 * influence the values either. */
936 pa_sink_set_max_rewind_within_thread(u->sink, 0);
937 pa_sink_set_max_request_within_thread(u->sink, 0);
938
939 pa_log_info("Device suspended...");
940
941 return 0;
942 }
943
944 /* Called from IO context */
945 static int update_sw_params(struct userdata *u) {
946 snd_pcm_uframes_t avail_min;
947 int err;
948
949 pa_assert(u);
950
951 /* Use the full buffer if no one asked us for anything specific */
952 u->hwbuf_unused = 0;
953
954 if (u->use_tsched) {
955 pa_usec_t latency;
956
957 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
958 size_t b;
959
960 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
961
962 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
963
964 /* We need at least one sample in our buffer */
965
966 if (PA_UNLIKELY(b < u->frame_size))
967 b = u->frame_size;
968
969 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
970 }
971
972 fix_min_sleep_wakeup(u);
973 fix_tsched_watermark(u);
974 }
975
976 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
977
978 /* We need at least one frame in the used part of the buffer */
979 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
980
981 if (u->use_tsched) {
982 pa_usec_t sleep_usec, process_usec;
983
984 hw_sleep_time(u, &sleep_usec, &process_usec);
985 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
986 }
987
988 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
989
990 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
991 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
992 return err;
993 }
994
995 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
996 if (pa_alsa_pcm_is_hw(u->pcm_handle))
997 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
998 else {
999 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
1000 pa_sink_set_max_rewind_within_thread(u->sink, 0);
1001 }
1002
1003 return 0;
1004 }
1005
1006 /* Called from IO Context on unsuspend or from main thread when creating sink */
1007 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
1008 pa_bool_t in_thread)
1009 {
1010 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
1011 &u->sink->sample_spec);
1012
1013 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1014 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1015
1016 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1017 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1018
1019 fix_min_sleep_wakeup(u);
1020 fix_tsched_watermark(u);
1021
1022 if (in_thread)
1023 pa_sink_set_latency_range_within_thread(u->sink,
1024 u->min_latency_ref,
1025 pa_bytes_to_usec(u->hwbuf_size, ss));
1026 else {
1027 pa_sink_set_latency_range(u->sink,
1028 0,
1029 pa_bytes_to_usec(u->hwbuf_size, ss));
1030
1031 /* Work around an assert in pa_sink_set_latency_range_within_thread():
1032 keep track of min_latency and reuse it when
1033 this routine is called from IO context */
1034 u->min_latency_ref = u->sink->thread_info.min_latency;
1035 }
1036
1037 pa_log_info("Time scheduling watermark is %0.2fms",
1038 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
1039 }
1040
1041 /* Called from IO context */
1042 static int unsuspend(struct userdata *u) {
1043 pa_sample_spec ss;
1044 int err;
1045 pa_bool_t b, d;
1046 snd_pcm_uframes_t period_size, buffer_size;
1047 char *device_name = NULL;
1048 const char *mod_name;
1049
1050 pa_assert(u);
1051 pa_assert(!u->pcm_handle);
1052
1053 pa_log_info("Trying resume...");
1054
1055 if ((mod_name = pa_proplist_gets(u->sink->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
1056 pa_log_info("Enable ucm modifier %s", mod_name);
1057
1058 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
1059 pa_log("Failed to enable ucm modifier %s", mod_name);
1060 }
1061
1062 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1063 /* Need to open device in NONAUDIO mode */
1064 int len = strlen(u->device_name) + 8;
1065
1066 device_name = pa_xmalloc(len);
1067 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1068 }
1069
1070 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1071 SND_PCM_NONBLOCK|
1072 SND_PCM_NO_AUTO_RESAMPLE|
1073 SND_PCM_NO_AUTO_CHANNELS|
1074 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1075 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1076 goto fail;
1077 }
1078
1079 ss = u->sink->sample_spec;
1080 period_size = u->fragment_size / u->frame_size;
1081 buffer_size = u->hwbuf_size / u->frame_size;
1082 b = u->use_mmap;
1083 d = u->use_tsched;
1084
1085 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1086 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1087 goto fail;
1088 }
1089
1090 if (b != u->use_mmap || d != u->use_tsched) {
1091 pa_log_warn("Resume failed, couldn't get original access mode.");
1092 goto fail;
1093 }
1094
1095 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1096 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1097 goto fail;
1098 }
1099
1100 if (period_size*u->frame_size != u->fragment_size ||
1101 buffer_size*u->frame_size != u->hwbuf_size) {
1102 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1103 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1104 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1105 goto fail;
1106 }
1107
1108 if (update_sw_params(u) < 0)
1109 goto fail;
1110
1111 if (build_pollfd(u) < 0)
1112 goto fail;
1113
1114 u->write_count = 0;
1115 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1116 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1117 u->last_smoother_update = 0;
1118
1119 u->first = TRUE;
1120 u->since_start = 0;
1121
1122 /* reset the watermark to the value defined when the sink was created */
1123 if (u->use_tsched)
1124 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
1125
1126 pa_log_info("Resumed successfully...");
1127
1128 pa_xfree(device_name);
1129 return 0;
1130
1131 fail:
1132 if (u->pcm_handle) {
1133 snd_pcm_close(u->pcm_handle);
1134 u->pcm_handle = NULL;
1135 }
1136
1137 pa_xfree(device_name);
1138
1139 return -PA_ERR_IO;
1140 }
1141
1142 /* Called from IO context */
1143 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1144 struct userdata *u = PA_SINK(o)->userdata;
1145
1146 switch (code) {
1147
1148 case PA_SINK_MESSAGE_GET_LATENCY: {
1149 pa_usec_t r = 0;
1150
1151 if (u->pcm_handle)
1152 r = sink_get_latency(u);
1153
1154 *((pa_usec_t*) data) = r;
1155
1156 return 0;
1157 }
1158
1159 case PA_SINK_MESSAGE_SET_STATE:
1160
1161 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1162
1163 case PA_SINK_SUSPENDED: {
1164 int r;
1165
1166 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1167
1168 if ((r = suspend(u)) < 0)
1169 return r;
1170
1171 break;
1172 }
1173
1174 case PA_SINK_IDLE:
1175 case PA_SINK_RUNNING: {
1176 int r;
1177
1178 if (u->sink->thread_info.state == PA_SINK_INIT) {
1179 if (build_pollfd(u) < 0)
1180 return -PA_ERR_IO;
1181 }
1182
1183 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1184 if ((r = unsuspend(u)) < 0)
1185 return r;
1186 }
1187
1188 break;
1189 }
1190
1191 case PA_SINK_UNLINKED:
1192 case PA_SINK_INIT:
1193 case PA_SINK_INVALID_STATE:
1194 ;
1195 }
1196
1197 break;
1198 }
1199
1200 return pa_sink_process_msg(o, code, data, offset, chunk);
1201 }
1202
1203 /* Called from main context */
1204 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1205 pa_sink_state_t old_state;
1206 struct userdata *u;
1207
1208 pa_sink_assert_ref(s);
1209 pa_assert_se(u = s->userdata);
1210
1211 old_state = pa_sink_get_state(u->sink);
1212
1213 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1214 reserve_done(u);
1215 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1216 if (reserve_init(u, u->device_name) < 0)
1217 return -PA_ERR_BUSY;
1218
1219 return 0;
1220 }
1221
1222 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1223 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1224
1225 pa_assert(u);
1226 pa_assert(u->mixer_handle);
1227
1228 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1229 return 0;
1230
1231 if (!PA_SINK_IS_LINKED(u->sink->state))
1232 return 0;
1233
1234 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1235 pa_sink_set_mixer_dirty(u->sink, TRUE);
1236 return 0;
1237 }
1238
1239 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1240 pa_sink_get_volume(u->sink, TRUE);
1241 pa_sink_get_mute(u->sink, TRUE);
1242 }
1243
1244 return 0;
1245 }
1246
1247 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1248 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1249
1250 pa_assert(u);
1251 pa_assert(u->mixer_handle);
1252
1253 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1254 return 0;
1255
1256 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1257 pa_sink_set_mixer_dirty(u->sink, TRUE);
1258 return 0;
1259 }
1260
1261 if (mask & SND_CTL_EVENT_MASK_VALUE)
1262 pa_sink_update_volume_and_mute(u->sink);
1263
1264 return 0;
1265 }
1266
1267 static void sink_get_volume_cb(pa_sink *s) {
1268 struct userdata *u = s->userdata;
1269 pa_cvolume r;
1270 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1271
1272 pa_assert(u);
1273 pa_assert(u->mixer_path);
1274 pa_assert(u->mixer_handle);
1275
1276 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1277 return;
1278
1279 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1280 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1281
1282 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1283
1284 if (u->mixer_path->has_dB) {
1285 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1286
1287 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1288 }
1289
1290 if (pa_cvolume_equal(&u->hardware_volume, &r))
1291 return;
1292
1293 s->real_volume = u->hardware_volume = r;
1294
1295 /* Hmm, so the hardware volume changed, let's reset our software volume */
1296 if (u->mixer_path->has_dB)
1297 pa_sink_set_soft_volume(s, NULL);
1298 }
1299
1300 static void sink_set_volume_cb(pa_sink *s) {
1301 struct userdata *u = s->userdata;
1302 pa_cvolume r;
1303 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1304 pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1305
1306 pa_assert(u);
1307 pa_assert(u->mixer_path);
1308 pa_assert(u->mixer_handle);
1309
1310 /* Shift up by the base volume */
1311 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1312
1313 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1314 return;
1315
1316 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1317 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1318
1319 u->hardware_volume = r;
1320
1321 if (u->mixer_path->has_dB) {
1322 pa_cvolume new_soft_volume;
1323 pa_bool_t accurate_enough;
1324 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1325
1326 /* Match exactly what the user requested by software */
1327 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1328
1329 /* If the adjustment to do in software is only minimal we
1330 * can skip it. That saves us CPU at the expense of a bit of
1331 * accuracy */
1332 accurate_enough =
1333 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1334 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1335
1336 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1337 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1338 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1339 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1340 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1341 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1342 pa_yes_no(accurate_enough));
1343 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1344
1345 if (!accurate_enough)
1346 s->soft_volume = new_soft_volume;
1347
1348 } else {
1349 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1350
1351 /* We can't match exactly what the user requested, hence let's
1352 * at least tell the user about it */
1353
1354 s->real_volume = r;
1355 }
1356 }
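/* Rough model of the split above: the volume heard is the hardware volume
 * combined with the software volume, so soft = requested / hardware picks
 * up whatever the mixer couldn't resolve. E.g. if 50% is requested and the
 * nearest hardware step is 55%, the software volume becomes ~91% so the
 * product lands back at 50%; if the quotient stays within VOLUME_ACCURACY
 * (1%) of 100%, the software correction is skipped entirely. */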
1357
1358 static void sink_write_volume_cb(pa_sink *s) {
1359 struct userdata *u = s->userdata;
1360 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1361
1362 pa_assert(u);
1363 pa_assert(u->mixer_path);
1364 pa_assert(u->mixer_handle);
1365 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1366
1367 /* Shift up by the base volume */
1368 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1369
1370 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1371 pa_log_error("Writing HW volume failed");
1372 else {
1373 pa_cvolume tmp_vol;
1374 pa_bool_t accurate_enough;
1375
1376 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1377 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1378
1379 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1380 accurate_enough =
1381 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1382 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1383
1384 if (!accurate_enough) {
1385 union {
1386 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1387 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1388 } vol;
1389
1390 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1391 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1392 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1393 pa_log_debug(" in dB: %s (request) != %s",
1394 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1395 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1396 }
1397 }
1398 }
1399
1400 static void sink_get_mute_cb(pa_sink *s) {
1401 struct userdata *u = s->userdata;
1402 pa_bool_t b;
1403
1404 pa_assert(u);
1405 pa_assert(u->mixer_path);
1406 pa_assert(u->mixer_handle);
1407
1408 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1409 return;
1410
1411 s->muted = b;
1412 }
1413
1414 static void sink_set_mute_cb(pa_sink *s) {
1415 struct userdata *u = s->userdata;
1416
1417 pa_assert(u);
1418 pa_assert(u->mixer_path);
1419 pa_assert(u->mixer_handle);
1420
1421 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1422 }
1423
1424 static void mixer_volume_init(struct userdata *u) {
1425 pa_assert(u);
1426
1427 if (!u->mixer_path->has_volume) {
1428 pa_sink_set_write_volume_callback(u->sink, NULL);
1429 pa_sink_set_get_volume_callback(u->sink, NULL);
1430 pa_sink_set_set_volume_callback(u->sink, NULL);
1431
1432 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1433 } else {
1434 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1435 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1436
1437 if (u->mixer_path->has_dB && u->deferred_volume) {
1438 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1439 pa_log_info("Successfully enabled deferred volume.");
1440 } else
1441 pa_sink_set_write_volume_callback(u->sink, NULL);
1442
1443 if (u->mixer_path->has_dB) {
1444 pa_sink_enable_decibel_volume(u->sink, TRUE);
1445 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1446
1447 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1448 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1449
1450 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1451 } else {
1452 pa_sink_enable_decibel_volume(u->sink, FALSE);
1453 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1454
1455 u->sink->base_volume = PA_VOLUME_NORM;
1456 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1457 }
1458
1459 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1460 }
1461
1462 if (!u->mixer_path->has_mute) {
1463 pa_sink_set_get_mute_callback(u->sink, NULL);
1464 pa_sink_set_set_mute_callback(u->sink, NULL);
1465 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1466 } else {
1467 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1468 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1469 pa_log_info("Using hardware mute control.");
1470 }
1471 }
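/* Base volume sketch: base_volume = pa_sw_volume_from_dB(-max_dB), so if a
 * path can amplify up to +20dB, the sink's 100% (PA_VOLUME_NORM) maps to
 * the path's +20dB maximum, while the hardware's unamplified 0dB point is
 * exposed as the base volume (~-20dB in that case). The get/set callbacks
 * shift by this base in both directions. */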
1472
1473 static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
1474 struct userdata *u = s->userdata;
1475
1476 pa_assert(u);
1477 pa_assert(p);
1478 pa_assert(u->ucm_context);
1479
1480 return pa_alsa_ucm_set_port(u->ucm_context, p, TRUE);
1481 }
1482
1483 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1484 struct userdata *u = s->userdata;
1485 pa_alsa_port_data *data;
1486
1487 pa_assert(u);
1488 pa_assert(p);
1489 pa_assert(u->mixer_handle);
1490
1491 data = PA_DEVICE_PORT_DATA(p);
1492
1493 pa_assert_se(u->mixer_path = data->path);
1494 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1495
1496 mixer_volume_init(u);
1497
1498 if (s->set_mute)
1499 s->set_mute(s);
1500 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
1501 if (s->write_volume)
1502 s->write_volume(s);
1503 } else {
1504 if (s->set_volume)
1505 s->set_volume(s);
1506 }
1507
1508 return 0;
1509 }
1510
1511 static void sink_update_requested_latency_cb(pa_sink *s) {
1512 struct userdata *u = s->userdata;
1513 size_t before;
1514 pa_assert(u);
1515 pa_assert(u->use_tsched); /* only when timer scheduling is used
1516 * can we dynamically adjust the
1517 * latency */
1518
1519 if (!u->pcm_handle)
1520 return;
1521
1522 before = u->hwbuf_unused;
1523 update_sw_params(u);
1524
1525 /* Let's check whether we now use only a smaller part of the
1526 buffer than before. If so, we need to make sure that subsequent
1527 rewinds are relative to the new maximum fill level and not to the
1528 current fill level. Thus, let's do a full rewind once, to clear
1529 things up. */
1530
1531 if (u->hwbuf_unused > before) {
1532 pa_log_debug("Requesting rewind due to latency change.");
1533 pa_sink_request_rewind(s, (size_t) -1);
1534 }
1535 }
1536
1537 static pa_idxset* sink_get_formats(pa_sink *s) {
1538 struct userdata *u = s->userdata;
1539 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1540 pa_format_info *f;
1541 uint32_t idx;
1542
1543 pa_assert(u);
1544
1545 PA_IDXSET_FOREACH(f, u->formats, idx) {
1546 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1547 }
1548
1549 return ret;
1550 }
1551
1552 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1553 struct userdata *u = s->userdata;
1554 pa_format_info *f, *g;
1555 uint32_t idx, n;
1556
1557 pa_assert(u);
1558
1559 /* FIXME: also validate sample rates against what the device supports */
1560 PA_IDXSET_FOREACH(f, formats, idx) {
1561 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1562 /* EAC3 cannot be sent over S/PDIF */
1563 return FALSE;
1564 }
1565
1566 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
1567 u->formats = pa_idxset_new(NULL, NULL);
1568
1569 /* Note: the logic below won't apply if we're using software encoding.
1570 * This is fine for now since we don't support that via the passthrough
1571 * framework, but this must be changed if we do. */
1572
1573 /* Count how many sample rates we support */
1574 for (idx = 0, n = 0; u->rates[idx]; idx++)
1575 n++;
1576
1577 /* First insert non-PCM formats since we prefer those. */
1578 PA_IDXSET_FOREACH(f, formats, idx) {
1579 if (!pa_format_info_is_pcm(f)) {
1580 g = pa_format_info_copy(f);
1581 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1582 pa_idxset_put(u->formats, g, NULL);
1583 }
1584 }
1585
1586 /* Now add any PCM formats */
1587 PA_IDXSET_FOREACH(f, formats, idx) {
1588 if (pa_format_info_is_pcm(f)) {
1589 /* We don't set rates here since we'll just tack on a resampler for
1590 * unsupported rates */
1591 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1592 }
1593 }
1594
1595 return TRUE;
1596 }
1597
1598 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
1599 {
1600 struct userdata *u = s->userdata;
1601 int i;
1602 pa_bool_t supported = FALSE;
1603
1604 pa_assert(u);
1605
1606 for (i = 0; u->rates[i]; i++) {
1607 if (u->rates[i] == rate) {
1608 supported = TRUE;
1609 break;
1610 }
1611 }
1612
1613 if (!supported) {
1614 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1615 return FALSE;
1616 }
1617
1618 if (!PA_SINK_IS_OPENED(s->state)) {
1619 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1620 u->sink->sample_spec.rate = rate;
1621 return TRUE;
1622 }
1623
1624 return FALSE;
1625 }
1626
1627 static int process_rewind(struct userdata *u) {
1628 snd_pcm_sframes_t unused;
1629 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1630 pa_assert(u);
1631
1632 /* Figure out how much we shall rewind and reset the counter */
1633 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1634
1635 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1636
1637 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1638 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1639 return -1;
1640 }
1641
1642 unused_nbytes = (size_t) unused * u->frame_size;
1643
1644 /* Make sure the rewind doesn't go too far, since that can cause issues with DMAs */
1645 unused_nbytes += u->rewind_safeguard;
1646
1647 if (u->hwbuf_size > unused_nbytes)
1648 limit_nbytes = u->hwbuf_size - unused_nbytes;
1649 else
1650 limit_nbytes = 0;
1651
1652 if (rewind_nbytes > limit_nbytes)
1653 rewind_nbytes = limit_nbytes;
1654
1655 if (rewind_nbytes > 0) {
1656 snd_pcm_sframes_t in_frames, out_frames;
1657
1658 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1659
1660 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1661 pa_log_debug("before: %lu", (unsigned long) in_frames);
1662 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1663 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1664 if (try_recover(u, "process_rewind", out_frames) < 0)
1665 return -1;
1666 out_frames = 0;
1667 }
1668
1669 pa_log_debug("after: %lu", (unsigned long) out_frames);
1670
1671 rewind_nbytes = (size_t) out_frames * u->frame_size;
1672
1673 if (rewind_nbytes <= 0)
1674 pa_log_info("Tried rewind, but was apparently not possible.");
1675 else {
1676 u->write_count -= rewind_nbytes;
1677 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1678 pa_sink_process_rewind(u->sink, rewind_nbytes);
1679
1680 u->after_rewind = TRUE;
1681 return 0;
1682 }
1683 } else
1684 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1685
1686 pa_sink_process_rewind(u->sink, 0);
1687 return 0;
1688 }
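/* Worked example of the limiting above: with a 2s buffer at 44.1kHz/2ch/S16
 * (hwbuf_size = 352800 bytes), snd_pcm_avail() reporting 88200 bytes of
 * free space and the default 256 byte safeguard, at most
 * 352800 - 88200 - 256 = 264344 bytes (the still-queued data minus the
 * safeguard) may be rewound, however much was requested. */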
1689
1690 static void thread_func(void *userdata) {
1691 struct userdata *u = userdata;
1692 unsigned short revents = 0;
1693
1694 pa_assert(u);
1695
1696 pa_log_debug("Thread starting up");
1697
1698 if (u->core->realtime_scheduling)
1699 pa_make_realtime(u->core->realtime_priority);
1700
1701 pa_thread_mq_install(&u->thread_mq);
1702
1703 for (;;) {
1704 int ret;
1705 pa_usec_t rtpoll_sleep = 0;
1706
1707 #ifdef DEBUG_TIMING
1708 pa_log_debug("Loop");
1709 #endif
1710
1711 /* Render some data and write it to the dsp */
1712 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1713 int work_done;
1714 pa_usec_t sleep_usec = 0;
1715 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1716
1717 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1718 if (process_rewind(u) < 0)
1719 goto fail;
1720
1721 if (u->use_mmap)
1722 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1723 else
1724 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1725
1726 if (work_done < 0)
1727 goto fail;
1728
1729 /* pa_log_debug("work_done = %i", work_done); */
1730
1731 if (work_done) {
1732
1733 if (u->first) {
1734 pa_log_info("Starting playback.");
1735 snd_pcm_start(u->pcm_handle);
1736
1737 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1738
1739 u->first = FALSE;
1740 }
1741
1742 update_smoother(u);
1743 }
1744
1745 if (u->use_tsched) {
1746 pa_usec_t cusec;
1747
1748 if (u->since_start <= u->hwbuf_size) {
1749
1750 /* USB devices on ALSA seem to hit a buffer
1751 * underrun during the first iterations much
1752 * quicker than we calculate here, probably due to
1753 * the transport latency. To accommodate that
1754 * we artificially decrease the sleep time until
1755 * we have filled the buffer at least once
1756 * completely. */
1757
1758 if (pa_log_ratelimit(PA_LOG_DEBUG))
1759 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1760 sleep_usec /= 2;
1761 }
1762
1763 /* OK, the playback buffer is now full, let's
1764 * calculate when to wake up next */
1765 #ifdef DEBUG_TIMING
1766 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1767 #endif
1768
1769 /* Convert from the sound card time domain to the
1770 * system time domain */
1771 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1772
1773 #ifdef DEBUG_TIMING
1774 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1775 #endif
1776
1777 /* We don't trust the conversion, so we wake up whatever comes first */
1778 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1779 }
1780
1781 u->after_rewind = FALSE;
1782
1783 }
1784
1785 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1786 pa_usec_t volume_sleep;
1787 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1788 if (volume_sleep > 0) {
1789 if (rtpoll_sleep > 0)
1790 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1791 else
1792 rtpoll_sleep = volume_sleep;
1793 }
1794 }
1795
1796 if (rtpoll_sleep > 0)
1797 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1798 else
1799 pa_rtpoll_set_timer_disabled(u->rtpoll);
1800
1801 /* Hmm, nothing to do. Let's sleep */
1802 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1803 goto fail;
1804
1805 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1806 pa_sink_volume_change_apply(u->sink, NULL);
1807
1808 if (ret == 0)
1809 goto finish;
1810
1811 /* Tell ALSA about this and process its response */
1812 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1813 struct pollfd *pollfd;
1814 int err;
1815 unsigned n;
1816
1817 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1818
1819 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1820 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1821 goto fail;
1822 }
1823
1824 if (revents & ~POLLOUT) {
1825 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1826 goto fail;
1827
1828 u->first = TRUE;
1829 u->since_start = 0;
1830 revents = 0;
1831 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1832 pa_log_debug("Wakeup from ALSA!");
1833
1834 } else
1835 revents = 0;
1836 }
1837
1838 fail:
1839 /* If this was not a regular exit from the loop we have to continue
1840 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1841 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1842 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1843
1844 finish:
1845 pa_log_debug("Thread shutting down");
1846 }
1847
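/* Pick the sink name: an explicit sink_name= argument wins (and fails if
 * the name is already taken); otherwise a name is synthesized as
 * "alsa_output.<device>[.<mapping>]", e.g. (illustrative)
 * "alsa_output.pci-0000_00_1b.0.analog-stereo". */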
1848 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1849 const char *n;
1850 char *t;
1851
1852 pa_assert(data);
1853 pa_assert(ma);
1854 pa_assert(device_name);
1855
1856 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1857 pa_sink_new_data_set_name(data, n);
1858 data->namereg_fail = TRUE;
1859 return;
1860 }
1861
1862 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1863 data->namereg_fail = TRUE;
1864 else {
1865 n = device_id ? device_id : device_name;
1866 data->namereg_fail = FALSE;
1867 }
1868
1869 if (mapping)
1870 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1871 else
1872 t = pa_sprintf_malloc("alsa_output.%s", n);
1873
1874 pa_sink_new_data_set_name(data, t);
1875 pa_xfree(t);
1876 }
1877
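/* Locate a mixer for the PCM: either synthesize a path for an explicitly
 * requested mixer element (control= argument) or reuse the probed output
 * path set of the mapping. On failure, both mixer_path and mixer_handle
 * are cleared again. */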
1878 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1879 snd_hctl_t *hctl;
1880
1881 if (!mapping && !element)
1882 return;
1883
1884 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1885 pa_log_info("Failed to find a working mixer device.");
1886 return;
1887 }
1888
1889 if (element) {
1890
1891 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1892 goto fail;
1893
1894 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1895 goto fail;
1896
1897 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1898 pa_alsa_path_dump(u->mixer_path);
1899 } else if (!(u->mixer_path_set = mapping->output_path_set))
1900 goto fail;
1901
1902 return;
1903
1904 fail:
1905
1906 if (u->mixer_path) {
1907 pa_alsa_path_free(u->mixer_path);
1908 u->mixer_path = NULL;
1909 }
1910
1911 if (u->mixer_handle) {
1912 snd_mixer_close(u->mixer_handle);
1913 u->mixer_handle = NULL;
1914 }
1915 }
1916
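/* Activate the mixer path belonging to the active port (or the only path
 * we have) and hook up volume/mute event callbacks if any path supports
 * hardware volume or mute. */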
1917 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1918 pa_bool_t need_mixer_callback = FALSE;
1919
1920 pa_assert(u);
1921
1922 if (!u->mixer_handle)
1923 return 0;
1924
1925 if (u->sink->active_port) {
1926 pa_alsa_port_data *data;
1927
1928 /* We have a list of supported paths, so let's activate the
1929 * one that has been chosen as active */
1930
1931 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1932 u->mixer_path = data->path;
1933
1934 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
1935
1936 } else {
1937
1938 if (!u->mixer_path && u->mixer_path_set)
1939 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1940
1941 if (u->mixer_path) {
1942 /* Hmm, we have only a single path, so let's activate it */
1943
1944 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
1945
1946 } else
1947 return 0;
1948 }
1949
1950 mixer_volume_init(u);
1951
1952 /* Will we need to register callbacks? */
1953 if (u->mixer_path_set && u->mixer_path_set->paths) {
1954 pa_alsa_path *p;
1955 void *state;
1956
1957 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1958 if (p->has_volume || p->has_mute)
1959 need_mixer_callback = TRUE;
1960 }
1961 }
1962 else if (u->mixer_path)
1963 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1964
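/* With deferred volume, mixer events are watched from the IO thread via
 * rtpoll; otherwise they are dispatched from the main loop via an
 * fdlist. */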
1965 if (need_mixer_callback) {
1966 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1967 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1968 u->mixer_pd = pa_alsa_mixer_pdata_new();
1969 mixer_callback = io_mixer_callback;
1970
1971 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1972 pa_log("Failed to initialize file descriptor monitoring");
1973 return -1;
1974 }
1975 } else {
1976 u->mixer_fdl = pa_alsa_fdlist_new();
1977 mixer_callback = ctl_mixer_callback;
1978
1979 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1980 pa_log("Failed to initialize file descriptor monitoring");
1981 return -1;
1982 }
1983 }
1984
1985 if (u->mixer_path_set)
1986 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1987 else
1988 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1989 }
1990
1991 return 0;
1992 }
1993
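/* Entry point used by module-alsa-sink and (via card profiles)
 * module-alsa-card. A sketch of a module load line exercising the
 * arguments parsed below (values are illustrative only):
 *
 *   load-module module-alsa-sink device=hw:0 tsched=yes mmap=yes fragments=4 fragment_size=4408
 */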
1994 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1995
1996 struct userdata *u = NULL;
1997 const char *dev_id = NULL, *key, *mod_name;
1998 pa_sample_spec ss;
1999 uint32_t alternate_sample_rate;
2000 pa_channel_map map;
2001 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2002 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2003 size_t frame_size;
2004 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
2005 pa_sink_new_data data;
2006 pa_alsa_profile_set *profile_set = NULL;
2007 void *state = NULL;
2008
2009 pa_assert(m);
2010 pa_assert(ma);
2011
2012 ss = m->core->default_sample_spec;
2013 map = m->core->default_channel_map;
2014 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2015 pa_log("Failed to parse sample specification and channel map");
2016 goto fail;
2017 }
2018
2019 alternate_sample_rate = m->core->alternate_sample_rate;
2020 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2021 pa_log("Failed to parse alternate sample rate");
2022 goto fail;
2023 }
2024
2025 frame_size = pa_frame_size(&ss);
2026
2027 nfrags = m->core->default_n_fragments;
2028 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2029 if (frag_size <= 0)
2030 frag_size = (uint32_t) frame_size;
2031 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2032 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2033
2034 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2035 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2036 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2037 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2038 pa_log("Failed to parse buffer metrics");
2039 goto fail;
2040 }
2041
2042 buffer_size = nfrags * frag_size;
2043
2044 period_frames = frag_size/frame_size;
2045 buffer_frames = buffer_size/frame_size;
2046 tsched_frames = tsched_size/frame_size;
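/* Illustrative arithmetic, assuming the stock defaults of 4 fragments of
 * 25ms each at 44.1kHz S16 stereo (frame_size = 4): frag_size ≈ 4408
 * bytes, buffer_size ≈ 17632 bytes, period_frames ≈ 1102 and
 * buffer_frames ≈ 4408. */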
2047
2048 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2049 pa_log("Failed to parse mmap argument.");
2050 goto fail;
2051 }
2052
2053 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2054 pa_log("Failed to parse tsched argument.");
2055 goto fail;
2056 }
2057
2058 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2059 pa_log("Failed to parse ignore_dB argument.");
2060 goto fail;
2061 }
2062
2063 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2064 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2065 pa_log("Failed to parse rewind_safeguard argument");
2066 goto fail;
2067 }
2068
2069 deferred_volume = m->core->deferred_volume;
2070 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2071 pa_log("Failed to parse deferred_volume argument.");
2072 goto fail;
2073 }
2074
2075 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2076 pa_log("Failed to parse fixed_latency_range argument.");
2077 goto fail;
2078 }
2079
2080 use_tsched = pa_alsa_may_tsched(use_tsched);
2081
2082 u = pa_xnew0(struct userdata, 1);
2083 u->core = m->core;
2084 u->module = m;
2085 u->use_mmap = use_mmap;
2086 u->use_tsched = use_tsched;
2087 u->deferred_volume = deferred_volume;
2088 u->fixed_latency_range = fixed_latency_range;
2089 u->first = TRUE;
2090 u->rewind_safeguard = rewind_safeguard;
2091 u->rtpoll = pa_rtpoll_new();
2092 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2093
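/* Clock smoother for translating between sound card time and system
 * time: 1s adjust time, 10s history window, monotonic, smoothing
 * enabled, at least 5 history points, epoch set to now, created paused
 * (resumed once playback actually starts). */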
2094 u->smoother = pa_smoother_new(
2095 SMOOTHER_ADJUST_USEC,
2096 SMOOTHER_WINDOW_USEC,
2097 TRUE,
2098 TRUE,
2099 5,
2100 pa_rtclock_now(),
2101 TRUE);
2102 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2103
2104 /* Use the UCM context if the mapping provides one */
2105 if (mapping && mapping->ucm_context.ucm)
2106 u->ucm_context = &mapping->ucm_context;
2107
2108 dev_id = pa_modargs_get_value(
2109 ma, "device_id",
2110 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2111
2112 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2113
2114 if (reserve_init(u, dev_id) < 0)
2115 goto fail;
2116
2117 if (reserve_monitor_init(u, dev_id) < 0)
2118 goto fail;
2119
2120 b = use_mmap;
2121 d = use_tsched;
2122
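/* Open the PCM: via the given mapping, via device_id= with automatic
 * profile probing, or via the raw device= string as the final
 * fallback. */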
2123 if (mapping) {
2124
2125 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2126 pa_log("device_id= not set");
2127 goto fail;
2128 }
2129
2130 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2131 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2132 pa_log("Failed to enable ucm modifier %s", mod_name);
2133 else
2134 pa_log_debug("Enabled ucm modifier %s", mod_name);
2135 }
2136
2137 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2138 dev_id,
2139 &u->device_name,
2140 &ss, &map,
2141 SND_PCM_STREAM_PLAYBACK,
2142 &period_frames, &buffer_frames, tsched_frames,
2143 &b, &d, mapping)))
2144 goto fail;
2145
2146 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2147
2148 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2149 goto fail;
2150
2151 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2152 dev_id,
2153 &u->device_name,
2154 &ss, &map,
2155 SND_PCM_STREAM_PLAYBACK,
2156 &period_frames, &buffer_frames, tsched_frames,
2157 &b, &d, profile_set, &mapping)))
2158 goto fail;
2159
2160 } else {
2161
2162 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2163 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2164 &u->device_name,
2165 &ss, &map,
2166 SND_PCM_STREAM_PLAYBACK,
2167 &period_frames, &buffer_frames, tsched_frames,
2168 &b, &d, FALSE)))
2169 goto fail;
2170 }
2171
2172 pa_assert(u->device_name);
2173 pa_log_info("Successfully opened device %s.", u->device_name);
2174
2175 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2176 pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2177 goto fail;
2178 }
2179
2180 if (mapping)
2181 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2182
2183 if (use_mmap && !b) {
2184 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2185 u->use_mmap = use_mmap = FALSE;
2186 }
2187
2188 if (use_tsched && (!b || !d)) {
2189 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2190 u->use_tsched = use_tsched = FALSE;
2191 }
2192
2193 if (u->use_mmap)
2194 pa_log_info("Successfully enabled mmap() mode.");
2195
2196 if (u->use_tsched) {
2197 pa_log_info("Successfully enabled timer-based scheduling mode.");
2198
2199 if (u->fixed_latency_range)
2200 pa_log_info("Disabling latency range changes on underrun");
2201 }
2202
2203 if (is_iec958(u) || is_hdmi(u))
2204 set_formats = TRUE;
2205
2206 u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
2207 if (!u->rates) {
2208 pa_log_error("Failed to find any supported sample rates.");
2209 goto fail;
2210 }
2211
2212 /* ALSA might tweak the sample spec, so recalculate the frame size */
2213 frame_size = pa_frame_size(&ss);
2214
2215 if (!u->ucm_context)
2216 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2217
2218 pa_sink_new_data_init(&data);
2219 data.driver = driver;
2220 data.module = m;
2221 data.card = card;
2222 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2223
2224 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2225 * variable instead of using &data.namereg_fail directly, because
2226 * data.namereg_fail is a bitfield and taking the address of a bitfield
2227 * variable is impossible. */
2228 namereg_fail = data.namereg_fail;
2229 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2230 pa_log("Failed to parse namereg_fail argument.");
2231 pa_sink_new_data_done(&data);
2232 goto fail;
2233 }
2234 data.namereg_fail = namereg_fail;
2235
2236 pa_sink_new_data_set_sample_spec(&data, &ss);
2237 pa_sink_new_data_set_channel_map(&data, &map);
2238 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2239
2240 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2241 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2242 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2243 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2244 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2245
2246 if (mapping) {
2247 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2248 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2249
2250 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2251 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2252 }
2253
2254 pa_alsa_init_description(data.proplist);
2255
2256 if (u->control_device)
2257 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2258
2259 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2260 pa_log("Invalid properties");
2261 pa_sink_new_data_done(&data);
2262 goto fail;
2263 }
2264
2265 if (u->ucm_context)
2266 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, TRUE, card);
2267 else if (u->mixer_path_set)
2268 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2269
2270 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2271 (set_formats ? PA_SINK_SET_FORMATS : 0));
2272 pa_sink_new_data_done(&data);
2273
2274 if (!u->sink) {
2275 pa_log("Failed to create sink object");
2276 goto fail;
2277 }
2278
2279 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2280 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2281 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2282 goto fail;
2283 }
2284
2285 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2286 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2287 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2288 goto fail;
2289 }
2290
2291 u->sink->parent.process_msg = sink_process_msg;
2292 if (u->use_tsched)
2293 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2294 u->sink->set_state = sink_set_state_cb;
2295 if (u->ucm_context)
2296 u->sink->set_port = sink_set_port_ucm_cb;
2297 else
2298 u->sink->set_port = sink_set_port_cb;
2299 if (u->sink->alternate_sample_rate)
2300 u->sink->update_rate = sink_update_rate_cb;
2301 u->sink->userdata = u;
2302
2303 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2304 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2305
2306 u->frame_size = frame_size;
2307 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2308 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2309 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2310
2311 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2312 (double) u->hwbuf_size / (double) u->fragment_size,
2313 (long unsigned) u->fragment_size,
2314 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2315 (long unsigned) u->hwbuf_size,
2316 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2317
2318 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2319 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2320 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2321 else {
2322 pa_log_info("Disabling rewind for device %s", u->device_name);
2323 pa_sink_set_max_rewind(u->sink, 0);
2324 }
2325
2326 if (u->use_tsched) {
2327 u->tsched_watermark_ref = tsched_watermark;
2328 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2329 } else
2330 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2331
2332 reserve_update(u);
2333
2334 if (update_sw_params(u) < 0)
2335 goto fail;
2336
2337 if (u->ucm_context) {
2338 if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, TRUE) < 0)
2339 goto fail;
2340 } else if (setup_mixer(u, ignore_dB) < 0)
2341 goto fail;
2342
2343 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2344
2345 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2346 pa_log("Failed to create thread.");
2347 goto fail;
2348 }
2349
2350 /* Get initial mixer settings */
2351 if (data.volume_is_set) {
2352 if (u->sink->set_volume)
2353 u->sink->set_volume(u->sink);
2354 } else {
2355 if (u->sink->get_volume)
2356 u->sink->get_volume(u->sink);
2357 }
2358
2359 if (data.muted_is_set) {
2360 if (u->sink->set_mute)
2361 u->sink->set_mute(u->sink);
2362 } else {
2363 if (u->sink->get_mute)
2364 u->sink->get_mute(u->sink);
2365 }
2366
2367 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2368 u->sink->write_volume(u->sink);
2369
2370 if (set_formats) {
2371 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2372 pa_format_info *format;
2373
2374 /* To start with, we only support PCM formats. Other formats may be added
2375 * with pa_sink_set_formats().*/
2376 format = pa_format_info_new();
2377 format->encoding = PA_ENCODING_PCM;
2378 u->formats = pa_idxset_new(NULL, NULL);
2379 pa_idxset_put(u->formats, format, NULL);
2380
2381 u->sink->get_formats = sink_get_formats;
2382 u->sink->set_formats = sink_set_formats;
2383 }
2384
2385 pa_sink_put(u->sink);
2386
2387 if (profile_set)
2388 pa_alsa_profile_set_free(profile_set);
2389
2390 return u->sink;
2391
2392 fail:
2393
2394 if (u)
2395 userdata_free(u);
2396
2397 if (profile_set)
2398 pa_alsa_profile_set_free(profile_set);
2399
2400 return NULL;
2401 }
2402
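/* Tear down in roughly reverse order of construction: unlink the sink so
 * no new work arrives, stop the IO thread, then release the ALSA handles
 * and the remaining helper objects. */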
2403 static void userdata_free(struct userdata *u) {
2404 pa_assert(u);
2405
2406 if (u->sink)
2407 pa_sink_unlink(u->sink);
2408
2409 if (u->thread) {
2410 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2411 pa_thread_free(u->thread);
2412 }
2413
2414 pa_thread_mq_done(&u->thread_mq);
2415
2416 if (u->sink)
2417 pa_sink_unref(u->sink);
2418
2419 if (u->memchunk.memblock)
2420 pa_memblock_unref(u->memchunk.memblock);
2421
2422 if (u->mixer_pd)
2423 pa_alsa_mixer_pdata_free(u->mixer_pd);
2424
2425 if (u->alsa_rtpoll_item)
2426 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2427
2428 if (u->rtpoll)
2429 pa_rtpoll_free(u->rtpoll);
2430
2431 if (u->pcm_handle) {
2432 snd_pcm_drop(u->pcm_handle);
2433 snd_pcm_close(u->pcm_handle);
2434 }
2435
2436 if (u->mixer_fdl)
2437 pa_alsa_fdlist_free(u->mixer_fdl);
2438
2439 if (u->mixer_path && !u->mixer_path_set)
2440 pa_alsa_path_free(u->mixer_path);
2441
2442 if (u->mixer_handle)
2443 snd_mixer_close(u->mixer_handle);
2444
2445 if (u->smoother)
2446 pa_smoother_free(u->smoother);
2447
2448 if (u->formats)
2449 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2450
2451 if (u->rates)
2452 pa_xfree(u->rates);
2453
2454 reserve_done(u);
2455 monitor_done(u);
2456
2457 pa_xfree(u->device_name);
2458 pa_xfree(u->control_device);
2459 pa_xfree(u->paths_dir);
2460 pa_xfree(u);
2461 }
2462
2463 void pa_alsa_sink_free(pa_sink *s) {
2464 struct userdata *u;
2465
2466 pa_sink_assert_ref(s);
2467 pa_assert_se(u = s->userdata);
2468
2469 userdata_free(u);
2470 }