1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
34 #endif
35
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/volume.h>
39 #include <pulse/xmalloc.h>
40 #include <pulse/internal.h>
41
42 #include <pulsecore/core.h>
43 #include <pulsecore/i18n.h>
44 #include <pulsecore/module.h>
45 #include <pulsecore/memchunk.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/modargs.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/sample-util.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/macro.h>
53 #include <pulsecore/thread.h>
54 #include <pulsecore/thread-mq.h>
55 #include <pulsecore/rtpoll.h>
56 #include <pulsecore/time-smoother.h>
57
58 #include <modules/reserve-wrap.h>
59
60 #include "alsa-util.h"
61 #include "alsa-sink.h"
62
63 /* #define DEBUG_TIMING */
64
65 #define DEFAULT_DEVICE "default"
66
67 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
68 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
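/* These are only compiled-in defaults. Assuming the stock module
 * arguments of module-alsa-sink, they can be overridden (as byte
 * counts) at load time, e.g.:
 *
 *   load-module module-alsa-sink device=hw:0 tsched=1 \
 *       tsched_buffer_size=131072 tsched_buffer_watermark=16384
 */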
69
70 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
71 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
72 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a dropout to recheck whether things are good now */
73 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
74 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75
76 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
77 * will increase the watermark only if we hit a real underrun. */
78
79 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
80 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wake up at least this long before the buffer runs empty */
81
82 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s -- smoother window size */
83 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84
85 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
86 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
87
88 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the difference exceeds this level */
89
90 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
91 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330)   /* 1.33ms -- depending on channels/rate/sample format this may correspond to more than the 256 bytes above */
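/* As a worked example of the numbers above: for S16LE stereo at 48kHz
 * a frame is 2 channels * 2 bytes = 4 bytes, so 256 bytes cover
 * 256/4 = 64 frames, i.e. 64/48000 s ~= 1.33ms. With more channels or
 * wider sample formats the same 1330us corresponds to more than 256
 * bytes, which is why both a byte and a usec safeguard exist. */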
92
93 struct userdata {
94 pa_core *core;
95 pa_module *module;
96 pa_sink *sink;
97
98 pa_thread *thread;
99 pa_thread_mq thread_mq;
100 pa_rtpoll *rtpoll;
101
102 snd_pcm_t *pcm_handle;
103
104 char *paths_dir;
105 pa_alsa_fdlist *mixer_fdl;
106 pa_alsa_mixer_pdata *mixer_pd;
107 snd_mixer_t *mixer_handle;
108 pa_alsa_path_set *mixer_path_set;
109 pa_alsa_path *mixer_path;
110
111 pa_cvolume hardware_volume;
112
113 unsigned int *rates;
114
115 size_t
116 frame_size,
117 fragment_size,
118 hwbuf_size,
119 tsched_watermark,
120 tsched_watermark_ref,
121 hwbuf_unused,
122 min_sleep,
123 min_wakeup,
124 watermark_inc_step,
125 watermark_dec_step,
126 watermark_inc_threshold,
127 watermark_dec_threshold,
128 rewind_safeguard;
129
130 pa_usec_t watermark_dec_not_before;
131 pa_usec_t min_latency_ref;
132
133 pa_memchunk memchunk;
134
135 char *device_name; /* name of the PCM device */
136 char *control_device; /* name of the control device */
137
138 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
139
140 pa_bool_t first, after_rewind;
141
142 pa_rtpoll_item *alsa_rtpoll_item;
143
144 pa_smoother *smoother;
145 uint64_t write_count;
146 uint64_t since_start;
147 pa_usec_t smoother_interval;
148 pa_usec_t last_smoother_update;
149
150 pa_idxset *formats;
151
152 pa_reserve_wrapper *reserve;
153 pa_hook_slot *reserve_slot;
154 pa_reserve_monitor_wrapper *monitor;
155 pa_hook_slot *monitor_slot;
156
157 /* ucm context */
158 pa_alsa_ucm_mapping_context *ucm_context;
159 };
160
161 static void userdata_free(struct userdata *u);
162
163 /* FIXME: Is there a better way to do this than device names? */
164 static pa_bool_t is_iec958(struct userdata *u) {
165 return (strncmp("iec958", u->device_name, 6) == 0);
166 }
167
168 static pa_bool_t is_hdmi(struct userdata *u) {
169 return (strncmp("hdmi", u->device_name, 4) == 0);
170 }
171
172 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
173 pa_assert(r);
174 pa_assert(u);
175
176 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
177 return PA_HOOK_CANCEL;
178
179 return PA_HOOK_OK;
180 }
181
182 static void reserve_done(struct userdata *u) {
183 pa_assert(u);
184
185 if (u->reserve_slot) {
186 pa_hook_slot_free(u->reserve_slot);
187 u->reserve_slot = NULL;
188 }
189
190 if (u->reserve) {
191 pa_reserve_wrapper_unref(u->reserve);
192 u->reserve = NULL;
193 }
194 }
195
196 static void reserve_update(struct userdata *u) {
197 const char *description;
198 pa_assert(u);
199
200 if (!u->sink || !u->reserve)
201 return;
202
203 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
204 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
205 }
206
207 static int reserve_init(struct userdata *u, const char *dname) {
208 char *rname;
209
210 pa_assert(u);
211 pa_assert(dname);
212
213 if (u->reserve)
214 return 0;
215
216 if (pa_in_system_mode())
217 return 0;
218
219 if (!(rname = pa_alsa_get_reserve_name(dname)))
220 return 0;
221
222 /* We are resuming, try to lock the device */
223 u->reserve = pa_reserve_wrapper_get(u->core, rname);
224 pa_xfree(rname);
225
226 if (!(u->reserve))
227 return -1;
228
229 reserve_update(u);
230
231 pa_assert(!u->reserve_slot);
232 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
233
234 return 0;
235 }
236
237 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
238 pa_bool_t b;
239
240 pa_assert(w);
241 pa_assert(u);
242
243 b = PA_PTR_TO_UINT(busy) && !u->reserve;
244
245 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
246 return PA_HOOK_OK;
247 }
248
249 static void monitor_done(struct userdata *u) {
250 pa_assert(u);
251
252 if (u->monitor_slot) {
253 pa_hook_slot_free(u->monitor_slot);
254 u->monitor_slot = NULL;
255 }
256
257 if (u->monitor) {
258 pa_reserve_monitor_wrapper_unref(u->monitor);
259 u->monitor = NULL;
260 }
261 }
262
263 static int reserve_monitor_init(struct userdata *u, const char *dname) {
264 char *rname;
265
266 pa_assert(u);
267 pa_assert(dname);
268
269 if (pa_in_system_mode())
270 return 0;
271
272 if (!(rname = pa_alsa_get_reserve_name(dname)))
273 return 0;
274
275     /* Watch the device so we notice if somebody else wants to use it */
276 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
277 pa_xfree(rname);
278
279 if (!(u->monitor))
280 return -1;
281
282 pa_assert(!u->monitor_slot);
283 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
284
285 return 0;
286 }
287
288 static void fix_min_sleep_wakeup(struct userdata *u) {
289 size_t max_use, max_use_2;
290
291 pa_assert(u);
292 pa_assert(u->use_tsched);
293
294 max_use = u->hwbuf_size - u->hwbuf_unused;
295 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
296
297 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
298 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
299
300 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
301 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
302 }
303
304 static void fix_tsched_watermark(struct userdata *u) {
305 size_t max_use;
306 pa_assert(u);
307 pa_assert(u->use_tsched);
308
309 max_use = u->hwbuf_size - u->hwbuf_unused;
310
311 if (u->tsched_watermark > max_use - u->min_sleep)
312 u->tsched_watermark = max_use - u->min_sleep;
313
314 if (u->tsched_watermark < u->min_wakeup)
315 u->tsched_watermark = u->min_wakeup;
316 }
317
318 static void increase_watermark(struct userdata *u) {
319 size_t old_watermark;
320 pa_usec_t old_min_latency, new_min_latency;
321
322 pa_assert(u);
323 pa_assert(u->use_tsched);
324
325 /* First, just try to increase the watermark */
326 old_watermark = u->tsched_watermark;
327 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
328 fix_tsched_watermark(u);
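    /* Roughly, in time units with the default 10ms inc step: a 20ms
     * watermark grows as min(2*20, 20+10) = 30ms, then 40ms, 50ms, ...
     * until fix_tsched_watermark() clamps it at max_use - min_sleep. */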
329
330 if (old_watermark != u->tsched_watermark) {
331 pa_log_info("Increasing wakeup watermark to %0.2f ms",
332 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
333 return;
334 }
335
336 /* Hmm, we cannot increase the watermark any further, hence let's
337 raise the latency, unless doing so was disabled in
338 configuration */
339 if (u->fixed_latency_range)
340 return;
341
342 old_min_latency = u->sink->thread_info.min_latency;
343 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
344 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
345
346 if (old_min_latency != new_min_latency) {
347 pa_log_info("Increasing minimal latency to %0.2f ms",
348 (double) new_min_latency / PA_USEC_PER_MSEC);
349
350 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
351 }
352
353     /* When we reach this we're officially fucked! */
354 }
355
356 static void decrease_watermark(struct userdata *u) {
357 size_t old_watermark;
358 pa_usec_t now;
359
360 pa_assert(u);
361 pa_assert(u->use_tsched);
362
363 now = pa_rtclock_now();
364
365 if (u->watermark_dec_not_before <= 0)
366 goto restart;
367
368 if (u->watermark_dec_not_before > now)
369 return;
370
371 old_watermark = u->tsched_watermark;
372
373 if (u->tsched_watermark < u->watermark_dec_step)
374 u->tsched_watermark = u->tsched_watermark / 2;
375 else
376 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
377
378 fix_tsched_watermark(u);
379
380 if (old_watermark != u->tsched_watermark)
381 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
382 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
383
384     /* We don't change the latency range */
385
386 restart:
387 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
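    /* Net effect: the watermark is only lowered once the fill level has
     * stayed above watermark_dec_threshold for a full verification
     * window (any dip makes check_left_to_play() reset
     * watermark_dec_not_before to 0), so decreases happen at most once
     * per TSCHED_WATERMARK_VERIFY_AFTER_USEC. */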
388 }
389
390 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
391 pa_usec_t usec, wm;
392
393 pa_assert(sleep_usec);
394 pa_assert(process_usec);
395
396 pa_assert(u);
397 pa_assert(u->use_tsched);
398
399 usec = pa_sink_get_requested_latency_within_thread(u->sink);
400
401 if (usec == (pa_usec_t) -1)
402 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
403
404 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
405
406 if (wm > usec)
407 wm = usec/2;
408
409 *sleep_usec = usec - wm;
410 *process_usec = wm;
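    /* Worked example: a requested latency of 50ms with a 20ms watermark
     * yields *sleep_usec = 30ms and *process_usec = 20ms; had only 10ms
     * been requested, the watermark would have been clamped to
     * usec/2 = 5ms, splitting the request into a 5ms sleep and 5ms of
     * processing headroom. */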
411
412 #ifdef DEBUG_TIMING
413 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
414 (unsigned long) (usec / PA_USEC_PER_MSEC),
415 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
416 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
417 #endif
418 }
419
420 static int try_recover(struct userdata *u, const char *call, int err) {
421 pa_assert(u);
422 pa_assert(call);
423 pa_assert(err < 0);
424
425 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
426
427 pa_assert(err != -EAGAIN);
428
429 if (err == -EPIPE)
430 pa_log_debug("%s: Buffer underrun!", call);
431
432 if (err == -ESTRPIPE)
433 pa_log_debug("%s: System suspended!", call);
434
435 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
436 pa_log("%s: %s", call, pa_alsa_strerror(err));
437 return -1;
438 }
439
440 u->first = TRUE;
441 u->since_start = 0;
442 return 0;
443 }
444
445 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
446 size_t left_to_play;
447 pa_bool_t underrun = FALSE;
448
449     /* We use <= instead of < for this check here because an underrun
450      * only happens after the last sample was processed, not when it
451      * is merely removed from the buffer. This is particularly important
452      * when block transfer is used. */
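    /* (So n_bytes == u->hwbuf_size yields left_to_play = 0 without
     * being counted as an underrun yet.) */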
453
454 if (n_bytes <= u->hwbuf_size)
455 left_to_play = u->hwbuf_size - n_bytes;
456 else {
457
458 /* We got a dropout. What a mess! */
459 left_to_play = 0;
460 underrun = TRUE;
461
462 #if 0
463 PA_DEBUG_TRAP;
464 #endif
465
466 if (!u->first && !u->after_rewind)
467 if (pa_log_ratelimit(PA_LOG_INFO))
468 pa_log_info("Underrun!");
469 }
470
471 #ifdef DEBUG_TIMING
472 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
473 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
474 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
475 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
476 #endif
477
478 if (u->use_tsched) {
479 pa_bool_t reset_not_before = TRUE;
480
481 if (!u->first && !u->after_rewind) {
482 if (underrun || left_to_play < u->watermark_inc_threshold)
483 increase_watermark(u);
484 else if (left_to_play > u->watermark_dec_threshold) {
485 reset_not_before = FALSE;
486
487             /* We decrease the watermark only if we have actually
488 * been woken up by a timeout. If something else woke
489 * us up it's too easy to fulfill the deadlines... */
490
491 if (on_timeout)
492 decrease_watermark(u);
493 }
494 }
495
496 if (reset_not_before)
497 u->watermark_dec_not_before = 0;
498 }
499
500 return left_to_play;
501 }
502
503 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
504 pa_bool_t work_done = FALSE;
505 pa_usec_t max_sleep_usec = 0, process_usec = 0;
506 size_t left_to_play;
507 unsigned j = 0;
508
509 pa_assert(u);
510 pa_sink_assert_ref(u->sink);
511
512 if (u->use_tsched)
513 hw_sleep_time(u, &max_sleep_usec, &process_usec);
514
515 for (;;) {
516 snd_pcm_sframes_t n;
517 size_t n_bytes;
518 int r;
519 pa_bool_t after_avail = TRUE;
520
521 /* First we determine how many samples are missing to fill the
522 * buffer up to 100% */
523
524 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
525
526 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
527 continue;
528
529 return r;
530 }
531
532 n_bytes = (size_t) n * u->frame_size;
533
534 #ifdef DEBUG_TIMING
535 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
536 #endif
537
538 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
539 on_timeout = FALSE;
540
541 if (u->use_tsched)
542
543 /* We won't fill up the playback buffer before at least
544 * half the sleep time is over because otherwise we might
545          * ask for more data from the clients than they expect. We
546 * need to guarantee that clients only have to keep around
547 * a single hw buffer length. */
548
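        /* Worked example: with the default 2s buffer and 20ms watermark,
         * hw_sleep_time() gave us roughly sleep = 1.98s and
         * process = 20ms, so we skip filling while more than ~1.01s
         * (process + sleep/2) is still left to play. */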
549 if (!polled &&
550 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
551 #ifdef DEBUG_TIMING
552 pa_log_debug("Not filling up, because too early.");
553 #endif
554 break;
555 }
556
557 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
558
559 if (polled)
560 PA_ONCE_BEGIN {
561 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
562 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
563 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
564 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
565 pa_strnull(dn));
566 pa_xfree(dn);
567 } PA_ONCE_END;
568
569 #ifdef DEBUG_TIMING
570 pa_log_debug("Not filling up, because not necessary.");
571 #endif
572 break;
573 }
574
575
576 if (++j > 10) {
577 #ifdef DEBUG_TIMING
578 pa_log_debug("Not filling up, because already too many iterations.");
579 #endif
580
581 break;
582 }
583
584 n_bytes -= u->hwbuf_unused;
585 polled = FALSE;
586
587 #ifdef DEBUG_TIMING
588 pa_log_debug("Filling up");
589 #endif
590
591 for (;;) {
592 pa_memchunk chunk;
593 void *p;
594 int err;
595 const snd_pcm_channel_area_t *areas;
596 snd_pcm_uframes_t offset, frames;
597 snd_pcm_sframes_t sframes;
598
599 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
600 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
601
602 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
603
604 if (!after_avail && err == -EAGAIN)
605 break;
606
607 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
608 continue;
609
610 return r;
611 }
612
613 /* Make sure that if these memblocks need to be copied they will fit into one slot */
614 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
615 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
616
617 if (!after_avail && frames == 0)
618 break;
619
620 pa_assert(frames > 0);
621 after_avail = FALSE;
622
623 /* Check these are multiples of 8 bit */
624 pa_assert((areas[0].first & 7) == 0);
625             pa_assert((areas[0].step & 7) == 0);
626
627 /* We assume a single interleaved memory buffer */
628 pa_assert((areas[0].first >> 3) == 0);
629 pa_assert((areas[0].step >> 3) == u->frame_size);
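            /* E.g. for interleaved S16LE stereo: first = 0 bits,
             * step = 32 bits, i.e. consecutive samples of one channel
             * are exactly one 4-byte frame apart. */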
630
631 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
632
633 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
634 chunk.length = pa_memblock_get_length(chunk.memblock);
635 chunk.index = 0;
636
637 pa_sink_render_into_full(u->sink, &chunk);
638 pa_memblock_unref_fixed(chunk.memblock);
639
640 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
641
642 if (!after_avail && (int) sframes == -EAGAIN)
643 break;
644
645 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
646 continue;
647
648 return r;
649 }
650
651 work_done = TRUE;
652
653 u->write_count += frames * u->frame_size;
654 u->since_start += frames * u->frame_size;
655
656 #ifdef DEBUG_TIMING
657 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
658 #endif
659
660 if ((size_t) frames * u->frame_size >= n_bytes)
661 break;
662
663 n_bytes -= (size_t) frames * u->frame_size;
664 }
665 }
666
667 if (u->use_tsched) {
668 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
669 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
670
671 if (*sleep_usec > process_usec)
672 *sleep_usec -= process_usec;
673 else
674 *sleep_usec = 0;
675 } else
676 *sleep_usec = 0;
677
678 return work_done ? 1 : 0;
679 }
680
681 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
682 pa_bool_t work_done = FALSE;
683 pa_usec_t max_sleep_usec = 0, process_usec = 0;
684 size_t left_to_play;
685 unsigned j = 0;
686
687 pa_assert(u);
688 pa_sink_assert_ref(u->sink);
689
690 if (u->use_tsched)
691 hw_sleep_time(u, &max_sleep_usec, &process_usec);
692
693 for (;;) {
694 snd_pcm_sframes_t n;
695 size_t n_bytes;
696 int r;
697 pa_bool_t after_avail = TRUE;
698
699 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
700
701 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
702 continue;
703
704 return r;
705 }
706
707 n_bytes = (size_t) n * u->frame_size;
708 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
709 on_timeout = FALSE;
710
711 if (u->use_tsched)
712
713 /* We won't fill up the playback buffer before at least
714 * half the sleep time is over because otherwise we might
715          * ask for more data from the clients than they expect. We
716 * need to guarantee that clients only have to keep around
717 * a single hw buffer length. */
718
719 if (!polled &&
720 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
721 break;
722
723 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
724
725 if (polled)
726 PA_ONCE_BEGIN {
727 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
728 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
729 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
730 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
731 pa_strnull(dn));
732 pa_xfree(dn);
733 } PA_ONCE_END;
734
735 break;
736 }
737
738 if (++j > 10) {
739 #ifdef DEBUG_TIMING
740 pa_log_debug("Not filling up, because already too many iterations.");
741 #endif
742
743 break;
744 }
745
746 n_bytes -= u->hwbuf_unused;
747 polled = FALSE;
748
749 for (;;) {
750 snd_pcm_sframes_t frames;
751 void *p;
752
753 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
754
755 if (u->memchunk.length <= 0)
756 pa_sink_render(u->sink, n_bytes, &u->memchunk);
757
758 pa_assert(u->memchunk.length > 0);
759
760 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
761
762 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
763 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
764
765 p = pa_memblock_acquire(u->memchunk.memblock);
766 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
767 pa_memblock_release(u->memchunk.memblock);
768
769 if (PA_UNLIKELY(frames < 0)) {
770
771 if (!after_avail && (int) frames == -EAGAIN)
772 break;
773
774 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
775 continue;
776
777 return r;
778 }
779
780 if (!after_avail && frames == 0)
781 break;
782
783 pa_assert(frames > 0);
784 after_avail = FALSE;
785
786 u->memchunk.index += (size_t) frames * u->frame_size;
787 u->memchunk.length -= (size_t) frames * u->frame_size;
788
789 if (u->memchunk.length <= 0) {
790 pa_memblock_unref(u->memchunk.memblock);
791 pa_memchunk_reset(&u->memchunk);
792 }
793
794 work_done = TRUE;
795
796 u->write_count += frames * u->frame_size;
797 u->since_start += frames * u->frame_size;
798
799 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
800
801 if ((size_t) frames * u->frame_size >= n_bytes)
802 break;
803
804 n_bytes -= (size_t) frames * u->frame_size;
805 }
806 }
807
808 if (u->use_tsched) {
809 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
810 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
811
812 if (*sleep_usec > process_usec)
813 *sleep_usec -= process_usec;
814 else
815 *sleep_usec = 0;
816 } else
817 *sleep_usec = 0;
818
819 return work_done ? 1 : 0;
820 }
821
822 static void update_smoother(struct userdata *u) {
823 snd_pcm_sframes_t delay = 0;
824 int64_t position;
825 int err;
826 pa_usec_t now1 = 0, now2;
827 snd_pcm_status_t *status;
828 snd_htimestamp_t htstamp = { 0, 0 };
829
830 snd_pcm_status_alloca(&status);
831
832 pa_assert(u);
833 pa_assert(u->pcm_handle);
834
835 /* Let's update the time smoother */
836
837 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
838 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
839 return;
840 }
841
842 snd_pcm_status_get_htstamp(status, &htstamp);
843 now1 = pa_timespec_load(&htstamp);
844
845 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
846 if (now1 <= 0)
847 now1 = pa_rtclock_now();
848
849 /* check if the time since the last update is bigger than the interval */
850 if (u->last_smoother_update > 0)
851 if (u->last_smoother_update + u->smoother_interval > now1)
852 return;
853
854 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
855
856 if (PA_UNLIKELY(position < 0))
857 position = 0;
858
859 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
860
861 pa_smoother_put(u->smoother, now1, now2);
862
863 u->last_smoother_update = now1;
864 /* exponentially increase the update interval up to the MAX limit */
865     u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
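    /* i.e. 2ms -> 4ms -> 8ms -> ... -> 128ms -> 200ms (capped): we
     * query the device position often right after starting and only a
     * few times per second once the smoother has settled. */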
866 }
867
868 static pa_usec_t sink_get_latency(struct userdata *u) {
869 pa_usec_t r;
870 int64_t delay;
871 pa_usec_t now1, now2;
872
873 pa_assert(u);
874
875 now1 = pa_rtclock_now();
876 now2 = pa_smoother_get(u->smoother, now1);
877
878 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
879
880 r = delay >= 0 ? (pa_usec_t) delay : 0;
881
882 if (u->memchunk.memblock)
883 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
884
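    /* In other words: latency = usec(write_count) - smoothed playback
     * position, i.e. the portion of what we wrote that the device has
     * not played yet, plus whatever is still queued in u->memchunk. */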
885 return r;
886 }
887
888 static int build_pollfd(struct userdata *u) {
889 pa_assert(u);
890 pa_assert(u->pcm_handle);
891
892 if (u->alsa_rtpoll_item)
893 pa_rtpoll_item_free(u->alsa_rtpoll_item);
894
895 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
896 return -1;
897
898 return 0;
899 }
900
901 /* Called from IO context */
902 static int suspend(struct userdata *u) {
903 pa_assert(u);
904 pa_assert(u->pcm_handle);
905
906 pa_smoother_pause(u->smoother, pa_rtclock_now());
907
908 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
909 * take awfully long with our long buffer sizes today. */
910 snd_pcm_close(u->pcm_handle);
911 u->pcm_handle = NULL;
912
913 if (u->alsa_rtpoll_item) {
914 pa_rtpoll_item_free(u->alsa_rtpoll_item);
915 u->alsa_rtpoll_item = NULL;
916 }
917
918     /* We reset max_rewind/max_request here to make sure that, while
919      * we are suspended, the max_request/max_rewind values set before
920      * the suspend cannot influence the per-stream buffers of newly
921      * created streams, whose requirements in turn have no influence
922      * on the suspended device. */
923 pa_sink_set_max_rewind_within_thread(u->sink, 0);
924 pa_sink_set_max_request_within_thread(u->sink, 0);
925
926 pa_log_info("Device suspended...");
927
928 return 0;
929 }
930
931 /* Called from IO context */
932 static int update_sw_params(struct userdata *u) {
933 snd_pcm_uframes_t avail_min;
934 int err;
935
936 pa_assert(u);
937
938 /* Use the full buffer if no one asked us for anything specific */
939 u->hwbuf_unused = 0;
940
941 if (u->use_tsched) {
942 pa_usec_t latency;
943
944 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
945 size_t b;
946
947 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
948
949 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
950
951 /* We need at least one sample in our buffer */
952
953 if (PA_UNLIKELY(b < u->frame_size))
954 b = u->frame_size;
955
956 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
957 }
958
959 fix_min_sleep_wakeup(u);
960 fix_tsched_watermark(u);
961 }
962
963 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
964
965     /* We need at least one frame in the used part of the buffer */
966 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
967
968 if (u->use_tsched) {
969 pa_usec_t sleep_usec, process_usec;
970
971 hw_sleep_time(u, &sleep_usec, &process_usec);
972 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
973 }
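    /* Without tsched this leaves avail_min at hwbuf_unused/frame_size + 1,
     * so poll() wakes us as soon as anything at all can be written; with
     * tsched the added sleep time means the timer normally fires first
     * and the poll fd only acts as a safety net. */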
974
975 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
976
977 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
978 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
979 return err;
980 }
981
982 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
983 if (pa_alsa_pcm_is_hw(u->pcm_handle))
984 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
985 else {
986 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
987 pa_sink_set_max_rewind_within_thread(u->sink, 0);
988 }
989
990 return 0;
991 }
992
993 /* Called from IO Context on unsuspend or from main thread when creating sink */
994 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
995 pa_bool_t in_thread)
996 {
997 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
998 &u->sink->sample_spec);
999
1000 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1001 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1002
1003 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1004 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1005
1006 fix_min_sleep_wakeup(u);
1007 fix_tsched_watermark(u);
1008
1009 if (in_thread)
1010 pa_sink_set_latency_range_within_thread(u->sink,
1011 u->min_latency_ref,
1012 pa_bytes_to_usec(u->hwbuf_size, ss));
1013 else {
1014 pa_sink_set_latency_range(u->sink,
1015 0,
1016 pa_bytes_to_usec(u->hwbuf_size, ss));
1017
1018         /* work around an assert in pa_sink_set_latency_range_within_thread:
1019 keep track of min_latency and reuse it when
1020 this routine is called from IO context */
1021 u->min_latency_ref = u->sink->thread_info.min_latency;
1022 }
1023
1024 pa_log_info("Time scheduling watermark is %0.2fms",
1025 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
1026 }
1027
1028 /* Called from IO context */
1029 static int unsuspend(struct userdata *u) {
1030 pa_sample_spec ss;
1031 int err;
1032 pa_bool_t b, d;
1033 snd_pcm_uframes_t period_size, buffer_size;
1034 char *device_name = NULL;
1035
1036 pa_assert(u);
1037 pa_assert(!u->pcm_handle);
1038
1039 pa_log_info("Trying resume...");
1040
1041 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1042 /* Need to open device in NONAUDIO mode */
1043 int len = strlen(u->device_name) + 8;
1044
1045 device_name = pa_xmalloc(len);
1046 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1047 }
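    /* Per ALSA's iec958.h bit definitions, AES0=6 sets
     * IEC958_AES0_NONAUDIO|IEC958_AES0_CON_NOT_COPYRIGHT in the channel
     * status, turning e.g. "iec958:0" into "iec958:0,AES0=6". */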
1048
1049 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1050 SND_PCM_NONBLOCK|
1051 SND_PCM_NO_AUTO_RESAMPLE|
1052 SND_PCM_NO_AUTO_CHANNELS|
1053 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1054 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1055 goto fail;
1056 }
1057
1058 ss = u->sink->sample_spec;
1059 period_size = u->fragment_size / u->frame_size;
1060 buffer_size = u->hwbuf_size / u->frame_size;
1061 b = u->use_mmap;
1062 d = u->use_tsched;
1063
1064 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1065 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1066 goto fail;
1067 }
1068
1069 if (b != u->use_mmap || d != u->use_tsched) {
1070 pa_log_warn("Resume failed, couldn't get original access mode.");
1071 goto fail;
1072 }
1073
1074 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1075 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1076 goto fail;
1077 }
1078
1079 if (period_size*u->frame_size != u->fragment_size ||
1080 buffer_size*u->frame_size != u->hwbuf_size) {
1081 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1082 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1083 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1084 goto fail;
1085 }
1086
1087 if (update_sw_params(u) < 0)
1088 goto fail;
1089
1090 if (build_pollfd(u) < 0)
1091 goto fail;
1092
1093 u->write_count = 0;
1094 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1095 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1096 u->last_smoother_update = 0;
1097
1098 u->first = TRUE;
1099 u->since_start = 0;
1100
1101     /* reset the watermark to the value defined when the sink was created */
1102 if (u->use_tsched)
1103 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
1104
1105 pa_log_info("Resumed successfully...");
1106
1107 pa_xfree(device_name);
1108 return 0;
1109
1110 fail:
1111 if (u->pcm_handle) {
1112 snd_pcm_close(u->pcm_handle);
1113 u->pcm_handle = NULL;
1114 }
1115
1116 pa_xfree(device_name);
1117
1118 return -PA_ERR_IO;
1119 }
1120
1121 /* Called from IO context */
1122 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1123 struct userdata *u = PA_SINK(o)->userdata;
1124
1125 switch (code) {
1126
1127 case PA_SINK_MESSAGE_GET_LATENCY: {
1128 pa_usec_t r = 0;
1129
1130 if (u->pcm_handle)
1131 r = sink_get_latency(u);
1132
1133 *((pa_usec_t*) data) = r;
1134
1135 return 0;
1136 }
1137
1138 case PA_SINK_MESSAGE_SET_STATE:
1139
1140 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1141
1142 case PA_SINK_SUSPENDED: {
1143 int r;
1144
1145 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1146
1147 if ((r = suspend(u)) < 0)
1148 return r;
1149
1150 break;
1151 }
1152
1153 case PA_SINK_IDLE:
1154 case PA_SINK_RUNNING: {
1155 int r;
1156
1157 if (u->sink->thread_info.state == PA_SINK_INIT) {
1158 if (build_pollfd(u) < 0)
1159 return -PA_ERR_IO;
1160 }
1161
1162 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1163 if ((r = unsuspend(u)) < 0)
1164 return r;
1165 }
1166
1167 break;
1168 }
1169
1170 case PA_SINK_UNLINKED:
1171 case PA_SINK_INIT:
1172 case PA_SINK_INVALID_STATE:
1173 ;
1174 }
1175
1176 break;
1177 }
1178
1179 return pa_sink_process_msg(o, code, data, offset, chunk);
1180 }
1181
1182 /* Called from main context */
1183 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1184 pa_sink_state_t old_state;
1185 struct userdata *u;
1186
1187 pa_sink_assert_ref(s);
1188 pa_assert_se(u = s->userdata);
1189
1190 old_state = pa_sink_get_state(u->sink);
1191
1192 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1193 reserve_done(u);
1194 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1195 if (reserve_init(u, u->device_name) < 0)
1196 return -PA_ERR_BUSY;
1197
1198 return 0;
1199 }
1200
1201 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1202 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1203
1204 pa_assert(u);
1205 pa_assert(u->mixer_handle);
1206
1207 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1208 return 0;
1209
1210 if (!PA_SINK_IS_LINKED(u->sink->state))
1211 return 0;
1212
1213 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1214 pa_sink_set_mixer_dirty(u->sink, TRUE);
1215 return 0;
1216 }
1217
1218 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1219 pa_sink_get_volume(u->sink, TRUE);
1220 pa_sink_get_mute(u->sink, TRUE);
1221 }
1222
1223 return 0;
1224 }
1225
1226 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1227 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1228
1229 pa_assert(u);
1230 pa_assert(u->mixer_handle);
1231
1232 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1233 return 0;
1234
1235 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1236 pa_sink_set_mixer_dirty(u->sink, TRUE);
1237 return 0;
1238 }
1239
1240 if (mask & SND_CTL_EVENT_MASK_VALUE)
1241 pa_sink_update_volume_and_mute(u->sink);
1242
1243 return 0;
1244 }
1245
1246 static void sink_get_volume_cb(pa_sink *s) {
1247 struct userdata *u = s->userdata;
1248 pa_cvolume r;
1249 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1250
1251 pa_assert(u);
1252 pa_assert(u->mixer_path);
1253 pa_assert(u->mixer_handle);
1254
1255 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1256 return;
1257
1258 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1259 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1260
1261 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1262
1263 if (u->mixer_path->has_dB) {
1264 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1265
1266 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1267 }
1268
1269 if (pa_cvolume_equal(&u->hardware_volume, &r))
1270 return;
1271
1272 s->real_volume = u->hardware_volume = r;
1273
1274 /* Hmm, so the hardware volume changed, let's reset our software volume */
1275 if (u->mixer_path->has_dB)
1276 pa_sink_set_soft_volume(s, NULL);
1277 }
1278
1279 static void sink_set_volume_cb(pa_sink *s) {
1280 struct userdata *u = s->userdata;
1281 pa_cvolume r;
1282 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1283 pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1284
1285 pa_assert(u);
1286 pa_assert(u->mixer_path);
1287 pa_assert(u->mixer_handle);
1288
1289 /* Shift up by the base volume */
1290 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1291
1292 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1293 return;
1294
1295 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1296 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1297
1298 u->hardware_volume = r;
1299
1300 if (u->mixer_path->has_dB) {
1301 pa_cvolume new_soft_volume;
1302 pa_bool_t accurate_enough;
1303 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1304
1305 /* Match exactly what the user requested by software */
1306 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1307
1308 /* If the adjustment to do in software is only minimal we
1309 * can skip it. That saves us CPU at the expense of a bit of
1310 * accuracy */
1311 accurate_enough =
1312 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1313 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1314
1315 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1316 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1317 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1318 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1319 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1320 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1321 pa_yes_no(accurate_enough));
1322 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1323
1324 if (!accurate_enough)
1325 s->soft_volume = new_soft_volume;
1326
1327 } else {
1328 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1329
1330 /* We can't match exactly what the user requested, hence let's
1331 * at least tell the user about it */
1332
1333 s->real_volume = r;
1334 }
1335 }
1336
1337 static void sink_write_volume_cb(pa_sink *s) {
1338 struct userdata *u = s->userdata;
1339 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1340
1341 pa_assert(u);
1342 pa_assert(u->mixer_path);
1343 pa_assert(u->mixer_handle);
1344 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1345
1346 /* Shift up by the base volume */
1347 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1348
1349 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1350 pa_log_error("Writing HW volume failed");
1351 else {
1352 pa_cvolume tmp_vol;
1353 pa_bool_t accurate_enough;
1354
1355 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1356 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1357
1358 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1359 accurate_enough =
1360 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1361 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1362
1363 if (!accurate_enough) {
1364 union {
1365 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1366 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1367 } vol;
1368
1369 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1370 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1371 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1372 pa_log_debug(" in dB: %s (request) != %s",
1373 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1374 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1375 }
1376 }
1377 }
1378
1379 static void sink_get_mute_cb(pa_sink *s) {
1380 struct userdata *u = s->userdata;
1381 pa_bool_t b;
1382
1383 pa_assert(u);
1384 pa_assert(u->mixer_path);
1385 pa_assert(u->mixer_handle);
1386
1387 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1388 return;
1389
1390 s->muted = b;
1391 }
1392
1393 static void sink_set_mute_cb(pa_sink *s) {
1394 struct userdata *u = s->userdata;
1395
1396 pa_assert(u);
1397 pa_assert(u->mixer_path);
1398 pa_assert(u->mixer_handle);
1399
1400 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1401 }
1402
1403 static void mixer_volume_init(struct userdata *u) {
1404 pa_assert(u);
1405
1406 if (!u->mixer_path->has_volume) {
1407 pa_sink_set_write_volume_callback(u->sink, NULL);
1408 pa_sink_set_get_volume_callback(u->sink, NULL);
1409 pa_sink_set_set_volume_callback(u->sink, NULL);
1410
1411 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1412 } else {
1413 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1414 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1415
1416 if (u->mixer_path->has_dB && u->deferred_volume) {
1417 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1418 pa_log_info("Successfully enabled deferred volume.");
1419 } else
1420 pa_sink_set_write_volume_callback(u->sink, NULL);
1421
1422 if (u->mixer_path->has_dB) {
1423 pa_sink_enable_decibel_volume(u->sink, TRUE);
1424 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1425
1426 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1427 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1428
1429 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1430 } else {
1431 pa_sink_enable_decibel_volume(u->sink, FALSE);
1432 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1433
1434 u->sink->base_volume = PA_VOLUME_NORM;
1435 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1436 }
1437
1438 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1439 }
1440
1441 if (!u->mixer_path->has_mute) {
1442 pa_sink_set_get_mute_callback(u->sink, NULL);
1443 pa_sink_set_set_mute_callback(u->sink, NULL);
1444 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1445 } else {
1446 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1447 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1448 pa_log_info("Using hardware mute control.");
1449 }
1450 }
1451
1452 static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
1453 struct userdata *u = s->userdata;
1454
1455 pa_assert(u);
1456 pa_assert(p);
1457 pa_assert(u->ucm_context);
1458
1459 return pa_alsa_ucm_set_port(u->ucm_context, p, TRUE);
1460 }
1461
1462 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1463 struct userdata *u = s->userdata;
1464 pa_alsa_port_data *data;
1465
1466 pa_assert(u);
1467 pa_assert(p);
1468 pa_assert(u->mixer_handle);
1469
1470 data = PA_DEVICE_PORT_DATA(p);
1471
1472 pa_assert_se(u->mixer_path = data->path);
1473 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1474
1475 mixer_volume_init(u);
1476
1477 if (s->set_mute)
1478 s->set_mute(s);
1479 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
1480 if (s->write_volume)
1481 s->write_volume(s);
1482 } else {
1483 if (s->set_volume)
1484 s->set_volume(s);
1485 }
1486
1487 return 0;
1488 }
1489
1490 static void sink_update_requested_latency_cb(pa_sink *s) {
1491 struct userdata *u = s->userdata;
1492 size_t before;
1493 pa_assert(u);
1494 pa_assert(u->use_tsched); /* only when timer scheduling is used
1495 * we can dynamically adjust the
1496 * latency */
1497
1498 if (!u->pcm_handle)
1499 return;
1500
1501 before = u->hwbuf_unused;
1502 update_sw_params(u);
1503
1504 /* Let's check whether we now use only a smaller part of the
1505        buffer than before. If so, we need to make sure that subsequent
1506 rewinds are relative to the new maximum fill level and not to the
1507 current fill level. Thus, let's do a full rewind once, to clear
1508 things up. */
1509
1510 if (u->hwbuf_unused > before) {
1511 pa_log_debug("Requesting rewind due to latency change.");
1512 pa_sink_request_rewind(s, (size_t) -1);
1513 }
1514 }
1515
1516 static pa_idxset* sink_get_formats(pa_sink *s) {
1517 struct userdata *u = s->userdata;
1518 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1519 pa_format_info *f;
1520 uint32_t idx;
1521
1522 pa_assert(u);
1523
1524 PA_IDXSET_FOREACH(f, u->formats, idx) {
1525 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1526 }
1527
1528 return ret;
1529 }
1530
1531 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1532 struct userdata *u = s->userdata;
1533 pa_format_info *f, *g;
1534 uint32_t idx, n;
1535
1536 pa_assert(u);
1537
1538 /* FIXME: also validate sample rates against what the device supports */
1539 PA_IDXSET_FOREACH(f, formats, idx) {
1540 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1541             /* EAC3 cannot be sent over S/PDIF */
1542 return FALSE;
1543 }
1544
1545 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
1546 u->formats = pa_idxset_new(NULL, NULL);
1547
1548 /* Note: the logic below won't apply if we're using software encoding.
1549 * This is fine for now since we don't support that via the passthrough
1550 * framework, but this must be changed if we do. */
1551
1552 /* Count how many sample rates we support */
1553 for (idx = 0, n = 0; u->rates[idx]; idx++)
1554 n++;
1555
1556 /* First insert non-PCM formats since we prefer those. */
1557 PA_IDXSET_FOREACH(f, formats, idx) {
1558 if (!pa_format_info_is_pcm(f)) {
1559 g = pa_format_info_copy(f);
1560 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1561 pa_idxset_put(u->formats, g, NULL);
1562 }
1563 }
1564
1565 /* Now add any PCM formats */
1566 PA_IDXSET_FOREACH(f, formats, idx) {
1567 if (pa_format_info_is_pcm(f)) {
1568 /* We don't set rates here since we'll just tack on a resampler for
1569 * unsupported rates */
1570 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1571 }
1572 }
1573
1574 return TRUE;
1575 }
1576
1577 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
1578 {
1579 struct userdata *u = s->userdata;
1580 int i;
1581 pa_bool_t supported = FALSE;
1582
1583 pa_assert(u);
1584
1585 for (i = 0; u->rates[i]; i++) {
1586 if (u->rates[i] == rate) {
1587 supported = TRUE;
1588 break;
1589 }
1590 }
1591
1592 if (!supported) {
1593 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1594 return FALSE;
1595 }
1596
1597 if (!PA_SINK_IS_OPENED(s->state)) {
1598 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1599 u->sink->sample_spec.rate = rate;
1600 return TRUE;
1601 }
1602
1603 return FALSE;
1604 }
1605
1606 static int process_rewind(struct userdata *u) {
1607 snd_pcm_sframes_t unused;
1608 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1609 pa_assert(u);
1610
1611 if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1612 pa_sink_process_rewind(u->sink, 0);
1613 return 0;
1614 }
1615
1616 /* Figure out how much we shall rewind and reset the counter */
1617 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1618
1619 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1620
1621 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1622 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1623 return -1;
1624 }
1625
1626 unused_nbytes = (size_t) unused * u->frame_size;
1627
1628     /* make sure the rewind doesn't go too far, since that can cause issues with DMAs */
1629 unused_nbytes += u->rewind_safeguard;
1630
1631 if (u->hwbuf_size > unused_nbytes)
1632 limit_nbytes = u->hwbuf_size - unused_nbytes;
1633 else
1634 limit_nbytes = 0;
1635
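    /* Example: with hwbuf_size = 64KiB, 16KiB reported free by
     * snd_pcm_avail() and the 256 byte safeguard, at most
     * 64Ki - 16Ki - 256 bytes of already-written audio may be rewound. */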
1636 if (rewind_nbytes > limit_nbytes)
1637 rewind_nbytes = limit_nbytes;
1638
1639 if (rewind_nbytes > 0) {
1640 snd_pcm_sframes_t in_frames, out_frames;
1641
1642 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1643
1644 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1645 pa_log_debug("before: %lu", (unsigned long) in_frames);
1646 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1647 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1648 if (try_recover(u, "process_rewind", out_frames) < 0)
1649 return -1;
1650 out_frames = 0;
1651 }
1652
1653 pa_log_debug("after: %lu", (unsigned long) out_frames);
1654
1655 rewind_nbytes = (size_t) out_frames * u->frame_size;
1656
1657 if (rewind_nbytes <= 0)
1658 pa_log_info("Tried rewind, but was apparently not possible.");
1659 else {
1660 u->write_count -= rewind_nbytes;
1661 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1662 pa_sink_process_rewind(u->sink, rewind_nbytes);
1663
1664 u->after_rewind = TRUE;
1665 return 0;
1666 }
1667 } else
1668 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1669
1670 pa_sink_process_rewind(u->sink, 0);
1671 return 0;
1672 }
1673
1674 static void thread_func(void *userdata) {
1675 struct userdata *u = userdata;
1676 unsigned short revents = 0;
1677
1678 pa_assert(u);
1679
1680 pa_log_debug("Thread starting up");
1681
1682 if (u->core->realtime_scheduling)
1683 pa_make_realtime(u->core->realtime_priority);
1684
1685 pa_thread_mq_install(&u->thread_mq);
1686
1687 for (;;) {
1688 int ret;
1689 pa_usec_t rtpoll_sleep = 0, real_sleep;
1690
1691 #ifdef DEBUG_TIMING
1692 pa_log_debug("Loop");
1693 #endif
1694
1695 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
1696 if (process_rewind(u) < 0)
1697 goto fail;
1698 }
1699
1700 /* Render some data and write it to the dsp */
1701 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1702 int work_done;
1703 pa_usec_t sleep_usec = 0;
1704 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1705
1706 if (u->use_mmap)
1707 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1708 else
1709 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1710
1711 if (work_done < 0)
1712 goto fail;
1713
1714 /* pa_log_debug("work_done = %i", work_done); */
1715
1716 if (work_done) {
1717
1718 if (u->first) {
1719 pa_log_info("Starting playback.");
1720 snd_pcm_start(u->pcm_handle);
1721
1722 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1723
1724 u->first = FALSE;
1725 }
1726
1727 update_smoother(u);
1728 }
1729
1730 if (u->use_tsched) {
1731 pa_usec_t cusec;
1732
1733 if (u->since_start <= u->hwbuf_size) {
1734
1735 /* USB devices on ALSA seem to hit a buffer
1736 * underrun during the first iterations much
1737                      * quicker than we calculate here, probably due to
1738                      * the transport latency. To compensate for that
1739 * we artificially decrease the sleep time until
1740 * we have filled the buffer at least once
1741 * completely.*/
1742
1743 if (pa_log_ratelimit(PA_LOG_DEBUG))
1744 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1745 sleep_usec /= 2;
1746 }
1747
1748 /* OK, the playback buffer is now full, let's
1749 * calculate when to wake up next */
1750 #ifdef DEBUG_TIMING
1751 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1752 #endif
1753
1754 /* Convert from the sound card time domain to the
1755 * system time domain */
1756 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1757
1758 #ifdef DEBUG_TIMING
1759 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1760 #endif
1761
1762 /* We don't trust the conversion, so we wake up whatever comes first */
1763 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1764 }
1765
1766 u->after_rewind = FALSE;
1767
1768 }
1769
1770 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1771 pa_usec_t volume_sleep;
1772 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1773 if (volume_sleep > 0) {
1774 if (rtpoll_sleep > 0)
1775 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1776 else
1777 rtpoll_sleep = volume_sleep;
1778 }
1779 }
1780
1781 if (rtpoll_sleep > 0) {
1782 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1783 real_sleep = pa_rtclock_now();
1784 }
1785 else
1786 pa_rtpoll_set_timer_disabled(u->rtpoll);
1787
1788 /* Hmm, nothing to do. Let's sleep */
1789 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1790 goto fail;
1791
1792 if (rtpoll_sleep > 0) {
1793 real_sleep = pa_rtclock_now() - real_sleep;
1794 #ifdef DEBUG_TIMING
1795 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1796 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1797 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
1798 #endif
1799 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark)
1800 pa_log_info("Scheduling delay of %0.2fms, you might want to investigate this to improve latency...",
1801 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC);
1802 }
1803
1804 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1805 pa_sink_volume_change_apply(u->sink, NULL);
1806
1807 if (ret == 0)
1808 goto finish;
1809
1810 /* Tell ALSA about this and process its response */
1811 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1812 struct pollfd *pollfd;
1813 int err;
1814 unsigned n;
1815
1816 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1817
1818 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1819 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1820 goto fail;
1821 }
1822
1823 if (revents & ~POLLOUT) {
1824 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1825 goto fail;
1826
1827 u->first = TRUE;
1828 u->since_start = 0;
1829 revents = 0;
1830 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1831 pa_log_debug("Wakeup from ALSA!");
1832
1833 } else
1834 revents = 0;
1835 }
1836
1837 fail:
1838 /* If this was not a regular exit from the loop we have to continue
1839  * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1840 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1841 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1842
1843 finish:
1844 pa_log_debug("Thread shutting down");
1845 }
1846
1847 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1848 const char *n;
1849 char *t;
1850
1851 pa_assert(data);
1852 pa_assert(ma);
1853 pa_assert(device_name);
1854
1855 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1856 pa_sink_new_data_set_name(data, n);
1857 data->namereg_fail = TRUE;
1858 return;
1859 }
1860
1861 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1862 data->namereg_fail = TRUE;
1863 else {
1864 n = device_id ? device_id : device_name;
1865 data->namereg_fail = FALSE;
1866 }
1867
1868 if (mapping)
1869 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1870 else
1871 t = pa_sprintf_malloc("alsa_output.%s", n);
1872
1873 pa_sink_new_data_set_name(data, t);
1874 pa_xfree(t);
1875 }
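/* Examples of the names this yields (hypothetical arguments):
 *
 *   sink_name=foo in the module arguments    -> "foo"
 *   device_id="0", mapping "analog-stereo"   -> "alsa_output.0.analog-stereo"
 *   no mapping, device name "default"        -> "alsa_output.default"
 */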
1876
1877 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1878 snd_hctl_t *hctl;
1879
1880 if (!mapping && !element)
1881 return;
1882
1883 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1884 pa_log_info("Failed to find a working mixer device.");
1885 return;
1886 }
1887
1888 if (element) {
1889
1890 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1891 goto fail;
1892
1893 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1894 goto fail;
1895
1896 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1897 pa_alsa_path_dump(u->mixer_path);
1898 } else if (!(u->mixer_path_set = mapping->output_path_set))
1899 goto fail;
1900
1901 return;
1902
1903 fail:
1904
1905 if (u->mixer_path) {
1906 pa_alsa_path_free(u->mixer_path);
1907 u->mixer_path = NULL;
1908 }
1909
1910 if (u->mixer_handle) {
1911 snd_mixer_close(u->mixer_handle);
1912 u->mixer_handle = NULL;
1913 }
1914 }
1915
1916 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1917 pa_bool_t need_mixer_callback = FALSE;
1918
1919 pa_assert(u);
1920
1921 if (!u->mixer_handle)
1922 return 0;
1923
1924 if (u->sink->active_port) {
1925 pa_alsa_port_data *data;
1926
1927 /* We have a list of supported paths, so let's activate the
1928 * one that has been chosen as active */
1929
1930 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1931 u->mixer_path = data->path;
1932
1933 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
1934
1935 } else {
1936
1937 if (!u->mixer_path && u->mixer_path_set)
1938 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1939
1940 if (u->mixer_path) {
1941 /* Hmm, we have only a single path, so let's activate it */
1942
1943 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
1944
1945 } else
1946 return 0;
1947 }
1948
1949 mixer_volume_init(u);
1950
1951 /* Will we need to register callbacks? */
1952 if (u->mixer_path_set && u->mixer_path_set->paths) {
1953 pa_alsa_path *p;
1954 void *state;
1955
1956 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1957 if (p->has_volume || p->has_mute)
1958 need_mixer_callback = TRUE;
1959 }
1960 }
1961 else if (u->mixer_path)
1962 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1963
1964 if (need_mixer_callback) {
1965 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1966 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1967 u->mixer_pd = pa_alsa_mixer_pdata_new();
1968 mixer_callback = io_mixer_callback;
1969
1970 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1971 pa_log("Failed to initialize file descriptor monitoring");
1972 return -1;
1973 }
1974 } else {
1975 u->mixer_fdl = pa_alsa_fdlist_new();
1976 mixer_callback = ctl_mixer_callback;
1977
1978 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1979 pa_log("Failed to initialize file descriptor monitoring");
1980 return -1;
1981 }
1982 }
1983
1984 if (u->mixer_path_set)
1985 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1986 else
1987 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1988 }
1989
1990 return 0;
1991 }
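/* For context: the callbacks registered above use the alsa-lib mixer
 * element callback type. A sketch of the expected shape (hypothetical
 * name and body; the real ones are io_mixer_callback and
 * ctl_mixer_callback earlier in this file):
 *
 *   static int example_elem_callback(snd_mixer_elem_t *elem, unsigned int mask) {
 *       if (mask & SND_CTL_EVENT_MASK_VALUE)
 *           ...re-read the hardware volume/mute and propagate it...
 *       return 0;
 *   }
 */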
1992
1993 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1994
1995 struct userdata *u = NULL;
1996 const char *dev_id = NULL, *key, *mod_name;
1997 pa_sample_spec ss;
1998 uint32_t alternate_sample_rate;
1999 pa_channel_map map;
2000 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2001 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2002 size_t frame_size;
2003 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
2004 pa_sink_new_data data;
2005 pa_alsa_profile_set *profile_set = NULL;
2006 void *state = NULL;
2007
2008 pa_assert(m);
2009 pa_assert(ma);
2010
2011 ss = m->core->default_sample_spec;
2012 map = m->core->default_channel_map;
2013 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2014 pa_log("Failed to parse sample specification and channel map");
2015 goto fail;
2016 }
2017
2018 alternate_sample_rate = m->core->alternate_sample_rate;
2019 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2020 pa_log("Failed to parse alternate sample rate");
2021 goto fail;
2022 }
2023
2024 frame_size = pa_frame_size(&ss);
2025
2026 nfrags = m->core->default_n_fragments;
2027 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2028 if (frag_size <= 0)
2029 frag_size = (uint32_t) frame_size;
2030 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2031 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2032
2033 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2034 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2035 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2036 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2037 pa_log("Failed to parse buffer metrics");
2038 goto fail;
2039 }
2040
2041 buffer_size = nfrags * frag_size;
2042
2043 period_frames = frag_size/frame_size;
2044 buffer_frames = buffer_size/frame_size;
2045 tsched_frames = tsched_size/frame_size;
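/* A worked example of the metrics above, assuming the stock core
 * defaults (4 fragments of 25ms each) and S16LE stereo at 44100Hz,
 * i.e. frame_size = 4 bytes: frag_size = 4408 bytes, buffer_size =
 * 17632 bytes (~100ms), period_frames = 1102, and with the default
 * 2s tsched buffer tsched_frames = 88200. */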
2046
2047 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2048 pa_log("Failed to parse mmap argument.");
2049 goto fail;
2050 }
2051
2052 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2053 pa_log("Failed to parse tsched argument.");
2054 goto fail;
2055 }
2056
2057 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2058 pa_log("Failed to parse ignore_dB argument.");
2059 goto fail;
2060 }
2061
2062 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2063 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2064 pa_log("Failed to parse rewind_safeguard argument");
2065 goto fail;
2066 }
2067
2068 deferred_volume = m->core->deferred_volume;
2069 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2070 pa_log("Failed to parse deferred_volume argument.");
2071 goto fail;
2072 }
2073
2074 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2075 pa_log("Failed to parse fixed_latency_range argument.");
2076 goto fail;
2077 }
2078
2079 use_tsched = pa_alsa_may_tsched(use_tsched);
2080
2081 u = pa_xnew0(struct userdata, 1);
2082 u->core = m->core;
2083 u->module = m;
2084 u->use_mmap = use_mmap;
2085 u->use_tsched = use_tsched;
2086 u->deferred_volume = deferred_volume;
2087 u->fixed_latency_range = fixed_latency_range;
2088 u->first = TRUE;
2089 u->rewind_safeguard = rewind_safeguard;
2090 u->rtpoll = pa_rtpoll_new();
2091 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2092
2093 u->smoother = pa_smoother_new(
2094 SMOOTHER_ADJUST_USEC,
2095 SMOOTHER_WINDOW_USEC,
2096 TRUE,
2097 TRUE,
2098 5,
2099 pa_rtclock_now(),
2100 TRUE);
2101 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2102
2103 /* Use the UCM context if the mapping provides one */
2104 if (mapping && mapping->ucm_context.ucm)
2105 u->ucm_context = &mapping->ucm_context;
2106
2107 dev_id = pa_modargs_get_value(
2108 ma, "device_id",
2109 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
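/* In module-argument terms the lookup above means, e.g. (hypothetical
 * load lines for module-alsa-sink):
 *
 *   load-module module-alsa-sink device_id=0      (preferred key)
 *   load-module module-alsa-sink device=front:1   (fallback key)
 *   load-module module-alsa-sink                  (uses "default")
 */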
2110
2111 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2112
2113 if (reserve_init(u, dev_id) < 0)
2114 goto fail;
2115
2116 if (reserve_monitor_init(u, dev_id) < 0)
2117 goto fail;
2118
2119 b = use_mmap;
2120 d = use_tsched;
2121
2122 if (mapping) {
2123
2124 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2125 pa_log("device_id= not set");
2126 goto fail;
2127 }
2128
2129 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2130 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2131 pa_log("Failed to enable ucm modifier %s", mod_name);
2132 else
2133 pa_log_debug("Enabled ucm modifier %s", mod_name);
2134 }
2135
2136 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2137 dev_id,
2138 &u->device_name,
2139 &ss, &map,
2140 SND_PCM_STREAM_PLAYBACK,
2141 &period_frames, &buffer_frames, tsched_frames,
2142 &b, &d, mapping)))
2143 goto fail;
2144
2145 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2146
2147 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2148 goto fail;
2149
2150 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2151 dev_id,
2152 &u->device_name,
2153 &ss, &map,
2154 SND_PCM_STREAM_PLAYBACK,
2155 &period_frames, &buffer_frames, tsched_frames,
2156 &b, &d, profile_set, &mapping)))
2157 goto fail;
2158
2159 } else {
2160
2161 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2162 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2163 &u->device_name,
2164 &ss, &map,
2165 SND_PCM_STREAM_PLAYBACK,
2166 &period_frames, &buffer_frames, tsched_frames,
2167 &b, &d, FALSE)))
2168 goto fail;
2169 }
2170
2171 pa_assert(u->device_name);
2172 pa_log_info("Successfully opened device %s.", u->device_name);
2173
2174 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2175 pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2176 goto fail;
2177 }
2178
2179 if (mapping)
2180 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2181
2182 if (use_mmap && !b) {
2183 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2184 u->use_mmap = use_mmap = FALSE;
2185 }
2186
2187 if (use_tsched && (!b || !d)) {
2188 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2189 u->use_tsched = use_tsched = FALSE;
2190 }
2191
2192 if (u->use_mmap)
2193 pa_log_info("Successfully enabled mmap() mode.");
2194
2195 if (u->use_tsched) {
2196 pa_log_info("Successfully enabled timer-based scheduling mode.");
2197
2198 if (u->fixed_latency_range)
2199 pa_log_info("Disabling latency range changes on underrun");
2200 }
2201
2202 if (is_iec958(u) || is_hdmi(u))
2203 set_formats = TRUE;
2204
2205 u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
2206 if (!u->rates) {
2207 pa_log_error("Failed to find any supported sample rates.");
2208 goto fail;
2209 }
2210
2211 /* ALSA might tweak the sample spec, so recalculate the frame size */
2212 frame_size = pa_frame_size(&ss);
2213
2214 if (!u->ucm_context)
2215 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2216
2217 pa_sink_new_data_init(&data);
2218 data.driver = driver;
2219 data.module = m;
2220 data.card = card;
2221 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2222
2223 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2224 * variable instead of using &data.namereg_fail directly, because
2225 * data.namereg_fail is a bit-field, and C does not allow taking the
2226 * address of a bit-field. */
2227 namereg_fail = data.namereg_fail;
2228 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2229 pa_log("Failed to parse namereg_fail argument.");
2230 pa_sink_new_data_done(&data);
2231 goto fail;
2232 }
2233 data.namereg_fail = namereg_fail;
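/* I.e. this would not compile, which is why the temporary exists:
 *
 *   pa_modargs_get_value_boolean(ma, "namereg_fail", &data.namereg_fail);
 */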
2234
2235 pa_sink_new_data_set_sample_spec(&data, &ss);
2236 pa_sink_new_data_set_channel_map(&data, &map);
2237 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2238
2239 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2240 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2241 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2242 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2243 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2244
2245 if (mapping) {
2246 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2247 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2248
2249 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2250 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2251 }
2252
2253 pa_alsa_init_description(data.proplist);
2254
2255 if (u->control_device)
2256 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2257
2258 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2259 pa_log("Invalid properties");
2260 pa_sink_new_data_done(&data);
2261 goto fail;
2262 }
2263
2264 if (u->ucm_context)
2265 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, TRUE, card);
2266 else if (u->mixer_path_set)
2267 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2268
2269 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2270 (set_formats ? PA_SINK_SET_FORMATS : 0));
2271 pa_sink_new_data_done(&data);
2272
2273 if (!u->sink) {
2274 pa_log("Failed to create sink object");
2275 goto fail;
2276 }
2277
2278 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2279 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2280 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2281 goto fail;
2282 }
2283
2284 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2285 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2286 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2287 goto fail;
2288 }
2289
2290 u->sink->parent.process_msg = sink_process_msg;
2291 if (u->use_tsched)
2292 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2293 u->sink->set_state = sink_set_state_cb;
2294 if (u->ucm_context)
2295 u->sink->set_port = sink_set_port_ucm_cb;
2296 else
2297 u->sink->set_port = sink_set_port_cb;
2298 if (u->sink->alternate_sample_rate)
2299 u->sink->update_rate = sink_update_rate_cb;
2300 u->sink->userdata = u;
2301
2302 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2303 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2304
2305 u->frame_size = frame_size;
2306 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2307 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2308 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2309
2310 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2311 (double) u->hwbuf_size / (double) u->fragment_size,
2312 (long unsigned) u->fragment_size,
2313 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2314 (long unsigned) u->hwbuf_size,
2315 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2316
2317 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2318 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2319 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2320 else {
2321 pa_log_info("Disabling rewind for device %s", u->device_name);
2322 pa_sink_set_max_rewind(u->sink, 0);
2323 }
2324
2325 if (u->use_tsched) {
2326 u->tsched_watermark_ref = tsched_watermark;
2327 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2328 } else
2329 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
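/* Concretely, with the ~100ms example buffer from earlier, a
 * non-tsched sink reports a fixed ~100ms latency here, whereas a
 * tsched sink was created with PA_SINK_DYNAMIC_LATENCY above and can
 * vary its latency up to the (much larger) tsched buffer. */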
2330
2331 reserve_update(u);
2332
2333 if (update_sw_params(u) < 0)
2334 goto fail;
2335
2336 if (u->ucm_context) {
2337 if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, TRUE) < 0)
2338 goto fail;
2339 } else if (setup_mixer(u, ignore_dB) < 0)
2340 goto fail;
2341
2342 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2343
2344 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2345 pa_log("Failed to create thread.");
2346 goto fail;
2347 }
2348
2349 /* Get initial mixer settings */
2350 if (data.volume_is_set) {
2351 if (u->sink->set_volume)
2352 u->sink->set_volume(u->sink);
2353 } else {
2354 if (u->sink->get_volume)
2355 u->sink->get_volume(u->sink);
2356 }
2357
2358 if (data.muted_is_set) {
2359 if (u->sink->set_mute)
2360 u->sink->set_mute(u->sink);
2361 } else {
2362 if (u->sink->get_mute)
2363 u->sink->get_mute(u->sink);
2364 }
2365
2366 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2367 u->sink->write_volume(u->sink);
2368
2369 if (set_formats) {
2370 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2371 pa_format_info *format;
2372
2373 /* To start with, we only support PCM formats. Other formats may be added
2374 * with pa_sink_set_formats(). */
2375 format = pa_format_info_new();
2376 format->encoding = PA_ENCODING_PCM;
2377 u->formats = pa_idxset_new(NULL, NULL);
2378 pa_idxset_put(u->formats, format, NULL);
2379
2380 u->sink->get_formats = sink_get_formats;
2381 u->sink->set_formats = sink_set_formats;
2382 }
2383
2384 pa_sink_put(u->sink);
2385
2386 if (profile_set)
2387 pa_alsa_profile_set_free(profile_set);
2388
2389 return u->sink;
2390
2391 fail:
2392
2393 if (u)
2394 userdata_free(u);
2395
2396 if (profile_set)
2397 pa_alsa_profile_set_free(profile_set);
2398
2399 return NULL;
2400 }
2401
2402 static void userdata_free(struct userdata *u) {
2403 pa_assert(u);
2404
2405 if (u->sink)
2406 pa_sink_unlink(u->sink);
2407
2408 if (u->thread) {
2409 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2410 pa_thread_free(u->thread);
2411 }
2412
2413 pa_thread_mq_done(&u->thread_mq);
2414
2415 if (u->sink)
2416 pa_sink_unref(u->sink);
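/* The ordering above matters: unlink the sink first so no new data
 * is routed to it, then stop the I/O thread, and only then drop our
 * sink reference before the remaining resources are freed below. */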
2417
2418 if (u->memchunk.memblock)
2419 pa_memblock_unref(u->memchunk.memblock);
2420
2421 if (u->mixer_pd)
2422 pa_alsa_mixer_pdata_free(u->mixer_pd);
2423
2424 if (u->alsa_rtpoll_item)
2425 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2426
2427 if (u->rtpoll)
2428 pa_rtpoll_free(u->rtpoll);
2429
2430 if (u->pcm_handle) {
2431 snd_pcm_drop(u->pcm_handle);
2432 snd_pcm_close(u->pcm_handle);
2433 }
2434
2435 if (u->mixer_fdl)
2436 pa_alsa_fdlist_free(u->mixer_fdl);
2437
2438 if (u->mixer_path && !u->mixer_path_set)
2439 pa_alsa_path_free(u->mixer_path);
2440
2441 if (u->mixer_handle)
2442 snd_mixer_close(u->mixer_handle);
2443
2444 if (u->smoother)
2445 pa_smoother_free(u->smoother);
2446
2447 if (u->formats)
2448 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2449
2450 if (u->rates)
2451 pa_xfree(u->rates);
2452
2453 reserve_done(u);
2454 monitor_done(u);
2455
2456 pa_xfree(u->device_name);
2457 pa_xfree(u->control_device);
2458 pa_xfree(u->paths_dir);
2459 pa_xfree(u);
2460 }
2461
2462 void pa_alsa_sink_free(pa_sink *s) {
2463 struct userdata *u;
2464
2465 pa_sink_assert_ref(s);
2466 pa_assert_se(u = s->userdata);
2467
2468 userdata_free(u);
2469 }