]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
alsa-sink/source: Better thread names
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
34 #endif
35
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/volume.h>
39 #include <pulse/xmalloc.h>
40 #include <pulse/internal.h>
41
42 #include <pulsecore/core.h>
43 #include <pulsecore/i18n.h>
44 #include <pulsecore/module.h>
45 #include <pulsecore/memchunk.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/modargs.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/sample-util.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/macro.h>
53 #include <pulsecore/thread.h>
54 #include <pulsecore/thread-mq.h>
55 #include <pulsecore/rtpoll.h>
56 #include <pulsecore/time-smoother.h>
57
58 #include <modules/reserve-wrap.h>
59
60 #include "alsa-util.h"
61 #include "alsa-sink.h"
62
63 /* #define DEBUG_TIMING */
64
65 #define DEFAULT_DEVICE "default"
66
67 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
68 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69
70 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
71 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
72 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
73 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this threshold, increase the watermark */
74 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75
76 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
77 * will increase the watermark only if we hit a real underrun. */
78
79 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
80 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
81
82 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother windows size */
83 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84
85 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
86 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
87
88 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
89
90 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
91 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
92
/* Per-sink instance state, shared between the main thread and the
 * real-time IO thread that services the ALSA PCM device. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread, its message queue, and the poll loop it runs */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    /* PCM handle; NULL while the device is suspended (see suspend()) */
    snd_pcm_t *pcm_handle;

    /* Mixer / volume-control plumbing */
    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    /* Earliest time at which decrease_watermark() may act again */
    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;

    /* Pending data not yet written in unix_write() (non-mmap path) */
    pa_memchunk memchunk;

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    /* first: no data written since start/recovery; after_rewind: a
     * rewind just happened -- both mute underrun accounting */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Latency interpolation state */
    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    /* Device reservation (cooperative device sharing) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};
160
161 static void userdata_free(struct userdata *u);
162
163 /* FIXME: Is there a better way to do this than device names? */
164 static pa_bool_t is_iec958(struct userdata *u) {
165 return (strncmp("iec958", u->device_name, 6) == 0);
166 }
167
168 static pa_bool_t is_hdmi(struct userdata *u) {
169 return (strncmp("hdmi", u->device_name, 4) == 0);
170 }
171
172 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
173 pa_assert(r);
174 pa_assert(u);
175
176 pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);
177
178 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
179 return PA_HOOK_CANCEL;
180
181 return PA_HOOK_OK;
182 }
183
184 static void reserve_done(struct userdata *u) {
185 pa_assert(u);
186
187 if (u->reserve_slot) {
188 pa_hook_slot_free(u->reserve_slot);
189 u->reserve_slot = NULL;
190 }
191
192 if (u->reserve) {
193 pa_reserve_wrapper_unref(u->reserve);
194 u->reserve = NULL;
195 }
196 }
197
198 static void reserve_update(struct userdata *u) {
199 const char *description;
200 pa_assert(u);
201
202 if (!u->sink || !u->reserve)
203 return;
204
205 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
206 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
207 }
208
/* Acquire the device reservation for device 'dname' and hook up
 * reserve_cb().  Returns 0 on success or when reservation does not
 * apply (already held, system mode, or no reserve name for this
 * device); -1 if the reservation could not be obtained. */
static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    /* Reservation is a per-session mechanism; skip it in system mode */
    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* Try to lock the device.  NOTE(review): the original comment said
     * "We are resuming" -- this also runs on initial setup; verify. */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}
238
/* Hook callback from the reservation monitor: fired when another
 * application acquires ('busy' non-zero) or releases the device.
 * Suspend while someone else holds the device (unless we hold the
 * reservation ourselves), resume once it is free again. */
static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_assert(w);
    pa_assert(u);

    if (PA_PTR_TO_UINT(busy) && !u->reserve) {
        pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
        pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
    } else {
        pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
        pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
    }

    return PA_HOOK_OK;
}
253
254 static void monitor_done(struct userdata *u) {
255 pa_assert(u);
256
257 if (u->monitor_slot) {
258 pa_hook_slot_free(u->monitor_slot);
259 u->monitor_slot = NULL;
260 }
261
262 if (u->monitor) {
263 pa_reserve_monitor_wrapper_unref(u->monitor);
264 u->monitor = NULL;
265 }
266 }
267
/* Set up a monitor watching the reservation of device 'dname' and hook
 * up monitor_cb().  Returns 0 on success or when monitoring does not
 * apply (system mode, no reserve name); -1 on failure. */
static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* NOTE(review): comment copied from reserve_init(); this obtains a
     * monitor on the reservation, it does not lock the device itself. */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}
292
/* Recompute u->min_sleep and u->min_wakeup (bytes) from the TSCHED_MIN_*
 * constants, clamping each into [frame_size, half the usable hw buffer]
 * so that both always fit.  tsched mode only. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* Usable part of the hw buffer, and its frame-aligned half */
    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
308
/* Clamp u->tsched_watermark into [min_wakeup, max_use - min_sleep].
 * Assumes fix_min_sleep_wakeup() already bounded min_sleep/min_wakeup
 * to at most half the usable buffer.  tsched mode only. */
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}
322
/* React to an underrun (or dangerously low buffer fill): raise the
 * wakeup watermark, and if it is already maxed out, raise the sink's
 * minimum latency instead (unless a fixed latency range was
 * configured). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency, unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* If we reach this point neither the watermark nor the latency
     * could be raised any further -- underruns will keep happening. */
}
360
/* Opportunistically lower the wakeup watermark after a sustained
 * period (TSCHED_WATERMARK_VERIFY_AFTER_USEC) with consistently high
 * buffer fill.  Rate-limited via u->watermark_dec_not_before. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* 0 means "observation window not started yet": start it now */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Still within the verification window -- too early to decrease */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range*/

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
394
/* Split the current target latency into how long the IO thread may
 * sleep (*sleep_usec) and the margin reserved for waking up and
 * refilling before the buffer drains (*process_usec, i.e. the
 * watermark).  tsched mode only. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    /* No latency requested: use the full hw buffer time */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    /* Never let the watermark eat the entire latency budget */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
424
/* Attempt to recover the PCM from the error 'err' returned by the ALSA
 * call named by 'call' (for logging).  On success returns 0 and resets
 * the write-accounting state so the caller can retry; returns -1 if
 * snd_pcm_recover() failed. */
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    /* -EAGAIN is handled by the callers, never passed here */
    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    /* Stream was reset: treat the next write like the very first one */
    u->first = TRUE;
    u->since_start = 0;
    return 0;
}
449
/* Given that n_bytes of the hw buffer are currently writable, return
 * how many bytes are still queued for playback ("left to play").
 * Detects underruns (n_bytes exceeding the buffer size) and, in tsched
 * mode, drives the watermark feedback: raise it on underrun or low
 * fill, lower it if the fill stayed comfortably high and we were woken
 * by our own timeout. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#if 0
        PA_DEBUG_TRAP;
#endif

        /* Right after start or a rewind an underrun is expected noise;
         * don't log it */
        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        /* Buffer dipped below the dec threshold: restart the
         * observation window decrease_watermark() relies on */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
507
/* Fill the hw buffer using the mmap transfer path.  Renders sink data
 * directly into the ALSA mmap area via fixed memblocks.  On return,
 * *sleep_usec holds how long the IO thread should sleep (0 in non-tsched
 * mode).  'polled' indicates we were woken by POLLOUT, 'on_timeout'
 * that we were woken by our own timer.  Returns 1 if any data was
 * written, 0 if not, or a negative value on unrecoverable error. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Spurious POLLOUT wakeup: complain once per driver */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        /* Safety cap so a misbehaving device can't trap us here */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap area in a fixed memblock and render the
             * sink's data straight into it -- zero-copy */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the buffer drains down to the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
685
/* Fill the hw buffer using plain snd_pcm_writei() (the non-mmap path).
 * Data is rendered into u->memchunk and copied out; partially written
 * chunks are carried over across calls.  Parameters and return values
 * mirror mmap_write(): returns 1 if any data was written, 0 if not, or
 * a negative value on unrecoverable error; *sleep_usec receives the
 * suggested sleep time. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Spurious POLLOUT wakeup: complain once per driver */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety cap so a misbehaving device can't trap us here */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only if the previous one was fully
             * consumed */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Advance past what was accepted; leftovers stay queued in
             * u->memchunk for the next iteration/call */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the buffer drains down to the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
826
/* Feed a fresh (system time, playback position) sample into the time
 * smoother used for latency interpolation.  Rate-limited by
 * u->smoother_interval, which backs off exponentially up to
 * SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Playback position = bytes handed to ALSA minus what is still
     * queued in the hw buffer (delay, converted to bytes) */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
872
/* Estimate the sink's current latency: total bytes written minus the
 * smoothed playback position, clamped at 0, plus whatever is still
 * queued in u->memchunk (non-mmap path leftovers). */
static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
892
893 static int build_pollfd(struct userdata *u) {
894 pa_assert(u);
895 pa_assert(u->pcm_handle);
896
897 if (u->alsa_rtpoll_item)
898 pa_rtpoll_item_free(u->alsa_rtpoll_item);
899
900 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
901 return -1;
902
903 return 0;
904 }
905
/* Called from IO context.  Suspend the device: pause the latency
 * smoother, close the PCM (setting u->pcm_handle to NULL, which marks
 * the suspended state), and drop the poll item.  Returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}
935
936 /* Called from IO context */
937 static int update_sw_params(struct userdata *u) {
938 snd_pcm_uframes_t avail_min;
939 int err;
940
941 pa_assert(u);
942
943 /* Use the full buffer if no one asked us for anything specific */
944 u->hwbuf_unused = 0;
945
946 if (u->use_tsched) {
947 pa_usec_t latency;
948
949 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
950 size_t b;
951
952 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
953
954 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
955
956 /* We need at least one sample in our buffer */
957
958 if (PA_UNLIKELY(b < u->frame_size))
959 b = u->frame_size;
960
961 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
962 }
963
964 fix_min_sleep_wakeup(u);
965 fix_tsched_watermark(u);
966 }
967
968 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
969
970 /* We need at last one frame in the used part of the buffer */
971 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
972
973 if (u->use_tsched) {
974 pa_usec_t sleep_usec, process_usec;
975
976 hw_sleep_time(u, &sleep_usec, &process_usec);
977 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
978 }
979
980 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
981
982 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
983 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
984 return err;
985 }
986
987 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
988 if (pa_alsa_pcm_is_hw(u->pcm_handle))
989 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
990 else {
991 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
992 pa_sink_set_max_rewind_within_thread(u->sink, 0);
993 }
994
995 return 0;
996 }
997
/* Called from IO Context on unsuspend or from main thread when creating sink.
 * Reinitialize the tsched watermark (converting 'tsched_watermark',
 * given in bytes of sample spec 'ss', into the sink's sample spec) and
 * the derived inc/dec step and threshold byte values, then publish the
 * resulting latency range.  'in_thread' selects the IO-thread-safe
 * variant of the latency-range setter. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_sink_set_latency_within_thread(),
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}
1032
/* Called from IO context. Reopen and reconfigure the PCM device after a
 * suspend, verifying that we get back exactly the configuration we had
 * before. Returns 0 on success, -PA_ERR_IO on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8; /* room for ",AES0=6" + NUL */

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    /* Reopen with the same open flags used originally: non-blocking and with
     * all automatic plugin conversions disabled. */
    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request the previous configuration... */
    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* ... and verify the device actually gave it back unchanged; a changed
     * configuration would invalidate all our buffer metrics. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Reset all timing state; playback restarts from scratch. */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    /* reset the watermark to the value defined when sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}
1125
/* Called from IO context. Handles latency queries and state transitions
 * before delegating to the generic sink message handler. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Report 0 latency while the device is closed (suspended). */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    /* Can only suspend from an opened state. */
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up the poll fds. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ; /* nothing to do for these targets */
            }

            /* Fall through to the generic handler below, which performs the
             * actual state change. */
            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
1186
1187 /* Called from main context */
1188 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1189 pa_sink_state_t old_state;
1190 struct userdata *u;
1191
1192 pa_sink_assert_ref(s);
1193 pa_assert_se(u = s->userdata);
1194
1195 old_state = pa_sink_get_state(u->sink);
1196
1197 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1198 reserve_done(u);
1199 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1200 if (reserve_init(u, u->device_name) < 0)
1201 return -PA_ERR_BUSY;
1202
1203 return 0;
1204 }
1205
1206 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1207 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1208
1209 pa_assert(u);
1210 pa_assert(u->mixer_handle);
1211
1212 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1213 return 0;
1214
1215 if (!PA_SINK_IS_LINKED(u->sink->state))
1216 return 0;
1217
1218 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1219 pa_sink_set_mixer_dirty(u->sink, TRUE);
1220 return 0;
1221 }
1222
1223 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1224 pa_sink_get_volume(u->sink, TRUE);
1225 pa_sink_get_mute(u->sink, TRUE);
1226 }
1227
1228 return 0;
1229 }
1230
1231 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1232 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1233
1234 pa_assert(u);
1235 pa_assert(u->mixer_handle);
1236
1237 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1238 return 0;
1239
1240 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1241 pa_sink_set_mixer_dirty(u->sink, TRUE);
1242 return 0;
1243 }
1244
1245 if (mask & SND_CTL_EVENT_MASK_VALUE)
1246 pa_sink_update_volume_and_mute(u->sink);
1247
1248 return 0;
1249 }
1250
1251 static void sink_get_volume_cb(pa_sink *s) {
1252 struct userdata *u = s->userdata;
1253 pa_cvolume r;
1254 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1255
1256 pa_assert(u);
1257 pa_assert(u->mixer_path);
1258 pa_assert(u->mixer_handle);
1259
1260 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1261 return;
1262
1263 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1264 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1265
1266 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1267
1268 if (u->mixer_path->has_dB) {
1269 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1270
1271 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1272 }
1273
1274 if (pa_cvolume_equal(&u->hardware_volume, &r))
1275 return;
1276
1277 s->real_volume = u->hardware_volume = r;
1278
1279 /* Hmm, so the hardware volume changed, let's reset our software volume */
1280 if (u->mixer_path->has_dB)
1281 pa_sink_set_soft_volume(s, NULL);
1282 }
1283
/* Write the requested sink volume to hardware. With a dB scale, the part of
 * the request the hardware cannot represent is made up in software. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* With deferred volume the actual hardware write happens later from the
     * IO thread (sink_write_volume_cb); here we only compute what it would
     * round to. */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1341
/* Called from the IO thread (deferred volume): actually write the volume
 * previously computed by sink_set_volume_cb to the hardware mixer. */
static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* The hardware rounded the request; complain (at debug level) only
         * if the result deviates more than VOLUME_ACCURACY per channel. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            /* The two snprint buffers are never live at the same time, so
             * overlay them in a union to save stack. */
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1383
1384 static void sink_get_mute_cb(pa_sink *s) {
1385 struct userdata *u = s->userdata;
1386 pa_bool_t b;
1387
1388 pa_assert(u);
1389 pa_assert(u->mixer_path);
1390 pa_assert(u->mixer_handle);
1391
1392 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1393 return;
1394
1395 s->muted = b;
1396 }
1397
/* Push the sink's current mute state down to the hardware mute switch. */
static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}
1407
1408 static void mixer_volume_init(struct userdata *u) {
1409 pa_assert(u);
1410
1411 if (!u->mixer_path->has_volume) {
1412 pa_sink_set_write_volume_callback(u->sink, NULL);
1413 pa_sink_set_get_volume_callback(u->sink, NULL);
1414 pa_sink_set_set_volume_callback(u->sink, NULL);
1415
1416 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1417 } else {
1418 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1419 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1420
1421 if (u->mixer_path->has_dB && u->deferred_volume) {
1422 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1423 pa_log_info("Successfully enabled deferred volume.");
1424 } else
1425 pa_sink_set_write_volume_callback(u->sink, NULL);
1426
1427 if (u->mixer_path->has_dB) {
1428 pa_sink_enable_decibel_volume(u->sink, TRUE);
1429 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1430
1431 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1432 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1433
1434 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1435 } else {
1436 pa_sink_enable_decibel_volume(u->sink, FALSE);
1437 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1438
1439 u->sink->base_volume = PA_VOLUME_NORM;
1440 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1441 }
1442
1443 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1444 }
1445
1446 if (!u->mixer_path->has_mute) {
1447 pa_sink_set_get_mute_callback(u->sink, NULL);
1448 pa_sink_set_set_mute_callback(u->sink, NULL);
1449 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1450 } else {
1451 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1452 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1453 pa_log_info("Using hardware mute control.");
1454 }
1455 }
1456
/* Port-switch callback used when the card is driven by ALSA UCM; simply
 * forwards the request to the UCM layer. */
static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    return pa_alsa_ucm_set_port(u->ucm_context, p, TRUE);
}
1466
1467 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1468 struct userdata *u = s->userdata;
1469 pa_alsa_port_data *data;
1470
1471 pa_assert(u);
1472 pa_assert(p);
1473 pa_assert(u->mixer_handle);
1474
1475 data = PA_DEVICE_PORT_DATA(p);
1476
1477 pa_assert_se(u->mixer_path = data->path);
1478 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1479
1480 mixer_volume_init(u);
1481
1482 if (s->set_mute)
1483 s->set_mute(s);
1484 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
1485 if (s->write_volume)
1486 s->write_volume(s);
1487 } else {
1488 if (s->set_volume)
1489 s->set_volume(s);
1490 }
1491
1492 return 0;
1493 }
1494
1495 static void sink_update_requested_latency_cb(pa_sink *s) {
1496 struct userdata *u = s->userdata;
1497 size_t before;
1498 pa_assert(u);
1499 pa_assert(u->use_tsched); /* only when timer scheduling is used
1500 * we can dynamically adjust the
1501 * latency */
1502
1503 if (!u->pcm_handle)
1504 return;
1505
1506 before = u->hwbuf_unused;
1507 update_sw_params(u);
1508
1509 /* Let's check whether we now use only a smaller part of the
1510 buffer then before. If so, we need to make sure that subsequent
1511 rewinds are relative to the new maximum fill level and not to the
1512 current fill level. Thus, let's do a full rewind once, to clear
1513 things up. */
1514
1515 if (u->hwbuf_unused > before) {
1516 pa_log_debug("Requesting rewind due to latency change.");
1517 pa_sink_request_rewind(s, (size_t) -1);
1518 }
1519 }
1520
1521 static pa_idxset* sink_get_formats(pa_sink *s) {
1522 struct userdata *u = s->userdata;
1523 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1524 pa_format_info *f;
1525 uint32_t idx;
1526
1527 pa_assert(u);
1528
1529 PA_IDXSET_FOREACH(f, u->formats, idx) {
1530 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1531 }
1532
1533 return ret;
1534 }
1535
1536 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1537 struct userdata *u = s->userdata;
1538 pa_format_info *f, *g;
1539 uint32_t idx, n;
1540
1541 pa_assert(u);
1542
1543 /* FIXME: also validate sample rates against what the device supports */
1544 PA_IDXSET_FOREACH(f, formats, idx) {
1545 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1546 /* EAC3 cannot be sent over over S/PDIF */
1547 return FALSE;
1548 }
1549
1550 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
1551 u->formats = pa_idxset_new(NULL, NULL);
1552
1553 /* Note: the logic below won't apply if we're using software encoding.
1554 * This is fine for now since we don't support that via the passthrough
1555 * framework, but this must be changed if we do. */
1556
1557 /* Count how many sample rates we support */
1558 for (idx = 0, n = 0; u->rates[idx]; idx++)
1559 n++;
1560
1561 /* First insert non-PCM formats since we prefer those. */
1562 PA_IDXSET_FOREACH(f, formats, idx) {
1563 if (!pa_format_info_is_pcm(f)) {
1564 g = pa_format_info_copy(f);
1565 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1566 pa_idxset_put(u->formats, g, NULL);
1567 }
1568 }
1569
1570 /* Now add any PCM formats */
1571 PA_IDXSET_FOREACH(f, formats, idx) {
1572 if (pa_format_info_is_pcm(f)) {
1573 /* We don't set rates here since we'll just tack on a resampler for
1574 * unsupported rates */
1575 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1576 }
1577 }
1578
1579 return TRUE;
1580 }
1581
1582 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
1583 {
1584 struct userdata *u = s->userdata;
1585 int i;
1586 pa_bool_t supported = FALSE;
1587
1588 pa_assert(u);
1589
1590 for (i = 0; u->rates[i]; i++) {
1591 if (u->rates[i] == rate) {
1592 supported = TRUE;
1593 break;
1594 }
1595 }
1596
1597 if (!supported) {
1598 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1599 return FALSE;
1600 }
1601
1602 if (!PA_SINK_IS_OPENED(s->state)) {
1603 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1604 u->sink->sample_spec.rate = rate;
1605 return TRUE;
1606 }
1607
1608 return FALSE;
1609 }
1610
/* Called from IO context. Rewind the ALSA ring buffer as far as safely
 * possible and report the actually rewound amount to the sink core. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Nothing to rewind while the device is closed. */
    if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
        pa_sink_process_rewind(u->sink, 0);
        return 0;
    }

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    /* The rewind is limited to the data currently queued in the hardware
     * buffer minus the safeguard. */
    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* The device may have rewound less than requested. */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    /* Fall-through: report that nothing was rewound. */
    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
1678
/* Main IO-thread loop: render audio into the ALSA buffer, manage the
 * timer-based scheduling wakeups, apply deferred volume changes, and react
 * to poll events / device errors until asked to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
            if (process_rewind(u) < 0)
                goto fail;
        }

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write since (un)suspend: kick off playback and
                 * restart the timing smoother. */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
#endif

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
#endif

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;

        }

        /* A pending deferred-volume change may need an earlier wakeup than
         * the audio schedule. */
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            /* Remember when we went to sleep so we can measure overshoot. */
            real_sleep = pa_rtclock_now();
        }
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            /* Sleeping past the watermark means the scheduler delayed us
             * long enough to risk an underrun -- warn about it. */
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark)
                pa_log_info("Scheduling delay of %0.2fms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC);
        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        /* pa_rtpoll_run() returning 0 means shutdown was requested. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates an error condition; try to
             * recover and restart playback from scratch. */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1851
1852 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1853 const char *n;
1854 char *t;
1855
1856 pa_assert(data);
1857 pa_assert(ma);
1858 pa_assert(device_name);
1859
1860 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1861 pa_sink_new_data_set_name(data, n);
1862 data->namereg_fail = TRUE;
1863 return;
1864 }
1865
1866 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1867 data->namereg_fail = TRUE;
1868 else {
1869 n = device_id ? device_id : device_name;
1870 data->namereg_fail = FALSE;
1871 }
1872
1873 if (mapping)
1874 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1875 else
1876 t = pa_sprintf_malloc("alsa_output.%s", n);
1877
1878 pa_sink_new_data_set_name(data, t);
1879 pa_xfree(t);
1880 }
1881
/* Locate and probe a mixer for the PCM device. Either a specific mixer
 * element is synthesized into a path (element != NULL), or the mapping's
 * pre-probed output path set is used. On failure all mixer state is torn
 * down again and the sink falls back to software volume. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
    snd_hctl_t *hctl;

    /* Nothing to look for: caller supplied neither a mapping nor an element. */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->output_path_set))
        /* mapping is non-NULL here because of the guard at the top */
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1920
/* Activate the appropriate mixer path for the sink (the active port's path,
 * or the single/first available one), initialize volume/mute handling and
 * register mixer event callbacks where needed. Returns 0 on success. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* No mixer found earlier: software volume only, nothing to set up. */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);

        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* With deferred volume the mixer is polled from the IO thread,
         * otherwise from the main loop. */
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1997
1998 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1999
2000 struct userdata *u = NULL;
2001 const char *dev_id = NULL, *key, *mod_name;
2002 pa_sample_spec ss;
2003 char *thread_name = NULL;
2004 uint32_t alternate_sample_rate;
2005 pa_channel_map map;
2006 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2007 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2008 size_t frame_size;
2009 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
2010 pa_sink_new_data data;
2011 pa_alsa_profile_set *profile_set = NULL;
2012 void *state = NULL;
2013
2014 pa_assert(m);
2015 pa_assert(ma);
2016
2017 ss = m->core->default_sample_spec;
2018 map = m->core->default_channel_map;
2019 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2020 pa_log("Failed to parse sample specification and channel map");
2021 goto fail;
2022 }
2023
2024 alternate_sample_rate = m->core->alternate_sample_rate;
2025 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2026 pa_log("Failed to parse alternate sample rate");
2027 goto fail;
2028 }
2029
2030 frame_size = pa_frame_size(&ss);
2031
2032 nfrags = m->core->default_n_fragments;
2033 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2034 if (frag_size <= 0)
2035 frag_size = (uint32_t) frame_size;
2036 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2037 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2038
2039 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2040 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2041 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2042 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2043 pa_log("Failed to parse buffer metrics");
2044 goto fail;
2045 }
2046
2047 buffer_size = nfrags * frag_size;
2048
2049 period_frames = frag_size/frame_size;
2050 buffer_frames = buffer_size/frame_size;
2051 tsched_frames = tsched_size/frame_size;
2052
2053 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2054 pa_log("Failed to parse mmap argument.");
2055 goto fail;
2056 }
2057
2058 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2059 pa_log("Failed to parse tsched argument.");
2060 goto fail;
2061 }
2062
2063 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2064 pa_log("Failed to parse ignore_dB argument.");
2065 goto fail;
2066 }
2067
2068 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2069 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2070 pa_log("Failed to parse rewind_safeguard argument");
2071 goto fail;
2072 }
2073
2074 deferred_volume = m->core->deferred_volume;
2075 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2076 pa_log("Failed to parse deferred_volume argument.");
2077 goto fail;
2078 }
2079
2080 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2081 pa_log("Failed to parse fixed_latency_range argument.");
2082 goto fail;
2083 }
2084
2085 use_tsched = pa_alsa_may_tsched(use_tsched);
2086
2087 u = pa_xnew0(struct userdata, 1);
2088 u->core = m->core;
2089 u->module = m;
2090 u->use_mmap = use_mmap;
2091 u->use_tsched = use_tsched;
2092 u->deferred_volume = deferred_volume;
2093 u->fixed_latency_range = fixed_latency_range;
2094 u->first = TRUE;
2095 u->rewind_safeguard = rewind_safeguard;
2096 u->rtpoll = pa_rtpoll_new();
2097 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2098
2099 u->smoother = pa_smoother_new(
2100 SMOOTHER_ADJUST_USEC,
2101 SMOOTHER_WINDOW_USEC,
2102 TRUE,
2103 TRUE,
2104 5,
2105 pa_rtclock_now(),
2106 TRUE);
2107 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2108
2109 /* use ucm */
2110 if (mapping && mapping->ucm_context.ucm)
2111 u->ucm_context = &mapping->ucm_context;
2112
2113 dev_id = pa_modargs_get_value(
2114 ma, "device_id",
2115 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2116
2117 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2118
2119 if (reserve_init(u, dev_id) < 0)
2120 goto fail;
2121
2122 if (reserve_monitor_init(u, dev_id) < 0)
2123 goto fail;
2124
2125 b = use_mmap;
2126 d = use_tsched;
2127
2128 if (mapping) {
2129
2130 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2131 pa_log("device_id= not set");
2132 goto fail;
2133 }
2134
2135 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2136 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2137 pa_log("Failed to enable ucm modifier %s", mod_name);
2138 else
2139 pa_log_debug("Enabled ucm modifier %s", mod_name);
2140 }
2141
2142 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2143 dev_id,
2144 &u->device_name,
2145 &ss, &map,
2146 SND_PCM_STREAM_PLAYBACK,
2147 &period_frames, &buffer_frames, tsched_frames,
2148 &b, &d, mapping)))
2149 goto fail;
2150
2151 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2152
2153 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2154 goto fail;
2155
2156 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2157 dev_id,
2158 &u->device_name,
2159 &ss, &map,
2160 SND_PCM_STREAM_PLAYBACK,
2161 &period_frames, &buffer_frames, tsched_frames,
2162 &b, &d, profile_set, &mapping)))
2163 goto fail;
2164
2165 } else {
2166
2167 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2168 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2169 &u->device_name,
2170 &ss, &map,
2171 SND_PCM_STREAM_PLAYBACK,
2172 &period_frames, &buffer_frames, tsched_frames,
2173 &b, &d, FALSE)))
2174 goto fail;
2175 }
2176
2177 pa_assert(u->device_name);
2178 pa_log_info("Successfully opened device %s.", u->device_name);
2179
2180 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2181 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
2182 goto fail;
2183 }
2184
2185 if (mapping)
2186 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2187
2188 if (use_mmap && !b) {
2189 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2190 u->use_mmap = use_mmap = FALSE;
2191 }
2192
2193 if (use_tsched && (!b || !d)) {
2194 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2195 u->use_tsched = use_tsched = FALSE;
2196 }
2197
2198 if (u->use_mmap)
2199 pa_log_info("Successfully enabled mmap() mode.");
2200
2201 if (u->use_tsched) {
2202 pa_log_info("Successfully enabled timer-based scheduling mode.");
2203
2204 if (u->fixed_latency_range)
2205 pa_log_info("Disabling latency range changes on underrun");
2206 }
2207
2208 if (is_iec958(u) || is_hdmi(u))
2209 set_formats = TRUE;
2210
2211 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
2212 if (!u->rates) {
2213 pa_log_error("Failed to find any supported sample rates.");
2214 goto fail;
2215 }
2216
2217 /* ALSA might tweak the sample spec, so recalculate the frame size */
2218 frame_size = pa_frame_size(&ss);
2219
2220 if (!u->ucm_context)
2221 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2222
2223 pa_sink_new_data_init(&data);
2224 data.driver = driver;
2225 data.module = m;
2226 data.card = card;
2227 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2228
2229 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2230 * variable instead of using &data.namereg_fail directly, because
2231 * data.namereg_fail is a bitfield and taking the address of a bitfield
2232 * variable is impossible. */
2233 namereg_fail = data.namereg_fail;
2234 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2235 pa_log("Failed to parse namereg_fail argument.");
2236 pa_sink_new_data_done(&data);
2237 goto fail;
2238 }
2239 data.namereg_fail = namereg_fail;
2240
2241 pa_sink_new_data_set_sample_spec(&data, &ss);
2242 pa_sink_new_data_set_channel_map(&data, &map);
2243 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2244
2245 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2246 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2247 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2248 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2249 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2250
2251 if (mapping) {
2252 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2253 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2254
2255 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2256 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2257 }
2258
2259 pa_alsa_init_description(data.proplist);
2260
2261 if (u->control_device)
2262 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2263
2264 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2265 pa_log("Invalid properties");
2266 pa_sink_new_data_done(&data);
2267 goto fail;
2268 }
2269
2270 if (u->ucm_context)
2271 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, TRUE, card);
2272 else if (u->mixer_path_set)
2273 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2274
2275 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2276 (set_formats ? PA_SINK_SET_FORMATS : 0));
2277 pa_sink_new_data_done(&data);
2278
2279 if (!u->sink) {
2280 pa_log("Failed to create sink object");
2281 goto fail;
2282 }
2283
2284 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2285 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2286 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2287 goto fail;
2288 }
2289
2290 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2291 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2292 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2293 goto fail;
2294 }
2295
2296 u->sink->parent.process_msg = sink_process_msg;
2297 if (u->use_tsched)
2298 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2299 u->sink->set_state = sink_set_state_cb;
2300 if (u->ucm_context)
2301 u->sink->set_port = sink_set_port_ucm_cb;
2302 else
2303 u->sink->set_port = sink_set_port_cb;
2304 if (u->sink->alternate_sample_rate)
2305 u->sink->update_rate = sink_update_rate_cb;
2306 u->sink->userdata = u;
2307
2308 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2309 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2310
2311 u->frame_size = frame_size;
2312 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2313 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2314 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2315
2316 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2317 (double) u->hwbuf_size / (double) u->fragment_size,
2318 (long unsigned) u->fragment_size,
2319 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2320 (long unsigned) u->hwbuf_size,
2321 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2322
2323 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2324 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2325 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2326 else {
2327 pa_log_info("Disabling rewind for device %s", u->device_name);
2328 pa_sink_set_max_rewind(u->sink, 0);
2329 }
2330
2331 if (u->use_tsched) {
2332 u->tsched_watermark_ref = tsched_watermark;
2333 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2334 } else
2335 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2336
2337 reserve_update(u);
2338
2339 if (update_sw_params(u) < 0)
2340 goto fail;
2341
2342 if (u->ucm_context) {
2343 if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, TRUE) < 0)
2344 goto fail;
2345 } else if (setup_mixer(u, ignore_dB) < 0)
2346 goto fail;
2347
2348 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2349
2350 thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
2351 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2352 pa_log("Failed to create thread.");
2353 goto fail;
2354 }
2355 pa_xfree(thread_name);
2356 thread_name = NULL;
2357
2358 /* Get initial mixer settings */
2359 if (data.volume_is_set) {
2360 if (u->sink->set_volume)
2361 u->sink->set_volume(u->sink);
2362 } else {
2363 if (u->sink->get_volume)
2364 u->sink->get_volume(u->sink);
2365 }
2366
2367 if (data.muted_is_set) {
2368 if (u->sink->set_mute)
2369 u->sink->set_mute(u->sink);
2370 } else {
2371 if (u->sink->get_mute)
2372 u->sink->get_mute(u->sink);
2373 }
2374
2375 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2376 u->sink->write_volume(u->sink);
2377
2378 if (set_formats) {
2379 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2380 pa_format_info *format;
2381
2382 /* To start with, we only support PCM formats. Other formats may be added
2383 * with pa_sink_set_formats().*/
2384 format = pa_format_info_new();
2385 format->encoding = PA_ENCODING_PCM;
2386 u->formats = pa_idxset_new(NULL, NULL);
2387 pa_idxset_put(u->formats, format, NULL);
2388
2389 u->sink->get_formats = sink_get_formats;
2390 u->sink->set_formats = sink_set_formats;
2391 }
2392
2393 pa_sink_put(u->sink);
2394
2395 if (profile_set)
2396 pa_alsa_profile_set_free(profile_set);
2397
2398 return u->sink;
2399
2400 fail:
2401 pa_xfree(thread_name);
2402
2403 if (u)
2404 userdata_free(u);
2405
2406 if (profile_set)
2407 pa_alsa_profile_set_free(profile_set);
2408
2409 return NULL;
2410 }
2411
2412 static void userdata_free(struct userdata *u) {
2413 pa_assert(u);
2414
2415 if (u->sink)
2416 pa_sink_unlink(u->sink);
2417
2418 if (u->thread) {
2419 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2420 pa_thread_free(u->thread);
2421 }
2422
2423 pa_thread_mq_done(&u->thread_mq);
2424
2425 if (u->sink)
2426 pa_sink_unref(u->sink);
2427
2428 if (u->memchunk.memblock)
2429 pa_memblock_unref(u->memchunk.memblock);
2430
2431 if (u->mixer_pd)
2432 pa_alsa_mixer_pdata_free(u->mixer_pd);
2433
2434 if (u->alsa_rtpoll_item)
2435 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2436
2437 if (u->rtpoll)
2438 pa_rtpoll_free(u->rtpoll);
2439
2440 if (u->pcm_handle) {
2441 snd_pcm_drop(u->pcm_handle);
2442 snd_pcm_close(u->pcm_handle);
2443 }
2444
2445 if (u->mixer_fdl)
2446 pa_alsa_fdlist_free(u->mixer_fdl);
2447
2448 if (u->mixer_path && !u->mixer_path_set)
2449 pa_alsa_path_free(u->mixer_path);
2450
2451 if (u->mixer_handle)
2452 snd_mixer_close(u->mixer_handle);
2453
2454 if (u->smoother)
2455 pa_smoother_free(u->smoother);
2456
2457 if (u->formats)
2458 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2459
2460 if (u->rates)
2461 pa_xfree(u->rates);
2462
2463 reserve_done(u);
2464 monitor_done(u);
2465
2466 pa_xfree(u->device_name);
2467 pa_xfree(u->control_device);
2468 pa_xfree(u->paths_dir);
2469 pa_xfree(u);
2470 }
2471
2472 void pa_alsa_sink_free(pa_sink *s) {
2473 struct userdata *u;
2474
2475 pa_sink_assert_ref(s);
2476 pa_assert_se(u = s->userdata);
2477
2478 userdata_free(u);
2479 }