src/modules/alsa/alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
34 #endif
35
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/volume.h>
39 #include <pulse/xmalloc.h>
40 #include <pulse/internal.h>
41
42 #include <pulsecore/core.h>
43 #include <pulsecore/i18n.h>
44 #include <pulsecore/module.h>
45 #include <pulsecore/memchunk.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/modargs.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/sample-util.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/macro.h>
53 #include <pulsecore/thread.h>
54 #include <pulsecore/thread-mq.h>
55 #include <pulsecore/rtpoll.h>
56 #include <pulsecore/time-smoother.h>
57
58 #include <modules/reserve-wrap.h>
59
60 #include "alsa-util.h"
61 #include "alsa-sink.h"
62
63 /* #define DEBUG_TIMING */
64
65 #define DEFAULT_DEVICE "default"
66
67 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
68 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69
70 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
71 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
72 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to recheck whether things are good again */
73 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
74 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75
76 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
77 * will increase the watermark only if we hit a real underrun. */
78
79 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
80 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
81
82 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
83 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84
85 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
86 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
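/* Note: update_smoother() below starts at SMOOTHER_MIN_INTERVAL and doubles
 * the update interval on every update until it saturates at
 * SMOOTHER_MAX_INTERVAL, so device timing is sampled densely right after a
 * resume and only rarely in steady state. */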
87
88 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the differences get greater than this level */
89
90 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
91 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
92
93 struct userdata {
94 pa_core *core;
95 pa_module *module;
96 pa_sink *sink;
97
98 pa_thread *thread;
99 pa_thread_mq thread_mq;
100 pa_rtpoll *rtpoll;
101
102 snd_pcm_t *pcm_handle;
103
104 char *paths_dir;
105 pa_alsa_fdlist *mixer_fdl;
106 pa_alsa_mixer_pdata *mixer_pd;
107 snd_mixer_t *mixer_handle;
108 pa_alsa_path_set *mixer_path_set;
109 pa_alsa_path *mixer_path;
110
111 pa_cvolume hardware_volume;
112
113 unsigned int *rates;
114
115 size_t
116 frame_size,
117 fragment_size,
118 hwbuf_size,
119 tsched_watermark,
120 tsched_watermark_ref,
121 hwbuf_unused,
122 min_sleep,
123 min_wakeup,
124 watermark_inc_step,
125 watermark_dec_step,
126 watermark_inc_threshold,
127 watermark_dec_threshold,
128 rewind_safeguard;
129
130 pa_usec_t watermark_dec_not_before;
131 pa_usec_t min_latency_ref;
132
133 pa_memchunk memchunk;
134
135 char *device_name; /* name of the PCM device */
136 char *control_device; /* name of the control device */
137
138 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
139
140 pa_bool_t first, after_rewind;
141
142 pa_rtpoll_item *alsa_rtpoll_item;
143
144 pa_smoother *smoother;
145 uint64_t write_count;
146 uint64_t since_start;
147 pa_usec_t smoother_interval;
148 pa_usec_t last_smoother_update;
149
150 pa_idxset *formats;
151
152 pa_reserve_wrapper *reserve;
153 pa_hook_slot *reserve_slot;
154 pa_reserve_monitor_wrapper *monitor;
155 pa_hook_slot *monitor_slot;
156
157 /* ucm context */
158 pa_alsa_ucm_mapping_context *ucm_context;
159 };
160
161 static void userdata_free(struct userdata *u);
162
163 /* FIXME: Is there a better way to do this than device names? */
164 static pa_bool_t is_iec958(struct userdata *u) {
165 return (strncmp("iec958", u->device_name, 6) == 0);
166 }
167
168 static pa_bool_t is_hdmi(struct userdata *u) {
169 return (strncmp("hdmi", u->device_name, 4) == 0);
170 }
171
172 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
173 pa_assert(r);
174 pa_assert(u);
175
176 pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);
177
178 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
179 return PA_HOOK_CANCEL;
180
181 return PA_HOOK_OK;
182 }
183
184 static void reserve_done(struct userdata *u) {
185 pa_assert(u);
186
187 if (u->reserve_slot) {
188 pa_hook_slot_free(u->reserve_slot);
189 u->reserve_slot = NULL;
190 }
191
192 if (u->reserve) {
193 pa_reserve_wrapper_unref(u->reserve);
194 u->reserve = NULL;
195 }
196 }
197
198 static void reserve_update(struct userdata *u) {
199 const char *description;
200 pa_assert(u);
201
202 if (!u->sink || !u->reserve)
203 return;
204
205 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
206 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
207 }
208
209 static int reserve_init(struct userdata *u, const char *dname) {
210 char *rname;
211
212 pa_assert(u);
213 pa_assert(dname);
214
215 if (u->reserve)
216 return 0;
217
218 if (pa_in_system_mode())
219 return 0;
220
221 if (!(rname = pa_alsa_get_reserve_name(dname)))
222 return 0;
223
224 /* We are resuming, try to lock the device */
225 u->reserve = pa_reserve_wrapper_get(u->core, rname);
226 pa_xfree(rname);
227
228 if (!(u->reserve))
229 return -1;
230
231 reserve_update(u);
232
233 pa_assert(!u->reserve_slot);
234 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
235
236 return 0;
237 }
238
239 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
240 pa_assert(w);
241 pa_assert(u);
242
243 if (PA_PTR_TO_UINT(busy) && !u->reserve) {
244 pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
245 pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
246 } else {
247 pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
248 pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
249 }
250
251 return PA_HOOK_OK;
252 }
253
254 static void monitor_done(struct userdata *u) {
255 pa_assert(u);
256
257 if (u->monitor_slot) {
258 pa_hook_slot_free(u->monitor_slot);
259 u->monitor_slot = NULL;
260 }
261
262 if (u->monitor) {
263 pa_reserve_monitor_wrapper_unref(u->monitor);
264 u->monitor = NULL;
265 }
266 }
267
268 static int reserve_monitor_init(struct userdata *u, const char *dname) {
269 char *rname;
270
271 pa_assert(u);
272 pa_assert(dname);
273
274 if (pa_in_system_mode())
275 return 0;
276
277 if (!(rname = pa_alsa_get_reserve_name(dname)))
278 return 0;
279
280 /* We are resuming, try to lock the device */
281 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
282 pa_xfree(rname);
283
284 if (!(u->monitor))
285 return -1;
286
287 pa_assert(!u->monitor_slot);
288 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
289
290 return 0;
291 }
292
293 static void fix_min_sleep_wakeup(struct userdata *u) {
294 size_t max_use, max_use_2;
295
296 pa_assert(u);
297 pa_assert(u->use_tsched);
298
299 max_use = u->hwbuf_size - u->hwbuf_unused;
300 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
301
302 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
303 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
304
305 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
306 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
307 }
308
309 static void fix_tsched_watermark(struct userdata *u) {
310 size_t max_use;
311 pa_assert(u);
312 pa_assert(u->use_tsched);
313
314 max_use = u->hwbuf_size - u->hwbuf_unused;
315
316 if (u->tsched_watermark > max_use - u->min_sleep)
317 u->tsched_watermark = max_use - u->min_sleep;
318
319 if (u->tsched_watermark < u->min_wakeup)
320 u->tsched_watermark = u->min_wakeup;
321 }
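/* Taken together, fix_min_sleep_wakeup() and fix_tsched_watermark() keep the
 * watermark clamped to the range [min_wakeup, max_use - min_sleep], where
 * max_use is the part of the hardware buffer that is actually in use. */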
322
323 static void increase_watermark(struct userdata *u) {
324 size_t old_watermark;
325 pa_usec_t old_min_latency, new_min_latency;
326
327 pa_assert(u);
328 pa_assert(u->use_tsched);
329
330 /* First, just try to increase the watermark */
331 old_watermark = u->tsched_watermark;
332 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
333 fix_tsched_watermark(u);
334
335 if (old_watermark != u->tsched_watermark) {
336 pa_log_info("Increasing wakeup watermark to %0.2f ms",
337 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
338 return;
339 }
340
341 /* Hmm, we cannot increase the watermark any further, hence let's
342 raise the latency, unless doing so was disabled in
343 configuration */
344 if (u->fixed_latency_range)
345 return;
346
347 old_min_latency = u->sink->thread_info.min_latency;
348 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
349 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
350
351 if (old_min_latency != new_min_latency) {
352 pa_log_info("Increasing minimal latency to %0.2f ms",
353 (double) new_min_latency / PA_USEC_PER_MSEC);
354
355 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
356 }
357
358 /* When we reach this we're officially fucked! */
359 }
360
361 static void decrease_watermark(struct userdata *u) {
362 size_t old_watermark;
363 pa_usec_t now;
364
365 pa_assert(u);
366 pa_assert(u->use_tsched);
367
368 now = pa_rtclock_now();
369
370 if (u->watermark_dec_not_before <= 0)
371 goto restart;
372
373 if (u->watermark_dec_not_before > now)
374 return;
375
376 old_watermark = u->tsched_watermark;
377
378 if (u->tsched_watermark < u->watermark_dec_step)
379 u->tsched_watermark = u->tsched_watermark / 2;
380 else
381 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
382
383 fix_tsched_watermark(u);
384
385 if (old_watermark != u->tsched_watermark)
386 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
387 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
388
389 /* We don't change the latency range */
390
391 restart:
392 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
393 }
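/* The adjustment is deliberately asymmetric: increase_watermark() applies
 * min(x2, +watermark_inc_step) immediately when we get into trouble, while
 * decrease_watermark() applies max(x0.5, -watermark_dec_step) and only after
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC of good behaviour. With the default
 * steps above, a 20ms watermark jumps to 30ms on an underrun (20+10 < 2*20)
 * but shrinks back only in 5ms decrements spaced 20s apart. */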
394
395 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
396 pa_usec_t usec, wm;
397
398 pa_assert(sleep_usec);
399 pa_assert(process_usec);
400
401 pa_assert(u);
402 pa_assert(u->use_tsched);
403
404 usec = pa_sink_get_requested_latency_within_thread(u->sink);
405
406 if (usec == (pa_usec_t) -1)
407 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
408
409 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
410
411 if (wm > usec)
412 wm = usec/2;
413
414 *sleep_usec = usec - wm;
415 *process_usec = wm;
416
417 #ifdef DEBUG_TIMING
418 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
419 (unsigned long) (usec / PA_USEC_PER_MSEC),
420 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
421 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
422 #endif
423 }
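/* Illustrative numbers using the defaults above: for a 2s requested latency
 * and a 20ms watermark this yields *sleep_usec = 1.98s and *process_usec =
 * 20ms, i.e. we sleep until only the watermark is left in the buffer and
 * reserve the watermark's worth of time for refilling it. */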
424
425 static int try_recover(struct userdata *u, const char *call, int err) {
426 pa_assert(u);
427 pa_assert(call);
428 pa_assert(err < 0);
429
430 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
431
432 pa_assert(err != -EAGAIN);
433
434 if (err == -EPIPE)
435 pa_log_debug("%s: Buffer underrun!", call);
436
437 if (err == -ESTRPIPE)
438 pa_log_debug("%s: System suspended!", call);
439
440 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
441 pa_log("%s: %s", call, pa_alsa_strerror(err));
442 return -1;
443 }
444
445 u->first = TRUE;
446 u->since_start = 0;
447 return 0;
448 }
449
450 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
451 size_t left_to_play;
452 pa_bool_t underrun = FALSE;
453
454 /* We use <= instead of < for this check here because an underrun
455 * only happens after the last sample was processed, not as soon as
456 * it is removed from the buffer. This is particularly important
457 * when block transfer is used. */
458
459 if (n_bytes <= u->hwbuf_size)
460 left_to_play = u->hwbuf_size - n_bytes;
461 else {
462
463 /* We got a dropout. What a mess! */
464 left_to_play = 0;
465 underrun = TRUE;
466
467 #if 0
468 PA_DEBUG_TRAP;
469 #endif
470
471 if (!u->first && !u->after_rewind)
472 if (pa_log_ratelimit(PA_LOG_INFO))
473 pa_log_info("Underrun!");
474 }
475
476 #ifdef DEBUG_TIMING
477 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
478 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
479 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
480 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
481 #endif
482
483 if (u->use_tsched) {
484 pa_bool_t reset_not_before = TRUE;
485
486 if (!u->first && !u->after_rewind) {
487 if (underrun || left_to_play < u->watermark_inc_threshold)
488 increase_watermark(u);
489 else if (left_to_play > u->watermark_dec_threshold) {
490 reset_not_before = FALSE;
491
492 /* We decrease the watermark only if we have actually
493 * been woken up by a timeout. If something else woke
494 * us up it's too easy to fulfill the deadlines... */
495
496 if (on_timeout)
497 decrease_watermark(u);
498 }
499 }
500
501 if (reset_not_before)
502 u->watermark_dec_not_before = 0;
503 }
504
505 return left_to_play;
506 }
507
508 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
509 pa_bool_t work_done = FALSE;
510 pa_usec_t max_sleep_usec = 0, process_usec = 0;
511 size_t left_to_play, input_underrun;
512 unsigned j = 0;
513
514 pa_assert(u);
515 pa_sink_assert_ref(u->sink);
516
517 if (u->use_tsched)
518 hw_sleep_time(u, &max_sleep_usec, &process_usec);
519
520 for (;;) {
521 snd_pcm_sframes_t n;
522 size_t n_bytes;
523 int r;
524 pa_bool_t after_avail = TRUE;
525
526 /* First we determine how many samples are missing to fill the
527 * buffer up to 100% */
528
529 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
530
531 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
532 continue;
533
534 return r;
535 }
536
537 n_bytes = (size_t) n * u->frame_size;
538
539 #ifdef DEBUG_TIMING
540 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
541 #endif
542
543 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
544 on_timeout = FALSE;
545
546 if (u->use_tsched)
547
548 /* We won't fill up the playback buffer before at least
549 * half the sleep time is over because otherwise we might
550 * ask for more data from the clients than they expect. We
551 * need to guarantee that clients only have to keep around
552 * a single hw buffer length. */
553
554 if (!polled &&
555 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
556 #ifdef DEBUG_TIMING
557 pa_log_debug("Not filling up, because too early.");
558 #endif
559 break;
560 }
561
562 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
563
564 if (polled)
565 PA_ONCE_BEGIN {
566 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
567 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
568 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
569 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
570 pa_strnull(dn));
571 pa_xfree(dn);
572 } PA_ONCE_END;
573
574 #ifdef DEBUG_TIMING
575 pa_log_debug("Not filling up, because not necessary.");
576 #endif
577 break;
578 }
579
580
581 if (++j > 10) {
582 #ifdef DEBUG_TIMING
583 pa_log_debug("Not filling up, because already too many iterations.");
584 #endif
585
586 break;
587 }
588
589 n_bytes -= u->hwbuf_unused;
590 polled = FALSE;
591
592 #ifdef DEBUG_TIMING
593 pa_log_debug("Filling up");
594 #endif
595
596 for (;;) {
597 pa_memchunk chunk;
598 void *p;
599 int err;
600 const snd_pcm_channel_area_t *areas;
601 snd_pcm_uframes_t offset, frames;
602 snd_pcm_sframes_t sframes;
603 size_t written;
604
605 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
606 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
607
608 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
609
610 if (!after_avail && err == -EAGAIN)
611 break;
612
613 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
614 continue;
615
616 return r;
617 }
618
619 /* Make sure that if these memblocks need to be copied they will fit into one slot */
620 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
621 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
622
623 if (!after_avail && frames == 0)
624 break;
625
626 pa_assert(frames > 0);
627 after_avail = FALSE;
628
629 /* Check these are multiples of 8 bit */
630 pa_assert((areas[0].first & 7) == 0);
631 pa_assert((areas[0].step & 7) == 0);
632
633 /* We assume a single interleaved memory buffer */
634 pa_assert((areas[0].first >> 3) == 0);
635 pa_assert((areas[0].step >> 3) == u->frame_size);
636
637 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
638
639 written = frames * u->frame_size;
640 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, TRUE);
641 chunk.length = pa_memblock_get_length(chunk.memblock);
642 chunk.index = 0;
643
644 pa_sink_render_into_full(u->sink, &chunk);
645 pa_memblock_unref_fixed(chunk.memblock);
646
647 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
648
649 if (!after_avail && (int) sframes == -EAGAIN)
650 break;
651
652 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
653 continue;
654
655 return r;
656 }
657
658 work_done = TRUE;
659
660 u->write_count += written;
661 u->since_start += written;
662
663 #ifdef DEBUG_TIMING
664 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
665 #endif
666
667 if (written >= n_bytes)
668 break;
669
670 n_bytes -= written;
671 }
672 }
673
674 input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
675
676 if (u->use_tsched) {
677 pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
678
679 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
680 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
681
682 if (*sleep_usec > process_usec)
683 *sleep_usec -= process_usec;
684 else
685 *sleep_usec = 0;
686
687 *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
688 } else
689 *sleep_usec = 0;
690
691 return work_done ? 1 : 0;
692 }
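/* unix_write() below is the non-mmap twin of mmap_write(): same control flow
 * and watermark handling, but the data is rendered into u->memchunk and
 * handed over with snd_pcm_writei() instead of being rendered directly into
 * the mmap'ed hardware buffer. */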
693
694 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
695 pa_bool_t work_done = FALSE;
696 pa_usec_t max_sleep_usec = 0, process_usec = 0;
697 size_t left_to_play, input_underrun;
698 unsigned j = 0;
699
700 pa_assert(u);
701 pa_sink_assert_ref(u->sink);
702
703 if (u->use_tsched)
704 hw_sleep_time(u, &max_sleep_usec, &process_usec);
705
706 for (;;) {
707 snd_pcm_sframes_t n;
708 size_t n_bytes;
709 int r;
710 pa_bool_t after_avail = TRUE;
711
712 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
713
714 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
715 continue;
716
717 return r;
718 }
719
720 n_bytes = (size_t) n * u->frame_size;
721
722
723 #ifdef DEBUG_TIMING
724 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
725 #endif
726
727 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
728 on_timeout = FALSE;
729
730 if (u->use_tsched)
731
732 /* We won't fill up the playback buffer before at least
733 * half the sleep time is over because otherwise we might
734 * ask for more data from the clients than they expect. We
735 * need to guarantee that clients only have to keep around
736 * a single hw buffer length. */
737
738 if (!polled &&
739 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
740 break;
741
742 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
743
744 if (polled)
745 PA_ONCE_BEGIN {
746 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
747 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
748 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
749 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
750 pa_strnull(dn));
751 pa_xfree(dn);
752 } PA_ONCE_END;
753
754 break;
755 }
756
757 if (++j > 10) {
758 #ifdef DEBUG_TIMING
759 pa_log_debug("Not filling up, because already too many iterations.");
760 #endif
761
762 break;
763 }
764
765 n_bytes -= u->hwbuf_unused;
766 polled = FALSE;
767
768 for (;;) {
769 snd_pcm_sframes_t frames;
770 void *p;
771 size_t written;
772
773 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
774
775 if (u->memchunk.length <= 0)
776 pa_sink_render(u->sink, n_bytes, &u->memchunk);
777
778 pa_assert(u->memchunk.length > 0);
779
780 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
781
782 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
783 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
784
785 p = pa_memblock_acquire(u->memchunk.memblock);
786 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
787 pa_memblock_release(u->memchunk.memblock);
788
789 if (PA_UNLIKELY(frames < 0)) {
790
791 if (!after_avail && (int) frames == -EAGAIN)
792 break;
793
794 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
795 continue;
796
797 return r;
798 }
799
800 if (!after_avail && frames == 0)
801 break;
802
803 pa_assert(frames > 0);
804 after_avail = FALSE;
805
806 written = frames * u->frame_size;
807 u->memchunk.index += written;
808 u->memchunk.length -= written;
809
810 if (u->memchunk.length <= 0) {
811 pa_memblock_unref(u->memchunk.memblock);
812 pa_memchunk_reset(&u->memchunk);
813 }
814
815 work_done = TRUE;
816
817 u->write_count += written;
818 u->since_start += written;
819
820 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
821
822 if (written >= n_bytes)
823 break;
824
825 n_bytes -= written;
826 }
827 }
828
829 input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
830
831 if (u->use_tsched) {
832 pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
833
834 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
835 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
836
837 if (*sleep_usec > process_usec)
838 *sleep_usec -= process_usec;
839 else
840 *sleep_usec = 0;
841
842 *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
843 } else
844 *sleep_usec = 0;
845
846 return work_done ? 1 : 0;
847 }
848
849 static void update_smoother(struct userdata *u) {
850 snd_pcm_sframes_t delay = 0;
851 int64_t position;
852 int err;
853 pa_usec_t now1 = 0, now2;
854 snd_pcm_status_t *status;
855 snd_htimestamp_t htstamp = { 0, 0 };
856
857 snd_pcm_status_alloca(&status);
858
859 pa_assert(u);
860 pa_assert(u->pcm_handle);
861
862 /* Let's update the time smoother */
863
864 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
865 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
866 return;
867 }
868
869 snd_pcm_status_get_htstamp(status, &htstamp);
870 now1 = pa_timespec_load(&htstamp);
871
872 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
873 if (now1 <= 0)
874 now1 = pa_rtclock_now();
875
876 /* check if the time since the last update is bigger than the interval */
877 if (u->last_smoother_update > 0)
878 if (u->last_smoother_update + u->smoother_interval > now1)
879 return;
880
881 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
882
883 if (PA_UNLIKELY(position < 0))
884 position = 0;
885
886 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
887
888 pa_smoother_put(u->smoother, now1, now2);
889
890 u->last_smoother_update = now1;
891 /* exponentially increase the update interval up to the MAX limit */
892 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
893 }
894
895 static pa_usec_t sink_get_latency(struct userdata *u) {
896 pa_usec_t r;
897 int64_t delay;
898 pa_usec_t now1, now2;
899
900 pa_assert(u);
901
902 now1 = pa_rtclock_now();
903 now2 = pa_smoother_get(u->smoother, now1);
904
905 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
906
907 r = delay >= 0 ? (pa_usec_t) delay : 0;
908
909 if (u->memchunk.memblock)
910 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
911
912 return r;
913 }
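/* The latency reported here is (bytes handed to ALSA) minus (bytes played
 * according to the smoother), converted to time, plus whatever still sits in
 * u->memchunk and hasn't been written to the device at all yet. */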
914
915 static int build_pollfd(struct userdata *u) {
916 pa_assert(u);
917 pa_assert(u->pcm_handle);
918
919 if (u->alsa_rtpoll_item)
920 pa_rtpoll_item_free(u->alsa_rtpoll_item);
921
922 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
923 return -1;
924
925 return 0;
926 }
927
928 /* Called from IO context */
929 static int suspend(struct userdata *u) {
930 pa_assert(u);
931 pa_assert(u->pcm_handle);
932
933 pa_smoother_pause(u->smoother, pa_rtclock_now());
934
935 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
936 * take awfully long with our long buffer sizes today. */
937 snd_pcm_close(u->pcm_handle);
938 u->pcm_handle = NULL;
939
940 if (u->alsa_rtpoll_item) {
941 pa_rtpoll_item_free(u->alsa_rtpoll_item);
942 u->alsa_rtpoll_item = NULL;
943 }
944
945 /* We reset max_rewind/max_request here to make sure that while we
946 * are suspended the old max_request/max_rewind values set before
947 * the suspend can influence the per-stream buffer of newly
948 * created streams, without their requirements having any
949 * influence on them. */
950 pa_sink_set_max_rewind_within_thread(u->sink, 0);
951 pa_sink_set_max_request_within_thread(u->sink, 0);
952
953 pa_log_info("Device suspended...");
954
955 return 0;
956 }
957
958 /* Called from IO context */
959 static int update_sw_params(struct userdata *u) {
960 snd_pcm_uframes_t avail_min;
961 int err;
962
963 pa_assert(u);
964
965 /* Use the full buffer if no one asked us for anything specific */
966 u->hwbuf_unused = 0;
967
968 if (u->use_tsched) {
969 pa_usec_t latency;
970
971 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
972 size_t b;
973
974 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
975
976 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
977
978 /* We need at least one sample in our buffer */
979
980 if (PA_UNLIKELY(b < u->frame_size))
981 b = u->frame_size;
982
983 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
984 }
985
986 fix_min_sleep_wakeup(u);
987 fix_tsched_watermark(u);
988 }
989
990 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
991
992 /* We need at least one frame in the used part of the buffer */
993 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
994
995 if (u->use_tsched) {
996 pa_usec_t sleep_usec, process_usec;
997
998 hw_sleep_time(u, &sleep_usec, &process_usec);
999 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
1000 }
1001
1002 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
1003
1004 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
1005 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
1006 return err;
1007 }
1008
1009 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
1010 if (pa_alsa_pcm_is_hw(u->pcm_handle))
1011 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
1012 else {
1013 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
1014 pa_sink_set_max_rewind_within_thread(u->sink, 0);
1015 }
1016
1017 return 0;
1018 }
1019
1020 /* Called from IO Context on unsuspend or from main thread when creating sink */
1021 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
1022 pa_bool_t in_thread)
1023 {
1024 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
1025 &u->sink->sample_spec);
1026
1027 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1028 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1029
1030 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1031 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1032
1033 fix_min_sleep_wakeup(u);
1034 fix_tsched_watermark(u);
1035
1036 if (in_thread)
1037 pa_sink_set_latency_range_within_thread(u->sink,
1038 u->min_latency_ref,
1039 pa_bytes_to_usec(u->hwbuf_size, ss));
1040 else {
1041 pa_sink_set_latency_range(u->sink,
1042 0,
1043 pa_bytes_to_usec(u->hwbuf_size, ss));
1044
1045 /* work around an assert in pa_sink_set_latency_within_thread():
1046 keep track of min_latency and reuse it when
1047 this routine is called from the IO context */
1048 u->min_latency_ref = u->sink->thread_info.min_latency;
1049 }
1050
1051 pa_log_info("Time scheduling watermark is %0.2fms",
1052 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
1053 }
1054
1055 /* Called from IO context */
1056 static int unsuspend(struct userdata *u) {
1057 pa_sample_spec ss;
1058 int err;
1059 pa_bool_t b, d;
1060 snd_pcm_uframes_t period_size, buffer_size;
1061 char *device_name = NULL;
1062
1063 pa_assert(u);
1064 pa_assert(!u->pcm_handle);
1065
1066 pa_log_info("Trying resume...");
1067
1068 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1069 /* Need to open device in NONAUDIO mode */
1070 int len = strlen(u->device_name) + 8;
1071
1072 device_name = pa_xmalloc(len);
1073 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1074 }
1075
1076 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1077 SND_PCM_NONBLOCK|
1078 SND_PCM_NO_AUTO_RESAMPLE|
1079 SND_PCM_NO_AUTO_CHANNELS|
1080 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1081 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1082 goto fail;
1083 }
1084
1085 ss = u->sink->sample_spec;
1086 period_size = u->fragment_size / u->frame_size;
1087 buffer_size = u->hwbuf_size / u->frame_size;
1088 b = u->use_mmap;
1089 d = u->use_tsched;
1090
1091 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1092 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1093 goto fail;
1094 }
1095
1096 if (b != u->use_mmap || d != u->use_tsched) {
1097 pa_log_warn("Resume failed, couldn't get original access mode.");
1098 goto fail;
1099 }
1100
1101 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1102 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1103 goto fail;
1104 }
1105
1106 if (period_size*u->frame_size != u->fragment_size ||
1107 buffer_size*u->frame_size != u->hwbuf_size) {
1108 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1109 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1110 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1111 goto fail;
1112 }
1113
1114 if (update_sw_params(u) < 0)
1115 goto fail;
1116
1117 if (build_pollfd(u) < 0)
1118 goto fail;
1119
1120 u->write_count = 0;
1121 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1122 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1123 u->last_smoother_update = 0;
1124
1125 u->first = TRUE;
1126 u->since_start = 0;
1127
1128 /* reset the watermark to the value defined when sink was created */
1129 if (u->use_tsched)
1130 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
1131
1132 pa_log_info("Resumed successfully...");
1133
1134 pa_xfree(device_name);
1135 return 0;
1136
1137 fail:
1138 if (u->pcm_handle) {
1139 snd_pcm_close(u->pcm_handle);
1140 u->pcm_handle = NULL;
1141 }
1142
1143 pa_xfree(device_name);
1144
1145 return -PA_ERR_IO;
1146 }
1147
1148 /* Called from IO context */
1149 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1150 struct userdata *u = PA_SINK(o)->userdata;
1151
1152 switch (code) {
1153
1154 case PA_SINK_MESSAGE_GET_LATENCY: {
1155 pa_usec_t r = 0;
1156
1157 if (u->pcm_handle)
1158 r = sink_get_latency(u);
1159
1160 *((pa_usec_t*) data) = r;
1161
1162 return 0;
1163 }
1164
1165 case PA_SINK_MESSAGE_SET_STATE:
1166
1167 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1168
1169 case PA_SINK_SUSPENDED: {
1170 int r;
1171
1172 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1173
1174 if ((r = suspend(u)) < 0)
1175 return r;
1176
1177 break;
1178 }
1179
1180 case PA_SINK_IDLE:
1181 case PA_SINK_RUNNING: {
1182 int r;
1183
1184 if (u->sink->thread_info.state == PA_SINK_INIT) {
1185 if (build_pollfd(u) < 0)
1186 return -PA_ERR_IO;
1187 }
1188
1189 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1190 if ((r = unsuspend(u)) < 0)
1191 return r;
1192 }
1193
1194 break;
1195 }
1196
1197 case PA_SINK_UNLINKED:
1198 case PA_SINK_INIT:
1199 case PA_SINK_INVALID_STATE:
1200 ;
1201 }
1202
1203 break;
1204 }
1205
1206 return pa_sink_process_msg(o, code, data, offset, chunk);
1207 }
1208
1209 /* Called from main context */
1210 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1211 pa_sink_state_t old_state;
1212 struct userdata *u;
1213
1214 pa_sink_assert_ref(s);
1215 pa_assert_se(u = s->userdata);
1216
1217 old_state = pa_sink_get_state(u->sink);
1218
1219 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1220 reserve_done(u);
1221 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1222 if (reserve_init(u, u->device_name) < 0)
1223 return -PA_ERR_BUSY;
1224
1225 return 0;
1226 }
1227
1228 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1229 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1230
1231 pa_assert(u);
1232 pa_assert(u->mixer_handle);
1233
1234 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1235 return 0;
1236
1237 if (!PA_SINK_IS_LINKED(u->sink->state))
1238 return 0;
1239
1240 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1241 pa_sink_set_mixer_dirty(u->sink, TRUE);
1242 return 0;
1243 }
1244
1245 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1246 pa_sink_get_volume(u->sink, TRUE);
1247 pa_sink_get_mute(u->sink, TRUE);
1248 }
1249
1250 return 0;
1251 }
1252
1253 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1254 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1255
1256 pa_assert(u);
1257 pa_assert(u->mixer_handle);
1258
1259 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1260 return 0;
1261
1262 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1263 pa_sink_set_mixer_dirty(u->sink, TRUE);
1264 return 0;
1265 }
1266
1267 if (mask & SND_CTL_EVENT_MASK_VALUE)
1268 pa_sink_update_volume_and_mute(u->sink);
1269
1270 return 0;
1271 }
1272
1273 static void sink_get_volume_cb(pa_sink *s) {
1274 struct userdata *u = s->userdata;
1275 pa_cvolume r;
1276 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1277
1278 pa_assert(u);
1279 pa_assert(u->mixer_path);
1280 pa_assert(u->mixer_handle);
1281
1282 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1283 return;
1284
1285 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1286 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1287
1288 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1289
1290 if (u->mixer_path->has_dB) {
1291 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1292
1293 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1294 }
1295
1296 if (pa_cvolume_equal(&u->hardware_volume, &r))
1297 return;
1298
1299 s->real_volume = u->hardware_volume = r;
1300
1301 /* Hmm, so the hardware volume changed, let's reset our software volume */
1302 if (u->mixer_path->has_dB)
1303 pa_sink_set_soft_volume(s, NULL);
1304 }
1305
1306 static void sink_set_volume_cb(pa_sink *s) {
1307 struct userdata *u = s->userdata;
1308 pa_cvolume r;
1309 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1310 pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1311
1312 pa_assert(u);
1313 pa_assert(u->mixer_path);
1314 pa_assert(u->mixer_handle);
1315
1316 /* Shift up by the base volume */
1317 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1318
1319 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1320 return;
1321
1322 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1323 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1324
1325 u->hardware_volume = r;
1326
1327 if (u->mixer_path->has_dB) {
1328 pa_cvolume new_soft_volume;
1329 pa_bool_t accurate_enough;
1330 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1331
1332 /* Match exactly what the user requested by software */
1333 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1334
1335 /* If the adjustment to do in software is only minimal we
1336 * can skip it. That saves us CPU at the expense of a bit of
1337 * accuracy */
1338 accurate_enough =
1339 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1340 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1341
1342 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1343 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1344 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1345 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1346 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1347 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1348 pa_yes_no(accurate_enough));
1349 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1350
1351 if (!accurate_enough)
1352 s->soft_volume = new_soft_volume;
1353
1354 } else {
1355 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1356
1357 /* We can't match exactly what the user requested, hence let's
1358 * at least tell the user about it */
1359
1360 s->real_volume = r;
1361 }
1362 }
1363
1364 static void sink_write_volume_cb(pa_sink *s) {
1365 struct userdata *u = s->userdata;
1366 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1367
1368 pa_assert(u);
1369 pa_assert(u->mixer_path);
1370 pa_assert(u->mixer_handle);
1371 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1372
1373 /* Shift up by the base volume */
1374 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1375
1376 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1377 pa_log_error("Writing HW volume failed");
1378 else {
1379 pa_cvolume tmp_vol;
1380 pa_bool_t accurate_enough;
1381
1382 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1383 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1384
1385 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1386 accurate_enough =
1387 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1388 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1389
1390 if (!accurate_enough) {
1391 union {
1392 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1393 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1394 } vol;
1395
1396 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1397 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1398 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1399 pa_log_debug(" in dB: %s (request) != %s",
1400 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1401 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1402 }
1403 }
1404 }
1405
1406 static void sink_get_mute_cb(pa_sink *s) {
1407 struct userdata *u = s->userdata;
1408 pa_bool_t b;
1409
1410 pa_assert(u);
1411 pa_assert(u->mixer_path);
1412 pa_assert(u->mixer_handle);
1413
1414 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1415 return;
1416
1417 s->muted = b;
1418 }
1419
1420 static void sink_set_mute_cb(pa_sink *s) {
1421 struct userdata *u = s->userdata;
1422
1423 pa_assert(u);
1424 pa_assert(u->mixer_path);
1425 pa_assert(u->mixer_handle);
1426
1427 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1428 }
1429
1430 static void mixer_volume_init(struct userdata *u) {
1431 pa_assert(u);
1432
1433 if (!u->mixer_path->has_volume) {
1434 pa_sink_set_write_volume_callback(u->sink, NULL);
1435 pa_sink_set_get_volume_callback(u->sink, NULL);
1436 pa_sink_set_set_volume_callback(u->sink, NULL);
1437
1438 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1439 } else {
1440 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1441 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1442
1443 if (u->mixer_path->has_dB && u->deferred_volume) {
1444 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1445 pa_log_info("Successfully enabled deferred volume.");
1446 } else
1447 pa_sink_set_write_volume_callback(u->sink, NULL);
1448
1449 if (u->mixer_path->has_dB) {
1450 pa_sink_enable_decibel_volume(u->sink, TRUE);
1451 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1452
1453 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1454 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1455
1456 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1457 } else {
1458 pa_sink_enable_decibel_volume(u->sink, FALSE);
1459 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1460
1461 u->sink->base_volume = PA_VOLUME_NORM;
1462 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1463 }
1464
1465 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1466 }
1467
1468 if (!u->mixer_path->has_mute) {
1469 pa_sink_set_get_mute_callback(u->sink, NULL);
1470 pa_sink_set_set_mute_callback(u->sink, NULL);
1471 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1472 } else {
1473 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1474 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1475 pa_log_info("Using hardware mute control.");
1476 }
1477 }
1478
1479 static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
1480 struct userdata *u = s->userdata;
1481
1482 pa_assert(u);
1483 pa_assert(p);
1484 pa_assert(u->ucm_context);
1485
1486 return pa_alsa_ucm_set_port(u->ucm_context, p, TRUE);
1487 }
1488
1489 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1490 struct userdata *u = s->userdata;
1491 pa_alsa_port_data *data;
1492
1493 pa_assert(u);
1494 pa_assert(p);
1495 pa_assert(u->mixer_handle);
1496
1497 data = PA_DEVICE_PORT_DATA(p);
1498
1499 pa_assert_se(u->mixer_path = data->path);
1500 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1501
1502 mixer_volume_init(u);
1503
1504 if (s->set_mute)
1505 s->set_mute(s);
1506 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
1507 if (s->write_volume)
1508 s->write_volume(s);
1509 } else {
1510 if (s->set_volume)
1511 s->set_volume(s);
1512 }
1513
1514 return 0;
1515 }
1516
1517 static void sink_update_requested_latency_cb(pa_sink *s) {
1518 struct userdata *u = s->userdata;
1519 size_t before;
1520 pa_assert(u);
1521 pa_assert(u->use_tsched); /* only when timer scheduling is used
1522 * can we dynamically adjust the
1523 * latency */
1524
1525 if (!u->pcm_handle)
1526 return;
1527
1528 before = u->hwbuf_unused;
1529 update_sw_params(u);
1530
1531 /* Let's check whether we now use only a smaller part of the
1532 buffer than before. If so, we need to make sure that subsequent
1533 rewinds are relative to the new maximum fill level and not to the
1534 current fill level. Thus, let's do a full rewind once, to clear
1535 things up. */
1536
1537 if (u->hwbuf_unused > before) {
1538 pa_log_debug("Requesting rewind due to latency change.");
1539 pa_sink_request_rewind(s, (size_t) -1);
1540 }
1541 }
1542
1543 static pa_idxset* sink_get_formats(pa_sink *s) {
1544 struct userdata *u = s->userdata;
1545 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1546 pa_format_info *f;
1547 uint32_t idx;
1548
1549 pa_assert(u);
1550
1551 PA_IDXSET_FOREACH(f, u->formats, idx) {
1552 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1553 }
1554
1555 return ret;
1556 }
1557
1558 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1559 struct userdata *u = s->userdata;
1560 pa_format_info *f, *g;
1561 uint32_t idx, n;
1562
1563 pa_assert(u);
1564
1565 /* FIXME: also validate sample rates against what the device supports */
1566 PA_IDXSET_FOREACH(f, formats, idx) {
1567 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1568 /* EAC3 cannot be sent over S/PDIF */
1569 return FALSE;
1570 }
1571
1572 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
1573 u->formats = pa_idxset_new(NULL, NULL);
1574
1575 /* Note: the logic below won't apply if we're using software encoding.
1576 * This is fine for now since we don't support that via the passthrough
1577 * framework, but this must be changed if we do. */
1578
1579 /* Count how many sample rates we support */
1580 for (idx = 0, n = 0; u->rates[idx]; idx++)
1581 n++;
1582
1583 /* First insert non-PCM formats since we prefer those. */
1584 PA_IDXSET_FOREACH(f, formats, idx) {
1585 if (!pa_format_info_is_pcm(f)) {
1586 g = pa_format_info_copy(f);
1587 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1588 pa_idxset_put(u->formats, g, NULL);
1589 }
1590 }
1591
1592 /* Now add any PCM formats */
1593 PA_IDXSET_FOREACH(f, formats, idx) {
1594 if (pa_format_info_is_pcm(f)) {
1595 /* We don't set rates here since we'll just tack on a resampler for
1596 * unsupported rates */
1597 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1598 }
1599 }
1600
1601 return TRUE;
1602 }
1603
1604 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
1605 {
1606 struct userdata *u = s->userdata;
1607 int i;
1608 pa_bool_t supported = FALSE;
1609
1610 pa_assert(u);
1611
1612 for (i = 0; u->rates[i]; i++) {
1613 if (u->rates[i] == rate) {
1614 supported = TRUE;
1615 break;
1616 }
1617 }
1618
1619 if (!supported) {
1620 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1621 return FALSE;
1622 }
1623
1624 if (!PA_SINK_IS_OPENED(s->state)) {
1625 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1626 u->sink->sample_spec.rate = rate;
1627 return TRUE;
1628 }
1629
1630 return FALSE;
1631 }
1632
1633 static int process_rewind(struct userdata *u) {
1634 snd_pcm_sframes_t unused;
1635 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1636 pa_assert(u);
1637
1638 if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1639 pa_sink_process_rewind(u->sink, 0);
1640 return 0;
1641 }
1642
1643 /* Figure out how much we shall rewind and reset the counter */
1644 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1645
1646 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1647
1648 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1649 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1650 return -1;
1651 }
1652
1653 unused_nbytes = (size_t) unused * u->frame_size;
1654
1655 /* make sure the rewind doesn't go too far, since that can cause issues with DMAs */
1656 unused_nbytes += u->rewind_safeguard;
1657
1658 if (u->hwbuf_size > unused_nbytes)
1659 limit_nbytes = u->hwbuf_size - unused_nbytes;
1660 else
1661 limit_nbytes = 0;
1662
1663 if (rewind_nbytes > limit_nbytes)
1664 rewind_nbytes = limit_nbytes;
1665
1666 if (rewind_nbytes > 0) {
1667 snd_pcm_sframes_t in_frames, out_frames;
1668
1669 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1670
1671 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1672 pa_log_debug("before: %lu", (unsigned long) in_frames);
1673 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1674 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1675 if (try_recover(u, "process_rewind", out_frames) < 0)
1676 return -1;
1677 out_frames = 0;
1678 }
1679
1680 pa_log_debug("after: %lu", (unsigned long) out_frames);
1681
1682 rewind_nbytes = (size_t) out_frames * u->frame_size;
1683
1684 if (rewind_nbytes <= 0)
1685 pa_log_info("Tried rewind, but was apparently not possible.");
1686 else {
1687 u->write_count -= rewind_nbytes;
1688 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1689 pa_sink_process_rewind(u->sink, rewind_nbytes);
1690
1691 u->after_rewind = TRUE;
1692 return 0;
1693 }
1694 } else
1695 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1696
1697 pa_sink_process_rewind(u->sink, 0);
1698 return 0;
1699 }
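/* Note that a successful rewind decrements u->write_count, which keeps the
 * smoother's notion of the write pointer consistent with what will actually
 * be played; see update_smoother() above. */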
1700
1701 static void thread_func(void *userdata) {
1702 struct userdata *u = userdata;
1703 unsigned short revents = 0;
1704
1705 pa_assert(u);
1706
1707 pa_log_debug("Thread starting up");
1708
1709 if (u->core->realtime_scheduling)
1710 pa_make_realtime(u->core->realtime_priority);
1711
1712 pa_thread_mq_install(&u->thread_mq);
1713
1714 for (;;) {
1715 int ret;
1716 pa_usec_t rtpoll_sleep = 0, real_sleep;
1717
1718 #ifdef DEBUG_TIMING
1719 pa_log_debug("Loop");
1720 #endif
1721
1722 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
1723 if (process_rewind(u) < 0)
1724 goto fail;
1725 }
1726
1727 /* Render some data and write it to the dsp */
1728 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1729 int work_done;
1730 pa_usec_t sleep_usec = 0;
1731 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1732
1733 if (u->use_mmap)
1734 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1735 else
1736 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1737
1738 if (work_done < 0)
1739 goto fail;
1740
1741 /* pa_log_debug("work_done = %i", work_done); */
1742
1743 if (work_done) {
1744
1745 if (u->first) {
1746 pa_log_info("Starting playback.");
1747 snd_pcm_start(u->pcm_handle);
1748
1749 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1750
1751 u->first = FALSE;
1752 }
1753
1754 update_smoother(u);
1755 }
1756
1757 if (u->use_tsched) {
1758 pa_usec_t cusec;
1759
1760 if (u->since_start <= u->hwbuf_size) {
1761
1762 /* USB devices on ALSA seem to hit a buffer
1763 * underrun during the first iterations much
1764 * quicker than we calculate here, probably due to
1765 * the transport latency. To accommodate that
1766 * we artificially decrease the sleep time until
1767 * we have filled the buffer at least once
1768 * completely. */
1769
1770 if (pa_log_ratelimit(PA_LOG_DEBUG))
1771 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1772 sleep_usec /= 2;
1773 }
1774
1775 /* OK, the playback buffer is now full, let's
1776 * calculate when to wake up next */
1777 #ifdef DEBUG_TIMING
1778 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1779 #endif
1780
1781 /* Convert from the sound card time domain to the
1782 * system time domain */
1783 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1784
1785 #ifdef DEBUG_TIMING
1786 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1787 #endif
1788
1789 /* We don't trust the conversion, so we wake up whatever comes first */
1790 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1791 }
1792
1793 u->after_rewind = FALSE;
1794
1795 }
1796
1797 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1798 pa_usec_t volume_sleep;
1799 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1800 if (volume_sleep > 0) {
1801 if (rtpoll_sleep > 0)
1802 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1803 else
1804 rtpoll_sleep = volume_sleep;
1805 }
1806 }
1807
1808 if (rtpoll_sleep > 0) {
1809 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1810 real_sleep = pa_rtclock_now();
1811 }
1812 else
1813 pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark)
                pa_log_info("Scheduling delay of %0.2fms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC);
        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was not a regular exit from the loop, we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}


static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = TRUE;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = FALSE;
    }

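    /* Synthesize the usual ALSA sink name from device id and mapping,
     * e.g. "alsa_output.pci-0000_00_1b.0.analog-stereo" (the device id
     * in the example is illustrative). */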
    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}

static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
    snd_hctl_t *hctl;

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

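    /* An explicitly requested mixer element overrides the mapping's
     * probed path set: synthesize a single mixer path for it and verify
     * that it actually exists on this card. */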
    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->output_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}

static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* We have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);

        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    } else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
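        /* With deferred volume, mixer events are polled from the I/O
         * thread's rtpoll (io_mixer_callback); otherwise they are
         * dispatched from the main loop via an fd list
         * (ctl_mixer_callback). */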
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}

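/* Entry point used by module-alsa-sink and module-alsa-card. An
 * illustrative invocation from default.pa might look like:
 *
 *     load-module module-alsa-sink device_id=0 tsched=1
 *
 * The module arguments arrive here as the pa_modargs 'ma' parsed below. */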
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    char *thread_name = NULL;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;
    void *state = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

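    /* The buffer metrics are configured in bytes, but ALSA wants frames;
     * one frame is one sample per channel (e.g. 4 bytes for 16-bit
     * stereo). */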
    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);
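    /* pa_alsa_may_tsched() may veto timer-based scheduling, e.g. when the
     * kernel does not provide high-resolution timers. */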

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

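    /* The smoother tracks the relationship between the sound card's
     * sample clock and the system clock; update_smoother() and
     * pa_smoother_translate() in the I/O thread above feed from it. */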
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    /* Use the UCM context of the mapping, if the mapping provides one */
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

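    /* Acquire the device through the reserve-wrap helpers, which
     * implement cooperative device reservation so that other audio
     * systems can negotiate access to the hardware with us. */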
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

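    /* Three ways to open the PCM device: via an explicit mapping, by
     * probing the profiles of a device_id, or by a raw ALSA device
     * string. In all cases b and d report back whether mmap and tsched
     * could actually be enabled. */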
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");

        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on underrun");
    }

    if (is_iec958(u) || is_hdmi(u))
        set_formats = TRUE;

    u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    if (!u->ucm_context)
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);
    pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context)
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, TRUE, card);
    else if (u->mixer_path_set)
        pa_alsa_add_ports(&data, u->mixer_path_set, card);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
                          (set_formats ? PA_SINK_SET_FORMATS : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    if (u->ucm_context)
        u->sink->set_port = sink_set_port_ucm_cb;
    else
        u->sink->set_port = sink_set_port_cb;
    if (u->sink->alternate_sample_rate)
        u->sink->update_rate = sink_update_rate_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
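    /* With timer-based scheduling the sink advertises a dynamic latency
     * range that the watermark logic adjusts at runtime; without it, the
     * latency is pinned to the duration of the hardware buffer. */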

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (u->ucm_context) {
        if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, TRUE) < 0)
            goto fail;
    } else if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
    if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    pa_xfree(thread_name);
    thread_name = NULL;

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);

    if (set_formats) {
        /* For S/PDIF and HDMI, allow getting/setting custom formats */
        pa_format_info *format;

        /* To start with, we only support PCM formats. Other formats may be added
         * with pa_sink_set_formats(). */
        format = pa_format_info_new();
        format->encoding = PA_ENCODING_PCM;
        u->formats = pa_idxset_new(NULL, NULL);
        pa_idxset_put(u->formats, format, NULL);

        u->sink->get_formats = sink_get_formats;
        u->sink->set_formats = sink_set_formats;
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:
    pa_xfree(thread_name);

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

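    /* Shut down the I/O thread first, so that nothing below is torn down
     * while thread_func() might still be using it. */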
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path && !u->mixer_path_set)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->formats)
        pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);

    if (u->rates)
        pa_xfree(u->rates);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}