/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <signal.h>
#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold within the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* Don't require volume adjustments to be perfectly correct. Don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */

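/*
 * Timer-based scheduling ("tsched") in a nutshell: instead of waking up
 * for every period the device completes, we keep a large hardware
 * buffer (2s by default) and program a timer so that we sleep until the
 * fill level approaches the wakeup watermark (20ms by default), then
 * refill the buffer completely. Per iteration, roughly:
 *
 *     sleep_usec   = buffered_usec - tsched_watermark_usec
 *     process_usec = tsched_watermark_usec
 *
 * On underruns the watermark (and, if allowed, the minimal latency) is
 * increased; after TSCHED_WATERMARK_VERIFY_AFTER_USEC without trouble
 * it is lowered again. See hw_sleep_time(), increase_watermark() and
 * decrease_watermark() below.
 */
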
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;
    pa_usec_t tsched_watermark_usec;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static pa_bool_t is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static pa_bool_t is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void *busy, struct userdata *u) {
    pa_assert(w);
    pa_assert(u);

    if (PA_PTR_TO_UINT(busy) && !u->reserve) {
        pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
        pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
    } else {
        pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
        pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
    }

    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;

    u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
}
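
/* In other words, the watermark is clamped to the window
 * [min_wakeup, max_use - min_sleep]: we always wake up at least
 * TSCHED_MIN_WAKEUP_USEC before the buffer would run dry, and never
 * schedule a sleep shorter than TSCHED_MIN_SLEEP_USEC. */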

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency, unless doing so was disabled in the
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = u->tsched_watermark_usec;

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
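
/* Example with the defaults and no client latency request: usec is the
 * full 2s buffer time and wm the 20ms watermark, giving a sleep time of
 * 1.98s and 20ms of budgeted processing time. If a client requests a
 * latency below the current watermark, wm falls back to half of the
 * requested latency so that sleep and process time stay balanced. */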

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}
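
/* Note: snd_pcm_recover() (the final '1' silences its own error
 * reporting) transparently handles -EPIPE (underrun) and -ESTRPIPE
 * (system suspend) by re-preparing or resuming the PCM device; any
 * error it cannot fix is passed back and we give up. Afterwards the
 * stream is treated like a fresh start (u->first, u->since_start). */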

static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#if 0
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
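
/* check_left_to_play() is the sensor of the watermark feedback loop: an
 * underrun or a fill level below watermark_inc_threshold bumps the
 * watermark up immediately, while lowering it requires the fill level
 * to have stayed above watermark_dec_threshold for a full
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC period, and only happens on
 * genuine timer wakeups. */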

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;
            size_t written;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            written = frames * u->frame_size;
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += written;
            u->since_start += written;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
#endif

            if (written >= n_bytes)
                break;

            n_bytes -= written;
        }
    }

    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

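/* unix_write() is the non-mmap fallback: identical control flow to
 * mmap_write(), but the data is rendered into u->memchunk first and
 * then handed to the device with snd_pcm_writei(). thread_func()
 * selects between the two paths based on u->use_mmap. */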
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;
            size_t written;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            written = frames * u->frame_size;
            u->memchunk.index += written;
            u->memchunk.length -= written;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += written;
            u->since_start += written;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (written >= n_bytes)
                break;

            n_bytes -= written;
        }
    }

    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
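
/* The smoother is fed pairs of (system time now1, device time now2):
 * now2 is derived from the playback position, i.e. bytes written so far
 * minus what still sits in the hardware buffer (delay). E.g. with a
 * 4-byte frame (S16LE stereo) and delay = 1000 frames, position is
 * write_count - 4000 bytes. From such pairs the smoother estimates the
 * sound card clock, used for latency queries and wakeup translation. */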

static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
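
/* In other words: latency = time worth of audio handed to ALSA so far
 * minus the smoothed playback position, plus whatever still sits in the
 * partially consumed memchunk on the snd_pcm_writei() path. */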

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
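
/* With tsched and the defaults (no latency request), hwbuf_unused is 0
 * and sleep_usec covers almost the whole 2s buffer, so avail_min ends
 * up close to the buffer size: the POLLOUT wakeup then only fires once
 * the fill level has dropped to roughly the watermark, acting as a
 * safety net alongside our own timer. */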

/* Called from IO Context on unsuspend or from main thread when creating sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread) {
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work around the assert in pa_sink_set_latency_range_within_thread():
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    /* reset the watermark to the value defined when the sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
        pa_sink_set_mixer_dirty(u->sink, TRUE);
        return 0;
    }

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
        pa_sink_set_mixer_dirty(u->sink, TRUE);
        return 0;
    }

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("                                           in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
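
/* Note on base_volume: with a dB-capable mixer the base volume is set
 * to pa_sw_volume_from_dB(-max_dB), so after the "shift down by the
 * base volume" step in the volume callbacks the device's 0 dB point
 * lands exactly on PA_VOLUME_NORM (100%). */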

static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    return pa_alsa_ucm_set_port(u->ucm_context, p, TRUE);
}

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);

    mixer_volume_init(u);

    if (s->set_mute)
        s->set_mute(s);
    if (s->flags & PA_SINK_DEFERRED_VOLUME) {
        if (s->write_volume)
            s->write_volume(s);
    } else {
        if (s->set_volume)
            s->set_volume(s);
    }

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f, *g;
    uint32_t idx, n;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* Count how many sample rates we support */
    for (idx = 0, n = 0; u->rates[idx]; idx++)
        n++;

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f)) {
            g = pa_format_info_copy(f);
            pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
            pa_idxset_put(u->formats, g, NULL);
        }
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f)) {
            /* We don't set rates here since we'll just tack on a resampler for
             * unsupported rates */
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
        }
    }

    return TRUE;
}

static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate) {
    struct userdata *u = s->userdata;
    int i;
    pa_bool_t supported = FALSE;

    pa_assert(u);

    for (i = 0; u->rates[i]; i++) {
        if (u->rates[i] == rate) {
            supported = TRUE;
            break;
        }
    }

    if (!supported) {
        pa_log_info("Sink does not support sample rate of %d Hz", rate);
        return FALSE;
    }

    if (!PA_SINK_IS_OPENED(s->state)) {
        pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
        u->sink->sample_spec.rate = rate;
        return TRUE;
    }

    return FALSE;
}

static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
        pa_sink_process_rewind(u->sink, 0);
        return 0;
    }

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
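
/* Rewind arithmetic in short: only data still queued in the hardware
 * buffer can be taken back. 'unused' is the free space reported by
 * snd_pcm_avail(), so the limit works out to hwbuf_size - free bytes -
 * rewind_safeguard, i.e. the queued bytes minus a safety margin for the
 * DMA read position; the request is clipped to that before
 * snd_pcm_rewind() is called. */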

static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
            if (process_rewind(u) < 0)
                goto fail;
        }

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To compensate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
#endif

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
#endif

                /* We don't trust the conversion, so we wake up at whichever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;

        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            real_sleep = pa_rtclock_now();
        } else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                         (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                         (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                            (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                            (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }
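
        /* The "Scheduling delay of..." warning above only triggers when
         * the wakeup overshoot exceeds the watermark, i.e. when the OS
         * scheduled us so late that the safety margin may already have
         * been eaten into; smaller jitter is expected and is silently
         * absorbed by the watermark. */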
1834
1835 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1836 pa_sink_volume_change_apply(u->sink, NULL);
1837
1838 if (ret == 0)
1839 goto finish;
1840
1841 /* Tell ALSA about this and process its response */
1842 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1843 struct pollfd *pollfd;
1844 int err;
1845 unsigned n;
1846
1847 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1848
1849 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1850 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1851 goto fail;
1852 }
1853
1854 if (revents & ~POLLOUT) {
1855 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1856 goto fail;
1857
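                /* After recovering, restart playback from scratch:
                 * u->first makes the next write call snd_pcm_start()
                 * again, and resetting since_start re-enables the
                 * shortened initial sleep times. */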
1858 u->first = TRUE;
1859 u->since_start = 0;
1860 revents = 0;
1861 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1862 pa_log_debug("Wakeup from ALSA!");
1863
1864 } else
1865 revents = 0;
1866 }
1867
1868 fail:
1869     /* If this was not a regular exit from the loop, we have to continue
1870      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1871 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1872 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1873
1874 finish:
1875 pa_log_debug("Thread shutting down");
1876 }
1877
1878 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1879 const char *n;
1880 char *t;
1881
1882 pa_assert(data);
1883 pa_assert(ma);
1884 pa_assert(device_name);
1885
1886 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1887 pa_sink_new_data_set_name(data, n);
1888 data->namereg_fail = TRUE;
1889 return;
1890 }
1891
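    /* No explicit sink_name= was given: fall back to the name=
     * argument or, failing that, to the device id/name, and derive
     * "alsa_output.<device>[.<mapping>]" from it below. */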
1892 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1893 data->namereg_fail = TRUE;
1894 else {
1895 n = device_id ? device_id : device_name;
1896 data->namereg_fail = FALSE;
1897 }
1898
1899 if (mapping)
1900 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1901 else
1902 t = pa_sprintf_malloc("alsa_output.%s", n);
1903
1904 pa_sink_new_data_set_name(data, t);
1905 pa_xfree(t);
1906 }
1907
1908 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1909 snd_hctl_t *hctl;
1910
1911 if (!mapping && !element)
1912 return;
1913
1914 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1915 pa_log_info("Failed to find a working mixer device.");
1916 return;
1917 }
1918
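    /* An explicit control= element takes precedence: synthesize and
     * probe a path for just that element. Otherwise use the path set
     * that was probed for the mapping. */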
1919 if (element) {
1920
1921 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1922 goto fail;
1923
1924 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1925 goto fail;
1926
1927 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1928 pa_alsa_path_dump(u->mixer_path);
1929 } else if (!(u->mixer_path_set = mapping->output_path_set))
1930 goto fail;
1931
1932 return;
1933
1934 fail:
1935
1936 if (u->mixer_path) {
1937 pa_alsa_path_free(u->mixer_path);
1938 u->mixer_path = NULL;
1939 }
1940
1941 if (u->mixer_handle) {
1942 snd_mixer_close(u->mixer_handle);
1943 u->mixer_handle = NULL;
1944 }
1945 }
1946
1947 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1948 pa_bool_t need_mixer_callback = FALSE;
1949
1950 pa_assert(u);
1951
1952 if (!u->mixer_handle)
1953 return 0;
1954
1955 if (u->sink->active_port) {
1956 pa_alsa_port_data *data;
1957
1958 /* We have a list of supported paths, so let's activate the
1959 * one that has been chosen as active */
1960
1961 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1962 u->mixer_path = data->path;
1963
1964 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
1965
1966 } else {
1967
1968 if (!u->mixer_path && u->mixer_path_set)
1969 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1970
1971 if (u->mixer_path) {
1972             /* Hmm, we have only a single path, so let's activate it */
1973
1974 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
1975
1976 } else
1977 return 0;
1978 }
1979
1980 mixer_volume_init(u);
1981
1982 /* Will we need to register callbacks? */
1983 if (u->mixer_path_set && u->mixer_path_set->paths) {
1984 pa_alsa_path *p;
1985 void *state;
1986
1987 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1988 if (p->has_volume || p->has_mute)
1989 need_mixer_callback = TRUE;
1990 }
1991 }
1992 else if (u->mixer_path)
1993 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1994
1995 if (need_mixer_callback) {
1996 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
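        /* With deferred volume the mixer is polled from the IO thread
         * via the rtpoll object; otherwise mixer events are dispatched
         * from the main loop through an fd list. */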
1997 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1998 u->mixer_pd = pa_alsa_mixer_pdata_new();
1999 mixer_callback = io_mixer_callback;
2000
2001 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
2002 pa_log("Failed to initialize file descriptor monitoring");
2003 return -1;
2004 }
2005 } else {
2006 u->mixer_fdl = pa_alsa_fdlist_new();
2007 mixer_callback = ctl_mixer_callback;
2008
2009 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
2010 pa_log("Failed to initialize file descriptor monitoring");
2011 return -1;
2012 }
2013 }
2014
2015 if (u->mixer_path_set)
2016 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
2017 else
2018 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
2019 }
2020
2021 return 0;
2022 }
2023
2024 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
2025
2026 struct userdata *u = NULL;
2027 const char *dev_id = NULL, *key, *mod_name;
2028 pa_sample_spec ss;
2029 char *thread_name = NULL;
2030 uint32_t alternate_sample_rate;
2031 pa_channel_map map;
2032 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2033 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2034 size_t frame_size;
2035 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
2036 pa_sink_new_data data;
2037 pa_alsa_profile_set *profile_set = NULL;
2038 void *state = NULL;
2039
2040 pa_assert(m);
2041 pa_assert(ma);
2042
2043 ss = m->core->default_sample_spec;
2044 map = m->core->default_channel_map;
2045 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2046 pa_log("Failed to parse sample specification and channel map");
2047 goto fail;
2048 }
2049
2050 alternate_sample_rate = m->core->alternate_sample_rate;
2051 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2052 pa_log("Failed to parse alternate sample rate");
2053 goto fail;
2054 }
2055
2056 frame_size = pa_frame_size(&ss);
2057
2058 nfrags = m->core->default_n_fragments;
2059 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2060 if (frag_size <= 0)
2061 frag_size = (uint32_t) frame_size;
2062 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2063 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2064
2065 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2066 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2067 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2068 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2069 pa_log("Failed to parse buffer metrics");
2070 goto fail;
2071 }
2072
2073 buffer_size = nfrags * frag_size;
2074
2075 period_frames = frag_size/frame_size;
2076 buffer_frames = buffer_size/frame_size;
2077 tsched_frames = tsched_size/frame_size;
2078
2079 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2080 pa_log("Failed to parse mmap argument.");
2081 goto fail;
2082 }
2083
2084 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2085 pa_log("Failed to parse tsched argument.");
2086 goto fail;
2087 }
2088
2089 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2090 pa_log("Failed to parse ignore_dB argument.");
2091 goto fail;
2092 }
2093
2094 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2095 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2096 pa_log("Failed to parse rewind_safeguard argument");
2097 goto fail;
2098 }
2099
2100 deferred_volume = m->core->deferred_volume;
2101 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2102 pa_log("Failed to parse deferred_volume argument.");
2103 goto fail;
2104 }
2105
2106 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2107 pa_log("Failed to parse fixed_latency_range argument.");
2108 goto fail;
2109 }
2110
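    /* Timer-based scheduling is only usable if the system provides
     * high-resolution timers, so this may downgrade use_tsched. */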
2111 use_tsched = pa_alsa_may_tsched(use_tsched);
2112
2113 u = pa_xnew0(struct userdata, 1);
2114 u->core = m->core;
2115 u->module = m;
2116 u->use_mmap = use_mmap;
2117 u->use_tsched = use_tsched;
2118 u->deferred_volume = deferred_volume;
2119 u->fixed_latency_range = fixed_latency_range;
2120 u->first = TRUE;
2121 u->rewind_safeguard = rewind_safeguard;
2122 u->rtpoll = pa_rtpoll_new();
2123 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2124
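    /* The smoother translates between the sound card's clock and the
     * system clock. It starts out paused and is resumed once playback
     * actually begins in the IO thread. */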
2125 u->smoother = pa_smoother_new(
2126 SMOOTHER_ADJUST_USEC,
2127 SMOOTHER_WINDOW_USEC,
2128 TRUE,
2129 TRUE,
2130 5,
2131 pa_rtclock_now(),
2132 TRUE);
2133 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2134
2135     /* Use the UCM context if the mapping provides one */
2136 if (mapping && mapping->ucm_context.ucm)
2137 u->ucm_context = &mapping->ucm_context;
2138
2139 dev_id = pa_modargs_get_value(
2140 ma, "device_id",
2141 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2142
2143 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2144
2145 if (reserve_init(u, dev_id) < 0)
2146 goto fail;
2147
2148 if (reserve_monitor_init(u, dev_id) < 0)
2149 goto fail;
2150
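    /* b and d are passed by reference to the open helpers below, which
     * clear them if the device turns out not to support mmap or
     * timer-based scheduling. */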
2151 b = use_mmap;
2152 d = use_tsched;
2153
2154 if (mapping) {
2155
2156 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2157 pa_log("device_id= not set");
2158 goto fail;
2159 }
2160
2161 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2162 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2163 pa_log("Failed to enable ucm modifier %s", mod_name);
2164 else
2165 pa_log_debug("Enabled ucm modifier %s", mod_name);
2166 }
2167
2168 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2169 dev_id,
2170 &u->device_name,
2171 &ss, &map,
2172 SND_PCM_STREAM_PLAYBACK,
2173 &period_frames, &buffer_frames, tsched_frames,
2174 &b, &d, mapping)))
2175 goto fail;
2176
2177 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2178
2179 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2180 goto fail;
2181
2182 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2183 dev_id,
2184 &u->device_name,
2185 &ss, &map,
2186 SND_PCM_STREAM_PLAYBACK,
2187 &period_frames, &buffer_frames, tsched_frames,
2188 &b, &d, profile_set, &mapping)))
2189 goto fail;
2190
2191 } else {
2192
2193 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2194 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2195 &u->device_name,
2196 &ss, &map,
2197 SND_PCM_STREAM_PLAYBACK,
2198 &period_frames, &buffer_frames, tsched_frames,
2199 &b, &d, FALSE)))
2200 goto fail;
2201 }
2202
2203 pa_assert(u->device_name);
2204 pa_log_info("Successfully opened device %s.", u->device_name);
2205
2206 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2207         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2208 goto fail;
2209 }
2210
2211 if (mapping)
2212 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2213
2214 if (use_mmap && !b) {
2215 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2216 u->use_mmap = use_mmap = FALSE;
2217 }
2218
2219 if (use_tsched && (!b || !d)) {
2220 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2221 u->use_tsched = use_tsched = FALSE;
2222 }
2223
2224 if (u->use_mmap)
2225 pa_log_info("Successfully enabled mmap() mode.");
2226
2227 if (u->use_tsched) {
2228 pa_log_info("Successfully enabled timer-based scheduling mode.");
2229
2230 if (u->fixed_latency_range)
2231 pa_log_info("Disabling latency range changes on underrun");
2232 }
2233
2234 if (is_iec958(u) || is_hdmi(u))
2235 set_formats = TRUE;
2236
2237 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
2238 if (!u->rates) {
2239 pa_log_error("Failed to find any supported sample rates.");
2240 goto fail;
2241 }
2242
2243 /* ALSA might tweak the sample spec, so recalculate the frame size */
2244 frame_size = pa_frame_size(&ss);
2245
2246 if (!u->ucm_context)
2247 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2248
2249 pa_sink_new_data_init(&data);
2250 data.driver = driver;
2251 data.module = m;
2252 data.card = card;
2253 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2254
2255 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2256 * variable instead of using &data.namereg_fail directly, because
2257 * data.namereg_fail is a bitfield and taking the address of a bitfield
2258 * variable is impossible. */
2259 namereg_fail = data.namereg_fail;
2260 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2261 pa_log("Failed to parse namereg_fail argument.");
2262 pa_sink_new_data_done(&data);
2263 goto fail;
2264 }
2265 data.namereg_fail = namereg_fail;
2266
2267 pa_sink_new_data_set_sample_spec(&data, &ss);
2268 pa_sink_new_data_set_channel_map(&data, &map);
2269 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2270
2271 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2272 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2273 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2274 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2275 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2276
2277 if (mapping) {
2278 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2279 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2280
2281 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2282 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2283 }
2284
2285 pa_alsa_init_description(data.proplist);
2286
2287 if (u->control_device)
2288 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2289
2290 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2291 pa_log("Invalid properties");
2292 pa_sink_new_data_done(&data);
2293 goto fail;
2294 }
2295
2296 if (u->ucm_context)
2297 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, TRUE, card);
2298 else if (u->mixer_path_set)
2299 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2300
2301 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2302 (set_formats ? PA_SINK_SET_FORMATS : 0));
2303 pa_sink_new_data_done(&data);
2304
2305 if (!u->sink) {
2306 pa_log("Failed to create sink object");
2307 goto fail;
2308 }
2309
2310 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2311 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2312 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2313 goto fail;
2314 }
2315
2316 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2317 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2318 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2319 goto fail;
2320 }
2321
2322 u->sink->parent.process_msg = sink_process_msg;
2323 if (u->use_tsched)
2324 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2325 u->sink->set_state = sink_set_state_cb;
2326 if (u->ucm_context)
2327 u->sink->set_port = sink_set_port_ucm_cb;
2328 else
2329 u->sink->set_port = sink_set_port_cb;
2330 if (u->sink->alternate_sample_rate)
2331 u->sink->update_rate = sink_update_rate_cb;
2332 u->sink->userdata = u;
2333
2334 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2335 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2336
2337 u->frame_size = frame_size;
2338 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2339 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2340 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2341
2342 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2343 (double) u->hwbuf_size / (double) u->fragment_size,
2344 (long unsigned) u->fragment_size,
2345 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2346 (long unsigned) u->hwbuf_size,
2347 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2348
2349 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2350 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2351 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2352 else {
2353 pa_log_info("Disabling rewind for device %s", u->device_name);
2354 pa_sink_set_max_rewind(u->sink, 0);
2355 }
2356
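    /* With timer-based scheduling the watermark determines the
     * (dynamic) latency; without it the latency is fixed to the length
     * of the whole hardware buffer. */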
2357 if (u->use_tsched) {
2358 u->tsched_watermark_ref = tsched_watermark;
2359 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2360 } else
2361 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2362
2363 reserve_update(u);
2364
2365 if (update_sw_params(u) < 0)
2366 goto fail;
2367
2368 if (u->ucm_context) {
2369 if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, TRUE) < 0)
2370 goto fail;
2371 } else if (setup_mixer(u, ignore_dB) < 0)
2372 goto fail;
2373
2374 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2375
2376 thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
2377 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2378 pa_log("Failed to create thread.");
2379 goto fail;
2380 }
2381 pa_xfree(thread_name);
2382 thread_name = NULL;
2383
2384 /* Get initial mixer settings */
2385 if (data.volume_is_set) {
2386 if (u->sink->set_volume)
2387 u->sink->set_volume(u->sink);
2388 } else {
2389 if (u->sink->get_volume)
2390 u->sink->get_volume(u->sink);
2391 }
2392
2393 if (data.muted_is_set) {
2394 if (u->sink->set_mute)
2395 u->sink->set_mute(u->sink);
2396 } else {
2397 if (u->sink->get_mute)
2398 u->sink->get_mute(u->sink);
2399 }
2400
2401 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2402 u->sink->write_volume(u->sink);
2403
2404 if (set_formats) {
2405 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2406 pa_format_info *format;
2407
2408                 /* To start with, we only support PCM formats. Other formats may be added
2409                  * with pa_sink_set_formats(). */
2410 format = pa_format_info_new();
2411 format->encoding = PA_ENCODING_PCM;
2412 u->formats = pa_idxset_new(NULL, NULL);
2413 pa_idxset_put(u->formats, format, NULL);
2414
2415 u->sink->get_formats = sink_get_formats;
2416 u->sink->set_formats = sink_set_formats;
2417 }
2418
2419 pa_sink_put(u->sink);
2420
2421 if (profile_set)
2422 pa_alsa_profile_set_free(profile_set);
2423
2424 return u->sink;
2425
2426 fail:
2427 pa_xfree(thread_name);
2428
2429 if (u)
2430 userdata_free(u);
2431
2432 if (profile_set)
2433 pa_alsa_profile_set_free(profile_set);
2434
2435 return NULL;
2436 }
2437
2438 static void userdata_free(struct userdata *u) {
2439 pa_assert(u);
2440
2441 if (u->sink)
2442 pa_sink_unlink(u->sink);
2443
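    /* Ask the IO thread to shut down and wait for it to finish before
     * tearing down the resources it may still be using. */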
2444 if (u->thread) {
2445 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2446 pa_thread_free(u->thread);
2447 }
2448
2449 pa_thread_mq_done(&u->thread_mq);
2450
2451 if (u->sink)
2452 pa_sink_unref(u->sink);
2453
2454 if (u->memchunk.memblock)
2455 pa_memblock_unref(u->memchunk.memblock);
2456
2457 if (u->mixer_pd)
2458 pa_alsa_mixer_pdata_free(u->mixer_pd);
2459
2460 if (u->alsa_rtpoll_item)
2461 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2462
2463 if (u->rtpoll)
2464 pa_rtpoll_free(u->rtpoll);
2465
2466 if (u->pcm_handle) {
2467 snd_pcm_drop(u->pcm_handle);
2468 snd_pcm_close(u->pcm_handle);
2469 }
2470
2471 if (u->mixer_fdl)
2472 pa_alsa_fdlist_free(u->mixer_fdl);
2473
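    /* Only free the mixer path if we own it; a path that belongs to a
     * path set is freed together with the set. */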
2474 if (u->mixer_path && !u->mixer_path_set)
2475 pa_alsa_path_free(u->mixer_path);
2476
2477 if (u->mixer_handle)
2478 snd_mixer_close(u->mixer_handle);
2479
2480 if (u->smoother)
2481 pa_smoother_free(u->smoother);
2482
2483 if (u->formats)
2484 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
2485
2486 if (u->rates)
2487 pa_xfree(u->rates);
2488
2489 reserve_done(u);
2490 monitor_done(u);
2491
2492 pa_xfree(u->device_name);
2493 pa_xfree(u->control_device);
2494 pa_xfree(u->paths_dir);
2495 pa_xfree(u);
2496 }
2497
2498 void pa_alsa_sink_free(pa_sink *s) {
2499 struct userdata *u;
2500
2501 pa_sink_assert_ref(s);
2502 pa_assert_se(u = s->userdata);
2503
2504 userdata_free(u);
2505 }