]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
alsa: distuingish real underruns from left_to_play=0
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC)      /* 1ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC)    /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
76 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)   /* 200ms -- max smoother update interval */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
82
83 struct userdata {
84 pa_core *core;
85 pa_module *module;
86 pa_sink *sink;
87
88 pa_thread *thread;
89 pa_thread_mq thread_mq;
90 pa_rtpoll *rtpoll;
91
92 snd_pcm_t *pcm_handle;
93
94 pa_alsa_fdlist *mixer_fdl;
95 snd_mixer_t *mixer_handle;
96 pa_alsa_path_set *mixer_path_set;
97 pa_alsa_path *mixer_path;
98
99 pa_cvolume hardware_volume;
100
101 size_t
102 frame_size,
103 fragment_size,
104 hwbuf_size,
105 tsched_watermark,
106 hwbuf_unused,
107 min_sleep,
108 min_wakeup,
109 watermark_inc_step,
110 watermark_dec_step,
111 watermark_inc_threshold,
112 watermark_dec_threshold;
113
114 pa_usec_t watermark_dec_not_before;
115
116 unsigned nfragments;
117 pa_memchunk memchunk;
118
119 char *device_name; /* name of the PCM device */
120 char *control_device; /* name of the control device */
121
122 pa_bool_t use_mmap:1, use_tsched:1;
123
124 pa_bool_t first, after_rewind;
125
126 pa_rtpoll_item *alsa_rtpoll_item;
127
128 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
129
130 pa_smoother *smoother;
131 uint64_t write_count;
132 uint64_t since_start;
133 pa_usec_t smoother_interval;
134 pa_usec_t last_smoother_update;
135
136 pa_reserve_wrapper *reserve;
137 pa_hook_slot *reserve_slot;
138 pa_reserve_monitor_wrapper *monitor;
139 pa_hook_slot *monitor_slot;
140 };
141
142 static void userdata_free(struct userdata *u);
143
144 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
145 pa_assert(r);
146 pa_assert(u);
147
148 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
149 return PA_HOOK_CANCEL;
150
151 return PA_HOOK_OK;
152 }
153
154 static void reserve_done(struct userdata *u) {
155 pa_assert(u);
156
157 if (u->reserve_slot) {
158 pa_hook_slot_free(u->reserve_slot);
159 u->reserve_slot = NULL;
160 }
161
162 if (u->reserve) {
163 pa_reserve_wrapper_unref(u->reserve);
164 u->reserve = NULL;
165 }
166 }
167
168 static void reserve_update(struct userdata *u) {
169 const char *description;
170 pa_assert(u);
171
172 if (!u->sink || !u->reserve)
173 return;
174
175 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
176 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
177 }
178
179 static int reserve_init(struct userdata *u, const char *dname) {
180 char *rname;
181
182 pa_assert(u);
183 pa_assert(dname);
184
185 if (u->reserve)
186 return 0;
187
188 if (pa_in_system_mode())
189 return 0;
190
191 if (!(rname = pa_alsa_get_reserve_name(dname)))
192 return 0;
193
194 /* We are resuming, try to lock the device */
195 u->reserve = pa_reserve_wrapper_get(u->core, rname);
196 pa_xfree(rname);
197
198 if (!(u->reserve))
199 return -1;
200
201 reserve_update(u);
202
203 pa_assert(!u->reserve_slot);
204 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
205
206 return 0;
207 }
208
209 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
210 pa_bool_t b;
211
212 pa_assert(w);
213 pa_assert(u);
214
215 b = PA_PTR_TO_UINT(busy) && !u->reserve;
216
217 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
218 return PA_HOOK_OK;
219 }
220
221 static void monitor_done(struct userdata *u) {
222 pa_assert(u);
223
224 if (u->monitor_slot) {
225 pa_hook_slot_free(u->monitor_slot);
226 u->monitor_slot = NULL;
227 }
228
229 if (u->monitor) {
230 pa_reserve_monitor_wrapper_unref(u->monitor);
231 u->monitor = NULL;
232 }
233 }
234
235 static int reserve_monitor_init(struct userdata *u, const char *dname) {
236 char *rname;
237
238 pa_assert(u);
239 pa_assert(dname);
240
241 if (pa_in_system_mode())
242 return 0;
243
244 if (!(rname = pa_alsa_get_reserve_name(dname)))
245 return 0;
246
247 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
248 pa_xfree(rname);
249
250 if (!(u->monitor))
251 return -1;
252
253 pa_assert(!u->monitor_slot);
254 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
255
256 return 0;
257 }
258
259 static void fix_min_sleep_wakeup(struct userdata *u) {
260 size_t max_use, max_use_2;
261
262 pa_assert(u);
263 pa_assert(u->use_tsched);
264
265 max_use = u->hwbuf_size - u->hwbuf_unused;
266 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
267
268 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
269 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
270
271 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
272 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
273 }
274
275 static void fix_tsched_watermark(struct userdata *u) {
276 size_t max_use;
277 pa_assert(u);
278 pa_assert(u->use_tsched);
279
280 max_use = u->hwbuf_size - u->hwbuf_unused;
281
282 if (u->tsched_watermark > max_use - u->min_sleep)
283 u->tsched_watermark = max_use - u->min_sleep;
284
285 if (u->tsched_watermark < u->min_wakeup)
286 u->tsched_watermark = u->min_wakeup;
287 }
288
289 static void increase_watermark(struct userdata *u) {
290 size_t old_watermark;
291 pa_usec_t old_min_latency, new_min_latency;
292
293 pa_assert(u);
294 pa_assert(u->use_tsched);
295
296 /* First, just try to increase the watermark */
297 old_watermark = u->tsched_watermark;
298 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
299 fix_tsched_watermark(u);
300
301 if (old_watermark != u->tsched_watermark) {
302 pa_log_info("Increasing wakeup watermark to %0.2f ms",
303 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
304 return;
305 }
306
307 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
308 old_min_latency = u->sink->thread_info.min_latency;
309 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
310 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
311
312 if (old_min_latency != new_min_latency) {
313 pa_log_info("Increasing minimal latency to %0.2f ms",
314 (double) new_min_latency / PA_USEC_PER_MSEC);
315
316 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
317 }
318
319 /* When we reach this we're officialy fucked! */
320 }
321
322 static void decrease_watermark(struct userdata *u) {
323 size_t old_watermark;
324 pa_usec_t now;
325
326 pa_assert(u);
327 pa_assert(u->use_tsched);
328
329 now = pa_rtclock_now();
330
331 if (u->watermark_dec_not_before <= 0)
332 goto restart;
333
334 if (u->watermark_dec_not_before > now)
335 return;
336
337 old_watermark = u->tsched_watermark;
338
339 if (u->tsched_watermark < u->watermark_dec_step)
340 u->tsched_watermark = u->tsched_watermark / 2;
341 else
342 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
343
344 fix_tsched_watermark(u);
345
346 if (old_watermark != u->tsched_watermark)
347 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
348 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
349
350 /* We don't change the latency range*/
351
352 restart:
353 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
354 }
355
356 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
357 pa_usec_t usec, wm;
358
359 pa_assert(sleep_usec);
360 pa_assert(process_usec);
361
362 pa_assert(u);
363 pa_assert(u->use_tsched);
364
365 usec = pa_sink_get_requested_latency_within_thread(u->sink);
366
367 if (usec == (pa_usec_t) -1)
368 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
369
370 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
371
372 if (wm > usec)
373 wm = usec/2;
374
375 *sleep_usec = usec - wm;
376 *process_usec = wm;
377
378 #ifdef DEBUG_TIMING
379 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
380 (unsigned long) (usec / PA_USEC_PER_MSEC),
381 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
382 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
383 #endif
384 }
385
386 static int try_recover(struct userdata *u, const char *call, int err) {
387 pa_assert(u);
388 pa_assert(call);
389 pa_assert(err < 0);
390
391 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
392
393 pa_assert(err != -EAGAIN);
394
395 if (err == -EPIPE)
396 pa_log_debug("%s: Buffer underrun!", call);
397
398 if (err == -ESTRPIPE)
399 pa_log_debug("%s: System suspended!", call);
400
401 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
402 pa_log("%s: %s", call, pa_alsa_strerror(err));
403 return -1;
404 }
405
406 u->first = TRUE;
407 u->since_start = 0;
408 return 0;
409 }
410
/* Convert the number of writable bytes in the hardware buffer (n_bytes)
 * into the number of bytes still queued for playback, detect underruns,
 * and adapt the tsched watermark accordingly. Returns the bytes left to
 * play. on_timeout is TRUE only when this wakeup came from our timer. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after start-up or a rewind the device legitimately runs
         * dry, so only log genuine underruns (rate-limited). */
        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit())
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        /* Watermark adaptation is skipped right after start-up/rewind
         * since left_to_play is not meaningful then. */
        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        /* Buffer level was neither comfortably high nor being verified:
         * restart the decrease-verification period. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
468
/* Fill the ALSA hardware buffer via mmap transfer, rendering sink data
 * directly into the DMA area. Returns 1 if data was written, 0 if not,
 * negative on fatal error. *sleep_usec is set to how long the caller may
 * sleep before the next fill-up.
 *
 * NOTE(review): work_done starts at TRUE here while unix_write() starts
 * at FALSE, so this function always reports "work done" — looks like it
 * should be FALSE for symmetry; confirm against upstream history before
 * changing. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = TRUE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        /* Only the first avail check counts as the timeout wakeup */
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Spurious POLLOUT wakeup without room to write: blame the
             * driver, once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        /* Bail out of the refill loop after a bounded number of rounds
         * so we cannot spin here forever. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed area in a fixed memblock so the sink can
             * render straight into it without a copy. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Sleep for what's left to play, minus the processing headroom. */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
639
/* Fill the ALSA hardware buffer with snd_pcm_writei() for devices that
 * cannot do mmap transfer. Returns 1 if data was written, 0 if not,
 * negative on fatal error. *sleep_usec is set to how long the caller may
 * sleep before the next fill-up. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room is there in the hardware buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        /* Only the first avail check counts as the timeout wakeup */
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Spurious POLLOUT wakeup without room to write: blame the
             * driver, once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bail out of the refill loop after a bounded number of rounds
         * so we cannot spin here forever. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;
            /* NOTE(review): after_avail is re-initialized to TRUE on
             * every inner iteration (unlike mmap_write(), where it lives
             * in the outer loop), so the -EAGAIN/0-frames early-exit
             * branches below can never trigger after a prior write.
             * Harmless while the PCM is opened in blocking mode (see
             * unsuspend(): SND_PCM_NONBLOCK is commented out), but worth
             * confirming against upstream. */
            pa_bool_t after_avail = TRUE;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Refill our staging memchunk from the sink when drained. */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            /* Never write more than the device currently has room for. */
            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Consume what was written from the staging memchunk. */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Sleep for what's left to play, minus the processing headroom. */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
776
/* Feed the time smoother a fresh (system time, playback time) sample
 * derived from the ALSA delay report. Updates are rate-limited by
 * smoother_interval, which grows exponentially up to
 * SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver-provided high-resolution timestamp as "now"... */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Playback position = bytes written minus what is still queued in
     * the hardware buffer (the reported delay). */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
826
827 static pa_usec_t sink_get_latency(struct userdata *u) {
828 pa_usec_t r;
829 int64_t delay;
830 pa_usec_t now1, now2;
831
832 pa_assert(u);
833
834 now1 = pa_rtclock_now();
835 now2 = pa_smoother_get(u->smoother, now1);
836
837 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
838
839 r = delay >= 0 ? (pa_usec_t) delay : 0;
840
841 if (u->memchunk.memblock)
842 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
843
844 return r;
845 }
846
847 static int build_pollfd(struct userdata *u) {
848 pa_assert(u);
849 pa_assert(u->pcm_handle);
850
851 if (u->alsa_rtpoll_item)
852 pa_rtpoll_item_free(u->alsa_rtpoll_item);
853
854 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
855 return -1;
856
857 return 0;
858 }
859
860 /* Called from IO context */
861 static int suspend(struct userdata *u) {
862 pa_assert(u);
863 pa_assert(u->pcm_handle);
864
865 pa_smoother_pause(u->smoother, pa_rtclock_now());
866
867 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
868 * take awfully long with our long buffer sizes today. */
869 snd_pcm_close(u->pcm_handle);
870 u->pcm_handle = NULL;
871
872 if (u->alsa_rtpoll_item) {
873 pa_rtpoll_item_free(u->alsa_rtpoll_item);
874 u->alsa_rtpoll_item = NULL;
875 }
876
877 pa_log_info("Device suspended...");
878
879 return 0;
880 }
881
/* Called from IO context */
/* Recompute hwbuf_unused and avail_min from the currently requested
 * latency and push the resulting software parameters to ALSA. Returns 0
 * on success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused in
             * the hardware buffer. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* The sleep/wakeup limits and the watermark depend on
         * hwbuf_unused, so refresh them now. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer-based scheduling, postpone poll wakeups until the
         * planned sleep time has passed. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
937
/* Called from IO context */
/* Reopen and reconfigure the PCM device after a suspend, verifying that
 * we get back exactly the configuration we ran with before. Returns 0 on
 * success, -1 if the device could not be restored (handle left closed). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request the same parameters we were running with before the
     * suspend... */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* ...and fail the resume if the device can no longer provide them,
     * since the sink's configuration cannot change at this point. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart the write-position bookkeeping and the timing smoother
     * from scratch. */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
1014
/* Called from IO context */
/* Handle sink messages in the IO thread: report latency and perform the
 * actual suspend/resume work on state transitions, then chain up to the
 * generic pa_sink message handler. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Without an open PCM handle (e.g. while suspended) report
             * a latency of 0. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* Coming out of INIT only the poll fds are missing... */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* ...while leaving SUSPENDED means reopening the device. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
1070
1071 /* Called from main context */
1072 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1073 pa_sink_state_t old_state;
1074 struct userdata *u;
1075
1076 pa_sink_assert_ref(s);
1077 pa_assert_se(u = s->userdata);
1078
1079 old_state = pa_sink_get_state(u->sink);
1080
1081 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1082 reserve_done(u);
1083 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1084 if (reserve_init(u, u->device_name) < 0)
1085 return -1;
1086
1087 return 0;
1088 }
1089
1090 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1091 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1092
1093 pa_assert(u);
1094 pa_assert(u->mixer_handle);
1095
1096 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1097 return 0;
1098
1099 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1100 pa_sink_get_volume(u->sink, TRUE);
1101 pa_sink_get_mute(u->sink, TRUE);
1102 }
1103
1104 return 0;
1105 }
1106
1107 static void sink_get_volume_cb(pa_sink *s) {
1108 struct userdata *u = s->userdata;
1109 pa_cvolume r;
1110 char t[PA_CVOLUME_SNPRINT_MAX];
1111
1112 pa_assert(u);
1113 pa_assert(u->mixer_path);
1114 pa_assert(u->mixer_handle);
1115
1116 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1117 return;
1118
1119 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1120 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1121
1122 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1123
1124 if (pa_cvolume_equal(&u->hardware_volume, &r))
1125 return;
1126
1127 s->real_volume = u->hardware_volume = r;
1128
1129 /* Hmm, so the hardware volume changed, let's reset our software volume */
1130 if (u->mixer_path->has_dB)
1131 pa_sink_set_soft_volume(s, NULL);
1132 }
1133
/* Called from main context: write s->real_volume to the hardware mixer
 * and compute the residual software volume needed to match the request
 * exactly (dB-capable mixers only). */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* NOTE(review): r appears to be updated in place to the volume the
     * hardware actually applied -- confirm against pa_alsa_path_set_volume() */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1185
1186 static void sink_get_mute_cb(pa_sink *s) {
1187 struct userdata *u = s->userdata;
1188 pa_bool_t b;
1189
1190 pa_assert(u);
1191 pa_assert(u->mixer_path);
1192 pa_assert(u->mixer_handle);
1193
1194 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1195 return;
1196
1197 s->muted = b;
1198 }
1199
1200 static void sink_set_mute_cb(pa_sink *s) {
1201 struct userdata *u = s->userdata;
1202
1203 pa_assert(u);
1204 pa_assert(u->mixer_path);
1205 pa_assert(u->mixer_handle);
1206
1207 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1208 }
1209
/* Called from main context: switch the sink over to a different mixer
 * port (path), recompute the volume scale for the new path, and re-apply
 * the current mute/volume settings. Always returns 0. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    /* Recompute the volume scale for the new path (mirrors the logic in
     * setup_mixer()) */
    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* Place 0dB at PA_VOLUME_NORM by shifting by the path's maximum dB */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        if (u->mixer_path->max_dB > 0.0)
            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
        else
            pa_log_info("No particular base volume set, fixing to 0 dB");
    } else {
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply the current mute/volume on the newly selected path */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1246
1247 static void sink_update_requested_latency_cb(pa_sink *s) {
1248 struct userdata *u = s->userdata;
1249 size_t before;
1250 pa_assert(u);
1251
1252 if (!u->pcm_handle)
1253 return;
1254
1255 before = u->hwbuf_unused;
1256 update_sw_params(u);
1257
1258 /* Let's check whether we now use only a smaller part of the
1259 buffer then before. If so, we need to make sure that subsequent
1260 rewinds are relative to the new maximum fill level and not to the
1261 current fill level. Thus, let's do a full rewind once, to clear
1262 things up. */
1263
1264 if (u->hwbuf_unused > before) {
1265 pa_log_debug("Requesting rewind due to latency change.");
1266 pa_sink_request_rewind(s, (size_t) -1);
1267 }
1268 }
1269
/* Called from the IO thread: rewind the ALSA write pointer by (up to)
 * the number of bytes the sink requested, then report the amount
 * actually rewound back to the core. Returns 0 on success, -1 on an
 * unrecoverable device error. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    /* Cap the rewind so at least the watermark (on top of what is
     * already free in the buffer) stays queued -- otherwise the rewind
     * itself could cause an underrun */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            /* Recovered, but nothing was actually rewound */
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* The device may honor less than we asked for */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    /* Report a zero-byte rewind so the core's bookkeeping stays in sync */
    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
1329
/* The sink's IO thread: renders audio into the ALSA buffer, programs
 * wakeup timers (in timer-based scheduling mode), and reacts to poll
 * events until a shutdown is requested or a fatal error occurs. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* Process a pending rewind before writing new data */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)start: kick off playback and
                 * resume the timing smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* A return of 0 signals a regular shutdown request */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates an error condition:
             * try to recover and restart playback from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1464
1465 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1466 const char *n;
1467 char *t;
1468
1469 pa_assert(data);
1470 pa_assert(ma);
1471 pa_assert(device_name);
1472
1473 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1474 pa_sink_new_data_set_name(data, n);
1475 data->namereg_fail = TRUE;
1476 return;
1477 }
1478
1479 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1480 data->namereg_fail = TRUE;
1481 else {
1482 n = device_id ? device_id : device_name;
1483 data->namereg_fail = FALSE;
1484 }
1485
1486 if (mapping)
1487 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1488 else
1489 t = pa_sprintf_malloc("alsa_output.%s", n);
1490
1491 pa_sink_new_data_set_name(data, t);
1492 pa_xfree(t);
1493 }
1494
/* Locate the mixer device belonging to the PCM and probe its volume/mute
 * paths. On any failure everything probed so far is torn down again and
 * the sink simply proceeds without a hardware mixer. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Without a mapping or an explicit mixer element there is nothing
     * to look for */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit "control=" element was given: build a single
         * synthetic path around just that element */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Otherwise probe the whole path set belonging to the mapping */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:

    /* Undo whichever of the two probe variants got partially set up */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1543
/* Activate the appropriate mixer path and hook hardware volume/mute
 * callbacks up to the sink. Returns 0 (also when there is no usable
 * mixer and software volume is used), -1 on setup failure. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer was found earlier: stick with software volume/mute */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Place 0dB at PA_VOLUME_NORM by shifting by the path's
             * maximum dB value */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds from the main loop so external volume/mute
     * changes are picked up via mixer_callback() */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1631
/* Create a new ALSA sink: parse module arguments, open the PCM device
 * (by mapping, device id, or device string), set up the mixer and the
 * pa_sink object, and start the IO thread. Returns the new sink or NULL
 * on failure (all partially built state is freed via userdata_free()). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Sample spec / channel map: module arguments override the
     * server-wide defaults */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Keep the original request around; ALSA may tweak ss below */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    /* Buffer metrics: defaults derived from server config, overridable
     * via module arguments */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling only makes sense with hrtimers */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Acquire the device reservation before touching the hardware */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d are in-out: they come back FALSE if mmap/tsched could not be
     * enabled on the device */
    b = use_mmap;
    d = use_tsched;

    /* Three ways to open the PCM: via an explicit mapping, by probing a
     * device id, or by a plain device string */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Downgrade gracefully if the device couldn't give us what we asked
     * for */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Build the pa_sink object and its property list */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* Final buffer geometry, after ALSA negotiation */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        /* The watermark was computed against the requested sample spec;
         * rescale it to the spec ALSA actually gave us */
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));


    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1944
/* Tear down all userdata state: stops the IO thread first, then releases
 * the sink, the PCM/mixer handles and all auxiliary objects. The order
 * matters -- the thread must be gone before the handles it uses are
 * closed. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink so no new streams or messages arrive */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Synchronously shut down the IO thread */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop playback and close the device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Free either the path set or the lone path, never both (mirrors the
     * either/or setup in find_mixer()) */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    /* Release the device reservation and its monitor */
    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1996
1997 void pa_alsa_sink_free(pa_sink *s) {
1998 struct userdata *u;
1999
2000 pa_sink_assert_ref(s);
2001 pa_assert_se(u = s->userdata);
2002
2003 userdata_free(u);
2004 }