/* pulseaudio: src/modules/alsa/alsa-sink.c -- add rewind-safeguard parameter */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level is ever below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
80
81 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
82 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
83
84 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
85
86 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256) /* 1.33ms @48kHz, should work for most hardware */
87
/* Per-sink instance state, shared between the main thread and the sink's
 * IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;            /* the sink's IO thread */
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;        /* NULL while suspended */

    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;   /* last volume read from/written to the hw mixer */

    /* All of the following are in bytes */
    size_t
        frame_size,               /* bytes per frame of the configured sample spec */
        fragment_size,            /* ALSA period size */
        hwbuf_size,               /* total hardware buffer size */
        tsched_watermark,         /* refill when this much (or less) is left */
        hwbuf_unused,             /* part of the hw buffer we deliberately keep empty */
        min_sleep,                /* lower bound for sleep time, as bytes */
        min_wakeup,               /* minimal headroom before the buffer runs dry */
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;         /* bytes near the read pointer we never rewind into */

    pa_usec_t watermark_dec_not_before;  /* 0 = unset; otherwise earliest time a decrease is allowed */

    pa_memchunk memchunk;         /* partially written chunk (non-mmap path only) */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    /* first: no data written since (re)start; after_rewind: a rewind just happened */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Playback-position smoothing for latency reporting */
    pa_smoother *smoother;
    uint64_t write_count;         /* total bytes written since resume */
    uint64_t since_start;         /* bytes written since start/recovery */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Session device reservation (see modules/reserve-wrap.h) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
146
147 static void userdata_free(struct userdata *u);
148
/* Reservation hook: another application requests the device; suspend our
 * sink so it can be handed over, or cancel the handover if we cannot. */
static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    /* If suspending fails we refuse to give up the device */
    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}
158
159 static void reserve_done(struct userdata *u) {
160 pa_assert(u);
161
162 if (u->reserve_slot) {
163 pa_hook_slot_free(u->reserve_slot);
164 u->reserve_slot = NULL;
165 }
166
167 if (u->reserve) {
168 pa_reserve_wrapper_unref(u->reserve);
169 u->reserve = NULL;
170 }
171 }
172
173 static void reserve_update(struct userdata *u) {
174 const char *description;
175 pa_assert(u);
176
177 if (!u->sink || !u->reserve)
178 return;
179
180 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
181 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
182 }
183
/* Acquire a session device reservation for the given ALSA device name.
 * Returns 0 on success or when no reservation is applicable (already
 * reserved, system mode, or no reservation name), -1 if the device is
 * currently held by someone else. */
static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    /* System-wide instances do not participate in session reservation */
    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    /* Get notified when somebody else wants the device */
    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}
213
214 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
215 pa_bool_t b;
216
217 pa_assert(w);
218 pa_assert(u);
219
220 b = PA_PTR_TO_UINT(busy) && !u->reserve;
221
222 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
223 return PA_HOOK_OK;
224 }
225
226 static void monitor_done(struct userdata *u) {
227 pa_assert(u);
228
229 if (u->monitor_slot) {
230 pa_hook_slot_free(u->monitor_slot);
231 u->monitor_slot = NULL;
232 }
233
234 if (u->monitor) {
235 pa_reserve_monitor_wrapper_unref(u->monitor);
236 u->monitor = NULL;
237 }
238 }
239
/* Set up a monitor watching whether some other application reserves the
 * device; monitor_cb() then suspends/resumes the sink accordingly.
 * Returns 0 on success or when not applicable, -1 on failure. */
static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    /* System-wide instances do not participate in session reservation */
    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}
263
/* Recompute the byte equivalents of the minimal sleep and minimal wakeup
 * times, each clamped to [one frame, half of the usable hw buffer].
 * Only meaningful in timer-based scheduling mode. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* Usable part of the hw buffer (total minus the deliberately unused tail) */
    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
279
280 static void fix_tsched_watermark(struct userdata *u) {
281 size_t max_use;
282 pa_assert(u);
283 pa_assert(u->use_tsched);
284
285 max_use = u->hwbuf_size - u->hwbuf_unused;
286
287 if (u->tsched_watermark > max_use - u->min_sleep)
288 u->tsched_watermark = max_use - u->min_sleep;
289
290 if (u->tsched_watermark < u->min_wakeup)
291 u->tsched_watermark = u->min_wakeup;
292 }
293
294 static void increase_watermark(struct userdata *u) {
295 size_t old_watermark;
296 pa_usec_t old_min_latency, new_min_latency;
297
298 pa_assert(u);
299 pa_assert(u->use_tsched);
300
301 /* First, just try to increase the watermark */
302 old_watermark = u->tsched_watermark;
303 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
304 fix_tsched_watermark(u);
305
306 if (old_watermark != u->tsched_watermark) {
307 pa_log_info("Increasing wakeup watermark to %0.2f ms",
308 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
309 return;
310 }
311
312 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
313 old_min_latency = u->sink->thread_info.min_latency;
314 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
315 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
316
317 if (old_min_latency != new_min_latency) {
318 pa_log_info("Increasing minimal latency to %0.2f ms",
319 (double) new_min_latency / PA_USEC_PER_MSEC);
320
321 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
322 }
323
324 /* When we reach this we're officialy fucked! */
325 }
326
327 static void decrease_watermark(struct userdata *u) {
328 size_t old_watermark;
329 pa_usec_t now;
330
331 pa_assert(u);
332 pa_assert(u->use_tsched);
333
334 now = pa_rtclock_now();
335
336 if (u->watermark_dec_not_before <= 0)
337 goto restart;
338
339 if (u->watermark_dec_not_before > now)
340 return;
341
342 old_watermark = u->tsched_watermark;
343
344 if (u->tsched_watermark < u->watermark_dec_step)
345 u->tsched_watermark = u->tsched_watermark / 2;
346 else
347 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
348
349 fix_tsched_watermark(u);
350
351 if (old_watermark != u->tsched_watermark)
352 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
353 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
354
355 /* We don't change the latency range*/
356
357 restart:
358 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
359 }
360
361 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
362 pa_usec_t usec, wm;
363
364 pa_assert(sleep_usec);
365 pa_assert(process_usec);
366
367 pa_assert(u);
368 pa_assert(u->use_tsched);
369
370 usec = pa_sink_get_requested_latency_within_thread(u->sink);
371
372 if (usec == (pa_usec_t) -1)
373 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
374
375 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
376
377 if (wm > usec)
378 wm = usec/2;
379
380 *sleep_usec = usec - wm;
381 *process_usec = wm;
382
383 #ifdef DEBUG_TIMING
384 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
385 (unsigned long) (usec / PA_USEC_PER_MSEC),
386 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
387 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
388 #endif
389 }
390
/* Try to recover the PCM from an error returned by an ALSA call.
 * Returns 0 if recovery succeeded (the caller should retry the failed
 * operation), -1 if the error is fatal. */
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    /* -EAGAIN must be handled by the caller, never passed in here */
    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    /* Last argument 1 = silent: keep ALSA from printing to stderr */
    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    /* After a recovery the device effectively restarts from scratch */
    u->first = TRUE;
    u->since_start = 0;
    return 0;
}
415
/* Given how many bytes are free in the hw buffer (n_bytes), compute how
 * many bytes are still queued for playback, detect underruns, and drive
 * the adaptive watermark: increase it on (near-)underrun, decrease it
 * after a sustained period of comfortable fill levels. on_timeout tells
 * us whether this wakeup came from our own timer. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after start or a rewind an underrun is expected; don't log it */
        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit())
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        /* Fill level dipped below the comfortable zone: restart the
         * decrease-verification period from scratch */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
473
/* Fill the hardware buffer using ALSA's mmap interface, rendering sink
 * input directly into the DMA area. Computes in *sleep_usec how long the
 * IO thread may sleep before the next refill (tsched mode only).
 * polled = we were woken by POLLOUT; on_timeout = woken by our timer.
 * Returns 1 if data was written, 0 if not, negative on fatal error.
 *
 * NOTE(review): work_done is initialized to TRUE here but to FALSE in
 * unix_write(), making this function always report "work done" even when
 * it writes nothing -- looks like an inconsistency; confirm against the
 * caller's use of the return value before changing. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = TRUE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT fired but there is nothing to write: driver bug */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        /* Safety valve against spinning forever in this loop */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        /* Inner loop: map chunks of the DMA buffer and render into them
         * until the n_bytes we decided on above are written */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the DMA area in a fixed memblock so the sink can
             * render straight into it, no intermediate copy */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the remaining queued audio minus the processing
         * headroom has drained */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
647
/* Fill the hardware buffer with snd_pcm_writei() (non-mmap fallback).
 * Renders into u->memchunk and writes it out, keeping any partially
 * written remainder for the next round. Same contract as mmap_write():
 * *sleep_usec gets the allowed sleep time (tsched mode), returns 1 if
 * data was written, 0 if not, negative on fatal error. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How much room is there in the hw buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT fired but there is nothing to write: driver bug */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve against spinning forever in this loop */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one is fully written */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Advance past the part that was written; keep the rest */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the remaining queued audio minus the processing
         * headroom has drained */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
787
/* Feed the time smoother with a fresh (system time, playback time) pair
 * derived from the PCM delay, so that latency queries interpolate
 * smoothly between hardware updates. Rate-limited by
 * u->smoother_interval, which backs off exponentially. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver-provided timestamp of the delay snapshot */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Playback position = bytes handed to ALSA minus what is still queued */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
837
838 static pa_usec_t sink_get_latency(struct userdata *u) {
839 pa_usec_t r;
840 int64_t delay;
841 pa_usec_t now1, now2;
842
843 pa_assert(u);
844
845 now1 = pa_rtclock_now();
846 now2 = pa_smoother_get(u->smoother, now1);
847
848 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
849
850 r = delay >= 0 ? (pa_usec_t) delay : 0;
851
852 if (u->memchunk.memblock)
853 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
854
855 return r;
856 }
857
858 static int build_pollfd(struct userdata *u) {
859 pa_assert(u);
860 pa_assert(u->pcm_handle);
861
862 if (u->alsa_rtpoll_item)
863 pa_rtpoll_item_free(u->alsa_rtpoll_item);
864
865 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
866 return -1;
867
868 return 0;
869 }
870
/* Called from IO context */
/* Suspend the sink: pause the smoother, close the PCM handle and drop
 * the poll item. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}
900
/* Called from IO context */
/* Recompute hwbuf_unused from the currently requested latency, refresh
 * the tsched bounds/watermark, and push the resulting avail_min to ALSA.
 * Also updates the sink's max_request/max_rewind. Returns 0 on success,
 * a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* hwbuf_unused changed, so the derived tsched values must follow */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode we do our own wakeup scheduling: push avail_min
         * out far enough that ALSA does not wake us early */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);

    return 0;
}
957
/* Called from IO context */
/* Resume from suspend: reopen the PCM and verify that the hardware still
 * accepts exactly the configuration we had before (access mode, sample
 * spec, period/buffer sizes). Returns 0 on success, -PA_ERR_IO if the
 * original configuration cannot be restored. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for the exact configuration we had before the suspend */
    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Reset the position/smoother bookkeeping for a fresh start */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1034
/* Called from IO context */
/* Message handler for the sink: answers latency queries and performs
 * suspend/resume on state changes, delegating everything else to
 * pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended (no PCM handle) we report zero latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
1095
1096 /* Called from main context */
1097 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1098 pa_sink_state_t old_state;
1099 struct userdata *u;
1100
1101 pa_sink_assert_ref(s);
1102 pa_assert_se(u = s->userdata);
1103
1104 old_state = pa_sink_get_state(u->sink);
1105
1106 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1107 reserve_done(u);
1108 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1109 if (reserve_init(u, u->device_name) < 0)
1110 return -PA_ERR_BUSY;
1111
1112 return 0;
1113 }
1114
1115 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1116 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1117
1118 pa_assert(u);
1119 pa_assert(u->mixer_handle);
1120
1121 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1122 return 0;
1123
1124 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1125 return 0;
1126
1127 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1128 pa_sink_get_volume(u->sink, TRUE);
1129 pa_sink_get_mute(u->sink, TRUE);
1130 }
1131
1132 return 0;
1133 }
1134
/* Sink get_volume callback: read the current hardware mixer volume and
 * propagate it into s->real_volume. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* If the mixer cannot be queried, keep the previously known values */
    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* Nothing changed since the last read, nothing to propagate */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}
1161
/* Sink set_volume callback: write s->real_volume out to the hardware
 * mixer and, on dB-capable mixers, compute the residual software
 * volume needed to match the user's request exactly. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* On failure, keep the previous hardware/software volume split */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* r now holds what the hardware actually applied */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1213
1214 static void sink_get_mute_cb(pa_sink *s) {
1215 struct userdata *u = s->userdata;
1216 pa_bool_t b;
1217
1218 pa_assert(u);
1219 pa_assert(u->mixer_path);
1220 pa_assert(u->mixer_handle);
1221
1222 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1223 return;
1224
1225 s->muted = b;
1226 }
1227
1228 static void sink_set_mute_cb(pa_sink *s) {
1229 struct userdata *u = s->userdata;
1230
1231 pa_assert(u);
1232 pa_assert(u->mixer_path);
1233 pa_assert(u->mixer_handle);
1234
1235 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1236 }
1237
1238 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1239 struct userdata *u = s->userdata;
1240 pa_alsa_port_data *data;
1241
1242 pa_assert(u);
1243 pa_assert(p);
1244 pa_assert(u->mixer_handle);
1245
1246 data = PA_DEVICE_PORT_DATA(p);
1247
1248 pa_assert_se(u->mixer_path = data->path);
1249 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1250
1251 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1252 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1253 s->n_volume_steps = PA_VOLUME_NORM+1;
1254
1255 if (u->mixer_path->max_dB > 0.0)
1256 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1257 else
1258 pa_log_info("No particular base volume set, fixing to 0 dB");
1259 } else {
1260 s->base_volume = PA_VOLUME_NORM;
1261 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1262 }
1263
1264 if (data->setting)
1265 pa_alsa_setting_select(data->setting, u->mixer_handle);
1266
1267 if (s->set_mute)
1268 s->set_mute(s);
1269 if (s->set_volume)
1270 s->set_volume(s);
1271
1272 return 0;
1273 }
1274
1275 static void sink_update_requested_latency_cb(pa_sink *s) {
1276 struct userdata *u = s->userdata;
1277 size_t before;
1278 pa_assert(u);
1279 pa_assert(u->use_tsched); /* only when timer scheduling is used
1280 * we can dynamically adjust the
1281 * latency */
1282
1283 if (!u->pcm_handle)
1284 return;
1285
1286 before = u->hwbuf_unused;
1287 update_sw_params(u);
1288
1289 /* Let's check whether we now use only a smaller part of the
1290 buffer then before. If so, we need to make sure that subsequent
1291 rewinds are relative to the new maximum fill level and not to the
1292 current fill level. Thus, let's do a full rewind once, to clear
1293 things up. */
1294
1295 if (u->hwbuf_unused > before) {
1296 pa_log_debug("Requesting rewind due to latency change.");
1297 pa_sink_request_rewind(s, (size_t) -1);
1298 }
1299 }
1300
1301 static int process_rewind(struct userdata *u) {
1302 snd_pcm_sframes_t unused;
1303 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1304 pa_assert(u);
1305
1306 /* Figure out how much we shall rewind and reset the counter */
1307 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1308
1309 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1310
1311 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1312 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1313 return -1;
1314 }
1315
1316 unused_nbytes = (size_t) unused * u->frame_size;
1317
1318 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1319 unused_nbytes += u->rewind_safeguard;
1320
1321 if (u->hwbuf_size > unused_nbytes)
1322 limit_nbytes = u->hwbuf_size - unused_nbytes;
1323 else
1324 limit_nbytes = 0;
1325
1326 if (rewind_nbytes > limit_nbytes)
1327 rewind_nbytes = limit_nbytes;
1328
1329 if (rewind_nbytes > 0) {
1330 snd_pcm_sframes_t in_frames, out_frames;
1331
1332 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1333
1334 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1335 pa_log_debug("before: %lu", (unsigned long) in_frames);
1336 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1337 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1338 if (try_recover(u, "process_rewind", out_frames) < 0)
1339 return -1;
1340 out_frames = 0;
1341 }
1342
1343 pa_log_debug("after: %lu", (unsigned long) out_frames);
1344
1345 rewind_nbytes = (size_t) out_frames * u->frame_size;
1346
1347 if (rewind_nbytes <= 0)
1348 pa_log_info("Tried rewind, but was apparently not possible.");
1349 else {
1350 u->write_count -= rewind_nbytes;
1351 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1352 pa_sink_process_rewind(u->sink, rewind_nbytes);
1353
1354 u->after_rewind = TRUE;
1355 return 0;
1356 }
1357 } else
1358 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1359
1360 pa_sink_process_rewind(u->sink, 0);
1361 return 0;
1362 }
1363
/* The sink's realtime IO thread: renders audio into the ALSA buffer,
 * services rewind requests and poll/timer wakeups, and keeps the
 * timing smoother updated until asked to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* Rewinds are serviced before any new data is written */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* Start the device on the very first successful write */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* A return of 0 is the regular request to shut down */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates an error condition:
             * try to recover and restart playback from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1498
1499 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1500 const char *n;
1501 char *t;
1502
1503 pa_assert(data);
1504 pa_assert(ma);
1505 pa_assert(device_name);
1506
1507 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1508 pa_sink_new_data_set_name(data, n);
1509 data->namereg_fail = TRUE;
1510 return;
1511 }
1512
1513 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1514 data->namereg_fail = TRUE;
1515 else {
1516 n = device_id ? device_id : device_name;
1517 data->namereg_fail = FALSE;
1518 }
1519
1520 if (mapping)
1521 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1522 else
1523 t = pa_sprintf_malloc("alsa_output.%s", n);
1524
1525 pa_sink_new_data_set_name(data, t);
1526 pa_xfree(t);
1527 }
1528
/* Locate and probe the hardware mixer for the opened PCM. On success,
 * u->mixer_handle plus either u->mixer_path (explicit control=
 * element) or u->mixer_path_set (probed from the mapping) are set up;
 * on failure everything is torn down again and the sink simply runs
 * without hardware mixer support. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Neither a mapping nor an explicit element: nothing to look for */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit control= element was given: synthesize a path
         * containing just that element and probe it */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Otherwise build and probe the full path set for the mapping */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:

    /* At most one of the path set / single path exists at this point;
     * free whichever was created, then close the mixer handle */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1577
/* Select the active mixer path and wire hardware volume/mute control
 * into the sink. Returns 0 also when no usable mixer is present (the
 * sink then falls back to software volume/mute); returns -1 only when
 * mixer event monitoring cannot be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* find_mixer() found nothing: software volume/mute only */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No active port: fall back to the first probed path, if any */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Place 0 dB at PA_VOLUME_NORM */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* Raw volume control: one PA step per hardware step */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer for changes made outside PulseAudio */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1665
1666 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1667
1668 struct userdata *u = NULL;
1669 const char *dev_id = NULL;
1670 pa_sample_spec ss, requested_ss;
1671 pa_channel_map map;
1672 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1673 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1674 size_t frame_size, rewind_safeguard;
1675 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1676 pa_sink_new_data data;
1677 pa_alsa_profile_set *profile_set = NULL;
1678
1679 pa_assert(m);
1680 pa_assert(ma);
1681
1682 ss = m->core->default_sample_spec;
1683 map = m->core->default_channel_map;
1684 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1685 pa_log("Failed to parse sample specification and channel map");
1686 goto fail;
1687 }
1688
1689 requested_ss = ss;
1690 frame_size = pa_frame_size(&ss);
1691
1692 nfrags = m->core->default_n_fragments;
1693 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1694 if (frag_size <= 0)
1695 frag_size = (uint32_t) frame_size;
1696 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1697 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1698
1699 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1700 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1701 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1702 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1703 pa_log("Failed to parse buffer metrics");
1704 goto fail;
1705 }
1706
1707 buffer_size = nfrags * frag_size;
1708
1709 period_frames = frag_size/frame_size;
1710 buffer_frames = buffer_size/frame_size;
1711 tsched_frames = tsched_size/frame_size;
1712
1713 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1714 pa_log("Failed to parse mmap argument.");
1715 goto fail;
1716 }
1717
1718 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1719 pa_log("Failed to parse tsched argument.");
1720 goto fail;
1721 }
1722
1723 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1724 pa_log("Failed to parse ignore_dB argument.");
1725 goto fail;
1726 }
1727
1728 rewind_safeguard = DEFAULT_REWIND_SAFEGUARD_BYTES;
1729 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
1730 pa_log("Failed to parse rewind_safeguard argument");
1731 goto fail;
1732 }
1733
1734 use_tsched = pa_alsa_may_tsched(use_tsched);
1735
1736 u = pa_xnew0(struct userdata, 1);
1737 u->core = m->core;
1738 u->module = m;
1739 u->use_mmap = use_mmap;
1740 u->use_tsched = use_tsched;
1741 u->first = TRUE;
1742 u->rewind_safeguard = rewind_safeguard;
1743 u->rtpoll = pa_rtpoll_new();
1744 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1745
1746 u->smoother = pa_smoother_new(
1747 DEFAULT_TSCHED_BUFFER_USEC*2,
1748 DEFAULT_TSCHED_BUFFER_USEC*2,
1749 TRUE,
1750 TRUE,
1751 5,
1752 pa_rtclock_now(),
1753 TRUE);
1754 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1755
1756 dev_id = pa_modargs_get_value(
1757 ma, "device_id",
1758 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1759
1760 if (reserve_init(u, dev_id) < 0)
1761 goto fail;
1762
1763 if (reserve_monitor_init(u, dev_id) < 0)
1764 goto fail;
1765
1766 b = use_mmap;
1767 d = use_tsched;
1768
1769 if (mapping) {
1770
1771 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1772 pa_log("device_id= not set");
1773 goto fail;
1774 }
1775
1776 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1777 dev_id,
1778 &u->device_name,
1779 &ss, &map,
1780 SND_PCM_STREAM_PLAYBACK,
1781 &period_frames, &buffer_frames, tsched_frames,
1782 &b, &d, mapping)))
1783
1784 goto fail;
1785
1786 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1787
1788 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1789 goto fail;
1790
1791 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1792 dev_id,
1793 &u->device_name,
1794 &ss, &map,
1795 SND_PCM_STREAM_PLAYBACK,
1796 &period_frames, &buffer_frames, tsched_frames,
1797 &b, &d, profile_set, &mapping)))
1798
1799 goto fail;
1800
1801 } else {
1802
1803 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1804 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1805 &u->device_name,
1806 &ss, &map,
1807 SND_PCM_STREAM_PLAYBACK,
1808 &period_frames, &buffer_frames, tsched_frames,
1809 &b, &d, FALSE)))
1810 goto fail;
1811 }
1812
1813 pa_assert(u->device_name);
1814 pa_log_info("Successfully opened device %s.", u->device_name);
1815
1816 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1817 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1818 goto fail;
1819 }
1820
1821 if (mapping)
1822 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1823
1824 if (use_mmap && !b) {
1825 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1826 u->use_mmap = use_mmap = FALSE;
1827 }
1828
1829 if (use_tsched && (!b || !d)) {
1830 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1831 u->use_tsched = use_tsched = FALSE;
1832 }
1833
1834 if (u->use_mmap)
1835 pa_log_info("Successfully enabled mmap() mode.");
1836
1837 if (u->use_tsched)
1838 pa_log_info("Successfully enabled timer-based scheduling mode.");
1839
1840 /* ALSA might tweak the sample spec, so recalculate the frame size */
1841 frame_size = pa_frame_size(&ss);
1842
1843 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1844
1845 pa_sink_new_data_init(&data);
1846 data.driver = driver;
1847 data.module = m;
1848 data.card = card;
1849 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1850 pa_sink_new_data_set_sample_spec(&data, &ss);
1851 pa_sink_new_data_set_channel_map(&data, &map);
1852
1853 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1854 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1855 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1856 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1857 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1858
1859 if (mapping) {
1860 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1861 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1862 }
1863
1864 pa_alsa_init_description(data.proplist);
1865
1866 if (u->control_device)
1867 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1868
1869 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1870 pa_log("Invalid properties");
1871 pa_sink_new_data_done(&data);
1872 goto fail;
1873 }
1874
1875 if (u->mixer_path_set)
1876 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1877
1878 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1879 pa_sink_new_data_done(&data);
1880
1881 if (!u->sink) {
1882 pa_log("Failed to create sink object");
1883 goto fail;
1884 }
1885
1886 u->sink->parent.process_msg = sink_process_msg;
1887 if (u->use_tsched)
1888 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1889 u->sink->set_state = sink_set_state_cb;
1890 u->sink->set_port = sink_set_port_cb;
1891 u->sink->userdata = u;
1892
1893 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1894 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1895
1896 u->frame_size = frame_size;
1897 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1898 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1899 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1900
1901 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1902 (double) u->hwbuf_size / (double) u->fragment_size,
1903 (long unsigned) u->fragment_size,
1904 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1905 (long unsigned) u->hwbuf_size,
1906 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1907
1908 pa_sink_set_max_request(u->sink, u->hwbuf_size);
1909 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1910
1911 if (u->use_tsched) {
1912 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1913
1914 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1915 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1916
1917 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1918 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1919
1920 fix_min_sleep_wakeup(u);
1921 fix_tsched_watermark(u);
1922
1923 pa_sink_set_latency_range(u->sink,
1924 0,
1925 pa_bytes_to_usec(u->hwbuf_size, &ss));
1926
1927 pa_log_info("Time scheduling watermark is %0.2fms",
1928 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1929 } else
1930 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
1931
1932 reserve_update(u);
1933
1934 if (update_sw_params(u) < 0)
1935 goto fail;
1936
1937 if (setup_mixer(u, ignore_dB) < 0)
1938 goto fail;
1939
1940 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1941
1942 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
1943 pa_log("Failed to create thread.");
1944 goto fail;
1945 }
1946
1947 /* Get initial mixer settings */
1948 if (data.volume_is_set) {
1949 if (u->sink->set_volume)
1950 u->sink->set_volume(u->sink);
1951 } else {
1952 if (u->sink->get_volume)
1953 u->sink->get_volume(u->sink);
1954 }
1955
1956 if (data.muted_is_set) {
1957 if (u->sink->set_mute)
1958 u->sink->set_mute(u->sink);
1959 } else {
1960 if (u->sink->get_mute)
1961 u->sink->get_mute(u->sink);
1962 }
1963
1964 pa_sink_put(u->sink);
1965
1966 if (profile_set)
1967 pa_alsa_profile_set_free(profile_set);
1968
1969 return u->sink;
1970
1971 fail:
1972
1973 if (u)
1974 userdata_free(u);
1975
1976 if (profile_set)
1977 pa_alsa_profile_set_free(profile_set);
1978
1979 return NULL;
1980 }
1981
/* Release everything hanging off a userdata structure. Safe to call on
 * a partially initialized instance: every member is checked before it
 * is freed. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink from the core before tearing anything down */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the IO thread to quit and wait for it to exit */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop playback before closing the device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* u->mixer_path may point into u->mixer_path_set (see setup_mixer),
     * so free only one of the two */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
2033
2034 void pa_alsa_sink_free(pa_sink *s) {
2035 struct userdata *u;
2036
2037 pa_sink_assert_ref(s);
2038 pa_assert_se(u = s->userdata);
2039
2040 userdata_free(u);
2041 }