]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
alsa: Fix assertion on mmap_write (triggered via a52 plugin)
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this theshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this theshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means tht we
76 * will increase the watermark only if we hit a real underrun. */
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
80
81 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
82 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
83
84 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
85
86 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256) /* 1.33ms @48kHz, should work for most hardware */
87
/* Per-sink instance state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread and the machinery used to communicate with it. */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;              /* NULL while the sink is suspended */

    /* Mixer state for hardware volume/mute control. */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;         /* last volume read from/written to the hw mixer */

    /* All of the following sizes are in bytes. */
    size_t
        frame_size,                     /* size of one sample frame */
        fragment_size,                  /* ALSA period size */
        hwbuf_size,                     /* total hardware buffer size */
        tsched_watermark,               /* refill watermark for timer-based scheduling */
        hwbuf_unused,                   /* part of the hw buffer we deliberately leave unused */
        min_sleep,                      /* lower bound on sleep length, see fix_min_sleep_wakeup() */
        min_wakeup,                     /* wake up at least this early before the buffer empties */
        watermark_inc_step,             /* how much to raise the watermark on trouble */
        watermark_dec_step,             /* how much to lower the watermark when things go well */
        watermark_inc_threshold,        /* raise watermark when fill level drops below this */
        watermark_dec_threshold,        /* lower watermark when fill level stays above this */
        rewind_safeguard;               /* bytes kept back from rewinds */

    /* Earliest time at which decrease_watermark() may act again. */
    pa_usec_t watermark_dec_not_before;

    /* Partially written render chunk (used by the non-mmap write path). */
    pa_memchunk memchunk;

    char *device_name;                  /* name of the PCM device */
    char *control_device;               /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    /* first: no samples written since open/recover; after_rewind: a rewind
     * just happened — both suppress underrun accounting for one round. */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;   /* poll item watching the PCM fds */

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Clock smoother state mapping bytes written to playback time. */
    pa_smoother *smoother;
    uint64_t write_count;               /* total bytes handed to ALSA */
    uint64_t since_start;               /* bytes written since last start/recover */
    pa_usec_t smoother_interval;        /* current (exponentially growing) update interval */
    pa_usec_t last_smoother_update;

    /* Device reservation (rd_device protocol) state. */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
146
147 static void userdata_free(struct userdata *u);
148
149 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
150 pa_assert(r);
151 pa_assert(u);
152
153 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
154 return PA_HOOK_CANCEL;
155
156 return PA_HOOK_OK;
157 }
158
159 static void reserve_done(struct userdata *u) {
160 pa_assert(u);
161
162 if (u->reserve_slot) {
163 pa_hook_slot_free(u->reserve_slot);
164 u->reserve_slot = NULL;
165 }
166
167 if (u->reserve) {
168 pa_reserve_wrapper_unref(u->reserve);
169 u->reserve = NULL;
170 }
171 }
172
173 static void reserve_update(struct userdata *u) {
174 const char *description;
175 pa_assert(u);
176
177 if (!u->sink || !u->reserve)
178 return;
179
180 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
181 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
182 }
183
184 static int reserve_init(struct userdata *u, const char *dname) {
185 char *rname;
186
187 pa_assert(u);
188 pa_assert(dname);
189
190 if (u->reserve)
191 return 0;
192
193 if (pa_in_system_mode())
194 return 0;
195
196 if (!(rname = pa_alsa_get_reserve_name(dname)))
197 return 0;
198
199 /* We are resuming, try to lock the device */
200 u->reserve = pa_reserve_wrapper_get(u->core, rname);
201 pa_xfree(rname);
202
203 if (!(u->reserve))
204 return -1;
205
206 reserve_update(u);
207
208 pa_assert(!u->reserve_slot);
209 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
210
211 return 0;
212 }
213
214 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
215 pa_bool_t b;
216
217 pa_assert(w);
218 pa_assert(u);
219
220 b = PA_PTR_TO_UINT(busy) && !u->reserve;
221
222 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
223 return PA_HOOK_OK;
224 }
225
226 static void monitor_done(struct userdata *u) {
227 pa_assert(u);
228
229 if (u->monitor_slot) {
230 pa_hook_slot_free(u->monitor_slot);
231 u->monitor_slot = NULL;
232 }
233
234 if (u->monitor) {
235 pa_reserve_monitor_wrapper_unref(u->monitor);
236 u->monitor = NULL;
237 }
238 }
239
240 static int reserve_monitor_init(struct userdata *u, const char *dname) {
241 char *rname;
242
243 pa_assert(u);
244 pa_assert(dname);
245
246 if (pa_in_system_mode())
247 return 0;
248
249 if (!(rname = pa_alsa_get_reserve_name(dname)))
250 return 0;
251
252 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
253 pa_xfree(rname);
254
255 if (!(u->monitor))
256 return -1;
257
258 pa_assert(!u->monitor_slot);
259 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
260
261 return 0;
262 }
263
264 static void fix_min_sleep_wakeup(struct userdata *u) {
265 size_t max_use, max_use_2;
266
267 pa_assert(u);
268 pa_assert(u->use_tsched);
269
270 max_use = u->hwbuf_size - u->hwbuf_unused;
271 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
272
273 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
274 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
275
276 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
277 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
278 }
279
280 static void fix_tsched_watermark(struct userdata *u) {
281 size_t max_use;
282 pa_assert(u);
283 pa_assert(u->use_tsched);
284
285 max_use = u->hwbuf_size - u->hwbuf_unused;
286
287 if (u->tsched_watermark > max_use - u->min_sleep)
288 u->tsched_watermark = max_use - u->min_sleep;
289
290 if (u->tsched_watermark < u->min_wakeup)
291 u->tsched_watermark = u->min_wakeup;
292 }
293
294 static void increase_watermark(struct userdata *u) {
295 size_t old_watermark;
296 pa_usec_t old_min_latency, new_min_latency;
297
298 pa_assert(u);
299 pa_assert(u->use_tsched);
300
301 /* First, just try to increase the watermark */
302 old_watermark = u->tsched_watermark;
303 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
304 fix_tsched_watermark(u);
305
306 if (old_watermark != u->tsched_watermark) {
307 pa_log_info("Increasing wakeup watermark to %0.2f ms",
308 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
309 return;
310 }
311
312 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
313 old_min_latency = u->sink->thread_info.min_latency;
314 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
315 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
316
317 if (old_min_latency != new_min_latency) {
318 pa_log_info("Increasing minimal latency to %0.2f ms",
319 (double) new_min_latency / PA_USEC_PER_MSEC);
320
321 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
322 }
323
324 /* When we reach this we're officialy fucked! */
325 }
326
/* Opportunistically lower the wakeup watermark after a period of
 * trouble-free playback (TSCHED_WATERMARK_VERIFY_AFTER_USEC), so we can
 * sleep longer between wakeups. Called from the IO thread via
 * check_left_to_play() when woken by a timeout. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* 0 means the verification window was reset (see check_left_to_play());
     * open a fresh window and don't decrease anything yet. */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Still inside the verification window — too early to decrease. */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    /* Never remove more than half of the current watermark in one step. */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range*/

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
360
361 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
362 pa_usec_t usec, wm;
363
364 pa_assert(sleep_usec);
365 pa_assert(process_usec);
366
367 pa_assert(u);
368 pa_assert(u->use_tsched);
369
370 usec = pa_sink_get_requested_latency_within_thread(u->sink);
371
372 if (usec == (pa_usec_t) -1)
373 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
374
375 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
376
377 if (wm > usec)
378 wm = usec/2;
379
380 *sleep_usec = usec - wm;
381 *process_usec = wm;
382
383 #ifdef DEBUG_TIMING
384 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
385 (unsigned long) (usec / PA_USEC_PER_MSEC),
386 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
387 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
388 #endif
389 }
390
391 static int try_recover(struct userdata *u, const char *call, int err) {
392 pa_assert(u);
393 pa_assert(call);
394 pa_assert(err < 0);
395
396 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
397
398 pa_assert(err != -EAGAIN);
399
400 if (err == -EPIPE)
401 pa_log_debug("%s: Buffer underrun!", call);
402
403 if (err == -ESTRPIPE)
404 pa_log_debug("%s: System suspended!", call);
405
406 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
407 pa_log("%s: %s", call, pa_alsa_strerror(err));
408 return -1;
409 }
410
411 u->first = TRUE;
412 u->since_start = 0;
413 return 0;
414 }
415
/* Given how many bytes of the hardware buffer are currently writable
 * (n_bytes), return how many bytes are still queued for playback.
 * Also detects underruns and, for timer-based scheduling, adapts the
 * wakeup watermark up or down accordingly. 'on_timeout' tells whether
 * this wakeup came from our own timer (only then may we decrease). */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Don't complain right after start or after a rewind — dropouts
         * are expected then. */
        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit())
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            /* Trouble (or fill level below the inc threshold): raise the
             * watermark. A comfortable fill level: maybe lower it. */
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        /* Anything but a sustained comfortable fill level restarts the
         * verification window used by decrease_watermark(). */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
473
/* Fill the hardware buffer using ALSA's mmap interface: render sink audio
 * directly into the DMA buffer via snd_pcm_mmap_begin()/commit().
 * 'polled' is TRUE when we were woken by POLLOUT rather than our timer;
 * 'on_timeout' is TRUE on a timer wakeup (it is consumed by the first
 * check_left_to_play() call only). On return *sleep_usec holds how long
 * the caller may sleep. Returns 1 if data was written, 0 if not, and a
 * negative value on unrecoverable error. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    /* NOTE(review): initialized to TRUE here while unix_write() starts
     * with FALSE, so this function always reports "work done" — confirm
     * this asymmetry is intentional. */
    pa_bool_t work_done = TRUE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        /* TRUE until the first successful mmap transfer after the avail
         * check; before that, 0 frames or -EAGAIN from ALSA would be
         * unexpected and is routed through try_recover() instead of a
         * silent break. */
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
            * half the sleep time is over because otherwise we might
            * ask for more data from the clients then they expect. We
            * need to guarantee that clients only have to keep around
            * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT woke us but there is nothing to write — a driver
             * bug; warn once per process. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        /* Safety valve: never loop forever in one dispatch. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                /* -EAGAIN after a successful transfer just means the
                 * buffer is full again — stop filling, don't recover. */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            /* 0 frames after an earlier transfer is a normal "buffer is
             * full" condition; this guards the assertion below. */
            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the DMA area in a fixed memblock so the sink can
             * render straight into it without an extra copy. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the buffer drains to the watermark, minus the time
         * we budgeted for rendering after the wakeup. */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
650
/* Fill the hardware buffer via classic snd_pcm_writei(), for devices that
 * don't support mmap. Same contract as mmap_write(): returns 1 if data
 * was written, 0 if not, negative on unrecoverable error; *sleep_usec
 * receives how long the caller may sleep. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        /* TRUE until the first successful write after the avail check;
         * see mmap_write() for the rationale. */
        pa_bool_t after_avail = TRUE;

        /* How much room is there in the hardware buffer right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
            * half the sleep time is over because otherwise we might
            * ask for more data from the clients then they expect. We
            * need to guarantee that clients only have to keep around
            * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT woke us but there is nothing to write — a driver
             * bug; warn once per process. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve: never loop forever in one dispatch. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one has been
             * fully handed to ALSA. */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                /* -EAGAIN after a successful write just means the buffer
                 * is full again — stop filling, don't recover. */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* A short write of 0 after an earlier transfer means "full". */
            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Advance the chunk past the bytes ALSA accepted. */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the buffer drains to the watermark, minus the time
         * budgeted for rendering after the wakeup. */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
790
/* Feed a fresh (system time, playback time) sample into the clock
 * smoother. Rate-limited by u->smoother_interval, which grows
 * exponentially up to SMOOTHER_MAX_INTERVAL. Called from the IO thread. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver's own timestamp for "now" when available. */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Playback position = bytes handed to ALSA minus what is still
     * queued in the hardware (delay, in frames). */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
840
841 static pa_usec_t sink_get_latency(struct userdata *u) {
842 pa_usec_t r;
843 int64_t delay;
844 pa_usec_t now1, now2;
845
846 pa_assert(u);
847
848 now1 = pa_rtclock_now();
849 now2 = pa_smoother_get(u->smoother, now1);
850
851 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
852
853 r = delay >= 0 ? (pa_usec_t) delay : 0;
854
855 if (u->memchunk.memblock)
856 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
857
858 return r;
859 }
860
861 static int build_pollfd(struct userdata *u) {
862 pa_assert(u);
863 pa_assert(u->pcm_handle);
864
865 if (u->alsa_rtpoll_item)
866 pa_rtpoll_item_free(u->alsa_rtpoll_item);
867
868 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
869 return -1;
870
871 return 0;
872 }
873
874 /* Called from IO context */
875 static int suspend(struct userdata *u) {
876 pa_assert(u);
877 pa_assert(u->pcm_handle);
878
879 pa_smoother_pause(u->smoother, pa_rtclock_now());
880
881 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
882 * take awfully long with our long buffer sizes today. */
883 snd_pcm_close(u->pcm_handle);
884 u->pcm_handle = NULL;
885
886 if (u->alsa_rtpoll_item) {
887 pa_rtpoll_item_free(u->alsa_rtpoll_item);
888 u->alsa_rtpoll_item = NULL;
889 }
890
891 /* We reset max_rewind/max_request here to make sure that while we
892 * are suspended the old max_request/max_rewind values set before
893 * the suspend can influence the per-stream buffer of newly
894 * created streams, without their requirements having any
895 * influence on them. */
896 pa_sink_set_max_rewind_within_thread(u->sink, 0);
897 pa_sink_set_max_request_within_thread(u->sink, 0);
898
899 pa_log_info("Device suspended...");
900
901 return 0;
902 }
903
904 /* Called from IO context */
905 static int update_sw_params(struct userdata *u) {
906 snd_pcm_uframes_t avail_min;
907 int err;
908
909 pa_assert(u);
910
911 /* Use the full buffer if noone asked us for anything specific */
912 u->hwbuf_unused = 0;
913
914 if (u->use_tsched) {
915 pa_usec_t latency;
916
917 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
918 size_t b;
919
920 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
921
922 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
923
924 /* We need at least one sample in our buffer */
925
926 if (PA_UNLIKELY(b < u->frame_size))
927 b = u->frame_size;
928
929 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
930 }
931
932 fix_min_sleep_wakeup(u);
933 fix_tsched_watermark(u);
934 }
935
936 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
937
938 /* We need at last one frame in the used part of the buffer */
939 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
940
941 if (u->use_tsched) {
942 pa_usec_t sleep_usec, process_usec;
943
944 hw_sleep_time(u, &sleep_usec, &process_usec);
945 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
946 }
947
948 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
949
950 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
951 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
952 return err;
953 }
954
955 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
956 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
957
958 return 0;
959 }
960
/* Called from IO context: reopen and reconfigure the PCM device after a
 * suspend. The device must come back with exactly the same sample spec,
 * access mode, and buffer geometry as before — otherwise the resume
 * fails. Returns 0 on success, -PA_ERR_IO on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before the suspend. */
    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* The helper may have negotiated different settings; verify that
     * everything matches what the sink was created with. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart timing from scratch: reset write counter and smoother. */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1037
/* Called from IO context: message handler for the sink object. Handles
 * latency queries and sink state transitions (suspend/resume/startup);
 * everything else is forwarded to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended (no PCM handle) the latency is reported as 0. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    /* Only an opened sink can transition into SUSPENDED. */
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    /* First activation: set up the poll descriptors. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Coming back from suspend: reopen the device. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states. */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
1098
1099 /* Called from main context */
1100 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1101 pa_sink_state_t old_state;
1102 struct userdata *u;
1103
1104 pa_sink_assert_ref(s);
1105 pa_assert_se(u = s->userdata);
1106
1107 old_state = pa_sink_get_state(u->sink);
1108
1109 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1110 reserve_done(u);
1111 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1112 if (reserve_init(u, u->device_name) < 0)
1113 return -PA_ERR_BUSY;
1114
1115 return 0;
1116 }
1117
1118 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1119 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1120
1121 pa_assert(u);
1122 pa_assert(u->mixer_handle);
1123
1124 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1125 return 0;
1126
1127 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1128 return 0;
1129
1130 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1131 pa_sink_get_volume(u->sink, TRUE);
1132 pa_sink_get_mute(u->sink, TRUE);
1133 }
1134
1135 return 0;
1136 }
1137
1138 static void sink_get_volume_cb(pa_sink *s) {
1139 struct userdata *u = s->userdata;
1140 pa_cvolume r;
1141 char t[PA_CVOLUME_SNPRINT_MAX];
1142
1143 pa_assert(u);
1144 pa_assert(u->mixer_path);
1145 pa_assert(u->mixer_handle);
1146
1147 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1148 return;
1149
1150 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1151 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1152
1153 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1154
1155 if (pa_cvolume_equal(&u->hardware_volume, &r))
1156 return;
1157
1158 s->real_volume = u->hardware_volume = r;
1159
1160 /* Hmm, so the hardware volume changed, let's reset our software volume */
1161 if (u->mixer_path->has_dB)
1162 pa_sink_set_soft_volume(s, NULL);
1163 }
1164
/* Push the sink's requested volume to the hardware, then compute the
 * residual software volume needed to match the request exactly (only
 * meaningful when the mixer path has a dB scale). */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* NOTE(review): pa_alsa_path_set_volume() appears to round r to the
     * volume actually applied in hardware — confirm against alsa-util */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1216
1217 static void sink_get_mute_cb(pa_sink *s) {
1218 struct userdata *u = s->userdata;
1219 pa_bool_t b;
1220
1221 pa_assert(u);
1222 pa_assert(u->mixer_path);
1223 pa_assert(u->mixer_handle);
1224
1225 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1226 return;
1227
1228 s->muted = b;
1229 }
1230
1231 static void sink_set_mute_cb(pa_sink *s) {
1232 struct userdata *u = s->userdata;
1233
1234 pa_assert(u);
1235 pa_assert(u->mixer_path);
1236 pa_assert(u->mixer_handle);
1237
1238 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1239 }
1240
1241 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1242 struct userdata *u = s->userdata;
1243 pa_alsa_port_data *data;
1244
1245 pa_assert(u);
1246 pa_assert(p);
1247 pa_assert(u->mixer_handle);
1248
1249 data = PA_DEVICE_PORT_DATA(p);
1250
1251 pa_assert_se(u->mixer_path = data->path);
1252 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1253
1254 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1255 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1256 s->n_volume_steps = PA_VOLUME_NORM+1;
1257
1258 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1259 } else {
1260 s->base_volume = PA_VOLUME_NORM;
1261 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1262 }
1263
1264 if (data->setting)
1265 pa_alsa_setting_select(data->setting, u->mixer_handle);
1266
1267 if (s->set_mute)
1268 s->set_mute(s);
1269 if (s->set_volume)
1270 s->set_volume(s);
1271
1272 return 0;
1273 }
1274
1275 static void sink_update_requested_latency_cb(pa_sink *s) {
1276 struct userdata *u = s->userdata;
1277 size_t before;
1278 pa_assert(u);
1279 pa_assert(u->use_tsched); /* only when timer scheduling is used
1280 * we can dynamically adjust the
1281 * latency */
1282
1283 if (!u->pcm_handle)
1284 return;
1285
1286 before = u->hwbuf_unused;
1287 update_sw_params(u);
1288
1289 /* Let's check whether we now use only a smaller part of the
1290 buffer then before. If so, we need to make sure that subsequent
1291 rewinds are relative to the new maximum fill level and not to the
1292 current fill level. Thus, let's do a full rewind once, to clear
1293 things up. */
1294
1295 if (u->hwbuf_unused > before) {
1296 pa_log_debug("Requesting rewind due to latency change.");
1297 pa_sink_request_rewind(s, (size_t) -1);
1298 }
1299 }
1300
1301 static int process_rewind(struct userdata *u) {
1302 snd_pcm_sframes_t unused;
1303 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1304 pa_assert(u);
1305
1306 /* Figure out how much we shall rewind and reset the counter */
1307 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1308
1309 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1310
1311 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1312 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1313 return -1;
1314 }
1315
1316 unused_nbytes = (size_t) unused * u->frame_size;
1317
1318 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1319 unused_nbytes += u->rewind_safeguard;
1320
1321 if (u->hwbuf_size > unused_nbytes)
1322 limit_nbytes = u->hwbuf_size - unused_nbytes;
1323 else
1324 limit_nbytes = 0;
1325
1326 if (rewind_nbytes > limit_nbytes)
1327 rewind_nbytes = limit_nbytes;
1328
1329 if (rewind_nbytes > 0) {
1330 snd_pcm_sframes_t in_frames, out_frames;
1331
1332 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1333
1334 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1335 pa_log_debug("before: %lu", (unsigned long) in_frames);
1336 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1337 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1338 if (try_recover(u, "process_rewind", out_frames) < 0)
1339 return -1;
1340 out_frames = 0;
1341 }
1342
1343 pa_log_debug("after: %lu", (unsigned long) out_frames);
1344
1345 rewind_nbytes = (size_t) out_frames * u->frame_size;
1346
1347 if (rewind_nbytes <= 0)
1348 pa_log_info("Tried rewind, but was apparently not possible.");
1349 else {
1350 u->write_count -= rewind_nbytes;
1351 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1352 pa_sink_process_rewind(u->sink, rewind_nbytes);
1353
1354 u->after_rewind = TRUE;
1355 return 0;
1356 }
1357 } else
1358 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1359
1360 pa_sink_process_rewind(u->sink, 0);
1361 return 0;
1362 }
1363
/* The sink's IO thread: renders audio into the ALSA device, schedules
 * wakeups (timer- or poll-driven), and recovers from xruns, looping
 * until told to shut down via the rtpoll/message queue. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* Handle any pending rewind before writing new data */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)start: kick off playback and
                 * resume the clock smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means the rtpoll was asked to quit cleanly */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT signals an error/xrun: try to
             * recover and restart playback from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1498
1499 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1500 const char *n;
1501 char *t;
1502
1503 pa_assert(data);
1504 pa_assert(ma);
1505 pa_assert(device_name);
1506
1507 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1508 pa_sink_new_data_set_name(data, n);
1509 data->namereg_fail = TRUE;
1510 return;
1511 }
1512
1513 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1514 data->namereg_fail = TRUE;
1515 else {
1516 n = device_id ? device_id : device_name;
1517 data->namereg_fail = FALSE;
1518 }
1519
1520 if (mapping)
1521 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1522 else
1523 t = pa_sprintf_malloc("alsa_output.%s", n);
1524
1525 pa_sink_new_data_set_name(data, t);
1526 pa_xfree(t);
1527 }
1528
/* Locate a usable mixer for the PCM and probe either the single element
 * requested via control= or the whole path set of the mapping. On any
 * probing failure all mixer state is torn down again, leaving the sink
 * without hardware volume control. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Nothing to look for */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* A single mixer element was named explicitly via control= */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Probe all candidate paths for the mapping */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:

    /* Undo whichever of the two path representations got created;
     * at most one of them is owned at this point */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1577
/* Hook up hardware volume/mute control for the mixer path found by
 * find_mixer() (if any) and start monitoring mixer events from the main
 * loop. Returns 0 on success (including "no mixer available"), -1 if fd
 * monitoring could not be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No usable mixer was found earlier: stick to software volume/mute */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No active port: fall back to the first probed path, if any */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Scale so that the path's maximum dB maps onto 0dB */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer's fds from the main loop so that external volume
     * changes are picked up via mixer_callback() */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1665
1666 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1667
1668 struct userdata *u = NULL;
1669 const char *dev_id = NULL;
1670 pa_sample_spec ss, requested_ss;
1671 pa_channel_map map;
1672 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1673 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1674 size_t frame_size, rewind_safeguard;
1675 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1676 pa_sink_new_data data;
1677 pa_alsa_profile_set *profile_set = NULL;
1678
1679 pa_assert(m);
1680 pa_assert(ma);
1681
1682 ss = m->core->default_sample_spec;
1683 map = m->core->default_channel_map;
1684 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1685 pa_log("Failed to parse sample specification and channel map");
1686 goto fail;
1687 }
1688
1689 requested_ss = ss;
1690 frame_size = pa_frame_size(&ss);
1691
1692 nfrags = m->core->default_n_fragments;
1693 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1694 if (frag_size <= 0)
1695 frag_size = (uint32_t) frame_size;
1696 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1697 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1698
1699 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1700 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1701 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1702 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1703 pa_log("Failed to parse buffer metrics");
1704 goto fail;
1705 }
1706
1707 buffer_size = nfrags * frag_size;
1708
1709 period_frames = frag_size/frame_size;
1710 buffer_frames = buffer_size/frame_size;
1711 tsched_frames = tsched_size/frame_size;
1712
1713 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1714 pa_log("Failed to parse mmap argument.");
1715 goto fail;
1716 }
1717
1718 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1719 pa_log("Failed to parse tsched argument.");
1720 goto fail;
1721 }
1722
1723 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1724 pa_log("Failed to parse ignore_dB argument.");
1725 goto fail;
1726 }
1727
1728 rewind_safeguard = DEFAULT_REWIND_SAFEGUARD_BYTES;
1729 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
1730 pa_log("Failed to parse rewind_safeguard argument");
1731 goto fail;
1732 }
1733
1734 use_tsched = pa_alsa_may_tsched(use_tsched);
1735
1736 u = pa_xnew0(struct userdata, 1);
1737 u->core = m->core;
1738 u->module = m;
1739 u->use_mmap = use_mmap;
1740 u->use_tsched = use_tsched;
1741 u->first = TRUE;
1742 u->rewind_safeguard = rewind_safeguard;
1743 u->rtpoll = pa_rtpoll_new();
1744 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1745
1746 u->smoother = pa_smoother_new(
1747 DEFAULT_TSCHED_BUFFER_USEC*2,
1748 DEFAULT_TSCHED_BUFFER_USEC*2,
1749 TRUE,
1750 TRUE,
1751 5,
1752 pa_rtclock_now(),
1753 TRUE);
1754 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1755
1756 dev_id = pa_modargs_get_value(
1757 ma, "device_id",
1758 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1759
1760 if (reserve_init(u, dev_id) < 0)
1761 goto fail;
1762
1763 if (reserve_monitor_init(u, dev_id) < 0)
1764 goto fail;
1765
1766 b = use_mmap;
1767 d = use_tsched;
1768
1769 if (mapping) {
1770
1771 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1772 pa_log("device_id= not set");
1773 goto fail;
1774 }
1775
1776 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1777 dev_id,
1778 &u->device_name,
1779 &ss, &map,
1780 SND_PCM_STREAM_PLAYBACK,
1781 &period_frames, &buffer_frames, tsched_frames,
1782 &b, &d, mapping)))
1783
1784 goto fail;
1785
1786 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1787
1788 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1789 goto fail;
1790
1791 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1792 dev_id,
1793 &u->device_name,
1794 &ss, &map,
1795 SND_PCM_STREAM_PLAYBACK,
1796 &period_frames, &buffer_frames, tsched_frames,
1797 &b, &d, profile_set, &mapping)))
1798
1799 goto fail;
1800
1801 } else {
1802
1803 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1804 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1805 &u->device_name,
1806 &ss, &map,
1807 SND_PCM_STREAM_PLAYBACK,
1808 &period_frames, &buffer_frames, tsched_frames,
1809 &b, &d, FALSE)))
1810 goto fail;
1811 }
1812
1813 pa_assert(u->device_name);
1814 pa_log_info("Successfully opened device %s.", u->device_name);
1815
1816 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1817 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1818 goto fail;
1819 }
1820
1821 if (mapping)
1822 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1823
1824 if (use_mmap && !b) {
1825 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1826 u->use_mmap = use_mmap = FALSE;
1827 }
1828
1829 if (use_tsched && (!b || !d)) {
1830 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1831 u->use_tsched = use_tsched = FALSE;
1832 }
1833
1834 if (u->use_mmap)
1835 pa_log_info("Successfully enabled mmap() mode.");
1836
1837 if (u->use_tsched)
1838 pa_log_info("Successfully enabled timer-based scheduling mode.");
1839
1840 /* ALSA might tweak the sample spec, so recalculate the frame size */
1841 frame_size = pa_frame_size(&ss);
1842
1843 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1844
1845 pa_sink_new_data_init(&data);
1846 data.driver = driver;
1847 data.module = m;
1848 data.card = card;
1849 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1850 pa_sink_new_data_set_sample_spec(&data, &ss);
1851 pa_sink_new_data_set_channel_map(&data, &map);
1852
1853 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1854 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1855 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1856 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1857 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1858
1859 if (mapping) {
1860 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1861 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1862 }
1863
1864 pa_alsa_init_description(data.proplist);
1865
1866 if (u->control_device)
1867 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1868
1869 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1870 pa_log("Invalid properties");
1871 pa_sink_new_data_done(&data);
1872 goto fail;
1873 }
1874
1875 if (u->mixer_path_set)
1876 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1877
1878 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1879 pa_sink_new_data_done(&data);
1880
1881 if (!u->sink) {
1882 pa_log("Failed to create sink object");
1883 goto fail;
1884 }
1885
1886 u->sink->parent.process_msg = sink_process_msg;
1887 if (u->use_tsched)
1888 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1889 u->sink->set_state = sink_set_state_cb;
1890 u->sink->set_port = sink_set_port_cb;
1891 u->sink->userdata = u;
1892
1893 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1894 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1895
1896 u->frame_size = frame_size;
1897 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1898 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1899 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1900
1901 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1902 (double) u->hwbuf_size / (double) u->fragment_size,
1903 (long unsigned) u->fragment_size,
1904 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1905 (long unsigned) u->hwbuf_size,
1906 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1907
1908 pa_sink_set_max_request(u->sink, u->hwbuf_size);
1909 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1910
1911 if (u->use_tsched) {
1912 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1913
1914 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1915 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1916
1917 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1918 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1919
1920 fix_min_sleep_wakeup(u);
1921 fix_tsched_watermark(u);
1922
1923 pa_sink_set_latency_range(u->sink,
1924 0,
1925 pa_bytes_to_usec(u->hwbuf_size, &ss));
1926
1927 pa_log_info("Time scheduling watermark is %0.2fms",
1928 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1929 } else
1930 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
1931
1932 reserve_update(u);
1933
1934 if (update_sw_params(u) < 0)
1935 goto fail;
1936
1937 if (setup_mixer(u, ignore_dB) < 0)
1938 goto fail;
1939
1940 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1941
1942 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
1943 pa_log("Failed to create thread.");
1944 goto fail;
1945 }
1946
1947 /* Get initial mixer settings */
1948 if (data.volume_is_set) {
1949 if (u->sink->set_volume)
1950 u->sink->set_volume(u->sink);
1951 } else {
1952 if (u->sink->get_volume)
1953 u->sink->get_volume(u->sink);
1954 }
1955
1956 if (data.muted_is_set) {
1957 if (u->sink->set_mute)
1958 u->sink->set_mute(u->sink);
1959 } else {
1960 if (u->sink->get_mute)
1961 u->sink->get_mute(u->sink);
1962 }
1963
1964 pa_sink_put(u->sink);
1965
1966 if (profile_set)
1967 pa_alsa_profile_set_free(profile_set);
1968
1969 return u->sink;
1970
1971 fail:
1972
1973 if (u)
1974 userdata_free(u);
1975
1976 if (profile_set)
1977 pa_alsa_profile_set_free(profile_set);
1978
1979 return NULL;
1980 }
1981
/* Tear down all sink state. Order matters: the sink is unlinked first so
 * no new work arrives, the IO thread is stopped before anything it uses
 * is freed, and the PCM is drained/closed before the mixer goes away. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        /* Synchronously stop the IO thread before freeing its resources */
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        /* Stop playback and discard pending samples before closing */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* At most one of path set / single path is owned at a time */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
2033
2034 void pa_alsa_sink_free(pa_sink *s) {
2035 struct userdata *u;
2036
2037 pa_sink_assert_ref(s);
2038 pa_assert_se(u = s->userdata);
2039
2040 userdata_free(u);
2041 }