]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
alsa-sink: Get rid of a compiler warning regarding rewind_safeguard type.
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this theshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this theshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means tht we
76 * will increase the watermark only if we hit a real underrun. */
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
80
81 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
82 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
83
84 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
85
86 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256) /* 1.33ms @48kHz, should work for most hardware */
87
/* Per-sink instance state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;                /* NULL while suspended */

    /* Mixer (hardware volume/mute) plumbing */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    /* All byte quantities below are in bytes of the sink's sample spec */
    size_t
        frame_size,                       /* bytes per audio frame */
        fragment_size,                    /* ALSA period size */
        hwbuf_size,                       /* total hardware buffer size */
        tsched_watermark,                 /* refill when this much is left (timer scheduling) */
        hwbuf_unused,                     /* part of the hw buffer we deliberately leave empty */
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;                 /* bytes never rewound past, see DEFAULT_REWIND_SAFEGUARD_BYTES */

    pa_usec_t watermark_dec_not_before;   /* 0 = decrease allowed; else earliest time for a decrease */

    pa_memchunk memchunk;                 /* pending partial chunk for unix_write() */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    pa_bool_t first, after_rewind;        /* suppress underrun handling right after start/rewind */

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation state */
    pa_smoother *smoother;
    uint64_t write_count;                 /* total bytes handed to ALSA since (un)suspend */
    uint64_t since_start;                 /* bytes written since last start/recovery */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Device reservation (session-level device sharing) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
146
147 static void userdata_free(struct userdata *u);
148
149 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
150 pa_assert(r);
151 pa_assert(u);
152
153 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
154 return PA_HOOK_CANCEL;
155
156 return PA_HOOK_OK;
157 }
158
159 static void reserve_done(struct userdata *u) {
160 pa_assert(u);
161
162 if (u->reserve_slot) {
163 pa_hook_slot_free(u->reserve_slot);
164 u->reserve_slot = NULL;
165 }
166
167 if (u->reserve) {
168 pa_reserve_wrapper_unref(u->reserve);
169 u->reserve = NULL;
170 }
171 }
172
173 static void reserve_update(struct userdata *u) {
174 const char *description;
175 pa_assert(u);
176
177 if (!u->sink || !u->reserve)
178 return;
179
180 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
181 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
182 }
183
184 static int reserve_init(struct userdata *u, const char *dname) {
185 char *rname;
186
187 pa_assert(u);
188 pa_assert(dname);
189
190 if (u->reserve)
191 return 0;
192
193 if (pa_in_system_mode())
194 return 0;
195
196 if (!(rname = pa_alsa_get_reserve_name(dname)))
197 return 0;
198
199 /* We are resuming, try to lock the device */
200 u->reserve = pa_reserve_wrapper_get(u->core, rname);
201 pa_xfree(rname);
202
203 if (!(u->reserve))
204 return -1;
205
206 reserve_update(u);
207
208 pa_assert(!u->reserve_slot);
209 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
210
211 return 0;
212 }
213
214 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
215 pa_bool_t b;
216
217 pa_assert(w);
218 pa_assert(u);
219
220 b = PA_PTR_TO_UINT(busy) && !u->reserve;
221
222 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
223 return PA_HOOK_OK;
224 }
225
226 static void monitor_done(struct userdata *u) {
227 pa_assert(u);
228
229 if (u->monitor_slot) {
230 pa_hook_slot_free(u->monitor_slot);
231 u->monitor_slot = NULL;
232 }
233
234 if (u->monitor) {
235 pa_reserve_monitor_wrapper_unref(u->monitor);
236 u->monitor = NULL;
237 }
238 }
239
240 static int reserve_monitor_init(struct userdata *u, const char *dname) {
241 char *rname;
242
243 pa_assert(u);
244 pa_assert(dname);
245
246 if (pa_in_system_mode())
247 return 0;
248
249 if (!(rname = pa_alsa_get_reserve_name(dname)))
250 return 0;
251
252 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
253 pa_xfree(rname);
254
255 if (!(u->monitor))
256 return -1;
257
258 pa_assert(!u->monitor_slot);
259 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
260
261 return 0;
262 }
263
264 static void fix_min_sleep_wakeup(struct userdata *u) {
265 size_t max_use, max_use_2;
266
267 pa_assert(u);
268 pa_assert(u->use_tsched);
269
270 max_use = u->hwbuf_size - u->hwbuf_unused;
271 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
272
273 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
274 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
275
276 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
277 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
278 }
279
280 static void fix_tsched_watermark(struct userdata *u) {
281 size_t max_use;
282 pa_assert(u);
283 pa_assert(u->use_tsched);
284
285 max_use = u->hwbuf_size - u->hwbuf_unused;
286
287 if (u->tsched_watermark > max_use - u->min_sleep)
288 u->tsched_watermark = max_use - u->min_sleep;
289
290 if (u->tsched_watermark < u->min_wakeup)
291 u->tsched_watermark = u->min_wakeup;
292 }
293
294 static void increase_watermark(struct userdata *u) {
295 size_t old_watermark;
296 pa_usec_t old_min_latency, new_min_latency;
297
298 pa_assert(u);
299 pa_assert(u->use_tsched);
300
301 /* First, just try to increase the watermark */
302 old_watermark = u->tsched_watermark;
303 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
304 fix_tsched_watermark(u);
305
306 if (old_watermark != u->tsched_watermark) {
307 pa_log_info("Increasing wakeup watermark to %0.2f ms",
308 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
309 return;
310 }
311
312 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
313 old_min_latency = u->sink->thread_info.min_latency;
314 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
315 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
316
317 if (old_min_latency != new_min_latency) {
318 pa_log_info("Increasing minimal latency to %0.2f ms",
319 (double) new_min_latency / PA_USEC_PER_MSEC);
320
321 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
322 }
323
324 /* When we reach this we're officialy fucked! */
325 }
326
327 static void decrease_watermark(struct userdata *u) {
328 size_t old_watermark;
329 pa_usec_t now;
330
331 pa_assert(u);
332 pa_assert(u->use_tsched);
333
334 now = pa_rtclock_now();
335
336 if (u->watermark_dec_not_before <= 0)
337 goto restart;
338
339 if (u->watermark_dec_not_before > now)
340 return;
341
342 old_watermark = u->tsched_watermark;
343
344 if (u->tsched_watermark < u->watermark_dec_step)
345 u->tsched_watermark = u->tsched_watermark / 2;
346 else
347 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
348
349 fix_tsched_watermark(u);
350
351 if (old_watermark != u->tsched_watermark)
352 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
353 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
354
355 /* We don't change the latency range*/
356
357 restart:
358 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
359 }
360
361 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
362 pa_usec_t usec, wm;
363
364 pa_assert(sleep_usec);
365 pa_assert(process_usec);
366
367 pa_assert(u);
368 pa_assert(u->use_tsched);
369
370 usec = pa_sink_get_requested_latency_within_thread(u->sink);
371
372 if (usec == (pa_usec_t) -1)
373 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
374
375 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
376
377 if (wm > usec)
378 wm = usec/2;
379
380 *sleep_usec = usec - wm;
381 *process_usec = wm;
382
383 #ifdef DEBUG_TIMING
384 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
385 (unsigned long) (usec / PA_USEC_PER_MSEC),
386 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
387 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
388 #endif
389 }
390
391 static int try_recover(struct userdata *u, const char *call, int err) {
392 pa_assert(u);
393 pa_assert(call);
394 pa_assert(err < 0);
395
396 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
397
398 pa_assert(err != -EAGAIN);
399
400 if (err == -EPIPE)
401 pa_log_debug("%s: Buffer underrun!", call);
402
403 if (err == -ESTRPIPE)
404 pa_log_debug("%s: System suspended!", call);
405
406 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
407 pa_log("%s: %s", call, pa_alsa_strerror(err));
408 return -1;
409 }
410
411 u->first = TRUE;
412 u->since_start = 0;
413 return 0;
414 }
415
/* Convert the writable space reported by ALSA (n_bytes) into the number of
 * bytes still queued for playback, detect underruns, and adjust the
 * timer-scheduling watermark accordingly.
 *
 * n_bytes:    writable space in the hardware buffer, in bytes
 * on_timeout: TRUE if this wakeup came from our own scheduling timer; only
 *             then do we trust the measurement enough to lower the watermark
 * Returns the number of bytes left to play (0 on underrun). */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after a start or rewind an "underrun" is expected, so
         * don't log it in those cases */
        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit())
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        /* Buffer fill was not comfortably high: restart the verification
         * window for watermark decreases */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
473
474 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
475 pa_bool_t work_done = TRUE;
476 pa_usec_t max_sleep_usec = 0, process_usec = 0;
477 size_t left_to_play;
478 unsigned j = 0;
479
480 pa_assert(u);
481 pa_sink_assert_ref(u->sink);
482
483 if (u->use_tsched)
484 hw_sleep_time(u, &max_sleep_usec, &process_usec);
485
486 for (;;) {
487 snd_pcm_sframes_t n;
488 size_t n_bytes;
489 int r;
490 pa_bool_t after_avail = TRUE;
491
492 /* First we determine how many samples are missing to fill the
493 * buffer up to 100% */
494
495 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
496
497 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
498 continue;
499
500 return r;
501 }
502
503 n_bytes = (size_t) n * u->frame_size;
504
505 #ifdef DEBUG_TIMING
506 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
507 #endif
508
509 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
510 on_timeout = FALSE;
511
512 if (u->use_tsched)
513
514 /* We won't fill up the playback buffer before at least
515 * half the sleep time is over because otherwise we might
516 * ask for more data from the clients then they expect. We
517 * need to guarantee that clients only have to keep around
518 * a single hw buffer length. */
519
520 if (!polled &&
521 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
522 #ifdef DEBUG_TIMING
523 pa_log_debug("Not filling up, because too early.");
524 #endif
525 break;
526 }
527
528 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
529
530 if (polled)
531 PA_ONCE_BEGIN {
532 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
533 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
534 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
535 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
536 pa_strnull(dn));
537 pa_xfree(dn);
538 } PA_ONCE_END;
539
540 #ifdef DEBUG_TIMING
541 pa_log_debug("Not filling up, because not necessary.");
542 #endif
543 break;
544 }
545
546
547 if (++j > 10) {
548 #ifdef DEBUG_TIMING
549 pa_log_debug("Not filling up, because already too many iterations.");
550 #endif
551
552 break;
553 }
554
555 n_bytes -= u->hwbuf_unused;
556 polled = FALSE;
557
558 #ifdef DEBUG_TIMING
559 pa_log_debug("Filling up");
560 #endif
561
562 for (;;) {
563 pa_memchunk chunk;
564 void *p;
565 int err;
566 const snd_pcm_channel_area_t *areas;
567 snd_pcm_uframes_t offset, frames;
568 snd_pcm_sframes_t sframes;
569
570 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
571 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
572
573 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
574
575 if (!after_avail && err == -EAGAIN)
576 break;
577
578 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
579 continue;
580
581 return r;
582 }
583
584 /* Make sure that if these memblocks need to be copied they will fit into one slot */
585 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
586 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
587
588 if (!after_avail && frames == 0)
589 break;
590
591 pa_assert(frames > 0);
592 after_avail = FALSE;
593
594 /* Check these are multiples of 8 bit */
595 pa_assert((areas[0].first & 7) == 0);
596 pa_assert((areas[0].step & 7)== 0);
597
598 /* We assume a single interleaved memory buffer */
599 pa_assert((areas[0].first >> 3) == 0);
600 pa_assert((areas[0].step >> 3) == u->frame_size);
601
602 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
603
604 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
605 chunk.length = pa_memblock_get_length(chunk.memblock);
606 chunk.index = 0;
607
608 pa_sink_render_into_full(u->sink, &chunk);
609 pa_memblock_unref_fixed(chunk.memblock);
610
611 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
612
613 if (!after_avail && (int) sframes == -EAGAIN)
614 break;
615
616 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
617 continue;
618
619 return r;
620 }
621
622 work_done = TRUE;
623
624 u->write_count += frames * u->frame_size;
625 u->since_start += frames * u->frame_size;
626
627 #ifdef DEBUG_TIMING
628 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
629 #endif
630
631 if ((size_t) frames * u->frame_size >= n_bytes)
632 break;
633
634 n_bytes -= (size_t) frames * u->frame_size;
635 }
636 }
637
638 if (u->use_tsched) {
639 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
640
641 if (*sleep_usec > process_usec)
642 *sleep_usec -= process_usec;
643 else
644 *sleep_usec = 0;
645 } else
646 *sleep_usec = 0;
647
648 return work_done ? 1 : 0;
649 }
650
/* Fill the playback hardware buffer using classic snd_pcm_writei() transfer
 * (the non-mmap fallback path).
 *
 * Renders sink audio into u->memchunk and writes it to the device until the
 * buffer is full (minus hwbuf_unused), recovering from errors via
 * try_recover(). When timer scheduling is active, *sleep_usec is set to how
 * long we may sleep before the next refill.
 *
 * polled:     TRUE if we were woken up by POLLOUT rather than our timer
 * on_timeout: TRUE if this call was triggered by our scheduling timer
 * Returns 1 if any data was written, 0 if not, negative on fatal error. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How much space is available in the hardware buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Spurious POLLOUT wakeup: log once so buggy drivers are reported */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve against spinning forever in this loop */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Refill our staging chunk from the sink when it is exhausted */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            /* Never write more than the hardware can currently accept */
            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                /* -EAGAIN after the first write simply means "buffer full" */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Advance the staging chunk past the frames we just wrote */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep for the remaining playback time, minus the processing margin */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
790
791 static void update_smoother(struct userdata *u) {
792 snd_pcm_sframes_t delay = 0;
793 int64_t position;
794 int err;
795 pa_usec_t now1 = 0, now2;
796 snd_pcm_status_t *status;
797
798 snd_pcm_status_alloca(&status);
799
800 pa_assert(u);
801 pa_assert(u->pcm_handle);
802
803 /* Let's update the time smoother */
804
805 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
806 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
807 return;
808 }
809
810 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
811 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
812 else {
813 snd_htimestamp_t htstamp = { 0, 0 };
814 snd_pcm_status_get_htstamp(status, &htstamp);
815 now1 = pa_timespec_load(&htstamp);
816 }
817
818 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
819 if (now1 <= 0)
820 now1 = pa_rtclock_now();
821
822 /* check if the time since the last update is bigger than the interval */
823 if (u->last_smoother_update > 0)
824 if (u->last_smoother_update + u->smoother_interval > now1)
825 return;
826
827 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
828
829 if (PA_UNLIKELY(position < 0))
830 position = 0;
831
832 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
833
834 pa_smoother_put(u->smoother, now1, now2);
835
836 u->last_smoother_update = now1;
837 /* exponentially increase the update interval up to the MAX limit */
838 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
839 }
840
841 static pa_usec_t sink_get_latency(struct userdata *u) {
842 pa_usec_t r;
843 int64_t delay;
844 pa_usec_t now1, now2;
845
846 pa_assert(u);
847
848 now1 = pa_rtclock_now();
849 now2 = pa_smoother_get(u->smoother, now1);
850
851 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
852
853 r = delay >= 0 ? (pa_usec_t) delay : 0;
854
855 if (u->memchunk.memblock)
856 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
857
858 return r;
859 }
860
861 static int build_pollfd(struct userdata *u) {
862 pa_assert(u);
863 pa_assert(u->pcm_handle);
864
865 if (u->alsa_rtpoll_item)
866 pa_rtpoll_item_free(u->alsa_rtpoll_item);
867
868 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
869 return -1;
870
871 return 0;
872 }
873
874 /* Called from IO context */
875 static int suspend(struct userdata *u) {
876 pa_assert(u);
877 pa_assert(u->pcm_handle);
878
879 pa_smoother_pause(u->smoother, pa_rtclock_now());
880
881 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
882 * take awfully long with our long buffer sizes today. */
883 snd_pcm_close(u->pcm_handle);
884 u->pcm_handle = NULL;
885
886 if (u->alsa_rtpoll_item) {
887 pa_rtpoll_item_free(u->alsa_rtpoll_item);
888 u->alsa_rtpoll_item = NULL;
889 }
890
891 /* We reset max_rewind/max_request here to make sure that while we
892 * are suspended the old max_request/max_rewind values set before
893 * the suspend can influence the per-stream buffer of newly
894 * created streams, without their requirements having any
895 * influence on them. */
896 pa_sink_set_max_rewind_within_thread(u->sink, 0);
897 pa_sink_set_max_request_within_thread(u->sink, 0);
898
899 pa_log_info("Device suspended...");
900
901 return 0;
902 }
903
904 /* Called from IO context */
905 static int update_sw_params(struct userdata *u) {
906 snd_pcm_uframes_t avail_min;
907 int err;
908
909 pa_assert(u);
910
911 /* Use the full buffer if noone asked us for anything specific */
912 u->hwbuf_unused = 0;
913
914 if (u->use_tsched) {
915 pa_usec_t latency;
916
917 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
918 size_t b;
919
920 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
921
922 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
923
924 /* We need at least one sample in our buffer */
925
926 if (PA_UNLIKELY(b < u->frame_size))
927 b = u->frame_size;
928
929 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
930 }
931
932 fix_min_sleep_wakeup(u);
933 fix_tsched_watermark(u);
934 }
935
936 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
937
938 /* We need at last one frame in the used part of the buffer */
939 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
940
941 if (u->use_tsched) {
942 pa_usec_t sleep_usec, process_usec;
943
944 hw_sleep_time(u, &sleep_usec, &process_usec);
945 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
946 }
947
948 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
949
950 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
951 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
952 return err;
953 }
954
955 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
956 if (pa_alsa_pcm_is_hw(u->pcm_handle))
957 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
958 else {
959 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
960 pa_sink_set_max_rewind_within_thread(u->sink, 0);
961 }
962
963 return 0;
964 }
965
966 /* Called from IO context */
967 static int unsuspend(struct userdata *u) {
968 pa_sample_spec ss;
969 int err;
970 pa_bool_t b, d;
971 snd_pcm_uframes_t period_size, buffer_size;
972
973 pa_assert(u);
974 pa_assert(!u->pcm_handle);
975
976 pa_log_info("Trying resume...");
977
978 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
979 SND_PCM_NONBLOCK|
980 SND_PCM_NO_AUTO_RESAMPLE|
981 SND_PCM_NO_AUTO_CHANNELS|
982 SND_PCM_NO_AUTO_FORMAT)) < 0) {
983 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
984 goto fail;
985 }
986
987 ss = u->sink->sample_spec;
988 period_size = u->fragment_size / u->frame_size;
989 buffer_size = u->hwbuf_size / u->frame_size;
990 b = u->use_mmap;
991 d = u->use_tsched;
992
993 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
994 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
995 goto fail;
996 }
997
998 if (b != u->use_mmap || d != u->use_tsched) {
999 pa_log_warn("Resume failed, couldn't get original access mode.");
1000 goto fail;
1001 }
1002
1003 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1004 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1005 goto fail;
1006 }
1007
1008 if (period_size*u->frame_size != u->fragment_size ||
1009 buffer_size*u->frame_size != u->hwbuf_size) {
1010 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1011 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1012 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1013 goto fail;
1014 }
1015
1016 if (update_sw_params(u) < 0)
1017 goto fail;
1018
1019 if (build_pollfd(u) < 0)
1020 goto fail;
1021
1022 u->write_count = 0;
1023 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1024 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1025 u->last_smoother_update = 0;
1026
1027 u->first = TRUE;
1028 u->since_start = 0;
1029
1030 pa_log_info("Resumed successfully...");
1031
1032 return 0;
1033
1034 fail:
1035 if (u->pcm_handle) {
1036 snd_pcm_close(u->pcm_handle);
1037 u->pcm_handle = NULL;
1038 }
1039
1040 return -PA_ERR_IO;
1041 }
1042
1043 /* Called from IO context */
1044 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1045 struct userdata *u = PA_SINK(o)->userdata;
1046
1047 switch (code) {
1048
1049 case PA_SINK_MESSAGE_GET_LATENCY: {
1050 pa_usec_t r = 0;
1051
1052 if (u->pcm_handle)
1053 r = sink_get_latency(u);
1054
1055 *((pa_usec_t*) data) = r;
1056
1057 return 0;
1058 }
1059
1060 case PA_SINK_MESSAGE_SET_STATE:
1061
1062 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1063
1064 case PA_SINK_SUSPENDED: {
1065 int r;
1066
1067 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1068
1069 if ((r = suspend(u)) < 0)
1070 return r;
1071
1072 break;
1073 }
1074
1075 case PA_SINK_IDLE:
1076 case PA_SINK_RUNNING: {
1077 int r;
1078
1079 if (u->sink->thread_info.state == PA_SINK_INIT) {
1080 if (build_pollfd(u) < 0)
1081 return -PA_ERR_IO;
1082 }
1083
1084 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1085 if ((r = unsuspend(u)) < 0)
1086 return r;
1087 }
1088
1089 break;
1090 }
1091
1092 case PA_SINK_UNLINKED:
1093 case PA_SINK_INIT:
1094 case PA_SINK_INVALID_STATE:
1095 ;
1096 }
1097
1098 break;
1099 }
1100
1101 return pa_sink_process_msg(o, code, data, offset, chunk);
1102 }
1103
1104 /* Called from main context */
1105 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1106 pa_sink_state_t old_state;
1107 struct userdata *u;
1108
1109 pa_sink_assert_ref(s);
1110 pa_assert_se(u = s->userdata);
1111
1112 old_state = pa_sink_get_state(u->sink);
1113
1114 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1115 reserve_done(u);
1116 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1117 if (reserve_init(u, u->device_name) < 0)
1118 return -PA_ERR_BUSY;
1119
1120 return 0;
1121 }
1122
1123 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1124 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1125
1126 pa_assert(u);
1127 pa_assert(u->mixer_handle);
1128
1129 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1130 return 0;
1131
1132 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1133 return 0;
1134
1135 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1136 pa_sink_get_volume(u->sink, TRUE);
1137 pa_sink_get_mute(u->sink, TRUE);
1138 }
1139
1140 return 0;
1141 }
1142
1143 static void sink_get_volume_cb(pa_sink *s) {
1144 struct userdata *u = s->userdata;
1145 pa_cvolume r;
1146 char t[PA_CVOLUME_SNPRINT_MAX];
1147
1148 pa_assert(u);
1149 pa_assert(u->mixer_path);
1150 pa_assert(u->mixer_handle);
1151
1152 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1153 return;
1154
1155 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1156 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1157
1158 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1159
1160 if (pa_cvolume_equal(&u->hardware_volume, &r))
1161 return;
1162
1163 s->real_volume = u->hardware_volume = r;
1164
1165 /* Hmm, so the hardware volume changed, let's reset our software volume */
1166 if (u->mixer_path->has_dB)
1167 pa_sink_set_soft_volume(s, NULL);
1168 }
1169
1170 static void sink_set_volume_cb(pa_sink *s) {
1171 struct userdata *u = s->userdata;
1172 pa_cvolume r;
1173 char t[PA_CVOLUME_SNPRINT_MAX];
1174
1175 pa_assert(u);
1176 pa_assert(u->mixer_path);
1177 pa_assert(u->mixer_handle);
1178
1179 /* Shift up by the base volume */
1180 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1181
1182 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1183 return;
1184
1185 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1186 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1187
1188 u->hardware_volume = r;
1189
1190 if (u->mixer_path->has_dB) {
1191 pa_cvolume new_soft_volume;
1192 pa_bool_t accurate_enough;
1193
1194 /* Match exactly what the user requested by software */
1195 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1196
1197 /* If the adjustment to do in software is only minimal we
1198 * can skip it. That saves us CPU at the expense of a bit of
1199 * accuracy */
1200 accurate_enough =
1201 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1202 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1203
1204 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
1205 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1206 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1207 pa_yes_no(accurate_enough));
1208
1209 if (!accurate_enough)
1210 s->soft_volume = new_soft_volume;
1211
1212 } else {
1213 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1214
1215 /* We can't match exactly what the user requested, hence let's
1216 * at least tell the user about it */
1217
1218 s->real_volume = r;
1219 }
1220 }
1221
1222 static void sink_get_mute_cb(pa_sink *s) {
1223 struct userdata *u = s->userdata;
1224 pa_bool_t b;
1225
1226 pa_assert(u);
1227 pa_assert(u->mixer_path);
1228 pa_assert(u->mixer_handle);
1229
1230 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1231 return;
1232
1233 s->muted = b;
1234 }
1235
1236 static void sink_set_mute_cb(pa_sink *s) {
1237 struct userdata *u = s->userdata;
1238
1239 pa_assert(u);
1240 pa_assert(u->mixer_path);
1241 pa_assert(u->mixer_handle);
1242
1243 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1244 }
1245
1246 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1247 struct userdata *u = s->userdata;
1248 pa_alsa_port_data *data;
1249
1250 pa_assert(u);
1251 pa_assert(p);
1252 pa_assert(u->mixer_handle);
1253
1254 data = PA_DEVICE_PORT_DATA(p);
1255
1256 pa_assert_se(u->mixer_path = data->path);
1257 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1258
1259 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1260 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1261 s->n_volume_steps = PA_VOLUME_NORM+1;
1262
1263 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1264 } else {
1265 s->base_volume = PA_VOLUME_NORM;
1266 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1267 }
1268
1269 if (data->setting)
1270 pa_alsa_setting_select(data->setting, u->mixer_handle);
1271
1272 if (s->set_mute)
1273 s->set_mute(s);
1274 if (s->set_volume)
1275 s->set_volume(s);
1276
1277 return 0;
1278 }
1279
1280 static void sink_update_requested_latency_cb(pa_sink *s) {
1281 struct userdata *u = s->userdata;
1282 size_t before;
1283 pa_assert(u);
1284 pa_assert(u->use_tsched); /* only when timer scheduling is used
1285 * we can dynamically adjust the
1286 * latency */
1287
1288 if (!u->pcm_handle)
1289 return;
1290
1291 before = u->hwbuf_unused;
1292 update_sw_params(u);
1293
1294 /* Let's check whether we now use only a smaller part of the
1295 buffer then before. If so, we need to make sure that subsequent
1296 rewinds are relative to the new maximum fill level and not to the
1297 current fill level. Thus, let's do a full rewind once, to clear
1298 things up. */
1299
1300 if (u->hwbuf_unused > before) {
1301 pa_log_debug("Requesting rewind due to latency change.");
1302 pa_sink_request_rewind(s, (size_t) -1);
1303 }
1304 }
1305
1306 static int process_rewind(struct userdata *u) {
1307 snd_pcm_sframes_t unused;
1308 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1309 pa_assert(u);
1310
1311 /* Figure out how much we shall rewind and reset the counter */
1312 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1313
1314 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1315
1316 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1317 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1318 return -1;
1319 }
1320
1321 unused_nbytes = (size_t) unused * u->frame_size;
1322
1323 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1324 unused_nbytes += u->rewind_safeguard;
1325
1326 if (u->hwbuf_size > unused_nbytes)
1327 limit_nbytes = u->hwbuf_size - unused_nbytes;
1328 else
1329 limit_nbytes = 0;
1330
1331 if (rewind_nbytes > limit_nbytes)
1332 rewind_nbytes = limit_nbytes;
1333
1334 if (rewind_nbytes > 0) {
1335 snd_pcm_sframes_t in_frames, out_frames;
1336
1337 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1338
1339 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1340 pa_log_debug("before: %lu", (unsigned long) in_frames);
1341 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1342 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1343 if (try_recover(u, "process_rewind", out_frames) < 0)
1344 return -1;
1345 out_frames = 0;
1346 }
1347
1348 pa_log_debug("after: %lu", (unsigned long) out_frames);
1349
1350 rewind_nbytes = (size_t) out_frames * u->frame_size;
1351
1352 if (rewind_nbytes <= 0)
1353 pa_log_info("Tried rewind, but was apparently not possible.");
1354 else {
1355 u->write_count -= rewind_nbytes;
1356 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1357 pa_sink_process_rewind(u->sink, rewind_nbytes);
1358
1359 u->after_rewind = TRUE;
1360 return 0;
1361 }
1362 } else
1363 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1364
1365 pa_sink_process_rewind(u->sink, 0);
1366 return 0;
1367 }
1368
/* IO thread main loop: renders audio, writes it to the ALSA device, and
 * services poll/timer wakeups until a shutdown message arrives. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* Pending rewinds must be processed before rendering new data. */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)start: kick off the stream and
                 * resume the timing smoother. */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means we were asked to quit: regular shutdown. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates an error condition; try
             * to recover and restart playback from scratch. */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1503
1504 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1505 const char *n;
1506 char *t;
1507
1508 pa_assert(data);
1509 pa_assert(ma);
1510 pa_assert(device_name);
1511
1512 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1513 pa_sink_new_data_set_name(data, n);
1514 data->namereg_fail = TRUE;
1515 return;
1516 }
1517
1518 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1519 data->namereg_fail = TRUE;
1520 else {
1521 n = device_id ? device_id : device_name;
1522 data->namereg_fail = FALSE;
1523 }
1524
1525 if (mapping)
1526 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1527 else
1528 t = pa_sprintf_malloc("alsa_output.%s", n);
1529
1530 pa_sink_new_data_set_name(data, t);
1531 pa_xfree(t);
1532 }
1533
/* Opens the ALSA mixer belonging to our PCM device and probes either the
 * single element requested via the "control" module argument, or the
 * whole path set derived from the mapping. On any probe failure all
 * mixer resources are released again and the sink simply continues
 * without hardware mixer support. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Nothing to probe without a mapping or an explicit element. */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* A single, explicitly requested mixer element. */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Probe the full set of candidate paths for this mapping. */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:

    /* At most one of mixer_path_set / mixer_path is set at this point;
     * free whichever exists, then close the mixer handle. */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1582
/* Activates the appropriate mixer path and wires hardware volume/mute
 * handling into the sink. Returns 0 on success (including the "no mixer
 * available" case), -1 on fatal error. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer was found earlier: software volume/mute only. */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No ports: fall back to the first probed path, if any. */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    /* FIXME: need automatic detection rather than hard-coded path */
    if (!strcmp(u->mixer_path->name, "iec958-passthrough-output")) {
        u->sink->flags |= PA_SINK_PASSTHROUGH;
    } else {
        u->sink->flags &= ~PA_SINK_PASSTHROUGH;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Shift by the path's maximum gain so that 0dB lands on
             * PA_VOLUME_NORM. */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* No dB information: expose the raw hardware step range. */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds from the main loop so that external control
     * changes are delivered to mixer_callback(). */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1677
/* Creates a new ALSA sink: parses module arguments, reserves and opens
 * the PCM device (by mapping, device id or device string), configures
 * buffering and optional timer-based scheduling, sets up mixer support
 * and finally starts the IO thread. Returns the new sink, or NULL on
 * failure (all partially acquired resources are released). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from server defaults; module arguments may override. */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was asked for; ALSA may tweak ss during open. */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    /* Derive default buffer metrics from the server configuration. */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    /* ALSA works in frames, our metrics are in bytes. */
    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Number of bytes at the end of the buffer that rewinds must not
     * touch (protects in-flight DMA transfers). */
    rewind_safeguard = DEFAULT_REWIND_SAFEGUARD_BYTES;
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Lock the device against concurrent users before opening it. */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d come back as what the device actually supports. */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        /* Open via a card profile mapping; requires device_id=. */
        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        /* device_id= without a mapping: auto-detect a suitable profile. */
        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        /* Plain device= string (or the built-in default device). */
        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Downgrade gracefully when the device lacks mmap support. */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    /* Timer-based scheduling needs both mmap and tsched support. */
    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Fill in the sink creation data (name, proplist, ports). */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    /* Wire up our callbacks. */
    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    /* Rewinding is only reliable on real hardware devices. */
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

    if (u->use_tsched) {
        /* Convert the watermark from the requested sample spec to the
         * one the device actually accepted. */
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        /* With timer scheduling the latency is fully dynamic, up to the
         * full hardware buffer. */
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        /* IRQ scheduling: the latency is fixed at the buffer length. */
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        /* A volume was requested at creation: push it to the hardware. */
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        /* Otherwise adopt whatever the hardware currently has. */
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    /* userdata_free() copes with partially initialized userdata. */
    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1998
/* Tears down everything in u. Safe to call on a partially initialized
 * userdata: every member is checked before being released. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Stop the IO thread before freeing anything it might still touch. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        /* Drop pending samples rather than draining; we are going away. */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Free either the whole path set or the standalone path — never
     * both (mirrors the ownership set up in find_mixer()). */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    /* Release the device reservation and its monitor. */
    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
2050
2051 void pa_alsa_sink_free(pa_sink *s) {
2052 struct userdata *u;
2053
2054 pa_sink_assert_ref(s);
2055 pa_assert_se(u = s->userdata);
2056
2057 userdata_free(u);
2058 }