1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76  * will increase the watermark only if we hit a real underrun. */
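/* For illustration, with the defaults above: the watermark starts at
 * 20ms. On an underrun, increase_watermark() picks the smaller of
 * doubling and adding TSCHED_WATERMARK_INC_STEP_USEC, so it moves
 * 20ms -> 30ms -> 40ms -> ... If the fill level then stays above
 * 100ms for a whole 20s verification window, decrease_watermark()
 * steps it back down by 5ms at a time. Growth is fast, shrinkage is
 * slow, so the watermark settles near the smallest value that avoids
 * dropouts. */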
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
80
81 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
82 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
83
84 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
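/* Example: PA_VOLUME_NORM is 0x10000, so VOLUME_ACCURACY allows the
 * residual software factor to stay within roughly +/- 1% of unity
 * before sink_set_volume_cb() bothers to apply it in software. */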
85
86 struct userdata {
87 pa_core *core;
88 pa_module *module;
89 pa_sink *sink;
90
91 pa_thread *thread;
92 pa_thread_mq thread_mq;
93 pa_rtpoll *rtpoll;
94
95 snd_pcm_t *pcm_handle;
96
97 pa_alsa_fdlist *mixer_fdl;
98 snd_mixer_t *mixer_handle;
99 pa_alsa_path_set *mixer_path_set;
100 pa_alsa_path *mixer_path;
101
102 pa_cvolume hardware_volume;
103
104 size_t
105 frame_size,
106 fragment_size,
107 hwbuf_size,
108 tsched_watermark,
109 hwbuf_unused,
110 min_sleep,
111 min_wakeup,
112 watermark_inc_step,
113 watermark_dec_step,
114 watermark_inc_threshold,
115 watermark_dec_threshold;
116
117 pa_usec_t watermark_dec_not_before;
118
119 pa_memchunk memchunk;
120
121 char *device_name; /* name of the PCM device */
122 char *control_device; /* name of the control device */
123
124 pa_bool_t use_mmap:1, use_tsched:1;
125
126 pa_bool_t first, after_rewind;
127
128 pa_rtpoll_item *alsa_rtpoll_item;
129
130 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
131
132 pa_smoother *smoother;
133 uint64_t write_count;
134 uint64_t since_start;
135 pa_usec_t smoother_interval;
136 pa_usec_t last_smoother_update;
137
138 pa_reserve_wrapper *reserve;
139 pa_hook_slot *reserve_slot;
140 pa_reserve_monitor_wrapper *monitor;
141 pa_hook_slot *monitor_slot;
142 };
143
144 static void userdata_free(struct userdata *u);
145
146 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
147 pa_assert(r);
148 pa_assert(u);
149
150 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
151 return PA_HOOK_CANCEL;
152
153 return PA_HOOK_OK;
154 }
155
156 static void reserve_done(struct userdata *u) {
157 pa_assert(u);
158
159 if (u->reserve_slot) {
160 pa_hook_slot_free(u->reserve_slot);
161 u->reserve_slot = NULL;
162 }
163
164 if (u->reserve) {
165 pa_reserve_wrapper_unref(u->reserve);
166 u->reserve = NULL;
167 }
168 }
169
170 static void reserve_update(struct userdata *u) {
171 const char *description;
172 pa_assert(u);
173
174 if (!u->sink || !u->reserve)
175 return;
176
177 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
178 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
179 }
180
181 static int reserve_init(struct userdata *u, const char *dname) {
182 char *rname;
183
184 pa_assert(u);
185 pa_assert(dname);
186
187 if (u->reserve)
188 return 0;
189
190 if (pa_in_system_mode())
191 return 0;
192
193 if (!(rname = pa_alsa_get_reserve_name(dname)))
194 return 0;
195
196 /* We are resuming, try to lock the device */
197 u->reserve = pa_reserve_wrapper_get(u->core, rname);
198 pa_xfree(rname);
199
200 if (!(u->reserve))
201 return -1;
202
203 reserve_update(u);
204
205 pa_assert(!u->reserve_slot);
206 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
207
208 return 0;
209 }
210
211 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
212 pa_bool_t b;
213
214 pa_assert(w);
215 pa_assert(u);
216
217 b = PA_PTR_TO_UINT(busy) && !u->reserve;
218
219 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
220 return PA_HOOK_OK;
221 }
222
223 static void monitor_done(struct userdata *u) {
224 pa_assert(u);
225
226 if (u->monitor_slot) {
227 pa_hook_slot_free(u->monitor_slot);
228 u->monitor_slot = NULL;
229 }
230
231 if (u->monitor) {
232 pa_reserve_monitor_wrapper_unref(u->monitor);
233 u->monitor = NULL;
234 }
235 }
236
237 static int reserve_monitor_init(struct userdata *u, const char *dname) {
238 char *rname;
239
240 pa_assert(u);
241 pa_assert(dname);
242
243 if (pa_in_system_mode())
244 return 0;
245
246 if (!(rname = pa_alsa_get_reserve_name(dname)))
247 return 0;
248
249 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
250 pa_xfree(rname);
251
252 if (!(u->monitor))
253 return -1;
254
255 pa_assert(!u->monitor_slot);
256 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
257
258 return 0;
259 }
260
261 static void fix_min_sleep_wakeup(struct userdata *u) {
262 size_t max_use, max_use_2;
263
264 pa_assert(u);
265 pa_assert(u->use_tsched);
266
267 max_use = u->hwbuf_size - u->hwbuf_unused;
268 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
269
270 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
271 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
272
273 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
274 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
275 }
276
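/* A rough example of the clamping below: with a 2s buffer, nothing
 * reserved (hwbuf_unused == 0) and the 10ms/4ms minimums from above,
 * the watermark is kept within the byte equivalent of [4ms, 2s - 10ms]. */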
277 static void fix_tsched_watermark(struct userdata *u) {
278 size_t max_use;
279 pa_assert(u);
280 pa_assert(u->use_tsched);
281
282 max_use = u->hwbuf_size - u->hwbuf_unused;
283
284 if (u->tsched_watermark > max_use - u->min_sleep)
285 u->tsched_watermark = max_use - u->min_sleep;
286
287 if (u->tsched_watermark < u->min_wakeup)
288 u->tsched_watermark = u->min_wakeup;
289 }
290
291 static void increase_watermark(struct userdata *u) {
292 size_t old_watermark;
293 pa_usec_t old_min_latency, new_min_latency;
294
295 pa_assert(u);
296 pa_assert(u->use_tsched);
297
298 /* First, just try to increase the watermark */
299 old_watermark = u->tsched_watermark;
300 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
301 fix_tsched_watermark(u);
302
303 if (old_watermark != u->tsched_watermark) {
304 pa_log_info("Increasing wakeup watermark to %0.2f ms",
305 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
306 return;
307 }
308
309 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
310 old_min_latency = u->sink->thread_info.min_latency;
311 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
312 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
313
314 if (old_min_latency != new_min_latency) {
315 pa_log_info("Increasing minimal latency to %0.2f ms",
316 (double) new_min_latency / PA_USEC_PER_MSEC);
317
318 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
319 }
320
321 /* When we reach this we're officially fucked! */
322 }
323
324 static void decrease_watermark(struct userdata *u) {
325 size_t old_watermark;
326 pa_usec_t now;
327
328 pa_assert(u);
329 pa_assert(u->use_tsched);
330
331 now = pa_rtclock_now();
332
333 if (u->watermark_dec_not_before <= 0)
334 goto restart;
335
336 if (u->watermark_dec_not_before > now)
337 return;
338
339 old_watermark = u->tsched_watermark;
340
341 if (u->tsched_watermark < u->watermark_dec_step)
342 u->tsched_watermark = u->tsched_watermark / 2;
343 else
344 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
345
346 fix_tsched_watermark(u);
347
348 if (old_watermark != u->tsched_watermark)
349 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
350 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
351
352 /* We don't change the latency range */
353
354 restart:
355 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
356 }
357
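/* hw_sleep_time() splits the configured latency into a sleep part and
 * a process part. A sketch with round numbers: requested latency
 * 100ms, watermark 20ms -> *sleep_usec = 80ms, *process_usec = 20ms,
 * i.e. we plan to wake up one watermark's worth of time before the
 * buffer would run dry. If the watermark exceeds the latency it is
 * temporarily treated as half the latency instead. */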
358 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
359 pa_usec_t usec, wm;
360
361 pa_assert(sleep_usec);
362 pa_assert(process_usec);
363
364 pa_assert(u);
365 pa_assert(u->use_tsched);
366
367 usec = pa_sink_get_requested_latency_within_thread(u->sink);
368
369 if (usec == (pa_usec_t) -1)
370 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
371
372 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
373
374 if (wm > usec)
375 wm = usec/2;
376
377 *sleep_usec = usec - wm;
378 *process_usec = wm;
379
380 #ifdef DEBUG_TIMING
381 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
382 (unsigned long) (usec / PA_USEC_PER_MSEC),
383 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
384 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
385 #endif
386 }
387
388 static int try_recover(struct userdata *u, const char *call, int err) {
389 pa_assert(u);
390 pa_assert(call);
391 pa_assert(err < 0);
392
393 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
394
395 pa_assert(err != -EAGAIN);
396
397 if (err == -EPIPE)
398 pa_log_debug("%s: Buffer underrun!", call);
399
400 if (err == -ESTRPIPE)
401 pa_log_debug("%s: System suspended!", call);
402
403 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
404 pa_log("%s: %s", call, pa_alsa_strerror(err));
405 return -1;
406 }
407
408 u->first = TRUE;
409 u->since_start = 0;
410 return 0;
411 }
412
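/* check_left_to_play() converts the writable space ALSA reports into
 * "bytes still queued for playback": e.g. if the device reports 1.9s
 * of a 2s buffer as writable, 100ms is left to play. A report larger
 * than the whole buffer means we already ran dry. */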
413 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
414 size_t left_to_play;
415 pa_bool_t underrun = FALSE;
416
417 /* We use <= instead of < for this check here because an underrun
418  * only happens after the last sample was processed, not merely when
419  * it is removed from the buffer. This is particularly important
420  * when block transfer is used. */
421
422 if (n_bytes <= u->hwbuf_size)
423 left_to_play = u->hwbuf_size - n_bytes;
424 else {
425
426 /* We got a dropout. What a mess! */
427 left_to_play = 0;
428 underrun = TRUE;
429
430 #ifdef DEBUG_TIMING
431 PA_DEBUG_TRAP;
432 #endif
433
434 if (!u->first && !u->after_rewind)
435 if (pa_log_ratelimit())
436 pa_log_info("Underrun!");
437 }
438
439 #ifdef DEBUG_TIMING
440 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
441 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
442 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
443 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
444 #endif
445
446 if (u->use_tsched) {
447 pa_bool_t reset_not_before = TRUE;
448
449 if (!u->first && !u->after_rewind) {
450 if (underrun || left_to_play < u->watermark_inc_threshold)
451 increase_watermark(u);
452 else if (left_to_play > u->watermark_dec_threshold) {
453 reset_not_before = FALSE;
454
455 /* We decrease the watermark only if we have actually
456  * been woken up by a timeout. If something else woke
457  * us up it's too easy to fulfill the deadlines... */
458
459 if (on_timeout)
460 decrease_watermark(u);
461 }
462 }
463
464 if (reset_not_before)
465 u->watermark_dec_not_before = 0;
466 }
467
468 return left_to_play;
469 }
470
471 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
472 pa_bool_t work_done = TRUE;
473 pa_usec_t max_sleep_usec = 0, process_usec = 0;
474 size_t left_to_play;
475 unsigned j = 0;
476
477 pa_assert(u);
478 pa_sink_assert_ref(u->sink);
479
480 if (u->use_tsched)
481 hw_sleep_time(u, &max_sleep_usec, &process_usec);
482
483 for (;;) {
484 snd_pcm_sframes_t n;
485 size_t n_bytes;
486 int r;
487 pa_bool_t after_avail = TRUE;
488
489 /* First we determine how many samples are missing to fill the
490 * buffer up to 100% */
491
492 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
493
494 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
495 continue;
496
497 return r;
498 }
499
500 n_bytes = (size_t) n * u->frame_size;
501
502 #ifdef DEBUG_TIMING
503 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
504 #endif
505
506 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
507 on_timeout = FALSE;
508
509 if (u->use_tsched)
510
511 /* We won't fill up the playback buffer before at least
512  * half the sleep time is over because otherwise we might
513  * ask for more data from the clients than they expect. We
514  * need to guarantee that clients only have to keep around
515  * a single hw buffer length. */
516
517 if (!polled &&
518 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
519 #ifdef DEBUG_TIMING
520 pa_log_debug("Not filling up, because too early.");
521 #endif
522 break;
523 }
524
525 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
526
527 if (polled)
528 PA_ONCE_BEGIN {
529 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
530 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
531 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
532 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
533 pa_strnull(dn));
534 pa_xfree(dn);
535 } PA_ONCE_END;
536
537 #ifdef DEBUG_TIMING
538 pa_log_debug("Not filling up, because not necessary.");
539 #endif
540 break;
541 }
542
543
544 if (++j > 10) {
545 #ifdef DEBUG_TIMING
546 pa_log_debug("Not filling up, because already too many iterations.");
547 #endif
548
549 break;
550 }
551
552 n_bytes -= u->hwbuf_unused;
553 polled = FALSE;
554
555 #ifdef DEBUG_TIMING
556 pa_log_debug("Filling up");
557 #endif
558
559 for (;;) {
560 pa_memchunk chunk;
561 void *p;
562 int err;
563 const snd_pcm_channel_area_t *areas;
564 snd_pcm_uframes_t offset, frames;
565 snd_pcm_sframes_t sframes;
566
567 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
568 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
569
570 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
571
572 if (!after_avail && err == -EAGAIN)
573 break;
574
575 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
576 continue;
577
578 return r;
579 }
580
581 /* Make sure that if these memblocks need to be copied they will fit into one slot */
582 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
583 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
584
585 if (!after_avail && frames == 0)
586 break;
587
588 pa_assert(frames > 0);
589 after_avail = FALSE;
590
591 /* Check that these are multiples of 8 bits */
592 pa_assert((areas[0].first & 7) == 0);
593 pa_assert((areas[0].step & 7) == 0);
594
595 /* We assume a single interleaved memory buffer */
596 pa_assert((areas[0].first >> 3) == 0);
597 pa_assert((areas[0].step >> 3) == u->frame_size);
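/* (e.g. for S16LE stereo: first == 0, step == 32 bits, i.e. one 4 byte frame) */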
598
599 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
600
601 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
602 chunk.length = pa_memblock_get_length(chunk.memblock);
603 chunk.index = 0;
604
605 pa_sink_render_into_full(u->sink, &chunk);
606 pa_memblock_unref_fixed(chunk.memblock);
607
608 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
609
610 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
611 continue;
612
613 return r;
614 }
615
616 work_done = TRUE;
617
618 u->write_count += frames * u->frame_size;
619 u->since_start += frames * u->frame_size;
620
621 #ifdef DEBUG_TIMING
622 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
623 #endif
624
625 if ((size_t) frames * u->frame_size >= n_bytes)
626 break;
627
628 n_bytes -= (size_t) frames * u->frame_size;
629 }
630 }
631
632 if (u->use_tsched) {
633 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
634
635 if (*sleep_usec > process_usec)
636 *sleep_usec -= process_usec;
637 else
638 *sleep_usec = 0;
639 } else
640 *sleep_usec = 0;
641
642 return work_done ? 1 : 0;
643 }
644
645 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
646 pa_bool_t work_done = FALSE;
647 pa_usec_t max_sleep_usec = 0, process_usec = 0;
648 size_t left_to_play;
649 unsigned j = 0;
650
651 pa_assert(u);
652 pa_sink_assert_ref(u->sink);
653
654 if (u->use_tsched)
655 hw_sleep_time(u, &max_sleep_usec, &process_usec);
656
657 for (;;) {
658 snd_pcm_sframes_t n;
659 size_t n_bytes;
660 int r;
661 pa_bool_t after_avail = TRUE;
662
663 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
664
665 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
666 continue;
667
668 return r;
669 }
670
671 n_bytes = (size_t) n * u->frame_size;
672 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
673 on_timeout = FALSE;
674
675 if (u->use_tsched)
676
677 /* We won't fill up the playback buffer before at least
678  * half the sleep time is over because otherwise we might
679  * ask for more data from the clients than they expect. We
680  * need to guarantee that clients only have to keep around
681  * a single hw buffer length. */
682
683 if (!polled &&
684 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
685 break;
686
687 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
688
689 if (polled)
690 PA_ONCE_BEGIN {
691 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
692 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
693 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
694 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
695 pa_strnull(dn));
696 pa_xfree(dn);
697 } PA_ONCE_END;
698
699 break;
700 }
701
702 if (++j > 10) {
703 #ifdef DEBUG_TIMING
704 pa_log_debug("Not filling up, because already too many iterations.");
705 #endif
706
707 break;
708 }
709
710 n_bytes -= u->hwbuf_unused;
711 polled = FALSE;
712
713 for (;;) {
714 snd_pcm_sframes_t frames;
715 void *p;
716
717 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
718
719 if (u->memchunk.length <= 0)
720 pa_sink_render(u->sink, n_bytes, &u->memchunk);
721
722 pa_assert(u->memchunk.length > 0);
723
724 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
725
726 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
727 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
728
729 p = pa_memblock_acquire(u->memchunk.memblock);
730 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
731 pa_memblock_release(u->memchunk.memblock);
732
733 if (PA_UNLIKELY(frames < 0)) {
734
735 if (!after_avail && (int) frames == -EAGAIN)
736 break;
737
738 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
739 continue;
740
741 return r;
742 }
743
744 if (!after_avail && frames == 0)
745 break;
746
747 pa_assert(frames > 0);
748 after_avail = FALSE;
749
750 u->memchunk.index += (size_t) frames * u->frame_size;
751 u->memchunk.length -= (size_t) frames * u->frame_size;
752
753 if (u->memchunk.length <= 0) {
754 pa_memblock_unref(u->memchunk.memblock);
755 pa_memchunk_reset(&u->memchunk);
756 }
757
758 work_done = TRUE;
759
760 u->write_count += frames * u->frame_size;
761 u->since_start += frames * u->frame_size;
762
763 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
764
765 if ((size_t) frames * u->frame_size >= n_bytes)
766 break;
767
768 n_bytes -= (size_t) frames * u->frame_size;
769 }
770 }
771
772 if (u->use_tsched) {
773 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
774
775 if (*sleep_usec > process_usec)
776 *sleep_usec -= process_usec;
777 else
778 *sleep_usec = 0;
779 } else
780 *sleep_usec = 0;
781
782 return work_done ? 1 : 0;
783 }
784
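/* update_smoother() feeds the time smoother with (system time,
 * playback time) pairs. Playback time comes from our bookkeeping:
 * position = write_count - delay_in_bytes, i.e. everything we ever
 * wrote minus what still sits in the hardware buffer, converted to
 * usec below. */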
785 static void update_smoother(struct userdata *u) {
786 snd_pcm_sframes_t delay = 0;
787 int64_t position;
788 int err;
789 pa_usec_t now1 = 0, now2;
790 snd_pcm_status_t *status;
791
792 snd_pcm_status_alloca(&status);
793
794 pa_assert(u);
795 pa_assert(u->pcm_handle);
796
797 /* Let's update the time smoother */
798
799 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
800 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
801 return;
802 }
803
804 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
805 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
806 else {
807 snd_htimestamp_t htstamp = { 0, 0 };
808 snd_pcm_status_get_htstamp(status, &htstamp);
809 now1 = pa_timespec_load(&htstamp);
810 }
811
812 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
813 if (now1 <= 0)
814 now1 = pa_rtclock_now();
815
816 /* check if the time since the last update is longer than the interval */
817 if (u->last_smoother_update > 0)
818 if (u->last_smoother_update + u->smoother_interval > now1)
819 return;
820
821 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
822
823 if (PA_UNLIKELY(position < 0))
824 position = 0;
825
826 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
827
828 pa_smoother_put(u->smoother, now1, now2);
829
830 u->last_smoother_update = now1;
831 /* exponentially increase the update interval up to the MAX limit */
832 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
833 }
834
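/* sink_get_latency() is then the mirror image of the smoother data,
 * roughly:
 *
 *   latency = usec(write_count) - smoother(now) [+ usec(pending memchunk)]
 *
 * i.e. how far the audio handed to ALSA is ahead of what the smoother
 * estimates has actually been played. */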
835 static pa_usec_t sink_get_latency(struct userdata *u) {
836 pa_usec_t r;
837 int64_t delay;
838 pa_usec_t now1, now2;
839
840 pa_assert(u);
841
842 now1 = pa_rtclock_now();
843 now2 = pa_smoother_get(u->smoother, now1);
844
845 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
846
847 r = delay >= 0 ? (pa_usec_t) delay : 0;
848
849 if (u->memchunk.memblock)
850 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
851
852 return r;
853 }
854
855 static int build_pollfd(struct userdata *u) {
856 pa_assert(u);
857 pa_assert(u->pcm_handle);
858
859 if (u->alsa_rtpoll_item)
860 pa_rtpoll_item_free(u->alsa_rtpoll_item);
861
862 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
863 return -1;
864
865 return 0;
866 }
867
868 /* Called from IO context */
869 static int suspend(struct userdata *u) {
870 pa_assert(u);
871 pa_assert(u->pcm_handle);
872
873 pa_smoother_pause(u->smoother, pa_rtclock_now());
874
875 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
876  * take an awfully long time with our long buffer sizes today. */
877 snd_pcm_close(u->pcm_handle);
878 u->pcm_handle = NULL;
879
880 if (u->alsa_rtpoll_item) {
881 pa_rtpoll_item_free(u->alsa_rtpoll_item);
882 u->alsa_rtpoll_item = NULL;
883 }
884
885 /* We reset max_rewind/max_request here to make sure that while we
886  * are suspended the old max_request/max_rewind values set before
887  * the suspend cannot influence the per-stream buffers of newly
888  * created streams, and so that those streams' requirements have no
889  * influence on the values either. */
890 pa_sink_set_max_rewind_within_thread(u->sink, 0);
891 pa_sink_set_max_request_within_thread(u->sink, 0);
892
893 pa_log_info("Device suspended...");
894
895 return 0;
896 }
897
898 /* Called from IO context */
899 static int update_sw_params(struct userdata *u) {
900 snd_pcm_uframes_t avail_min;
901 int err;
902
903 pa_assert(u);
904
905 /* Use the full buffer if no one asked us for anything specific */
906 u->hwbuf_unused = 0;
907
908 if (u->use_tsched) {
909 pa_usec_t latency;
910
911 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
912 size_t b;
913
914 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
915
916 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
917
918 /* We need at least one sample in our buffer */
919
920 if (PA_UNLIKELY(b < u->frame_size))
921 b = u->frame_size;
922
923 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
924 }
925
926 fix_min_sleep_wakeup(u);
927 fix_tsched_watermark(u);
928 }
929
930 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
931
932 /* We need at least one frame in the used part of the buffer */
933 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
934
935 if (u->use_tsched) {
936 pa_usec_t sleep_usec, process_usec;
937
938 hw_sleep_time(u, &sleep_usec, &process_usec);
939 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
940 }
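/* Illustrative avail_min arithmetic: with nothing reserved the base is
 * a single frame; in tsched mode the expected sleep time is added on
 * top, so an 80ms sleep at 44.1kHz contributes roughly 3500 more
 * frames. */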
941
942 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
943
944 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
945 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
946 return err;
947 }
948
949 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
950 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
951
952 return 0;
953 }
954
955 /* Called from IO context */
956 static int unsuspend(struct userdata *u) {
957 pa_sample_spec ss;
958 int err;
959 pa_bool_t b, d;
960 snd_pcm_uframes_t period_size, buffer_size;
961
962 pa_assert(u);
963 pa_assert(!u->pcm_handle);
964
965 pa_log_info("Trying resume...");
966
967 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
968 SND_PCM_NONBLOCK|
969 SND_PCM_NO_AUTO_RESAMPLE|
970 SND_PCM_NO_AUTO_CHANNELS|
971 SND_PCM_NO_AUTO_FORMAT)) < 0) {
972 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
973 goto fail;
974 }
975
976 ss = u->sink->sample_spec;
977 period_size = u->fragment_size / u->frame_size;
978 buffer_size = u->hwbuf_size / u->frame_size;
979 b = u->use_mmap;
980 d = u->use_tsched;
981
982 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
983 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
984 goto fail;
985 }
986
987 if (b != u->use_mmap || d != u->use_tsched) {
988 pa_log_warn("Resume failed, couldn't get original access mode.");
989 goto fail;
990 }
991
992 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
993 pa_log_warn("Resume failed, couldn't restore original sample settings.");
994 goto fail;
995 }
996
997 if (period_size*u->frame_size != u->fragment_size ||
998 buffer_size*u->frame_size != u->hwbuf_size) {
999 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New: %lu/%lu)",
1000 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1001 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1002 goto fail;
1003 }
1004
1005 if (update_sw_params(u) < 0)
1006 goto fail;
1007
1008 if (build_pollfd(u) < 0)
1009 goto fail;
1010
1011 u->write_count = 0;
1012 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1013 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1014 u->last_smoother_update = 0;
1015
1016 u->first = TRUE;
1017 u->since_start = 0;
1018
1019 pa_log_info("Resumed successfully...");
1020
1021 return 0;
1022
1023 fail:
1024 if (u->pcm_handle) {
1025 snd_pcm_close(u->pcm_handle);
1026 u->pcm_handle = NULL;
1027 }
1028
1029 return -PA_ERR_IO;
1030 }
1031
1032 /* Called from IO context */
1033 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1034 struct userdata *u = PA_SINK(o)->userdata;
1035
1036 switch (code) {
1037
1038 case PA_SINK_MESSAGE_GET_LATENCY: {
1039 pa_usec_t r = 0;
1040
1041 if (u->pcm_handle)
1042 r = sink_get_latency(u);
1043
1044 *((pa_usec_t*) data) = r;
1045
1046 return 0;
1047 }
1048
1049 case PA_SINK_MESSAGE_SET_STATE:
1050
1051 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1052
1053 case PA_SINK_SUSPENDED: {
1054 int r;
1055
1056 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1057
1058 if ((r = suspend(u)) < 0)
1059 return r;
1060
1061 break;
1062 }
1063
1064 case PA_SINK_IDLE:
1065 case PA_SINK_RUNNING: {
1066 int r;
1067
1068 if (u->sink->thread_info.state == PA_SINK_INIT) {
1069 if (build_pollfd(u) < 0)
1070 return -PA_ERR_IO;
1071 }
1072
1073 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1074 if ((r = unsuspend(u)) < 0)
1075 return r;
1076 }
1077
1078 break;
1079 }
1080
1081 case PA_SINK_UNLINKED:
1082 case PA_SINK_INIT:
1083 case PA_SINK_INVALID_STATE:
1084 ;
1085 }
1086
1087 break;
1088 }
1089
1090 return pa_sink_process_msg(o, code, data, offset, chunk);
1091 }
1092
1093 /* Called from main context */
1094 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1095 pa_sink_state_t old_state;
1096 struct userdata *u;
1097
1098 pa_sink_assert_ref(s);
1099 pa_assert_se(u = s->userdata);
1100
1101 old_state = pa_sink_get_state(u->sink);
1102
1103 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1104 reserve_done(u);
1105 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1106 if (reserve_init(u, u->device_name) < 0)
1107 return -PA_ERR_BUSY;
1108
1109 return 0;
1110 }
1111
1112 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1113 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1114
1115 pa_assert(u);
1116 pa_assert(u->mixer_handle);
1117
1118 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1119 return 0;
1120
1121 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1122 return 0;
1123
1124 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1125 pa_sink_get_volume(u->sink, TRUE);
1126 pa_sink_get_mute(u->sink, TRUE);
1127 }
1128
1129 return 0;
1130 }
1131
1132 static void sink_get_volume_cb(pa_sink *s) {
1133 struct userdata *u = s->userdata;
1134 pa_cvolume r;
1135 char t[PA_CVOLUME_SNPRINT_MAX];
1136
1137 pa_assert(u);
1138 pa_assert(u->mixer_path);
1139 pa_assert(u->mixer_handle);
1140
1141 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1142 return;
1143
1144 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1145 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1146
1147 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1148
1149 if (pa_cvolume_equal(&u->hardware_volume, &r))
1150 return;
1151
1152 s->real_volume = u->hardware_volume = r;
1153
1154 /* Hmm, so the hardware volume changed, let's reset our software volume */
1155 if (u->mixer_path->has_dB)
1156 pa_sink_set_soft_volume(s, NULL);
1157 }
1158
1159 static void sink_set_volume_cb(pa_sink *s) {
1160 struct userdata *u = s->userdata;
1161 pa_cvolume r;
1162 char t[PA_CVOLUME_SNPRINT_MAX];
1163
1164 pa_assert(u);
1165 pa_assert(u->mixer_path);
1166 pa_assert(u->mixer_handle);
1167
1168 /* Shift up by the base volume */
1169 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1170
1171 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1172 return;
1173
1174 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1175 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1176
1177 u->hardware_volume = r;
1178
1179 if (u->mixer_path->has_dB) {
1180 pa_cvolume new_soft_volume;
1181 pa_bool_t accurate_enough;
1182
1183 /* Match exactly what the user requested by software */
1184 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1185
1186 /* If the adjustment to do in software is only minimal we
1187 * can skip it. That saves us CPU at the expense of a bit of
1188 * accuracy */
1189 accurate_enough =
1190 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1191 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1192
1193 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
1194 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1195 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1196 pa_yes_no(accurate_enough));
1197
1198 if (!accurate_enough)
1199 s->soft_volume = new_soft_volume;
1200
1201 } else {
1202 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1203
1204 /* We can't match exactly what the user requested, hence let's
1205 * at least tell the user about it */
1206
1207 s->real_volume = r;
1208 }
1209 }
1210
1211 static void sink_get_mute_cb(pa_sink *s) {
1212 struct userdata *u = s->userdata;
1213 pa_bool_t b;
1214
1215 pa_assert(u);
1216 pa_assert(u->mixer_path);
1217 pa_assert(u->mixer_handle);
1218
1219 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1220 return;
1221
1222 s->muted = b;
1223 }
1224
1225 static void sink_set_mute_cb(pa_sink *s) {
1226 struct userdata *u = s->userdata;
1227
1228 pa_assert(u);
1229 pa_assert(u->mixer_path);
1230 pa_assert(u->mixer_handle);
1231
1232 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1233 }
1234
1235 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1236 struct userdata *u = s->userdata;
1237 pa_alsa_port_data *data;
1238
1239 pa_assert(u);
1240 pa_assert(p);
1241 pa_assert(u->mixer_handle);
1242
1243 data = PA_DEVICE_PORT_DATA(p);
1244
1245 pa_assert_se(u->mixer_path = data->path);
1246 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1247
1248 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1249 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1250 s->n_volume_steps = PA_VOLUME_NORM+1;
1251
1252 if (u->mixer_path->max_dB > 0.0)
1253 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1254 else
1255 pa_log_info("No particular base volume set, fixing to 0 dB");
1256 } else {
1257 s->base_volume = PA_VOLUME_NORM;
1258 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1259 }
1260
1261 if (data->setting)
1262 pa_alsa_setting_select(data->setting, u->mixer_handle);
1263
1264 if (s->set_mute)
1265 s->set_mute(s);
1266 if (s->set_volume)
1267 s->set_volume(s);
1268
1269 return 0;
1270 }
1271
1272 static void sink_update_requested_latency_cb(pa_sink *s) {
1273 struct userdata *u = s->userdata;
1274 size_t before;
1275 pa_assert(u);
1276 pa_assert(u->use_tsched); /* we can dynamically adjust the
1277  * latency only when timer
1278  * scheduling is used */
1279
1280 if (!u->pcm_handle)
1281 return;
1282
1283 before = u->hwbuf_unused;
1284 update_sw_params(u);
1285
1286 /* Let's check whether we now use only a smaller part of the
1287 buffer than before. If so, we need to make sure that subsequent
1288 rewinds are relative to the new maximum fill level and not to the
1289 current fill level. Thus, let's do a full rewind once, to clear
1290 things up. */
1291
1292 if (u->hwbuf_unused > before) {
1293 pa_log_debug("Requesting rewind due to latency change.");
1294 pa_sink_request_rewind(s, (size_t) -1);
1295 }
1296 }
1297
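/* process_rewind() must never rewind over data the device is about to
 * play. Rough example: 2s buffer, ALSA reports 0.5s of it as free
 * (already played), tsched watermark 20ms -> at most the byte
 * equivalent of 2s - 0.52s = 1.48s may be rewound, no matter what the
 * sink asked for. */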
1298 static int process_rewind(struct userdata *u) {
1299 snd_pcm_sframes_t unused;
1300 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1301 pa_assert(u);
1302
1303 /* Figure out how much we shall rewind and reset the counter */
1304 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1305
1306 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1307
1308 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1309 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1310 return -1;
1311 }
1312
1313 unused_nbytes = (size_t) unused * u->frame_size;
1314
1315 if (u->use_tsched)
1316 unused_nbytes += u->tsched_watermark;
1317
1318 if (u->hwbuf_size > unused_nbytes)
1319 limit_nbytes = u->hwbuf_size - unused_nbytes;
1320 else
1321 limit_nbytes = 0;
1322
1323 if (rewind_nbytes > limit_nbytes)
1324 rewind_nbytes = limit_nbytes;
1325
1326 if (rewind_nbytes > 0) {
1327 snd_pcm_sframes_t in_frames, out_frames;
1328
1329 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1330
1331 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1332 pa_log_debug("before: %lu", (unsigned long) in_frames);
1333 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1334 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1335 if (try_recover(u, "process_rewind", out_frames) < 0)
1336 return -1;
1337 out_frames = 0;
1338 }
1339
1340 pa_log_debug("after: %lu", (unsigned long) out_frames);
1341
1342 rewind_nbytes = (size_t) out_frames * u->frame_size;
1343
1344 if (rewind_nbytes <= 0)
1345 pa_log_info("Tried rewind, but was apparently not possible.");
1346 else {
1347 u->write_count -= rewind_nbytes;
1348 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1349 pa_sink_process_rewind(u->sink, rewind_nbytes);
1350
1351 u->after_rewind = TRUE;
1352 return 0;
1353 }
1354 } else
1355 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1356
1357 pa_sink_process_rewind(u->sink, 0);
1358 return 0;
1359 }
1360
1361 static void thread_func(void *userdata) {
1362 struct userdata *u = userdata;
1363 unsigned short revents = 0;
1364
1365 pa_assert(u);
1366
1367 pa_log_debug("Thread starting up");
1368
1369 if (u->core->realtime_scheduling)
1370 pa_make_realtime(u->core->realtime_priority);
1371
1372 pa_thread_mq_install(&u->thread_mq);
1373
1374 for (;;) {
1375 int ret;
1376
1377 #ifdef DEBUG_TIMING
1378 pa_log_debug("Loop");
1379 #endif
1380
1381 /* Render some data and write it to the dsp */
1382 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1383 int work_done;
1384 pa_usec_t sleep_usec = 0;
1385 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1386
1387 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1388 if (process_rewind(u) < 0)
1389 goto fail;
1390
1391 if (u->use_mmap)
1392 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1393 else
1394 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1395
1396 if (work_done < 0)
1397 goto fail;
1398
1399 /* pa_log_debug("work_done = %i", work_done); */
1400
1401 if (work_done) {
1402
1403 if (u->first) {
1404 pa_log_info("Starting playback.");
1405 snd_pcm_start(u->pcm_handle);
1406
1407 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1408 }
1409
1410 update_smoother(u);
1411 }
1412
1413 if (u->use_tsched) {
1414 pa_usec_t cusec;
1415
1416 if (u->since_start <= u->hwbuf_size) {
1417
1418 /* USB devices on ALSA seem to hit a buffer
1419  * underrun during the first iterations much
1420  * quicker than we calculate here, probably due to
1421  * the transport latency. To compensate for that
1422  * we artificially decrease the sleep time until
1423  * we have filled the buffer at least once
1424  * completely. */
1425
1426 if (pa_log_ratelimit())
1427 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1428 sleep_usec /= 2;
1429 }
1430
1431 /* OK, the playback buffer is now full, let's
1432 * calculate when to wake up next */
1433 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1434
1435 /* Convert from the sound card time domain to the
1436 * system time domain */
1437 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1438
1439 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1440
1441 /* We don't trust the conversion, so we wake up at whichever deadline comes first */
1442 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1443 }
1444
1445 u->first = FALSE;
1446 u->after_rewind = FALSE;
1447
1448 } else if (u->use_tsched)
1449
1450 /* OK, we're in an invalid state, let's disable our timers */
1451 pa_rtpoll_set_timer_disabled(u->rtpoll);
1452
1453 /* Hmm, nothing to do. Let's sleep */
1454 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1455 goto fail;
1456
1457 if (ret == 0)
1458 goto finish;
1459
1460 /* Tell ALSA about this and process its response */
1461 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1462 struct pollfd *pollfd;
1463 int err;
1464 unsigned n;
1465
1466 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1467
1468 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1469 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1470 goto fail;
1471 }
1472
1473 if (revents & ~POLLOUT) {
1474 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1475 goto fail;
1476
1477 u->first = TRUE;
1478 u->since_start = 0;
1479 } else if (revents && u->use_tsched && pa_log_ratelimit())
1480 pa_log_debug("Wakeup from ALSA!");
1481
1482 } else
1483 revents = 0;
1484 }
1485
1486 fail:
1487 /* If this was not a regular exit from the loop we have to continue
1488  * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1489 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1490 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1491
1492 finish:
1493 pa_log_debug("Thread shutting down");
1494 }
1495
1496 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1497 const char *n;
1498 char *t;
1499
1500 pa_assert(data);
1501 pa_assert(ma);
1502 pa_assert(device_name);
1503
1504 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1505 pa_sink_new_data_set_name(data, n);
1506 data->namereg_fail = TRUE;
1507 return;
1508 }
1509
1510 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1511 data->namereg_fail = TRUE;
1512 else {
1513 n = device_id ? device_id : device_name;
1514 data->namereg_fail = FALSE;
1515 }
1516
1517 if (mapping)
1518 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1519 else
1520 t = pa_sprintf_malloc("alsa_output.%s", n);
1521
1522 pa_sink_new_data_set_name(data, t);
1523 pa_xfree(t);
1524 }
1525
1526 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1527
1528 if (!mapping && !element)
1529 return;
1530
1531 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1532 pa_log_info("Failed to find a working mixer device.");
1533 return;
1534 }
1535
1536 if (element) {
1537
1538 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1539 goto fail;
1540
1541 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1542 goto fail;
1543
1544 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1545 pa_alsa_path_dump(u->mixer_path);
1546 } else {
1547
1548 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1549 goto fail;
1550
1551 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1552
1553 pa_log_debug("Probed mixer paths:");
1554 pa_alsa_path_set_dump(u->mixer_path_set);
1555 }
1556
1557 return;
1558
1559 fail:
1560
1561 if (u->mixer_path_set) {
1562 pa_alsa_path_set_free(u->mixer_path_set);
1563 u->mixer_path_set = NULL;
1564 } else if (u->mixer_path) {
1565 pa_alsa_path_free(u->mixer_path);
1566 u->mixer_path = NULL;
1567 }
1568
1569 if (u->mixer_handle) {
1570 snd_mixer_close(u->mixer_handle);
1571 u->mixer_handle = NULL;
1572 }
1573 }
1574
1575 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1576 pa_assert(u);
1577
1578 if (!u->mixer_handle)
1579 return 0;
1580
1581 if (u->sink->active_port) {
1582 pa_alsa_port_data *data;
1583
1584 /* We have a list of supported paths, so let's activate the
1585 * one that has been chosen as active */
1586
1587 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1588 u->mixer_path = data->path;
1589
1590 pa_alsa_path_select(data->path, u->mixer_handle);
1591
1592 if (data->setting)
1593 pa_alsa_setting_select(data->setting, u->mixer_handle);
1594
1595 } else {
1596
1597 if (!u->mixer_path && u->mixer_path_set)
1598 u->mixer_path = u->mixer_path_set->paths;
1599
1600 if (u->mixer_path) {
1601 /* Hmm, we have only a single path, so let's activate it */
1602
1603 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1604
1605 if (u->mixer_path->settings)
1606 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1607 } else
1608 return 0;
1609 }
1610
1611 if (!u->mixer_path->has_volume)
1612 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1613 else {
1614
1615 if (u->mixer_path->has_dB) {
1616 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1617
1618 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1619 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1620
1621 if (u->mixer_path->max_dB > 0.0)
1622 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1623 else
1624 pa_log_info("No particular base volume set, fixing to 0 dB");
1625
1626 } else {
1627 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1628 u->sink->base_volume = PA_VOLUME_NORM;
1629 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1630 }
1631
1632 u->sink->get_volume = sink_get_volume_cb;
1633 u->sink->set_volume = sink_set_volume_cb;
1634
1635 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
1636 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1637 }
1638
1639 if (!u->mixer_path->has_mute) {
1640 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1641 } else {
1642 u->sink->get_mute = sink_get_mute_cb;
1643 u->sink->set_mute = sink_set_mute_cb;
1644 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1645 pa_log_info("Using hardware mute control.");
1646 }
1647
1648 u->mixer_fdl = pa_alsa_fdlist_new();
1649
1650 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1651 pa_log("Failed to initialize file descriptor monitoring");
1652 return -1;
1653 }
1654
1655 if (u->mixer_path_set)
1656 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1657 else
1658 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1659
1660 return 0;
1661 }
1662
1663 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1664
1665 struct userdata *u = NULL;
1666 const char *dev_id = NULL;
1667 pa_sample_spec ss, requested_ss;
1668 pa_channel_map map;
1669 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1670 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1671 size_t frame_size;
1672 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1673 pa_sink_new_data data;
1674 pa_alsa_profile_set *profile_set = NULL;
1675
1676 pa_assert(m);
1677 pa_assert(ma);
1678
1679 ss = m->core->default_sample_spec;
1680 map = m->core->default_channel_map;
1681 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1682 pa_log("Failed to parse sample specification and channel map");
1683 goto fail;
1684 }
1685
1686 requested_ss = ss;
1687 frame_size = pa_frame_size(&ss);
1688
1689 nfrags = m->core->default_n_fragments;
1690 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1691 if (frag_size <= 0)
1692 frag_size = (uint32_t) frame_size;
1693 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1694 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1695
1696 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1697 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1698 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1699 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1700 pa_log("Failed to parse buffer metrics");
1701 goto fail;
1702 }
1703
1704 buffer_size = nfrags * frag_size;
1705
1706 period_frames = frag_size/frame_size;
1707 buffer_frames = buffer_size/frame_size;
1708 tsched_frames = tsched_size/frame_size;
1709
1710 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1711 pa_log("Failed to parse mmap argument.");
1712 goto fail;
1713 }
1714
1715 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1716 pa_log("Failed to parse tsched argument.");
1717 goto fail;
1718 }
1719
1720 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1721 pa_log("Failed to parse ignore_dB argument.");
1722 goto fail;
1723 }
1724
1725 use_tsched = pa_alsa_may_tsched(use_tsched);
1726
1727 u = pa_xnew0(struct userdata, 1);
1728 u->core = m->core;
1729 u->module = m;
1730 u->use_mmap = use_mmap;
1731 u->use_tsched = use_tsched;
1732 u->first = TRUE;
1733 u->rtpoll = pa_rtpoll_new();
1734 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1735
1736 u->smoother = pa_smoother_new(
1737 DEFAULT_TSCHED_BUFFER_USEC*2,
1738 DEFAULT_TSCHED_BUFFER_USEC*2,
1739 TRUE,
1740 TRUE,
1741 5,
1742 pa_rtclock_now(),
1743 TRUE);
1744 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1745
1746 dev_id = pa_modargs_get_value(
1747 ma, "device_id",
1748 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1749
1750 if (reserve_init(u, dev_id) < 0)
1751 goto fail;
1752
1753 if (reserve_monitor_init(u, dev_id) < 0)
1754 goto fail;
1755
1756 b = use_mmap;
1757 d = use_tsched;
1758
1759 if (mapping) {
1760
1761 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1762 pa_log("device_id= not set");
1763 goto fail;
1764 }
1765
1766 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1767 dev_id,
1768 &u->device_name,
1769 &ss, &map,
1770 SND_PCM_STREAM_PLAYBACK,
1771 &period_frames, &buffer_frames, tsched_frames,
1772 &b, &d, mapping)))
1773
1774 goto fail;
1775
1776 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1777
1778 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1779 goto fail;
1780
1781 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1782 dev_id,
1783 &u->device_name,
1784 &ss, &map,
1785 SND_PCM_STREAM_PLAYBACK,
1786 &period_frames, &buffer_frames, tsched_frames,
1787 &b, &d, profile_set, &mapping)))
1788
1789 goto fail;
1790
1791 } else {
1792
1793 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1794 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1795 &u->device_name,
1796 &ss, &map,
1797 SND_PCM_STREAM_PLAYBACK,
1798 &period_frames, &buffer_frames, tsched_frames,
1799 &b, &d, FALSE)))
1800 goto fail;
1801 }
1802
1803 pa_assert(u->device_name);
1804 pa_log_info("Successfully opened device %s.", u->device_name);
1805
1806 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1807 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1808 goto fail;
1809 }
1810
1811 if (mapping)
1812 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1813
1814 if (use_mmap && !b) {
1815 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1816 u->use_mmap = use_mmap = FALSE;
1817 }
1818
1819 if (use_tsched && (!b || !d)) {
1820 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1821 u->use_tsched = use_tsched = FALSE;
1822 }
1823
1824 if (u->use_mmap)
1825 pa_log_info("Successfully enabled mmap() mode.");
1826
1827 if (u->use_tsched)
1828 pa_log_info("Successfully enabled timer-based scheduling mode.");
1829
1830 /* ALSA might tweak the sample spec, so recalculate the frame size */
1831 frame_size = pa_frame_size(&ss);
1832
1833 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1834
1835 pa_sink_new_data_init(&data);
1836 data.driver = driver;
1837 data.module = m;
1838 data.card = card;
1839 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1840 pa_sink_new_data_set_sample_spec(&data, &ss);
1841 pa_sink_new_data_set_channel_map(&data, &map);
1842
1843 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1844 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1845 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1846 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1847 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1848
1849 if (mapping) {
1850 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1851 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1852 }
1853
1854 pa_alsa_init_description(data.proplist);
1855
1856 if (u->control_device)
1857 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1858
1859 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1860 pa_log("Invalid properties");
1861 pa_sink_new_data_done(&data);
1862 goto fail;
1863 }
1864
1865 if (u->mixer_path_set)
1866 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1867
1868 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1869 pa_sink_new_data_done(&data);
1870
1871 if (!u->sink) {
1872 pa_log("Failed to create sink object");
1873 goto fail;
1874 }
1875
1876 u->sink->parent.process_msg = sink_process_msg;
1877 if (u->use_tsched)
1878 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1879 u->sink->set_state = sink_set_state_cb;
1880 u->sink->set_port = sink_set_port_cb;
1881 u->sink->userdata = u;
1882
1883 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1884 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1885
1886 u->frame_size = frame_size;
1887 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1888 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1889 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1890
1891 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1892 (double) u->hwbuf_size / (double) u->fragment_size,
1893 (long unsigned) u->fragment_size,
1894 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1895 (long unsigned) u->hwbuf_size,
1896 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1897
1898 pa_sink_set_max_request(u->sink, u->hwbuf_size);
1899 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1900
1901 if (u->use_tsched) {
1902 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1903
1904 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1905 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1906
1907 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1908 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1909
1910 fix_min_sleep_wakeup(u);
1911 fix_tsched_watermark(u);
1912
1913 pa_sink_set_latency_range(u->sink,
1914 0,
1915 pa_bytes_to_usec(u->hwbuf_size, &ss));
1916
1917 pa_log_info("Time scheduling watermark is %0.2fms",
1918 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1919 } else
1920 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
1921
1922 reserve_update(u);
1923
1924 if (update_sw_params(u) < 0)
1925 goto fail;
1926
1927 if (setup_mixer(u, ignore_dB) < 0)
1928 goto fail;
1929
1930 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1931
1932 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
1933 pa_log("Failed to create thread.");
1934 goto fail;
1935 }
1936
1937 /* Get initial mixer settings */
1938 if (data.volume_is_set) {
1939 if (u->sink->set_volume)
1940 u->sink->set_volume(u->sink);
1941 } else {
1942 if (u->sink->get_volume)
1943 u->sink->get_volume(u->sink);
1944 }
1945
1946 if (data.muted_is_set) {
1947 if (u->sink->set_mute)
1948 u->sink->set_mute(u->sink);
1949 } else {
1950 if (u->sink->get_mute)
1951 u->sink->get_mute(u->sink);
1952 }
1953
1954 pa_sink_put(u->sink);
1955
1956 if (profile_set)
1957 pa_alsa_profile_set_free(profile_set);
1958
1959 return u->sink;
1960
1961 fail:
1962
1963 if (u)
1964 userdata_free(u);
1965
1966 if (profile_set)
1967 pa_alsa_profile_set_free(profile_set);
1968
1969 return NULL;
1970 }
1971
1972 static void userdata_free(struct userdata *u) {
1973 pa_assert(u);
1974
1975 if (u->sink)
1976 pa_sink_unlink(u->sink);
1977
1978 if (u->thread) {
1979 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1980 pa_thread_free(u->thread);
1981 }
1982
1983 pa_thread_mq_done(&u->thread_mq);
1984
1985 if (u->sink)
1986 pa_sink_unref(u->sink);
1987
1988 if (u->memchunk.memblock)
1989 pa_memblock_unref(u->memchunk.memblock);
1990
1991 if (u->alsa_rtpoll_item)
1992 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1993
1994 if (u->rtpoll)
1995 pa_rtpoll_free(u->rtpoll);
1996
1997 if (u->pcm_handle) {
1998 snd_pcm_drop(u->pcm_handle);
1999 snd_pcm_close(u->pcm_handle);
2000 }
2001
2002 if (u->mixer_fdl)
2003 pa_alsa_fdlist_free(u->mixer_fdl);
2004
2005 if (u->mixer_path_set)
2006 pa_alsa_path_set_free(u->mixer_path_set);
2007 else if (u->mixer_path)
2008 pa_alsa_path_free(u->mixer_path);
2009
2010 if (u->mixer_handle)
2011 snd_mixer_close(u->mixer_handle);
2012
2013 if (u->smoother)
2014 pa_smoother_free(u->smoother);
2015
2016 reserve_done(u);
2017 monitor_done(u);
2018
2019 pa_xfree(u->device_name);
2020 pa_xfree(u->control_device);
2021 pa_xfree(u);
2022 }
2023
2024 void pa_alsa_sink_free(pa_sink *s) {
2025 struct userdata *u;
2026
2027 pa_sink_assert_ref(s);
2028 pa_assert_se(u = s->userdata);
2029
2030 userdata_free(u);
2031 }