/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)              /* 2s    -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)         /* 20ms  -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)        /* 10ms  -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)         /* 5ms   -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)     /* 20s   -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)    /* 0ms   -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC)  /* 100ms -- If the buffer level didn't drop below this threshold within the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                 /* 10ms  -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                 /* 4ms   -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                   /* 10s   -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                    /* 1s    -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                  /* 2ms   -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)                /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */

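/* A quick sanity check of the values above, assuming 44.1 kHz S16LE
 * stereo (frame_size = 4 bytes): the 2s default buffer comes out at
 * 88200 frames = 352800 bytes, and the 20ms initial watermark at
 * 882 frames = 3528 bytes. All of the watermark logic below works on
 * byte counts derived this way via pa_usec_to_bytes(). */
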
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    uint32_t old_rate;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static pa_bool_t is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static pa_bool_t is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_bool_t b;

    pa_assert(w);
    pa_assert(u);

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}

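/* The adaptive watermark scheme in a nutshell: when we underrun (or the
 * fill level drops below watermark_inc_threshold) increase_watermark()
 * first grows the wakeup watermark and, once that is maxed out, widens
 * the sink's minimal latency instead. If the fill level then stays above
 * watermark_dec_threshold for TSCHED_WATERMARK_VERIFY_AFTER_USEC,
 * decrease_watermark() slowly takes the watermark back down again. */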
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

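/* Split the effective latency into a sleep period and a processing
 * window. For example, a requested latency of 25ms combined with a 10ms
 * watermark yields *sleep_usec = 15ms and *process_usec = 10ms; if the
 * watermark exceeds the requested latency, it is first cut down to half
 * of the latency. */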
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

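/* Given how many bytes ALSA reports as writable (n_bytes), work out how
 * much is still queued for playback. This fill level is what drives the
 * adaptive watermark logic above. */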
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}

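/* Refill the hardware buffer via the mmap transfer path: the sink
 * renders directly into the DMA area obtained with snd_pcm_mmap_begin(),
 * so no extra copy is needed. Returns 1 if data was written, 0 if not,
 * negative on error; *sleep_usec tells the caller how long it may sleep
 * before the next refill is due. */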
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }


        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

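/* Same refill logic as mmap_write() above, but without mmap: data is
 * first rendered into u->memchunk and then pushed to the device with
 * snd_pcm_writei(). Used when the mmap access mode isn't available or
 * was disabled. */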
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

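/* Feed the time smoother a fresh (system time, playback time) sample.
 * The playback time is derived from the write counter minus the delay
 * reported by the driver; pa_smoother then interpolates between these
 * updates so latency queries don't have to hit the device every time. */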
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}

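/* The sink latency is estimated as the bytes written so far (expressed
 * as time) minus the smoother's idea of the current playback time, plus
 * whatever is still pending in u->memchunk. E.g. if 1000ms worth of
 * audio has been handed to ALSA and the smoother says 980ms of it has
 * been played, the latency is roughly 20ms. */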
static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can't influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}

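/* Both sink creation and unsuspend() funnel through here, so on resume
 * the watermark is reset to the value the sink was originally configured
 * with (tsched_watermark_ref) rather than keeping whatever the adaptive
 * logic had raised it to during the previous active period. */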
/* Called from IO Context on unsuspend or from main thread when creating sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_sink_set_latency_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    /* reset the watermark to the value defined when sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_FINISH_MOVE:
        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            u->old_rate = u->sink->sample_spec.rate;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == i->thread_info.sample_spec.rate)
                break;

            /* .. we do */
            if ((r = suspend(u)) < 0)
                return r;

            u->sink->sample_spec.rate = i->thread_info.sample_spec.rate;

            if ((r = unsuspend(u)) < 0)
                return r;

            break;
        }

        case PA_SINK_MESSAGE_START_MOVE:
        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == u->old_rate)
                break;

            /* .. we do */
            if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = suspend(u)) < 0))
                return r;

            u->sink->sample_spec.rate = u->old_rate;

            if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = unsuspend(u)) < 0))
                return r;

            break;
        }

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

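/* Two flavours of mixer event callback: ctl_mixer_callback runs from the
 * main thread (fdlist-based monitoring), while io_mixer_callback runs
 * from the IO thread (rtpoll-based, used with deferred volume); see
 * setup_mixer() below for how they are wired up. */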
static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

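/* Decide, based on what the probed mixer path offers, whether volume and
 * mute are handled in hardware (optionally with a dB scale and deferred,
 * synchronous writes) or are left to software emulation. */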
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled synchronous volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    mixer_volume_init(u);

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f))
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f))
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
    }

    return TRUE;
}

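/* Rewind arithmetic: only data still queued in the ring buffer, minus a
 * safety margin near the DMA read pointer, may be taken back. E.g. with
 * a 352800-byte buffer, 100000 bytes of free space reported by
 * snd_pcm_avail() and a 256-byte safeguard, at most
 * 352800 - 100256 = 252544 bytes can be rewound, no matter how much was
 * requested. */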
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}

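/* The sink's IO thread (made realtime if so configured): render and
 * write audio, apply deferred volume changes, then sleep on the rtpoll
 * until either the computed wakeup timer fires (timer scheduling) or
 * ALSA signals POLLOUT. */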
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;

        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0)
                rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this wasn't a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1809
1810 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1811 const char *n;
1812 char *t;
1813
1814 pa_assert(data);
1815 pa_assert(ma);
1816 pa_assert(device_name);
1817
1818 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1819 pa_sink_new_data_set_name(data, n);
1820 data->namereg_fail = TRUE;
1821 return;
1822 }
1823
1824 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1825 data->namereg_fail = TRUE;
1826 else {
1827 n = device_id ? device_id : device_name;
1828 data->namereg_fail = FALSE;
1829 }
1830
1831 if (mapping)
1832 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1833 else
1834 t = pa_sprintf_malloc("alsa_output.%s", n);
1835
1836 pa_sink_new_data_set_name(data, t);
1837 pa_xfree(t);
1838 }
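/* Illustrative results of the naming scheme above (the device id here is
 * made up): with device_id "pci-0000_00_1b.0" and a mapping named
 * "analog-stereo" this yields "alsa_output.pci-0000_00_1b.0.analog-stereo";
 * without a mapping, simply "alsa_output.pci-0000_00_1b.0". */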
1839
1840 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1841
1842 if (!mapping && !element)
1843 return;
1844
1845 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1846 pa_log_info("Failed to find a working mixer device.");
1847 return;
1848 }
1849
1850 if (element) {
1851
1852 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1853 goto fail;
1854
1855 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1856 goto fail;
1857
1858 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1859 pa_alsa_path_dump(u->mixer_path);
1860 } else {
1861
1862 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1863 goto fail;
1864
1865 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1866 }
1867
1868 return;
1869
1870 fail:
1871
1872 if (u->mixer_path_set) {
1873 pa_alsa_path_set_free(u->mixer_path_set);
1874 u->mixer_path_set = NULL;
1875 } else if (u->mixer_path) {
1876 pa_alsa_path_free(u->mixer_path);
1877 u->mixer_path = NULL;
1878 }
1879
1880 if (u->mixer_handle) {
1881 snd_mixer_close(u->mixer_handle);
1882 u->mixer_handle = NULL;
1883 }
1884 }
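/* Usage sketch: if the module was loaded with e.g. control=PCM (the
 * element name is illustrative), the element branch above synthesizes and
 * probes a single output path for that one mixer element; without a
 * control= argument the mapping's whole path set is probed instead. */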
1885
1886
1887 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1888 pa_bool_t need_mixer_callback = FALSE;
1889
1890 pa_assert(u);
1891
1892 if (!u->mixer_handle)
1893 return 0;
1894
1895 if (u->sink->active_port) {
1896 pa_alsa_port_data *data;
1897
1898 /* We have a list of supported paths, so let's activate the
1899 * one that has been chosen as active */
1900
1901 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1902 u->mixer_path = data->path;
1903
1904 pa_alsa_path_select(data->path, u->mixer_handle);
1905
1906 if (data->setting)
1907 pa_alsa_setting_select(data->setting, u->mixer_handle);
1908
1909 } else {
1910
1911 if (!u->mixer_path && u->mixer_path_set)
1912 u->mixer_path = u->mixer_path_set->paths;
1913
1914 if (u->mixer_path) {
1915 /* Hmm, we have only a single path, so let's activate it */
1916
1917 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1918
1919 if (u->mixer_path->settings)
1920 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1921 } else
1922 return 0;
1923 }
1924
1925 mixer_volume_init(u);
1926
1927 /* Will we need to register callbacks? */
1928 if (u->mixer_path_set && u->mixer_path_set->paths) {
1929 pa_alsa_path *p;
1930
1931 PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1932 if (p->has_volume || p->has_mute)
1933 need_mixer_callback = TRUE;
1934 }
1935 }
1936 else if (u->mixer_path)
1937 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1938
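/* Two monitoring styles follow: with deferred volume the mixer is watched
 * from the IO thread's rtpoll via io_mixer_callback, otherwise mixer
 * events are dispatched from the main loop fd list via ctl_mixer_callback. */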
1939 if (need_mixer_callback) {
1940 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1941 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1942 u->mixer_pd = pa_alsa_mixer_pdata_new();
1943 mixer_callback = io_mixer_callback;
1944
1945 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1946 pa_log("Failed to initialize file descriptor monitoring");
1947 return -1;
1948 }
1949 } else {
1950 u->mixer_fdl = pa_alsa_fdlist_new();
1951 mixer_callback = ctl_mixer_callback;
1952
1953 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1954 pa_log("Failed to initialize file descriptor monitoring");
1955 return -1;
1956 }
1957 }
1958
1959 if (u->mixer_path_set)
1960 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1961 else
1962 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1963 }
1964
1965 return 0;
1966 }
1967
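/* Sketch of a typical invocation, assuming this is reached through
 * module-alsa-sink (argument values are illustrative):
 *
 * load-module module-alsa-sink device_id=0 tsched=1 fragments=4
 *
 * The modargs consumed below include device/device_id, sink_name,
 * fragments, fragment_size, tsched, tsched_buffer_size,
 * tsched_buffer_watermark, mmap, ignore_dB, rewind_safeguard,
 * deferred_volume, control and sink_properties. */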
1968 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1969
1970 struct userdata *u = NULL;
1971 const char *dev_id = NULL;
1972 pa_sample_spec ss;
1973 pa_channel_map map;
1974 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1975 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1976 size_t frame_size;
1977 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE;
1978 pa_sink_new_data data;
1979 pa_alsa_profile_set *profile_set = NULL;
1980
1981 pa_assert(m);
1982 pa_assert(ma);
1983
1984 ss = m->core->default_sample_spec;
1985 map = m->core->default_channel_map;
1986 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1987 pa_log("Failed to parse sample specification and channel map");
1988 goto fail;
1989 }
1990
1991 frame_size = pa_frame_size(&ss);
1992
1993 nfrags = m->core->default_n_fragments;
1994 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1995 if (frag_size <= 0)
1996 frag_size = (uint32_t) frame_size;
1997 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1998 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1999
2000 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2001 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2002 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2003 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2004 pa_log("Failed to parse buffer metrics");
2005 goto fail;
2006 }
2007
2008 buffer_size = nfrags * frag_size;
2009
2010 period_frames = frag_size/frame_size;
2011 buffer_frames = buffer_size/frame_size;
2012 tsched_frames = tsched_size/frame_size;
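/* Worked example, assuming illustrative defaults of 4 fragments of 25 ms
 * with S16LE stereo at 44100 Hz: frame_size = 4 bytes, so frag_size is
 * about 4410 bytes, buffer_size about 17640 bytes, period_frames about
 * 1102 and buffer_frames about 4410, while the 2 s tsched_size comes to
 * about 352800 bytes. */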
2013
2014 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2015 pa_log("Failed to parse mmap argument.");
2016 goto fail;
2017 }
2018
2019 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2020 pa_log("Failed to parse tsched argument.");
2021 goto fail;
2022 }
2023
2024 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2025 pa_log("Failed to parse ignore_dB argument.");
2026 goto fail;
2027 }
2028
2029 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2030 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2031 pa_log("Failed to parse rewind_safeguard argument");
2032 goto fail;
2033 }
2034
2035 deferred_volume = m->core->deferred_volume;
2036 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2037 pa_log("Failed to parse deferred_volume argument.");
2038 goto fail;
2039 }
2040
2041 use_tsched = pa_alsa_may_tsched(use_tsched);
2042
2043 u = pa_xnew0(struct userdata, 1);
2044 u->core = m->core;
2045 u->module = m;
2046 u->use_mmap = use_mmap;
2047 u->use_tsched = use_tsched;
2048 u->deferred_volume = deferred_volume;
2049 u->first = TRUE;
2050 u->rewind_safeguard = rewind_safeguard;
2051 u->rtpoll = pa_rtpoll_new();
2052 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2053
2054 u->smoother = pa_smoother_new(
2055 SMOOTHER_ADJUST_USEC, /* adjust time */
2056 SMOOTHER_WINDOW_USEC, /* history window size */
2057 TRUE, /* monotonic */
2058 TRUE, /* apply smoothing */
2059 5, /* min required history entries */
2060 pa_rtclock_now(), /* time offset */
2061 TRUE); /* start in paused state */
2062 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2063
2064 dev_id = pa_modargs_get_value(
2065 ma, "device_id",
2066 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2067
2068 if (reserve_init(u, dev_id) < 0)
2069 goto fail;
2070
2071 if (reserve_monitor_init(u, dev_id) < 0)
2072 goto fail;
2073
2074 b = use_mmap;
2075 d = use_tsched;
2076
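/* Three ways of opening the PCM device, tried in this order: an explicit
 * mapping plus device_id, a device_id alone (auto-detecting a mapping via
 * a freshly built profile set), or a plain device string. */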
2077 if (mapping) {
2078
2079 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2080 pa_log("device_id= not set");
2081 goto fail;
2082 }
2083
2084 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2085 dev_id,
2086 &u->device_name,
2087 &ss, &map,
2088 SND_PCM_STREAM_PLAYBACK,
2089 &period_frames, &buffer_frames, tsched_frames,
2090 &b, &d, mapping)))
2091 goto fail;
2092
2093 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2094
2095 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2096 goto fail;
2097
2098 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2099 dev_id,
2100 &u->device_name,
2101 &ss, &map,
2102 SND_PCM_STREAM_PLAYBACK,
2103 &period_frames, &buffer_frames, tsched_frames,
2104 &b, &d, profile_set, &mapping)))
2105 goto fail;
2106
2107 } else {
2108
2109 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2110 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2111 &u->device_name,
2112 &ss, &map,
2113 SND_PCM_STREAM_PLAYBACK,
2114 &period_frames, &buffer_frames, tsched_frames,
2115 &b, &d, FALSE)))
2116 goto fail;
2117 }
2118
2119 pa_assert(u->device_name);
2120 pa_log_info("Successfully opened device %s.", u->device_name);
2121
2122 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2123 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
2124 goto fail;
2125 }
2126
2127 if (mapping)
2128 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2129
2130 if (use_mmap && !b) {
2131 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2132 u->use_mmap = use_mmap = FALSE;
2133 }
2134
2135 if (use_tsched && (!b || !d)) {
2136 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2137 u->use_tsched = use_tsched = FALSE;
2138 }
2139
2140 if (u->use_mmap)
2141 pa_log_info("Successfully enabled mmap() mode.");
2142
2143 if (u->use_tsched)
2144 pa_log_info("Successfully enabled timer-based scheduling mode.");
2145
2146 if (is_iec958(u) || is_hdmi(u))
2147 set_formats = TRUE;
2148
2149 /* ALSA might tweak the sample spec, so recalculate the frame size */
2150 frame_size = pa_frame_size(&ss);
2151
2152 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2153
2154 pa_sink_new_data_init(&data);
2155 data.driver = driver;
2156 data.module = m;
2157 data.card = card;
2158 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2159
2160 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2161 * variable instead of using &data.namereg_fail directly, because
2162 * data.namereg_fail is a bitfield and taking the address of a bitfield
2163 * variable is impossible. */
2164 namereg_fail = data.namereg_fail;
2165 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2166 pa_log("Failed to parse namereg_fail argument.");
2167 pa_sink_new_data_done(&data);
2168 goto fail;
2169 }
2170 data.namereg_fail = namereg_fail;
2171
2172 pa_sink_new_data_set_sample_spec(&data, &ss);
2173 pa_sink_new_data_set_channel_map(&data, &map);
2174
2175 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2176 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2177 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2178 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2179 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2180
2181 if (mapping) {
2182 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2183 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2184 }
2185
2186 pa_alsa_init_description(data.proplist);
2187
2188 if (u->control_device)
2189 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2190
2191 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2192 pa_log("Invalid properties");
2193 pa_sink_new_data_done(&data);
2194 goto fail;
2195 }
2196
2197 if (u->mixer_path_set)
2198 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
2199
2200 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2201 (set_formats ? PA_SINK_SET_FORMATS : 0));
2202 pa_sink_new_data_done(&data);
2203
2204 if (!u->sink) {
2205 pa_log("Failed to create sink object");
2206 goto fail;
2207 }
2208
2209 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2210 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2211 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2212 goto fail;
2213 }
2214
2215 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2216 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2217 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2218 goto fail;
2219 }
2220
2221 u->sink->parent.process_msg = sink_process_msg;
2222 if (u->use_tsched)
2223 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2224 u->sink->set_state = sink_set_state_cb;
2225 u->sink->set_port = sink_set_port_cb;
2226 u->sink->userdata = u;
2227
2228 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2229 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2230
2231 u->frame_size = frame_size;
2232 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2233 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2234 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2235
2236 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2237 (double) u->hwbuf_size / (double) u->fragment_size,
2238 (long unsigned) u->fragment_size,
2239 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2240 (long unsigned) u->hwbuf_size,
2241 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2242
2243 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2244 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2245 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2246 else {
2247 pa_log_info("Disabling rewind for device %s", u->device_name);
2248 pa_sink_set_max_rewind(u->sink, 0);
2249 }
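/* Only plain hw devices are trusted here to rewind reliably; for anything
 * going through ALSA plugin layers the maximum rewind is set to 0 above,
 * disabling rewinds altogether. */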
2250
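/* Back-of-the-envelope, assuming S16LE stereo at 44100 Hz (176400
 * bytes/s): the default 2 s tsched buffer corresponds to 352800 bytes and
 * the initial 20 ms watermark to 3528 bytes, i.e. the sink initially wakes
 * up when roughly 1% of the buffer remains unplayed. */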
2251 if (u->use_tsched) {
2252 u->tsched_watermark_ref = tsched_watermark;
2253 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2254 } else
2255 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2256
2257 reserve_update(u);
2258
2259 if (update_sw_params(u) < 0)
2260 goto fail;
2261
2262 if (setup_mixer(u, ignore_dB) < 0)
2263 goto fail;
2264
2265 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2266
2267 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2268 pa_log("Failed to create thread.");
2269 goto fail;
2270 }
2271
2272 /* Get initial mixer settings */
2273 if (data.volume_is_set) {
2274 if (u->sink->set_volume)
2275 u->sink->set_volume(u->sink);
2276 } else {
2277 if (u->sink->get_volume)
2278 u->sink->get_volume(u->sink);
2279 }
2280
2281 if (data.muted_is_set) {
2282 if (u->sink->set_mute)
2283 u->sink->set_mute(u->sink);
2284 } else {
2285 if (u->sink->get_mute)
2286 u->sink->get_mute(u->sink);
2287 }
2288
2289 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2290 u->sink->write_volume(u->sink);
2291
2292 if (set_formats) {
2293 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2294 pa_format_info *format;
2295
2296 /* To start with, we only support PCM formats. Other formats may be added
2297 * with pa_sink_set_formats(). */
2298 format = pa_format_info_new();
2299 format->encoding = PA_ENCODING_PCM;
2300 u->formats = pa_idxset_new(NULL, NULL);
2301 pa_idxset_put(u->formats, format, NULL);
2302
2303 u->sink->get_formats = sink_get_formats;
2304 u->sink->set_formats = sink_set_formats;
2305 }
2306
2307 pa_sink_put(u->sink);
2308
2309 if (profile_set)
2310 pa_alsa_profile_set_free(profile_set);
2311
2312 return u->sink;
2313
2314 fail:
2315
2316 if (u)
2317 userdata_free(u);
2318
2319 if (profile_set)
2320 pa_alsa_profile_set_free(profile_set);
2321
2322 return NULL;
2323 }
2324
2325 static void userdata_free(struct userdata *u) {
2326 pa_assert(u);
2327
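/* Tear-down happens roughly in reverse order of construction: unlink the
 * sink so no new data gets routed to it, stop the IO thread, then release
 * ALSA and helper resources before freeing the struct itself. */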
2328 if (u->sink)
2329 pa_sink_unlink(u->sink);
2330
2331 if (u->thread) {
2332 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2333 pa_thread_free(u->thread);
2334 }
2335
2336 pa_thread_mq_done(&u->thread_mq);
2337
2338 if (u->sink)
2339 pa_sink_unref(u->sink);
2340
2341 if (u->memchunk.memblock)
2342 pa_memblock_unref(u->memchunk.memblock);
2343
2344 if (u->mixer_pd)
2345 pa_alsa_mixer_pdata_free(u->mixer_pd);
2346
2347 if (u->alsa_rtpoll_item)
2348 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2349
2350 if (u->rtpoll)
2351 pa_rtpoll_free(u->rtpoll);
2352
2353 if (u->pcm_handle) {
2354 snd_pcm_drop(u->pcm_handle);
2355 snd_pcm_close(u->pcm_handle);
2356 }
2357
2358 if (u->mixer_fdl)
2359 pa_alsa_fdlist_free(u->mixer_fdl);
2360
2361 if (u->mixer_path_set)
2362 pa_alsa_path_set_free(u->mixer_path_set);
2363 else if (u->mixer_path)
2364 pa_alsa_path_free(u->mixer_path);
2365
2366 if (u->mixer_handle)
2367 snd_mixer_close(u->mixer_handle);
2368
2369 if (u->smoother)
2370 pa_smoother_free(u->smoother);
2371
2372 if (u->formats)
2373 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2374
2375 reserve_done(u);
2376 monitor_done(u);
2377
2378 pa_xfree(u->device_name);
2379 pa_xfree(u->control_device);
2380 pa_xfree(u);
2381 }
2382
2383 void pa_alsa_sink_free(pa_sink *s) {
2384 struct userdata *u;
2385
2386 pa_sink_assert_ref(s);
2387 pa_assert_se(u = s->userdata);
2388
2389 userdata_free(u);
2390 }