1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39 #include <pulse/internal.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/module.h>
44 #include <pulsecore/memchunk.h>
45 #include <pulsecore/sink.h>
46 #include <pulsecore/modargs.h>
47 #include <pulsecore/core-rtclock.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/sample-util.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/macro.h>
52 #include <pulsecore/thread.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to recheck whether things are good again */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
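/* For orientation, a worked example assuming S16LE stereo at 48 kHz (a
 * 4-byte frame): the 20ms default watermark corresponds to 960 frames or
 * 3840 bytes, and the 2s default buffer to 96000 frames or 384000 bytes.
 * All *_USEC values above are converted into byte counts like this via
 * pa_usec_to_bytes() once the actual sample spec is known. */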
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty */
80
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
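/* The update interval starts at SMOOTHER_MIN_INTERVAL and doubles on every
 * accepted smoother update (see update_smoother() below), so it reaches the
 * cap after seven updates: 2, 4, 8, 16, 32, 64, 128ms, then 256 is clamped
 * to 200ms. */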
86
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't bother extending granularity in software unless the difference exceeds this level */
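/* With PA_VOLUME_NORM == 0x10000 this amounts to an absolute tolerance of
 * 655, i.e. software correction is skipped while the residual factor stays
 * within roughly +/-1% of unity. */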
88
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms; depending on channels/rate/sample format this may amount to more than the 256 bytes above */
91
92 struct userdata {
93 pa_core *core;
94 pa_module *module;
95 pa_sink *sink;
96
97 pa_thread *thread;
98 pa_thread_mq thread_mq;
99 pa_rtpoll *rtpoll;
100
101 snd_pcm_t *pcm_handle;
102
103 char *paths_dir;
104 pa_alsa_fdlist *mixer_fdl;
105 pa_alsa_mixer_pdata *mixer_pd;
106 snd_mixer_t *mixer_handle;
107 pa_alsa_path_set *mixer_path_set;
108 pa_alsa_path *mixer_path;
109
110 pa_cvolume hardware_volume;
111
112 unsigned int *rates;
113
114 size_t
115 frame_size,
116 fragment_size,
117 hwbuf_size,
118 tsched_watermark,
119 tsched_watermark_ref,
120 hwbuf_unused,
121 min_sleep,
122 min_wakeup,
123 watermark_inc_step,
124 watermark_dec_step,
125 watermark_inc_threshold,
126 watermark_dec_threshold,
127 rewind_safeguard;
128
129 pa_usec_t watermark_dec_not_before;
130 pa_usec_t min_latency_ref;
131
132 pa_memchunk memchunk;
133
134 char *device_name; /* name of the PCM device */
135 char *control_device; /* name of the control device */
136
137 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
138
139 pa_bool_t first, after_rewind;
140
141 pa_rtpoll_item *alsa_rtpoll_item;
142
143 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
144
145 pa_smoother *smoother;
146 uint64_t write_count;
147 uint64_t since_start;
148 pa_usec_t smoother_interval;
149 pa_usec_t last_smoother_update;
150
151 pa_idxset *formats;
152
153 pa_reserve_wrapper *reserve;
154 pa_hook_slot *reserve_slot;
155 pa_reserve_monitor_wrapper *monitor;
156 pa_hook_slot *monitor_slot;
157 };
158
159 static void userdata_free(struct userdata *u);
160
161 /* FIXME: Is there a better way to do this than device names? */
162 static pa_bool_t is_iec958(struct userdata *u) {
163 return (strncmp("iec958", u->device_name, 6) == 0);
164 }
165
166 static pa_bool_t is_hdmi(struct userdata *u) {
167 return (strncmp("hdmi", u->device_name, 4) == 0);
168 }
169
170 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
171 pa_assert(r);
172 pa_assert(u);
173
174 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
175 return PA_HOOK_CANCEL;
176
177 return PA_HOOK_OK;
178 }
179
180 static void reserve_done(struct userdata *u) {
181 pa_assert(u);
182
183 if (u->reserve_slot) {
184 pa_hook_slot_free(u->reserve_slot);
185 u->reserve_slot = NULL;
186 }
187
188 if (u->reserve) {
189 pa_reserve_wrapper_unref(u->reserve);
190 u->reserve = NULL;
191 }
192 }
193
194 static void reserve_update(struct userdata *u) {
195 const char *description;
196 pa_assert(u);
197
198 if (!u->sink || !u->reserve)
199 return;
200
201 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
202 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
203 }
204
205 static int reserve_init(struct userdata *u, const char *dname) {
206 char *rname;
207
208 pa_assert(u);
209 pa_assert(dname);
210
211 if (u->reserve)
212 return 0;
213
214 if (pa_in_system_mode())
215 return 0;
216
217 if (!(rname = pa_alsa_get_reserve_name(dname)))
218 return 0;
219
220 /* We are resuming, try to lock the device */
221 u->reserve = pa_reserve_wrapper_get(u->core, rname);
222 pa_xfree(rname);
223
224 if (!(u->reserve))
225 return -1;
226
227 reserve_update(u);
228
229 pa_assert(!u->reserve_slot);
230 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
231
232 return 0;
233 }
234
235 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
236 pa_bool_t b;
237
238 pa_assert(w);
239 pa_assert(u);
240
241 b = PA_PTR_TO_UINT(busy) && !u->reserve;
242
243 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
244 return PA_HOOK_OK;
245 }
246
247 static void monitor_done(struct userdata *u) {
248 pa_assert(u);
249
250 if (u->monitor_slot) {
251 pa_hook_slot_free(u->monitor_slot);
252 u->monitor_slot = NULL;
253 }
254
255 if (u->monitor) {
256 pa_reserve_monitor_wrapper_unref(u->monitor);
257 u->monitor = NULL;
258 }
259 }
260
261 static int reserve_monitor_init(struct userdata *u, const char *dname) {
262 char *rname;
263
264 pa_assert(u);
265 pa_assert(dname);
266
267 if (pa_in_system_mode())
268 return 0;
269
270 if (!(rname = pa_alsa_get_reserve_name(dname)))
271 return 0;
272
273 /* Watch whether somebody else holds a lock on the device */
274 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
275 pa_xfree(rname);
276
277 if (!(u->monitor))
278 return -1;
279
280 pa_assert(!u->monitor_slot);
281 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
282
283 return 0;
284 }
285
286 static void fix_min_sleep_wakeup(struct userdata *u) {
287 size_t max_use, max_use_2;
288
289 pa_assert(u);
290 pa_assert(u->use_tsched);
291
292 max_use = u->hwbuf_size - u->hwbuf_unused;
293 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
294
295 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
296 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
297
298 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
299 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
300 }
301
302 static void fix_tsched_watermark(struct userdata *u) {
303 size_t max_use;
304 pa_assert(u);
305 pa_assert(u->use_tsched);
306
307 max_use = u->hwbuf_size - u->hwbuf_unused;
308
309 if (u->tsched_watermark > max_use - u->min_sleep)
310 u->tsched_watermark = max_use - u->min_sleep;
311
312 if (u->tsched_watermark < u->min_wakeup)
313 u->tsched_watermark = u->min_wakeup;
314 }
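/* A worked example for the clamp above, assuming S16LE stereo at 48 kHz
 * with hwbuf_size = 384000 bytes and hwbuf_unused = 0: min_sleep (10ms) is
 * 1920 bytes and min_wakeup (4ms) is 768 bytes, so any watermark is forced
 * into the range [768, 384000 - 1920] = [768, 382080] bytes. */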
315
316 static void increase_watermark(struct userdata *u) {
317 size_t old_watermark;
318 pa_usec_t old_min_latency, new_min_latency;
319
320 pa_assert(u);
321 pa_assert(u->use_tsched);
322
323 /* First, just try to increase the watermark */
324 old_watermark = u->tsched_watermark;
325 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
326 fix_tsched_watermark(u);
327
328 if (old_watermark != u->tsched_watermark) {
329 pa_log_info("Increasing wakeup watermark to %0.2f ms",
330 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
331 return;
332 }
333
334 /* Hmm, we cannot increase the watermark any further, hence let's
335 raise the latency, unless doing so was disabled in
336 the configuration */
337 if (u->fixed_latency_range)
338 return;
339
340 old_min_latency = u->sink->thread_info.min_latency;
341 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
342 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
343
344 if (old_min_latency != new_min_latency) {
345 pa_log_info("Increasing minimal latency to %0.2f ms",
346 (double) new_min_latency / PA_USEC_PER_MSEC);
347
348 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
349 }
350
351 /* When we reach this we're officially fucked! */
352 }
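/* The growth rule above, min(wm*2, wm + inc_step), doubles small watermarks
 * but grows large ones only linearly: with the default 10ms step a 4ms
 * watermark becomes 8ms, while a 20ms watermark becomes 30ms rather than
 * 40ms. */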
353
354 static void decrease_watermark(struct userdata *u) {
355 size_t old_watermark;
356 pa_usec_t now;
357
358 pa_assert(u);
359 pa_assert(u->use_tsched);
360
361 now = pa_rtclock_now();
362
363 if (u->watermark_dec_not_before <= 0)
364 goto restart;
365
366 if (u->watermark_dec_not_before > now)
367 return;
368
369 old_watermark = u->tsched_watermark;
370
371 if (u->tsched_watermark < u->watermark_dec_step)
372 u->tsched_watermark = u->tsched_watermark / 2;
373 else
374 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
375
376 fix_tsched_watermark(u);
377
378 if (old_watermark != u->tsched_watermark)
379 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
380 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
381
382 /* We don't change the latency range */
383
384 restart:
385 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
386 }
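/* Decay is deliberately more conservative than growth: with the default 5ms
 * step a 30ms watermark shrinks to max(30/2, 30-5) = 25ms, and only if the
 * fill level stayed above the decrease threshold for the whole 20s
 * verification window. */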
387
388 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
389 pa_usec_t usec, wm;
390
391 pa_assert(sleep_usec);
392 pa_assert(process_usec);
393
394 pa_assert(u);
395 pa_assert(u->use_tsched);
396
397 usec = pa_sink_get_requested_latency_within_thread(u->sink);
398
399 if (usec == (pa_usec_t) -1)
400 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
401
402 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
403
404 if (wm > usec)
405 wm = usec/2;
406
407 *sleep_usec = usec - wm;
408 *process_usec = wm;
409
410 #ifdef DEBUG_TIMING
411 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
412 (unsigned long) (usec / PA_USEC_PER_MSEC),
413 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
414 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
415 #endif
416 }
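/* E.g. a requested latency of 2s with a 20ms watermark splits into
 * sleep_usec = 1980ms and process_usec = 20ms; if the watermark exceeds the
 * requested latency (very low latency clients), both halves collapse to
 * usec/2 each. */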
417
418 static int try_recover(struct userdata *u, const char *call, int err) {
419 pa_assert(u);
420 pa_assert(call);
421 pa_assert(err < 0);
422
423 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
424
425 pa_assert(err != -EAGAIN);
426
427 if (err == -EPIPE)
428 pa_log_debug("%s: Buffer underrun!", call);
429
430 if (err == -ESTRPIPE)
431 pa_log_debug("%s: System suspended!", call);
432
433 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
434 pa_log("%s: %s", call, pa_alsa_strerror(err));
435 return -1;
436 }
437
438 u->first = TRUE;
439 u->since_start = 0;
440 return 0;
441 }
442
443 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
444 size_t left_to_play;
445 pa_bool_t underrun = FALSE;
446
447 /* We use <= instead of < for this check here because an underrun
448 * only happens after the last sample was processed, not when it is
449 * merely removed from the buffer. This is particularly important
450 * when block transfer is used. */
451
452 if (n_bytes <= u->hwbuf_size)
453 left_to_play = u->hwbuf_size - n_bytes;
454 else {
455
456 /* We got a dropout. What a mess! */
457 left_to_play = 0;
458 underrun = TRUE;
459
460 #ifdef DEBUG_TIMING
461 PA_DEBUG_TRAP;
462 #endif
463
464 if (!u->first && !u->after_rewind)
465 if (pa_log_ratelimit(PA_LOG_INFO))
466 pa_log_info("Underrun!");
467 }
468
469 #ifdef DEBUG_TIMING
470 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
471 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
472 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
473 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
474 #endif
475
476 if (u->use_tsched) {
477 pa_bool_t reset_not_before = TRUE;
478
479 if (!u->first && !u->after_rewind) {
480 if (underrun || left_to_play < u->watermark_inc_threshold)
481 increase_watermark(u);
482 else if (left_to_play > u->watermark_dec_threshold) {
483 reset_not_before = FALSE;
484
485 /* We decrease the watermark only if we have actually
486 * been woken up by a timeout. If something else woke
487 * us up it's too easy to fulfill the deadlines... */
488
489 if (on_timeout)
490 decrease_watermark(u);
491 }
492 }
493
494 if (reset_not_before)
495 u->watermark_dec_not_before = 0;
496 }
497
498 return left_to_play;
499 }
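/* Example, again assuming a 384000 byte (2s) buffer: if snd_pcm_avail()
 * reports 380160 writable bytes, then 3840 bytes -- 20ms at 48 kHz S16LE
 * stereo -- are still queued for playback. An avail value larger than the
 * buffer itself means the hardware already ran dry. */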
500
501 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
502 pa_bool_t work_done = FALSE;
503 pa_usec_t max_sleep_usec = 0, process_usec = 0;
504 size_t left_to_play;
505 unsigned j = 0;
506
507 pa_assert(u);
508 pa_sink_assert_ref(u->sink);
509
510 if (u->use_tsched)
511 hw_sleep_time(u, &max_sleep_usec, &process_usec);
512
513 for (;;) {
514 snd_pcm_sframes_t n;
515 size_t n_bytes;
516 int r;
517 pa_bool_t after_avail = TRUE;
518
519 /* First we determine how many samples are missing to fill the
520 * buffer up to 100% */
521
522 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
523
524 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
525 continue;
526
527 return r;
528 }
529
530 n_bytes = (size_t) n * u->frame_size;
531
532 #ifdef DEBUG_TIMING
533 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
534 #endif
535
536 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
537 on_timeout = FALSE;
538
539 if (u->use_tsched)
540
541 /* We won't fill up the playback buffer before at least
542 * half the sleep time is over because otherwise we might
543 * ask for more data from the clients than they expect. We
544 * need to guarantee that clients only have to keep around
545 * a single hw buffer length. */
546
547 if (!polled &&
548 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
549 #ifdef DEBUG_TIMING
550 pa_log_debug("Not filling up, because too early.");
551 #endif
552 break;
553 }
554
555 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
556
557 if (polled)
558 PA_ONCE_BEGIN {
559 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
560 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
561 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
562 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
563 pa_strnull(dn));
564 pa_xfree(dn);
565 } PA_ONCE_END;
566
567 #ifdef DEBUG_TIMING
568 pa_log_debug("Not filling up, because not necessary.");
569 #endif
570 break;
571 }
572
573
574 if (++j > 10) {
575 #ifdef DEBUG_TIMING
576 pa_log_debug("Not filling up, because already too many iterations.");
577 #endif
578
579 break;
580 }
581
582 n_bytes -= u->hwbuf_unused;
583 polled = FALSE;
584
585 #ifdef DEBUG_TIMING
586 pa_log_debug("Filling up");
587 #endif
588
589 for (;;) {
590 pa_memchunk chunk;
591 void *p;
592 int err;
593 const snd_pcm_channel_area_t *areas;
594 snd_pcm_uframes_t offset, frames;
595 snd_pcm_sframes_t sframes;
596
597 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
598 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
599
600 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
601
602 if (!after_avail && err == -EAGAIN)
603 break;
604
605 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
606 continue;
607
608 return r;
609 }
610
611 /* Make sure that if these memblocks need to be copied they will fit into one slot */
612 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
613 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
614
615 if (!after_avail && frames == 0)
616 break;
617
618 pa_assert(frames > 0);
619 after_avail = FALSE;
620
621 /* Check these are multiples of 8 bit */
622 pa_assert((areas[0].first & 7) == 0);
623 pa_assert((areas[0].step & 7) == 0);
624
625 /* We assume a single interleaved memory buffer */
626 pa_assert((areas[0].first >> 3) == 0);
627 pa_assert((areas[0].step >> 3) == u->frame_size);
628
629 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
630
631 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
632 chunk.length = pa_memblock_get_length(chunk.memblock);
633 chunk.index = 0;
634
635 pa_sink_render_into_full(u->sink, &chunk);
636 pa_memblock_unref_fixed(chunk.memblock);
637
638 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
639
640 if (!after_avail && (int) sframes == -EAGAIN)
641 break;
642
643 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
644 continue;
645
646 return r;
647 }
648
649 work_done = TRUE;
650
651 u->write_count += frames * u->frame_size;
652 u->since_start += frames * u->frame_size;
653
654 #ifdef DEBUG_TIMING
655 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
656 #endif
657
658 if ((size_t) frames * u->frame_size >= n_bytes)
659 break;
660
661 n_bytes -= (size_t) frames * u->frame_size;
662 }
663 }
664
665 if (u->use_tsched) {
666 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
667 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
668
669 if (*sleep_usec > process_usec)
670 *sleep_usec -= process_usec;
671 else
672 *sleep_usec = 0;
673 } else
674 *sleep_usec = 0;
675
676 return work_done ? 1 : 0;
677 }
678
679 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
680 pa_bool_t work_done = FALSE;
681 pa_usec_t max_sleep_usec = 0, process_usec = 0;
682 size_t left_to_play;
683 unsigned j = 0;
684
685 pa_assert(u);
686 pa_sink_assert_ref(u->sink);
687
688 if (u->use_tsched)
689 hw_sleep_time(u, &max_sleep_usec, &process_usec);
690
691 for (;;) {
692 snd_pcm_sframes_t n;
693 size_t n_bytes;
694 int r;
695 pa_bool_t after_avail = TRUE;
696
697 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
698
699 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
700 continue;
701
702 return r;
703 }
704
705 n_bytes = (size_t) n * u->frame_size;
706 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
707 on_timeout = FALSE;
708
709 if (u->use_tsched)
710
711 /* We won't fill up the playback buffer before at least
712 * half the sleep time is over because otherwise we might
713 * ask for more data from the clients than they expect. We
714 * need to guarantee that clients only have to keep around
715 * a single hw buffer length. */
716
717 if (!polled &&
718 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
719 break;
720
721 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
722
723 if (polled)
724 PA_ONCE_BEGIN {
725 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
726 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
727 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
728 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
729 pa_strnull(dn));
730 pa_xfree(dn);
731 } PA_ONCE_END;
732
733 break;
734 }
735
736 if (++j > 10) {
737 #ifdef DEBUG_TIMING
738 pa_log_debug("Not filling up, because already too many iterations.");
739 #endif
740
741 break;
742 }
743
744 n_bytes -= u->hwbuf_unused;
745 polled = FALSE;
746
747 for (;;) {
748 snd_pcm_sframes_t frames;
749 void *p;
750
751 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
752
753 if (u->memchunk.length <= 0)
754 pa_sink_render(u->sink, n_bytes, &u->memchunk);
755
756 pa_assert(u->memchunk.length > 0);
757
758 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
759
760 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
761 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
762
763 p = pa_memblock_acquire(u->memchunk.memblock);
764 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
765 pa_memblock_release(u->memchunk.memblock);
766
767 if (PA_UNLIKELY(frames < 0)) {
768
769 if (!after_avail && (int) frames == -EAGAIN)
770 break;
771
772 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
773 continue;
774
775 return r;
776 }
777
778 if (!after_avail && frames == 0)
779 break;
780
781 pa_assert(frames > 0);
782 after_avail = FALSE;
783
784 u->memchunk.index += (size_t) frames * u->frame_size;
785 u->memchunk.length -= (size_t) frames * u->frame_size;
786
787 if (u->memchunk.length <= 0) {
788 pa_memblock_unref(u->memchunk.memblock);
789 pa_memchunk_reset(&u->memchunk);
790 }
791
792 work_done = TRUE;
793
794 u->write_count += frames * u->frame_size;
795 u->since_start += frames * u->frame_size;
796
797 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
798
799 if ((size_t) frames * u->frame_size >= n_bytes)
800 break;
801
802 n_bytes -= (size_t) frames * u->frame_size;
803 }
804 }
805
806 if (u->use_tsched) {
807 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
808 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
809
810 if (*sleep_usec > process_usec)
811 *sleep_usec -= process_usec;
812 else
813 *sleep_usec = 0;
814 } else
815 *sleep_usec = 0;
816
817 return work_done ? 1 : 0;
818 }
819
820 static void update_smoother(struct userdata *u) {
821 snd_pcm_sframes_t delay = 0;
822 int64_t position;
823 int err;
824 pa_usec_t now1 = 0, now2;
825 snd_pcm_status_t *status;
826
827 snd_pcm_status_alloca(&status);
828
829 pa_assert(u);
830 pa_assert(u->pcm_handle);
831
832 /* Let's update the time smoother */
833
834 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
835 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
836 return;
837 }
838
839 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
840 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
841 else {
842 snd_htimestamp_t htstamp = { 0, 0 };
843 snd_pcm_status_get_htstamp(status, &htstamp);
844 now1 = pa_timespec_load(&htstamp);
845 }
846
847 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
848 if (now1 <= 0)
849 now1 = pa_rtclock_now();
850
851 /* Check whether the time since the last update exceeds the interval */
852 if (u->last_smoother_update > 0)
853 if (u->last_smoother_update + u->smoother_interval > now1)
854 return;
855
856 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
857
858 if (PA_UNLIKELY(position < 0))
859 position = 0;
860
861 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
862
863 pa_smoother_put(u->smoother, now1, now2);
864
865 u->last_smoother_update = now1;
866 /* exponentially increase the update interval up to the MAX limit */
867 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
868 }
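/* The second smoother coordinate is the card's read pointer reconstructed
 * in bytes as write_count - delay * frame_size: e.g. after writing 1000000
 * bytes with 9600 frames (38400 bytes at 4 bytes/frame) still queued, the
 * card has consumed 961600 bytes worth of audio. */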
869
870 static pa_usec_t sink_get_latency(struct userdata *u) {
871 pa_usec_t r;
872 int64_t delay;
873 pa_usec_t now1, now2;
874
875 pa_assert(u);
876
877 now1 = pa_rtclock_now();
878 now2 = pa_smoother_get(u->smoother, now1);
879
880 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
881
882 r = delay >= 0 ? (pa_usec_t) delay : 0;
883
884 if (u->memchunk.memblock)
885 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
886
887 return r;
888 }
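/* I.e. latency is "bytes we wrote" minus "bytes the card has played" in the
 * smoothed time domain, plus whatever is still parked in u->memchunk and
 * not yet handed to ALSA (non-mmap mode only). */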
889
890 static int build_pollfd(struct userdata *u) {
891 pa_assert(u);
892 pa_assert(u->pcm_handle);
893
894 if (u->alsa_rtpoll_item)
895 pa_rtpoll_item_free(u->alsa_rtpoll_item);
896
897 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
898 return -1;
899
900 return 0;
901 }
902
903 /* Called from IO context */
904 static int suspend(struct userdata *u) {
905 pa_assert(u);
906 pa_assert(u->pcm_handle);
907
908 pa_smoother_pause(u->smoother, pa_rtclock_now());
909
910 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
911 * take awfully long with our long buffer sizes today. */
912 snd_pcm_close(u->pcm_handle);
913 u->pcm_handle = NULL;
914
915 if (u->alsa_rtpoll_item) {
916 pa_rtpoll_item_free(u->alsa_rtpoll_item);
917 u->alsa_rtpoll_item = NULL;
918 }
919
920 /* We reset max_rewind/max_request here to make sure that while we
921 * are suspended the old max_request/max_rewind values set before
922 * the suspend can influence the per-stream buffer of newly
923 * created streams, without their requirements having any
924 * influence on them. */
925 pa_sink_set_max_rewind_within_thread(u->sink, 0);
926 pa_sink_set_max_request_within_thread(u->sink, 0);
927
928 pa_log_info("Device suspended...");
929
930 return 0;
931 }
932
933 /* Called from IO context */
934 static int update_sw_params(struct userdata *u) {
935 snd_pcm_uframes_t avail_min;
936 int err;
937
938 pa_assert(u);
939
940 /* Use the full buffer if no one asked us for anything specific */
941 u->hwbuf_unused = 0;
942
943 if (u->use_tsched) {
944 pa_usec_t latency;
945
946 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
947 size_t b;
948
949 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
950
951 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
952
953 /* We need at least one sample in our buffer */
954
955 if (PA_UNLIKELY(b < u->frame_size))
956 b = u->frame_size;
957
958 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
959 }
960
961 fix_min_sleep_wakeup(u);
962 fix_tsched_watermark(u);
963 }
964
965 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
966
967 /* We need at least one frame in the used part of the buffer */
968 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
969
970 if (u->use_tsched) {
971 pa_usec_t sleep_usec, process_usec;
972
973 hw_sleep_time(u, &sleep_usec, &process_usec);
974 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
975 }
976
977 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
978
979 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
980 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
981 return err;
982 }
983
984 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
985 if (pa_alsa_pcm_is_hw(u->pcm_handle))
986 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
987 else {
988 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
989 pa_sink_set_max_rewind_within_thread(u->sink, 0);
990 }
991
992 return 0;
993 }
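/* Example: with hwbuf_unused = 0, avail_min starts at a single frame; in
 * tsched mode with a sleep time of 1980ms at 48 kHz another 95040 frames
 * are added, so ALSA only wakes us shortly before the buffer runs dry and
 * the rtpoll timer remains the primary wakeup source. */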
994
995 /* Called from IO Context on unsuspend or from main thread when creating sink */
996 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
997 pa_bool_t in_thread)
998 {
999 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
1000 &u->sink->sample_spec);
1001
1002 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1003 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1004
1005 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1006 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1007
1008 fix_min_sleep_wakeup(u);
1009 fix_tsched_watermark(u);
1010
1011 if (in_thread)
1012 pa_sink_set_latency_range_within_thread(u->sink,
1013 u->min_latency_ref,
1014 pa_bytes_to_usec(u->hwbuf_size, ss));
1015 else {
1016 pa_sink_set_latency_range(u->sink,
1017 0,
1018 pa_bytes_to_usec(u->hwbuf_size, ss));
1019
1020 /* Work around the assert in pa_sink_set_latency_range_within_thread():
1021 keep track of min_latency and reuse it when
1022 this routine is called from IO context */
1023 u->min_latency_ref = u->sink->thread_info.min_latency;
1024 }
1025
1026 pa_log_info("Time scheduling watermark is %0.2fms",
1027 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
1028 }
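/* Note the round trip bytes -> usec -> bytes above: it re-expresses the
 * watermark in the sink's current sample spec, so a watermark carried over
 * from sink creation keeps its duration even if the spec has changed in
 * the meantime. */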
1029
1030 /* Called from IO context */
1031 static int unsuspend(struct userdata *u) {
1032 pa_sample_spec ss;
1033 int err;
1034 pa_bool_t b, d;
1035 snd_pcm_uframes_t period_size, buffer_size;
1036 char *device_name = NULL;
1037
1038 pa_assert(u);
1039 pa_assert(!u->pcm_handle);
1040
1041 pa_log_info("Trying resume...");
1042
1043 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1044 /* Need to open device in NONAUDIO mode */
1045 int len = strlen(u->device_name) + 8;
1046
1047 device_name = pa_xmalloc(len);
1048 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1049 }
1050
1051 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1052 SND_PCM_NONBLOCK|
1053 SND_PCM_NO_AUTO_RESAMPLE|
1054 SND_PCM_NO_AUTO_CHANNELS|
1055 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1056 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1057 goto fail;
1058 }
1059
1060 ss = u->sink->sample_spec;
1061 period_size = u->fragment_size / u->frame_size;
1062 buffer_size = u->hwbuf_size / u->frame_size;
1063 b = u->use_mmap;
1064 d = u->use_tsched;
1065
1066 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1067 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1068 goto fail;
1069 }
1070
1071 if (b != u->use_mmap || d != u->use_tsched) {
1072 pa_log_warn("Resume failed, couldn't get original access mode.");
1073 goto fail;
1074 }
1075
1076 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1077 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1078 goto fail;
1079 }
1080
1081 if (period_size*u->frame_size != u->fragment_size ||
1082 buffer_size*u->frame_size != u->hwbuf_size) {
1083 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1084 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1085 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1086 goto fail;
1087 }
1088
1089 if (update_sw_params(u) < 0)
1090 goto fail;
1091
1092 if (build_pollfd(u) < 0)
1093 goto fail;
1094
1095 u->write_count = 0;
1096 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1097 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1098 u->last_smoother_update = 0;
1099
1100 u->first = TRUE;
1101 u->since_start = 0;
1102
1103 /* reset the watermark to the value defined when sink was created */
1104 if (u->use_tsched)
1105 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
1106
1107 pa_log_info("Resumed successfully...");
1108
1109 pa_xfree(device_name);
1110 return 0;
1111
1112 fail:
1113 if (u->pcm_handle) {
1114 snd_pcm_close(u->pcm_handle);
1115 u->pcm_handle = NULL;
1116 }
1117
1118 pa_xfree(device_name);
1119
1120 return -PA_ERR_IO;
1121 }
1122
1123 /* Called from IO context */
1124 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1125 struct userdata *u = PA_SINK(o)->userdata;
1126
1127 switch (code) {
1128
1129 case PA_SINK_MESSAGE_GET_LATENCY: {
1130 pa_usec_t r = 0;
1131
1132 if (u->pcm_handle)
1133 r = sink_get_latency(u);
1134
1135 *((pa_usec_t*) data) = r;
1136
1137 return 0;
1138 }
1139
1140 case PA_SINK_MESSAGE_SET_STATE:
1141
1142 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1143
1144 case PA_SINK_SUSPENDED: {
1145 int r;
1146
1147 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1148
1149 if ((r = suspend(u)) < 0)
1150 return r;
1151
1152 break;
1153 }
1154
1155 case PA_SINK_IDLE:
1156 case PA_SINK_RUNNING: {
1157 int r;
1158
1159 if (u->sink->thread_info.state == PA_SINK_INIT) {
1160 if (build_pollfd(u) < 0)
1161 return -PA_ERR_IO;
1162 }
1163
1164 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1165 if ((r = unsuspend(u)) < 0)
1166 return r;
1167 }
1168
1169 break;
1170 }
1171
1172 case PA_SINK_UNLINKED:
1173 case PA_SINK_INIT:
1174 case PA_SINK_INVALID_STATE:
1175 ;
1176 }
1177
1178 break;
1179 }
1180
1181 return pa_sink_process_msg(o, code, data, offset, chunk);
1182 }
1183
1184 /* Called from main context */
1185 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1186 pa_sink_state_t old_state;
1187 struct userdata *u;
1188
1189 pa_sink_assert_ref(s);
1190 pa_assert_se(u = s->userdata);
1191
1192 old_state = pa_sink_get_state(u->sink);
1193
1194 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1195 reserve_done(u);
1196 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1197 if (reserve_init(u, u->device_name) < 0)
1198 return -PA_ERR_BUSY;
1199
1200 return 0;
1201 }
1202
1203 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1204 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1205
1206 pa_assert(u);
1207 pa_assert(u->mixer_handle);
1208
1209 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1210 return 0;
1211
1212 if (!PA_SINK_IS_LINKED(u->sink->state))
1213 return 0;
1214
1215 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1216 return 0;
1217
1218 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1219 pa_sink_get_volume(u->sink, TRUE);
1220 pa_sink_get_mute(u->sink, TRUE);
1221 }
1222
1223 return 0;
1224 }
1225
1226 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1227 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1228
1229 pa_assert(u);
1230 pa_assert(u->mixer_handle);
1231
1232 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1233 return 0;
1234
1235 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1236 return 0;
1237
1238 if (mask & SND_CTL_EVENT_MASK_VALUE)
1239 pa_sink_update_volume_and_mute(u->sink);
1240
1241 return 0;
1242 }
1243
1244 static void sink_get_volume_cb(pa_sink *s) {
1245 struct userdata *u = s->userdata;
1246 pa_cvolume r;
1247 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1248
1249 pa_assert(u);
1250 pa_assert(u->mixer_path);
1251 pa_assert(u->mixer_handle);
1252
1253 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1254 return;
1255
1256 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1257 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1258
1259 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1260
1261 if (u->mixer_path->has_dB) {
1262 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1263
1264 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1265 }
1266
1267 if (pa_cvolume_equal(&u->hardware_volume, &r))
1268 return;
1269
1270 s->real_volume = u->hardware_volume = r;
1271
1272 /* Hmm, so the hardware volume changed, let's reset our software volume */
1273 if (u->mixer_path->has_dB)
1274 pa_sink_set_soft_volume(s, NULL);
1275 }
1276
1277 static void sink_set_volume_cb(pa_sink *s) {
1278 struct userdata *u = s->userdata;
1279 pa_cvolume r;
1280 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1281 pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1282
1283 pa_assert(u);
1284 pa_assert(u->mixer_path);
1285 pa_assert(u->mixer_handle);
1286
1287 /* Shift up by the base volume */
1288 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1289
1290 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1291 return;
1292
1293 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1294 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1295
1296 u->hardware_volume = r;
1297
1298 if (u->mixer_path->has_dB) {
1299 pa_cvolume new_soft_volume;
1300 pa_bool_t accurate_enough;
1301 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1302
1303 /* Match exactly what the user requested by software */
1304 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1305
1306 /* If the adjustment to do in software is only minimal we
1307 * can skip it. That saves us CPU at the expense of a bit of
1308 * accuracy */
1309 accurate_enough =
1310 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1311 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1312
1313 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1314 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1315 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1316 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1317 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1318 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1319 pa_yes_no(accurate_enough));
1320 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1321
1322 if (!accurate_enough)
1323 s->soft_volume = new_soft_volume;
1324
1325 } else {
1326 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1327
1328 /* We can't match exactly what the user requested, hence let's
1329 * at least tell the user about it */
1330
1331 s->real_volume = r;
1332 }
1333 }
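/* A worked example for the dB path above: if the user asks for -20dB but
 * the mixer can only step to -18dB, pa_sw_cvolume_divide() leaves -2dB in
 * new_soft_volume, so hardware and software attenuation together match the
 * request exactly. */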
1334
1335 static void sink_write_volume_cb(pa_sink *s) {
1336 struct userdata *u = s->userdata;
1337 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1338
1339 pa_assert(u);
1340 pa_assert(u->mixer_path);
1341 pa_assert(u->mixer_handle);
1342 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1343
1344 /* Shift up by the base volume */
1345 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1346
1347 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1348 pa_log_error("Writing HW volume failed");
1349 else {
1350 pa_cvolume tmp_vol;
1351 pa_bool_t accurate_enough;
1352
1353 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1354 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1355
1356 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1357 accurate_enough =
1358 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1359 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1360
1361 if (!accurate_enough) {
1362 union {
1363 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1364 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1365 } vol;
1366
1367 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1368 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1369 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1370 pa_log_debug(" in dB: %s (request) != %s",
1371 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1372 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1373 }
1374 }
1375 }
1376
1377 static void sink_get_mute_cb(pa_sink *s) {
1378 struct userdata *u = s->userdata;
1379 pa_bool_t b;
1380
1381 pa_assert(u);
1382 pa_assert(u->mixer_path);
1383 pa_assert(u->mixer_handle);
1384
1385 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1386 return;
1387
1388 s->muted = b;
1389 }
1390
1391 static void sink_set_mute_cb(pa_sink *s) {
1392 struct userdata *u = s->userdata;
1393
1394 pa_assert(u);
1395 pa_assert(u->mixer_path);
1396 pa_assert(u->mixer_handle);
1397
1398 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1399 }
1400
1401 static void mixer_volume_init(struct userdata *u) {
1402 pa_assert(u);
1403
1404 if (!u->mixer_path->has_volume) {
1405 pa_sink_set_write_volume_callback(u->sink, NULL);
1406 pa_sink_set_get_volume_callback(u->sink, NULL);
1407 pa_sink_set_set_volume_callback(u->sink, NULL);
1408
1409 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1410 } else {
1411 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1412 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1413
1414 if (u->mixer_path->has_dB && u->deferred_volume) {
1415 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1416 pa_log_info("Successfully enabled synchronous volume.");
1417 } else
1418 pa_sink_set_write_volume_callback(u->sink, NULL);
1419
1420 if (u->mixer_path->has_dB) {
1421 pa_sink_enable_decibel_volume(u->sink, TRUE);
1422 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1423
1424 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1425 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1426
1427 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1428 } else {
1429 pa_sink_enable_decibel_volume(u->sink, FALSE);
1430 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1431
1432 u->sink->base_volume = PA_VOLUME_NORM;
1433 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1434 }
1435
1436 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1437 }
1438
1439 if (!u->mixer_path->has_mute) {
1440 pa_sink_set_get_mute_callback(u->sink, NULL);
1441 pa_sink_set_set_mute_callback(u->sink, NULL);
1442 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1443 } else {
1444 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1445 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1446 pa_log_info("Using hardware mute control.");
1447 }
1448 }
1449
1450 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1451 struct userdata *u = s->userdata;
1452 pa_alsa_port_data *data;
1453
1454 pa_assert(u);
1455 pa_assert(p);
1456 pa_assert(u->mixer_handle);
1457
1458 data = PA_DEVICE_PORT_DATA(p);
1459
1460 pa_assert_se(u->mixer_path = data->path);
1461 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1462
1463 mixer_volume_init(u);
1464
1465 if (data->setting)
1466 pa_alsa_setting_select(data->setting, u->mixer_handle);
1467
1468 if (s->set_mute)
1469 s->set_mute(s);
1470 if (s->set_volume)
1471 s->set_volume(s);
1472
1473 return 0;
1474 }
1475
1476 static void sink_update_requested_latency_cb(pa_sink *s) {
1477 struct userdata *u = s->userdata;
1478 size_t before;
1479 pa_assert(u);
1480 pa_assert(u->use_tsched); /* only when timer scheduling is used
1481 * can we dynamically adjust the
1482 * latency */
1483
1484 if (!u->pcm_handle)
1485 return;
1486
1487 before = u->hwbuf_unused;
1488 update_sw_params(u);
1489
1490 /* Let's check whether we now use only a smaller part of the
1491 buffer than before. If so, we need to make sure that subsequent
1492 rewinds are relative to the new maximum fill level and not to the
1493 current fill level. Thus, let's do a full rewind once, to clear
1494 things up. */
1495
1496 if (u->hwbuf_unused > before) {
1497 pa_log_debug("Requesting rewind due to latency change.");
1498 pa_sink_request_rewind(s, (size_t) -1);
1499 }
1500 }
1501
1502 static pa_idxset* sink_get_formats(pa_sink *s) {
1503 struct userdata *u = s->userdata;
1504 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1505 pa_format_info *f;
1506 uint32_t idx;
1507
1508 pa_assert(u);
1509
1510 PA_IDXSET_FOREACH(f, u->formats, idx) {
1511 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1512 }
1513
1514 return ret;
1515 }
1516
1517 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1518 struct userdata *u = s->userdata;
1519 pa_format_info *f, *g;
1520 uint32_t idx, n;
1521
1522 pa_assert(u);
1523
1524 /* FIXME: also validate sample rates against what the device supports */
1525 PA_IDXSET_FOREACH(f, formats, idx) {
1526 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1527 /* EAC3 cannot be sent over S/PDIF */
1528 return FALSE;
1529 }
1530
1531 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
1532 u->formats = pa_idxset_new(NULL, NULL);
1533
1534 /* Note: the logic below won't apply if we're using software encoding.
1535 * This is fine for now since we don't support that via the passthrough
1536 * framework, but this must be changed if we do. */
1537
1538 /* Count how many sample rates we support */
1539 for (idx = 0, n = 0; u->rates[idx]; idx++)
1540 n++;
1541
1542 /* First insert non-PCM formats since we prefer those. */
1543 PA_IDXSET_FOREACH(f, formats, idx) {
1544 if (!pa_format_info_is_pcm(f)) {
1545 g = pa_format_info_copy(f);
1546 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1547 pa_idxset_put(u->formats, g, NULL);
1548 }
1549 }
1550
1551 /* Now add any PCM formats */
1552 PA_IDXSET_FOREACH(f, formats, idx) {
1553 if (pa_format_info_is_pcm(f)) {
1554 /* We don't set rates here since we'll just tack on a resampler for
1555 * unsupported rates */
1556 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1557 }
1558 }
1559
1560 return TRUE;
1561 }
1562
1563 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
1564 {
1565 struct userdata *u = s->userdata;
1566 int i;
1567 pa_bool_t supported = FALSE;
1568
1569 pa_assert(u);
1570
1571 for (i = 0; u->rates[i]; i++) {
1572 if (u->rates[i] == rate) {
1573 supported = TRUE;
1574 break;
1575 }
1576 }
1577
1578 if (!supported) {
1579 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1580 return FALSE;
1581 }
1582
1583 if (!PA_SINK_IS_OPENED(s->state)) {
1584 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1585 u->sink->sample_spec.rate = rate;
1586 return TRUE;
1587 }
1588
1589 return FALSE;
1590 }
1591
1592 static int process_rewind(struct userdata *u) {
1593 snd_pcm_sframes_t unused;
1594 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1595 pa_assert(u);
1596
1597 /* Figure out how much we shall rewind and reset the counter */
1598 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1599
1600 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1601
1602 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1603 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1604 return -1;
1605 }
1606
1607 unused_nbytes = (size_t) unused * u->frame_size;
1608
1609 /* Make sure the rewind doesn't go too far; rewinding too much can cause issues with DMAs */
1610 unused_nbytes += u->rewind_safeguard;
1611
1612 if (u->hwbuf_size > unused_nbytes)
1613 limit_nbytes = u->hwbuf_size - unused_nbytes;
1614 else
1615 limit_nbytes = 0;
1616
1617 if (rewind_nbytes > limit_nbytes)
1618 rewind_nbytes = limit_nbytes;
1619
1620 if (rewind_nbytes > 0) {
1621 snd_pcm_sframes_t in_frames, out_frames;
1622
1623 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1624
1625 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1626 pa_log_debug("before: %lu", (unsigned long) in_frames);
1627 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1628 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1629 if (try_recover(u, "process_rewind", out_frames) < 0)
1630 return -1;
1631 out_frames = 0;
1632 }
1633
1634 pa_log_debug("after: %lu", (unsigned long) out_frames);
1635
1636 rewind_nbytes = (size_t) out_frames * u->frame_size;
1637
1638 if (rewind_nbytes <= 0)
1639 pa_log_info("Tried rewind, but was apparently not possible.");
1640 else {
1641 u->write_count -= rewind_nbytes;
1642 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1643 pa_sink_process_rewind(u->sink, rewind_nbytes);
1644
1645 u->after_rewind = TRUE;
1646 return 0;
1647 }
1648 } else
1649 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1650
1651 pa_sink_process_rewind(u->sink, 0);
1652 return 0;
1653 }
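/* Example: with a 384000 byte buffer, 300000 writable bytes and the default
 * 256 byte safeguard, at most 384000 - 300256 = 83744 bytes may be rewound,
 * no matter how much the sink requested. */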
1654
1655 static void thread_func(void *userdata) {
1656 struct userdata *u = userdata;
1657 unsigned short revents = 0;
1658
1659 pa_assert(u);
1660
1661 pa_log_debug("Thread starting up");
1662
1663 if (u->core->realtime_scheduling)
1664 pa_make_realtime(u->core->realtime_priority);
1665
1666 pa_thread_mq_install(&u->thread_mq);
1667
1668 for (;;) {
1669 int ret;
1670 pa_usec_t rtpoll_sleep = 0;
1671
1672 #ifdef DEBUG_TIMING
1673 pa_log_debug("Loop");
1674 #endif
1675
1676 /* Render some data and write it to the dsp */
1677 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1678 int work_done;
1679 pa_usec_t sleep_usec = 0;
1680 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1681
1682 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1683 if (process_rewind(u) < 0)
1684 goto fail;
1685
1686 if (u->use_mmap)
1687 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1688 else
1689 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1690
1691 if (work_done < 0)
1692 goto fail;
1693
1694 /* pa_log_debug("work_done = %i", work_done); */
1695
1696 if (work_done) {
1697
1698 if (u->first) {
1699 pa_log_info("Starting playback.");
1700 snd_pcm_start(u->pcm_handle);
1701
1702 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1703
1704 u->first = FALSE;
1705 }
1706
1707 update_smoother(u);
1708 }
1709
1710 if (u->use_tsched) {
1711 pa_usec_t cusec;
1712
1713 if (u->since_start <= u->hwbuf_size) {
1714
1715 /* USB devices on ALSA seem to hit a buffer
1716 * underrun during the first iterations much
1717 * quicker than we calculate here, probably due to
1718 * the transport latency. To accommodate that
1719 * we artificially decrease the sleep time until
1720 * we have filled the buffer at least once
1721 * completely.*/
1722
1723 if (pa_log_ratelimit(PA_LOG_DEBUG))
1724 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1725 sleep_usec /= 2;
1726 }
1727
1728 /* OK, the playback buffer is now full, let's
1729 * calculate when to wake up next */
1730 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1731
1732 /* Convert from the sound card time domain to the
1733 * system time domain */
1734 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1735
1736 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1737
1738 /* We don't trust the conversion, so we wake up whatever comes first */
1739 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1740 }
1741
1742 u->after_rewind = FALSE;
1743
1744 }
1745
1746 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1747 pa_usec_t volume_sleep;
1748 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1749 if (volume_sleep > 0) {
1750 if (rtpoll_sleep > 0)
1751 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1752 else
1753 rtpoll_sleep = volume_sleep;
1754 }
1755 }
1756
1757 if (rtpoll_sleep > 0)
1758 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1759 else
1760 pa_rtpoll_set_timer_disabled(u->rtpoll);
1761
1762 /* Hmm, nothing to do. Let's sleep */
1763 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1764 goto fail;
1765
1766 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1767 pa_sink_volume_change_apply(u->sink, NULL);
1768
1769 if (ret == 0)
1770 goto finish;
1771
1772 /* Tell ALSA about this and process its response */
1773 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1774 struct pollfd *pollfd;
1775 int err;
1776 unsigned n;
1777
1778 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1779
1780 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1781 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1782 goto fail;
1783 }
1784
1785 if (revents & ~POLLOUT) {
1786 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1787 goto fail;
1788
1789 u->first = TRUE;
1790 u->since_start = 0;
1791 revents = 0;
1792 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1793 pa_log_debug("Wakeup from ALSA!");
1794
1795 } else
1796 revents = 0;
1797 }
1798
1799 fail:
1800 /* If this was not a regular exit from the loop we have to continue
1801 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1802 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1803 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1804
1805 finish:
1806 pa_log_debug("Thread shutting down");
1807 }
1808
1809 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1810 const char *n;
1811 char *t;
1812
1813 pa_assert(data);
1814 pa_assert(ma);
1815 pa_assert(device_name);
1816
1817 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1818 pa_sink_new_data_set_name(data, n);
1819 data->namereg_fail = TRUE;
1820 return;
1821 }
1822
1823 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1824 data->namereg_fail = TRUE;
1825 else {
1826 n = device_id ? device_id : device_name;
1827 data->namereg_fail = FALSE;
1828 }
1829
1830 if (mapping)
1831 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1832 else
1833 t = pa_sprintf_malloc("alsa_output.%s", n);
1834
1835 pa_sink_new_data_set_name(data, t);
1836 pa_xfree(t);
1837 }
1838
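/* Locate a mixer for the PCM. If the user supplied an explicit element
 * (via the control= module argument, e.g. control=PCM) we synthesize a
 * single path for it; otherwise we probe a full path set for the
 * mapping. */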
1839 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1840
1841 if (!mapping && !element)
1842 return;
1843
1844 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1845 pa_log_info("Failed to find a working mixer device.");
1846 return;
1847 }
1848
1849 if (element) {
1850
1851 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1852 goto fail;
1853
1854 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1855 goto fail;
1856
1857 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1858 pa_alsa_path_dump(u->mixer_path);
1859 } else {
1860
1861 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT, u->paths_dir)))
1862 goto fail;
1863
1864 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1865 }
1866
1867 return;
1868
1869 fail:
1870
1871 if (u->mixer_path_set) {
1872 pa_alsa_path_set_free(u->mixer_path_set);
1873 u->mixer_path_set = NULL;
1874 } else if (u->mixer_path) {
1875 pa_alsa_path_free(u->mixer_path);
1876 u->mixer_path = NULL;
1877 }
1878
1879 if (u->mixer_handle) {
1880 snd_mixer_close(u->mixer_handle);
1881 u->mixer_handle = NULL;
1882 }
1883 }
1884
1885
1886 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1887 pa_bool_t need_mixer_callback = FALSE;
1888
1889 pa_assert(u);
1890
1891 if (!u->mixer_handle)
1892 return 0;
1893
1894 if (u->sink->active_port) {
1895 pa_alsa_port_data *data;
1896
1897 /* We have a list of supported paths, so let's activate the
1898 * one that has been chosen as active */
1899
1900 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1901 u->mixer_path = data->path;
1902
1903 pa_alsa_path_select(data->path, u->mixer_handle);
1904
1905 if (data->setting)
1906 pa_alsa_setting_select(data->setting, u->mixer_handle);
1907
1908 } else {
1909
1910 if (!u->mixer_path && u->mixer_path_set)
1911 u->mixer_path = u->mixer_path_set->paths;
1912
1913 if (u->mixer_path) {
1914 /* Hmm, we have only a single path, so let's activate it */
1915
1916 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1917
1918 if (u->mixer_path->settings)
1919 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1920 } else
1921 return 0;
1922 }
1923
1924 mixer_volume_init(u);
1925
1926 /* Will we need to register callbacks? */
1927 if (u->mixer_path_set && u->mixer_path_set->paths) {
1928 pa_alsa_path *p;
1929
1930 PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1931 if (p->has_volume || p->has_mute)
1932 need_mixer_callback = TRUE;
1933 }
1934 }
1935 else if (u->mixer_path)
1936 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1937
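/* Volume/mute changes made behind our back (e.g. with alsamixer) are
 * picked up either in the I/O thread via the rtpoll (deferred volume)
 * or on the main loop via an fd list. */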
1938 if (need_mixer_callback) {
1939 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1940 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1941 u->mixer_pd = pa_alsa_mixer_pdata_new();
1942 mixer_callback = io_mixer_callback;
1943
1944 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1945 pa_log("Failed to initialize file descriptor monitoring");
1946 return -1;
1947 }
1948 } else {
1949 u->mixer_fdl = pa_alsa_fdlist_new();
1950 mixer_callback = ctl_mixer_callback;
1951
1952 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1953 pa_log("Failed to initialize file descriptor monitoring");
1954 return -1;
1955 }
1956 }
1957
1958 if (u->mixer_path_set)
1959 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1960 else
1961 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1962 }
1963
1964 return 0;
1965 }
1966
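/* Entry point used by module-alsa-sink and module-alsa-card. A typical
 * invocation from a module's pa__init() looks roughly like this (a
 * sketch, not verbatim from module-alsa-sink.c):
 *
 * pa_modargs *ma = pa_modargs_new(m->argument, valid_modargs);
 * pa_sink *sink = pa_alsa_sink_new(m, ma, __FILE__, NULL, NULL);
 *
 * card and mapping are non-NULL when we are created for a card
 * profile. */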
1967 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1968
1969 struct userdata *u = NULL;
1970 const char *dev_id = NULL;
1971 pa_sample_spec ss;
1972 uint32_t alternate_sample_rate;
1973 pa_channel_map map;
1974 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1975 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1976 size_t frame_size;
1977 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
1978 pa_sink_new_data data;
1979 pa_alsa_profile_set *profile_set = NULL;
1980
1981 pa_assert(m);
1982 pa_assert(ma);
1983
1984 ss = m->core->default_sample_spec;
1985 map = m->core->default_channel_map;
1986 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1987 pa_log("Failed to parse sample specification and channel map");
1988 goto fail;
1989 }
1990
1991 alternate_sample_rate = m->core->alternate_sample_rate;
1992 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1993 pa_log("Failed to parse alternate sample rate");
1994 goto fail;
1995 }
1996
1997 frame_size = pa_frame_size(&ss);
1998
1999 nfrags = m->core->default_n_fragments;
2000 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2001 if (frag_size == 0)
2002 frag_size = (uint32_t) frame_size;
2003 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2004 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2005
2006 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2007 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2008 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2009 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2010 pa_log("Failed to parse buffer metrics");
2011 goto fail;
2012 }
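/* These map to module arguments, e.g. (sizes in bytes):
 * load-module module-alsa-sink device=hw:0 fragments=4 fragment_size=4800
 * The tsched_* values only matter when timer-based scheduling is in
 * use. */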
2013
2014 buffer_size = nfrags * frag_size;
2015
2016 period_frames = frag_size/frame_size;
2017 buffer_frames = buffer_size/frame_size;
2018 tsched_frames = tsched_size/frame_size;
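/* Worked example, assuming S16LE stereo at 48kHz (frame_size = 4 bytes)
 * and the usual server defaults of 4 fragments of 25ms each:
 * frag_size = 4800 bytes, buffer_size = 19200 bytes, period_frames =
 * 1200, buffer_frames = 4800. */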
2019
2020 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2021 pa_log("Failed to parse mmap argument.");
2022 goto fail;
2023 }
2024
2025 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2026 pa_log("Failed to parse tsched argument.");
2027 goto fail;
2028 }
2029
2030 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2031 pa_log("Failed to parse ignore_dB argument.");
2032 goto fail;
2033 }
2034
2035 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2036 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2037 pa_log("Failed to parse rewind_safeguard argument");
2038 goto fail;
2039 }
2040
2041 deferred_volume = m->core->deferred_volume;
2042 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2043 pa_log("Failed to parse deferred_volume argument.");
2044 goto fail;
2045 }
2046
2047 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2048 pa_log("Failed to parse fixed_latency_range argument.");
2049 goto fail;
2050 }
2051
2052 use_tsched = pa_alsa_may_tsched(use_tsched);
2053
2054 u = pa_xnew0(struct userdata, 1);
2055 u->core = m->core;
2056 u->module = m;
2057 u->use_mmap = use_mmap;
2058 u->use_tsched = use_tsched;
2059 u->deferred_volume = deferred_volume;
2060 u->fixed_latency_range = fixed_latency_range;
2061 u->first = TRUE;
2062 u->rewind_safeguard = rewind_safeguard;
2063 u->rtpoll = pa_rtpoll_new();
2064 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2065
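/* The smoother estimates the sound card clock relative to the system
 * clock. The arguments are (cf. pulsecore/time-smoother.h): 1s adjust
 * time, 10s history window, monotonic, smoothing enabled, a minimum
 * history of 5 data points, the current time as epoch, and created in
 * paused state until the device actually starts running. */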
2066 u->smoother = pa_smoother_new(
2067 SMOOTHER_ADJUST_USEC,
2068 SMOOTHER_WINDOW_USEC,
2069 TRUE,
2070 TRUE,
2071 5,
2072 pa_rtclock_now(),
2073 TRUE);
2074 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2075
2076 dev_id = pa_modargs_get_value(
2077 ma, "device_id",
2078 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2079
2080 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2081
2082 if (reserve_init(u, dev_id) < 0)
2083 goto fail;
2084
2085 if (reserve_monitor_init(u, dev_id) < 0)
2086 goto fail;
2087
2088 b = use_mmap;
2089 d = use_tsched;
2090
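/* Three ways to open the device: by device_id with a known mapping, by
 * device_id with automatic profile probing, or by a plain ALSA device
 * string. On return, b and d tell us whether mmap and timer-based
 * scheduling actually turned out to be usable. */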
2091 if (mapping) {
2092
2093 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2094 pa_log("device_id= not set");
2095 goto fail;
2096 }
2097
2098 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2099 dev_id,
2100 &u->device_name,
2101 &ss, &map,
2102 SND_PCM_STREAM_PLAYBACK,
2103 &period_frames, &buffer_frames, tsched_frames,
2104 &b, &d, mapping)))
2105 goto fail;
2106
2107 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2108
2109 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2110 goto fail;
2111
2112 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2113 dev_id,
2114 &u->device_name,
2115 &ss, &map,
2116 SND_PCM_STREAM_PLAYBACK,
2117 &period_frames, &buffer_frames, tsched_frames,
2118 &b, &d, profile_set, &mapping)))
2119 goto fail;
2120
2121 } else {
2122
2123 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2124 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2125 &u->device_name,
2126 &ss, &map,
2127 SND_PCM_STREAM_PLAYBACK,
2128 &period_frames, &buffer_frames, tsched_frames,
2129 &b, &d, FALSE)))
2130 goto fail;
2131 }
2132
2133 pa_assert(u->device_name);
2134 pa_log_info("Successfully opened device %s.", u->device_name);
2135
2136 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2137 pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2138 goto fail;
2139 }
2140
2141 if (mapping)
2142 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2143
2144 if (use_mmap && !b) {
2145 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2146 u->use_mmap = use_mmap = FALSE;
2147 }
2148
2149 if (use_tsched && (!b || !d)) {
2150 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2151 u->use_tsched = use_tsched = FALSE;
2152 }
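/* Timer-based scheduling is only kept if both mmap and tsched survived
 * the open; otherwise we fall back to classic interrupt-driven
 * operation. */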
2153
2154 if (u->use_mmap)
2155 pa_log_info("Successfully enabled mmap() mode.");
2156
2157 if (u->use_tsched) {
2158 pa_log_info("Successfully enabled timer-based scheduling mode.");
2159
2160 if (u->fixed_latency_range)
2161 pa_log_info("Disabling latency range changes on underrun");
2162 }
2163
2164 if (is_iec958(u) || is_hdmi(u))
2165 set_formats = TRUE;
2166
2167 u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
2168 if (!u->rates) {
2169 pa_log_error("Failed to find any supported sample rates.");
2170 goto fail;
2171 }
2172
2173 /* ALSA might tweak the sample spec, so recalculate the frame size */
2174 frame_size = pa_frame_size(&ss);
2175
2176 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2177
2178 pa_sink_new_data_init(&data);
2179 data.driver = driver;
2180 data.module = m;
2181 data.card = card;
2182 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2183
2184 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2185 * variable instead of using &data.namereg_fail directly, because
2186 * data.namereg_fail is a bitfield and taking the address of a bitfield
2187 * variable is impossible. */
2188 namereg_fail = data.namereg_fail;
2189 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2190 pa_log("Failed to parse namereg_fail argument.");
2191 pa_sink_new_data_done(&data);
2192 goto fail;
2193 }
2194 data.namereg_fail = namereg_fail;
2195
2196 pa_sink_new_data_set_sample_spec(&data, &ss);
2197 pa_sink_new_data_set_channel_map(&data, &map);
2198 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2199
2200 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2201 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2202 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2203 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2204 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2205
2206 if (mapping) {
2207 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2208 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2209 }
2210
2211 pa_alsa_init_description(data.proplist);
2212
2213 if (u->control_device)
2214 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2215
2216 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2217 pa_log("Invalid properties");
2218 pa_sink_new_data_done(&data);
2219 goto fail;
2220 }
2221
2222 if (u->mixer_path_set)
2223 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
2224
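/* PA_SINK_DYNAMIC_LATENCY is only advertised with timer-based
 * scheduling, since only then can the watermark (and hence the latency)
 * be renegotiated at runtime; PA_SINK_SET_FORMATS is reserved for
 * IEC958/HDMI devices. */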
2225 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2226 (set_formats ? PA_SINK_SET_FORMATS : 0));
2227 pa_sink_new_data_done(&data);
2228
2229 if (!u->sink) {
2230 pa_log("Failed to create sink object");
2231 goto fail;
2232 }
2233
2234 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2235 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2236 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2237 goto fail;
2238 }
2239
2240 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2241 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2242 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2243 goto fail;
2244 }
2245
2246 u->sink->parent.process_msg = sink_process_msg;
2247 if (u->use_tsched)
2248 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2249 u->sink->set_state = sink_set_state_cb;
2250 u->sink->set_port = sink_set_port_cb;
2251 if (u->sink->alternate_sample_rate)
2252 u->sink->update_rate = sink_update_rate_cb;
2253 u->sink->userdata = u;
2254
2255 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2256 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2257
2258 u->frame_size = frame_size;
2259 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2260 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2261 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2262
2263 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2264 (double) u->hwbuf_size / (double) u->fragment_size,
2265 (long unsigned) u->fragment_size,
2266 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2267 (long unsigned) u->hwbuf_size,
2268 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2269
2270 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2271 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2272 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2273 else {
2274 pa_log_info("Disabling rewind for device %s", u->device_name);
2275 pa_sink_set_max_rewind(u->sink, 0);
2276 }
2277
2278 if (u->use_tsched) {
2279 u->tsched_watermark_ref = tsched_watermark;
2280 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2281 } else
2282 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2283
2284 reserve_update(u);
2285
2286 if (update_sw_params(u) < 0)
2287 goto fail;
2288
2289 if (setup_mixer(u, ignore_dB) < 0)
2290 goto fail;
2291
2292 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2293
2294 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2295 pa_log("Failed to create thread.");
2296 goto fail;
2297 }
2298
2299 /* Get initial mixer settings */
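/* If a volume/mute was supplied with the sink-new data (e.g. restored
 * by module-device-restore), push it to the hardware; otherwise read
 * the current hardware state back into the sink. */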
2300 if (data.volume_is_set) {
2301 if (u->sink->set_volume)
2302 u->sink->set_volume(u->sink);
2303 } else {
2304 if (u->sink->get_volume)
2305 u->sink->get_volume(u->sink);
2306 }
2307
2308 if (data.muted_is_set) {
2309 if (u->sink->set_mute)
2310 u->sink->set_mute(u->sink);
2311 } else {
2312 if (u->sink->get_mute)
2313 u->sink->get_mute(u->sink);
2314 }
2315
2316 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2317 u->sink->write_volume(u->sink);
2318
2319 if (set_formats) {
2320 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2321 pa_format_info *format;
2322
2323 /* To start with, we only support PCM formats. Other formats may be added
2324 * with pa_sink_set_formats(). */
2325 format = pa_format_info_new();
2326 format->encoding = PA_ENCODING_PCM;
2327 u->formats = pa_idxset_new(NULL, NULL);
2328 pa_idxset_put(u->formats, format, NULL);
2329
2330 u->sink->get_formats = sink_get_formats;
2331 u->sink->set_formats = sink_set_formats;
2332 }
2333
2334 pa_sink_put(u->sink);
2335
2336 if (profile_set)
2337 pa_alsa_profile_set_free(profile_set);
2338
2339 return u->sink;
2340
2341 fail:
2342
2343 if (u)
2344 userdata_free(u);
2345
2346 if (profile_set)
2347 pa_alsa_profile_set_free(profile_set);
2348
2349 return NULL;
2350 }
2351
2352 static void userdata_free(struct userdata *u) {
2353 pa_assert(u);
2354
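/* Tear-down order matters: unlink the sink first so no new data is
 * routed to it, then shut down the I/O thread, and only then drop the
 * remaining references and close the PCM and mixer. */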
2355 if (u->sink)
2356 pa_sink_unlink(u->sink);
2357
2358 if (u->thread) {
2359 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2360 pa_thread_free(u->thread);
2361 }
2362
2363 pa_thread_mq_done(&u->thread_mq);
2364
2365 if (u->sink)
2366 pa_sink_unref(u->sink);
2367
2368 if (u->memchunk.memblock)
2369 pa_memblock_unref(u->memchunk.memblock);
2370
2371 if (u->mixer_pd)
2372 pa_alsa_mixer_pdata_free(u->mixer_pd);
2373
2374 if (u->alsa_rtpoll_item)
2375 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2376
2377 if (u->rtpoll)
2378 pa_rtpoll_free(u->rtpoll);
2379
2380 if (u->pcm_handle) {
2381 snd_pcm_drop(u->pcm_handle);
2382 snd_pcm_close(u->pcm_handle);
2383 }
2384
2385 if (u->mixer_fdl)
2386 pa_alsa_fdlist_free(u->mixer_fdl);
2387
2388 if (u->mixer_path_set)
2389 pa_alsa_path_set_free(u->mixer_path_set);
2390 else if (u->mixer_path)
2391 pa_alsa_path_free(u->mixer_path);
2392
2393 if (u->mixer_handle)
2394 snd_mixer_close(u->mixer_handle);
2395
2396 if (u->smoother)
2397 pa_smoother_free(u->smoother);
2398
2399 if (u->formats)
2400 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2401
2402 if (u->rates)
2403 pa_xfree(u->rates);
2404
2405 reserve_done(u);
2406 monitor_done(u);
2407
2408 pa_xfree(u->device_name);
2409 pa_xfree(u->control_device);
2410 pa_xfree(u->paths_dir);
2411 pa_xfree(u);
2412 }
2413
2414 void pa_alsa_sink_free(pa_sink *s) {
2415 struct userdata *u;
2416
2417 pa_sink_assert_ref(s);
2418 pa_assert_se(u = s->userdata);
2419
2420 userdata_free(u);
2421 }