1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39 #include <pulse/internal.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/module.h>
44 #include <pulsecore/memchunk.h>
45 #include <pulsecore/sink.h>
46 #include <pulsecore/modargs.h>
47 #include <pulsecore/core-rtclock.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/sample-util.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/macro.h>
52 #include <pulsecore/thread.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to recheck whether things are good again */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
80 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty */
80
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
86
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the differences exceed this level */
88
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms; depending on channels/rate/sample format this may amount to more than the 256 bytes above */
91
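/* Illustrative sketch (disabled, not part of the build): how the constants
 * above relate for a typical 48 kHz / S16 / stereo stream. The sample spec
 * and the helper name are assumptions for demonstration only;
 * pa_frame_size(), pa_bytes_to_usec() and pa_usec_to_bytes() are the
 * pulse/sample.h conversion helpers used throughout this file. */
#if 0
static void tsched_constants_example(void) {
    pa_sample_spec ss;
    ss.format = PA_SAMPLE_S16LE;
    ss.rate = 48000;
    ss.channels = 2;

    /* One frame is 2 bytes/sample * 2 channels = 4 bytes, so the 256 byte
     * rewind safeguard covers 64 frames = 64/48000 s ~= 1.33 ms, matching
     * DEFAULT_REWIND_SAFEGUARD_USEC. */
    pa_log_debug("frame size: %zu bytes", pa_frame_size(&ss));
    pa_log_debug("256 bytes = %llu usec",
                 (unsigned long long) pa_bytes_to_usec(DEFAULT_REWIND_SAFEGUARD_BYTES, &ss));

    /* The 2 s default buffer and the 20 ms watermark expressed in bytes:
     * 384000 and 3840 respectively for this sample spec. */
    pa_log_debug("buffer: %zu bytes, watermark: %zu bytes",
                 pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss),
                 pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss));
}
#endif
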
92 struct userdata {
93 pa_core *core;
94 pa_module *module;
95 pa_sink *sink;
96
97 pa_thread *thread;
98 pa_thread_mq thread_mq;
99 pa_rtpoll *rtpoll;
100
101 snd_pcm_t *pcm_handle;
102
103 char *paths_dir;
104 pa_alsa_fdlist *mixer_fdl;
105 pa_alsa_mixer_pdata *mixer_pd;
106 snd_mixer_t *mixer_handle;
107 pa_alsa_path_set *mixer_path_set;
108 pa_alsa_path *mixer_path;
109
110 pa_cvolume hardware_volume;
111
112 unsigned int *rates;
113
114 size_t
115 frame_size,
116 fragment_size,
117 hwbuf_size,
118 tsched_watermark,
119 tsched_watermark_ref,
120 hwbuf_unused,
121 min_sleep,
122 min_wakeup,
123 watermark_inc_step,
124 watermark_dec_step,
125 watermark_inc_threshold,
126 watermark_dec_threshold,
127 rewind_safeguard;
128
129 pa_usec_t watermark_dec_not_before;
130 pa_usec_t min_latency_ref;
131
132 pa_memchunk memchunk;
133
134 char *device_name; /* name of the PCM device */
135 char *control_device; /* name of the control device */
136
137 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
138
139 pa_bool_t first, after_rewind;
140
141 pa_rtpoll_item *alsa_rtpoll_item;
142
143 pa_smoother *smoother;
144 uint64_t write_count;
145 uint64_t since_start;
146 pa_usec_t smoother_interval;
147 pa_usec_t last_smoother_update;
148
149 pa_idxset *formats;
150
151 pa_reserve_wrapper *reserve;
152 pa_hook_slot *reserve_slot;
153 pa_reserve_monitor_wrapper *monitor;
154 pa_hook_slot *monitor_slot;
155 };
156
157 static void userdata_free(struct userdata *u);
158
159 /* FIXME: Is there a better way to do this than device names? */
160 static pa_bool_t is_iec958(struct userdata *u) {
161 return (strncmp("iec958", u->device_name, 6) == 0);
162 }
163
164 static pa_bool_t is_hdmi(struct userdata *u) {
165 return (strncmp("hdmi", u->device_name, 4) == 0);
166 }
167
168 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
169 pa_assert(r);
170 pa_assert(u);
171
172 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
173 return PA_HOOK_CANCEL;
174
175 return PA_HOOK_OK;
176 }
177
178 static void reserve_done(struct userdata *u) {
179 pa_assert(u);
180
181 if (u->reserve_slot) {
182 pa_hook_slot_free(u->reserve_slot);
183 u->reserve_slot = NULL;
184 }
185
186 if (u->reserve) {
187 pa_reserve_wrapper_unref(u->reserve);
188 u->reserve = NULL;
189 }
190 }
191
192 static void reserve_update(struct userdata *u) {
193 const char *description;
194 pa_assert(u);
195
196 if (!u->sink || !u->reserve)
197 return;
198
199 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
200 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
201 }
202
203 static int reserve_init(struct userdata *u, const char *dname) {
204 char *rname;
205
206 pa_assert(u);
207 pa_assert(dname);
208
209 if (u->reserve)
210 return 0;
211
212 if (pa_in_system_mode())
213 return 0;
214
215 if (!(rname = pa_alsa_get_reserve_name(dname)))
216 return 0;
217
218 /* We are resuming, try to lock the device */
219 u->reserve = pa_reserve_wrapper_get(u->core, rname);
220 pa_xfree(rname);
221
222 if (!(u->reserve))
223 return -1;
224
225 reserve_update(u);
226
227 pa_assert(!u->reserve_slot);
228 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
229
230 return 0;
231 }
232
233 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
234 pa_bool_t b;
235
236 pa_assert(w);
237 pa_assert(u);
238
239 b = PA_PTR_TO_UINT(busy) && !u->reserve;
240
241 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
242 return PA_HOOK_OK;
243 }
244
245 static void monitor_done(struct userdata *u) {
246 pa_assert(u);
247
248 if (u->monitor_slot) {
249 pa_hook_slot_free(u->monitor_slot);
250 u->monitor_slot = NULL;
251 }
252
253 if (u->monitor) {
254 pa_reserve_monitor_wrapper_unref(u->monitor);
255 u->monitor = NULL;
256 }
257 }
258
259 static int reserve_monitor_init(struct userdata *u, const char *dname) {
260 char *rname;
261
262 pa_assert(u);
263 pa_assert(dname);
264
265 if (pa_in_system_mode())
266 return 0;
267
268 if (!(rname = pa_alsa_get_reserve_name(dname)))
269 return 0;
270
271 /* Set up a monitor watching whether the device is busy */
272 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
273 pa_xfree(rname);
274
275 if (!(u->monitor))
276 return -1;
277
278 pa_assert(!u->monitor_slot);
279 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
280
281 return 0;
282 }
283
284 static void fix_min_sleep_wakeup(struct userdata *u) {
285 size_t max_use, max_use_2;
286
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 max_use = u->hwbuf_size - u->hwbuf_unused;
291 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
292
293 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
294 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
295
296 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
297 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
298 }
299
300 static void fix_tsched_watermark(struct userdata *u) {
301 size_t max_use;
302 pa_assert(u);
303 pa_assert(u->use_tsched);
304
305 max_use = u->hwbuf_size - u->hwbuf_unused;
306
307 if (u->tsched_watermark > max_use - u->min_sleep)
308 u->tsched_watermark = max_use - u->min_sleep;
309
310 if (u->tsched_watermark < u->min_wakeup)
311 u->tsched_watermark = u->min_wakeup;
312 }
313
314 static void increase_watermark(struct userdata *u) {
315 size_t old_watermark;
316 pa_usec_t old_min_latency, new_min_latency;
317
318 pa_assert(u);
319 pa_assert(u->use_tsched);
320
321 /* First, just try to increase the watermark */
322 old_watermark = u->tsched_watermark;
323 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
324 fix_tsched_watermark(u);
325
326 if (old_watermark != u->tsched_watermark) {
327 pa_log_info("Increasing wakeup watermark to %0.2f ms",
328 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
329 return;
330 }
331
332 /* Hmm, we cannot increase the watermark any further, hence let's
333 raise the latency, unless doing so was disabled in
334 configuration */
335 if (u->fixed_latency_range)
336 return;
337
338 old_min_latency = u->sink->thread_info.min_latency;
339 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
340 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
341
342 if (old_min_latency != new_min_latency) {
343 pa_log_info("Increasing minimal latency to %0.2f ms",
344 (double) new_min_latency / PA_USEC_PER_MSEC);
345
346 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
347 }
348
349 /* When we reach this we're officially fucked! */
350 }
351
352 static void decrease_watermark(struct userdata *u) {
353 size_t old_watermark;
354 pa_usec_t now;
355
356 pa_assert(u);
357 pa_assert(u->use_tsched);
358
359 now = pa_rtclock_now();
360
361 if (u->watermark_dec_not_before <= 0)
362 goto restart;
363
364 if (u->watermark_dec_not_before > now)
365 return;
366
367 old_watermark = u->tsched_watermark;
368
369 if (u->tsched_watermark < u->watermark_dec_step)
370 u->tsched_watermark = u->tsched_watermark / 2;
371 else
372 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
373
374 fix_tsched_watermark(u);
375
376 if (old_watermark != u->tsched_watermark)
377 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
378 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
379
380 /* We don't change the latency range */
381
382 restart:
383 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
384 }
385
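/* Disabled sketch of the adaptation policy implemented by
 * increase_watermark()/decrease_watermark() above: the watermark rises
 * quickly (at most doubling, capped by the inc step) when we underrun, and
 * falls slowly (by at most the dec step, never below half) only after a
 * quiet verification period. The helper names are illustrative. */
#if 0
static size_t watermark_after_underrun(size_t wm, size_t inc_step) {
    /* Same arithmetic as increase_watermark() */
    return PA_MIN(wm * 2, wm + inc_step);
}

static size_t watermark_after_quiet_period(size_t wm, size_t dec_step) {
    /* Same arithmetic as decrease_watermark() */
    if (wm < dec_step)
        return wm / 2;
    return PA_MAX(wm / 2, wm - dec_step);
}
#endif
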
386 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
387 pa_usec_t usec, wm;
388
389 pa_assert(sleep_usec);
390 pa_assert(process_usec);
391
392 pa_assert(u);
393 pa_assert(u->use_tsched);
394
395 usec = pa_sink_get_requested_latency_within_thread(u->sink);
396
397 if (usec == (pa_usec_t) -1)
398 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
399
400 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
401
402 if (wm > usec)
403 wm = usec/2;
404
405 *sleep_usec = usec - wm;
406 *process_usec = wm;
407
408 #ifdef DEBUG_TIMING
409 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
410 (unsigned long) (usec / PA_USEC_PER_MSEC),
411 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
412 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
413 #endif
414 }
415
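/* Worked example (disabled) for hw_sleep_time() with the defaults above: a
 * 2 s requested latency and a 20 ms watermark split into a 1.98 s sleep and
 * a 20 ms processing reserve. The concrete values are assumptions chosen
 * for illustration. */
#if 0
static void hw_sleep_time_example(void) {
    pa_usec_t usec = 2 * PA_USEC_PER_SEC;  /* requested/overall latency */
    pa_usec_t wm = 20 * PA_USEC_PER_MSEC;  /* wakeup watermark */

    if (wm > usec)
        wm = usec/2;                       /* degenerate case, as above */

    pa_log_debug("sleep: %llu usec, process: %llu usec",
                 (unsigned long long) (usec - wm),   /* 1980000 */
                 (unsigned long long) wm);           /* 20000 */
}
#endif
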
416 static int try_recover(struct userdata *u, const char *call, int err) {
417 pa_assert(u);
418 pa_assert(call);
419 pa_assert(err < 0);
420
421 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
422
423 pa_assert(err != -EAGAIN);
424
425 if (err == -EPIPE)
426 pa_log_debug("%s: Buffer underrun!", call);
427
428 if (err == -ESTRPIPE)
429 pa_log_debug("%s: System suspended!", call);
430
431 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
432 pa_log("%s: %s", call, pa_alsa_strerror(err));
433 return -1;
434 }
435
436 u->first = TRUE;
437 u->since_start = 0;
438 return 0;
439 }
440
441 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
442 size_t left_to_play;
443 pa_bool_t underrun = FALSE;
444
445 /* We use <= instead of < for this check here because an underrun
446 * only happens after the last sample was processed, not merely when
447 * it is removed from the buffer. This is particularly important
448 * when block transfer is used. */
449
450 if (n_bytes <= u->hwbuf_size)
451 left_to_play = u->hwbuf_size - n_bytes;
452 else {
453
454 /* We got a dropout. What a mess! */
455 left_to_play = 0;
456 underrun = TRUE;
457
458 #if 0
459 PA_DEBUG_TRAP;
460 #endif
461
462 if (!u->first && !u->after_rewind)
463 if (pa_log_ratelimit(PA_LOG_INFO))
464 pa_log_info("Underrun!");
465 }
466
467 #ifdef DEBUG_TIMING
468 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
469 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
470 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
471 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
472 #endif
473
474 if (u->use_tsched) {
475 pa_bool_t reset_not_before = TRUE;
476
477 if (!u->first && !u->after_rewind) {
478 if (underrun || left_to_play < u->watermark_inc_threshold)
479 increase_watermark(u);
480 else if (left_to_play > u->watermark_dec_threshold) {
481 reset_not_before = FALSE;
482
483 /* We decrease the watermark only if we have actually
484 * been woken up by a timeout. If something else woke
485 * us up it's too easy to fulfill the deadlines... */
486
487 if (on_timeout)
488 decrease_watermark(u);
489 }
490 }
491
492 if (reset_not_before)
493 u->watermark_dec_not_before = 0;
494 }
495
496 return left_to_play;
497 }
498
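/* Disabled sketch of the fill-level bookkeeping done by
 * check_left_to_play(): 'n_bytes' is the writable part of the hw buffer, so
 * what is still queued for playback is its complement; more writable space
 * than the buffer holds means the device already ran dry. The helper name
 * is illustrative. */
#if 0
static size_t left_to_play_example(size_t hwbuf_size, size_t n_bytes, pa_bool_t *underrun) {
    if (n_bytes <= hwbuf_size) {
        *underrun = FALSE;
        return hwbuf_size - n_bytes;
    }

    *underrun = TRUE; /* dropout: the device consumed everything we wrote */
    return 0;
}
#endif
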
499 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
500 pa_bool_t work_done = FALSE;
501 pa_usec_t max_sleep_usec = 0, process_usec = 0;
502 size_t left_to_play;
503 unsigned j = 0;
504
505 pa_assert(u);
506 pa_sink_assert_ref(u->sink);
507
508 if (u->use_tsched)
509 hw_sleep_time(u, &max_sleep_usec, &process_usec);
510
511 for (;;) {
512 snd_pcm_sframes_t n;
513 size_t n_bytes;
514 int r;
515 pa_bool_t after_avail = TRUE;
516
517 /* First we determine how many samples are missing to fill the
518 * buffer up to 100% */
519
520 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
521
522 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
523 continue;
524
525 return r;
526 }
527
528 n_bytes = (size_t) n * u->frame_size;
529
530 #ifdef DEBUG_TIMING
531 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
532 #endif
533
534 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
535 on_timeout = FALSE;
536
537 if (u->use_tsched)
538
539 /* We won't fill up the playback buffer before at least
540 * half the sleep time is over because otherwise we might
541 * ask for more data from the clients than they expect. We
542 * need to guarantee that clients only have to keep around
543 * a single hw buffer length. */
544
545 if (!polled &&
546 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
547 #ifdef DEBUG_TIMING
548 pa_log_debug("Not filling up, because too early.");
549 #endif
550 break;
551 }
552
553 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
554
555 if (polled)
556 PA_ONCE_BEGIN {
557 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
558 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
559 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
560 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
561 pa_strnull(dn));
562 pa_xfree(dn);
563 } PA_ONCE_END;
564
565 #ifdef DEBUG_TIMING
566 pa_log_debug("Not filling up, because not necessary.");
567 #endif
568 break;
569 }
570
571
572 if (++j > 10) {
573 #ifdef DEBUG_TIMING
574 pa_log_debug("Not filling up, because already too many iterations.");
575 #endif
576
577 break;
578 }
579
580 n_bytes -= u->hwbuf_unused;
581 polled = FALSE;
582
583 #ifdef DEBUG_TIMING
584 pa_log_debug("Filling up");
585 #endif
586
587 for (;;) {
588 pa_memchunk chunk;
589 void *p;
590 int err;
591 const snd_pcm_channel_area_t *areas;
592 snd_pcm_uframes_t offset, frames;
593 snd_pcm_sframes_t sframes;
594
595 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
596 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
597
598 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
599
600 if (!after_avail && err == -EAGAIN)
601 break;
602
603 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
604 continue;
605
606 return r;
607 }
608
609 /* Make sure that if these memblocks need to be copied they will fit into one slot */
610 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
611 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
612
613 if (!after_avail && frames == 0)
614 break;
615
616 pa_assert(frames > 0);
617 after_avail = FALSE;
618
619 /* Check these are multiples of 8 bit */
620 pa_assert((areas[0].first & 7) == 0);
621 pa_assert((areas[0].step & 7) == 0);
622
623 /* We assume a single interleaved memory buffer */
624 pa_assert((areas[0].first >> 3) == 0);
625 pa_assert((areas[0].step >> 3) == u->frame_size);
626
627 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
628
629 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
630 chunk.length = pa_memblock_get_length(chunk.memblock);
631 chunk.index = 0;
632
633 pa_sink_render_into_full(u->sink, &chunk);
634 pa_memblock_unref_fixed(chunk.memblock);
635
636 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
637
638 if (!after_avail && (int) sframes == -EAGAIN)
639 break;
640
641 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
642 continue;
643
644 return r;
645 }
646
647 work_done = TRUE;
648
649 u->write_count += frames * u->frame_size;
650 u->since_start += frames * u->frame_size;
651
652 #ifdef DEBUG_TIMING
653 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
654 #endif
655
656 if ((size_t) frames * u->frame_size >= n_bytes)
657 break;
658
659 n_bytes -= (size_t) frames * u->frame_size;
660 }
661 }
662
663 if (u->use_tsched) {
664 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
665 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
666
667 if (*sleep_usec > process_usec)
668 *sleep_usec -= process_usec;
669 else
670 *sleep_usec = 0;
671 } else
672 *sleep_usec = 0;
673
674 return work_done ? 1 : 0;
675 }
676
677 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
678 pa_bool_t work_done = FALSE;
679 pa_usec_t max_sleep_usec = 0, process_usec = 0;
680 size_t left_to_play;
681 unsigned j = 0;
682
683 pa_assert(u);
684 pa_sink_assert_ref(u->sink);
685
686 if (u->use_tsched)
687 hw_sleep_time(u, &max_sleep_usec, &process_usec);
688
689 for (;;) {
690 snd_pcm_sframes_t n;
691 size_t n_bytes;
692 int r;
693 pa_bool_t after_avail = TRUE;
694
695 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
696
697 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
698 continue;
699
700 return r;
701 }
702
703 n_bytes = (size_t) n * u->frame_size;
704 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
705 on_timeout = FALSE;
706
707 if (u->use_tsched)
708
709 /* We won't fill up the playback buffer before at least
710 * half the sleep time is over because otherwise we might
711 * ask for more data from the clients than they expect. We
712 * need to guarantee that clients only have to keep around
713 * a single hw buffer length. */
714
715 if (!polled &&
716 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
717 break;
718
719 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
720
721 if (polled)
722 PA_ONCE_BEGIN {
723 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
724 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
725 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
726 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
727 pa_strnull(dn));
728 pa_xfree(dn);
729 } PA_ONCE_END;
730
731 break;
732 }
733
734 if (++j > 10) {
735 #ifdef DEBUG_TIMING
736 pa_log_debug("Not filling up, because already too many iterations.");
737 #endif
738
739 break;
740 }
741
742 n_bytes -= u->hwbuf_unused;
743 polled = FALSE;
744
745 for (;;) {
746 snd_pcm_sframes_t frames;
747 void *p;
748
749 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
750
751 if (u->memchunk.length <= 0)
752 pa_sink_render(u->sink, n_bytes, &u->memchunk);
753
754 pa_assert(u->memchunk.length > 0);
755
756 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
757
758 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
759 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
760
761 p = pa_memblock_acquire(u->memchunk.memblock);
762 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
763 pa_memblock_release(u->memchunk.memblock);
764
765 if (PA_UNLIKELY(frames < 0)) {
766
767 if (!after_avail && (int) frames == -EAGAIN)
768 break;
769
770 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
771 continue;
772
773 return r;
774 }
775
776 if (!after_avail && frames == 0)
777 break;
778
779 pa_assert(frames > 0);
780 after_avail = FALSE;
781
782 u->memchunk.index += (size_t) frames * u->frame_size;
783 u->memchunk.length -= (size_t) frames * u->frame_size;
784
785 if (u->memchunk.length <= 0) {
786 pa_memblock_unref(u->memchunk.memblock);
787 pa_memchunk_reset(&u->memchunk);
788 }
789
790 work_done = TRUE;
791
792 u->write_count += frames * u->frame_size;
793 u->since_start += frames * u->frame_size;
794
795 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
796
797 if ((size_t) frames * u->frame_size >= n_bytes)
798 break;
799
800 n_bytes -= (size_t) frames * u->frame_size;
801 }
802 }
803
804 if (u->use_tsched) {
805 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
806 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
807
808 if (*sleep_usec > process_usec)
809 *sleep_usec -= process_usec;
810 else
811 *sleep_usec = 0;
812 } else
813 *sleep_usec = 0;
814
815 return work_done ? 1 : 0;
816 }
817
818 static void update_smoother(struct userdata *u) {
819 snd_pcm_sframes_t delay = 0;
820 int64_t position;
821 int err;
822 pa_usec_t now1 = 0, now2;
823 snd_pcm_status_t *status;
824
825 snd_pcm_status_alloca(&status);
826
827 pa_assert(u);
828 pa_assert(u->pcm_handle);
829
830 /* Let's update the time smoother */
831
832 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
833 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
834 return;
835 }
836
837 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
838 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
839 else {
840 snd_htimestamp_t htstamp = { 0, 0 };
841 snd_pcm_status_get_htstamp(status, &htstamp);
842 now1 = pa_timespec_load(&htstamp);
843 }
844
845 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
846 if (now1 <= 0)
847 now1 = pa_rtclock_now();
848
849 /* Check whether the time since the last update exceeds the update interval */
850 if (u->last_smoother_update > 0)
851 if (u->last_smoother_update + u->smoother_interval > now1)
852 return;
853
854 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
855
856 if (PA_UNLIKELY(position < 0))
857 position = 0;
858
859 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
860
861 pa_smoother_put(u->smoother, now1, now2);
862
863 u->last_smoother_update = now1;
864 /* exponentially increase the update interval up to the MAX limit */
865 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
866 }
867
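/* Worked example (disabled): the playback position fed to the smoother is
 * the write counter minus what the device still has queued, converted to
 * bytes. With write_count = 1048576 bytes, delay = 1000 frames and 4-byte
 * frames the card is 4000 bytes behind the write pointer. All numbers are
 * assumptions for illustration. */
#if 0
static int64_t smoother_position_example(void) {
    uint64_t write_count = 1048576;   /* bytes handed to ALSA so far */
    snd_pcm_sframes_t delay = 1000;   /* frames still queued in the hw */
    size_t frame_size = 4;            /* S16 stereo */

    int64_t position = (int64_t) write_count - (int64_t) delay * (int64_t) frame_size;
    return position < 0 ? 0 : position; /* 1044576 */
}
#endif
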
868 static pa_usec_t sink_get_latency(struct userdata *u) {
869 pa_usec_t r;
870 int64_t delay;
871 pa_usec_t now1, now2;
872
873 pa_assert(u);
874
875 now1 = pa_rtclock_now();
876 now2 = pa_smoother_get(u->smoother, now1);
877
878 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
879
880 r = delay >= 0 ? (pa_usec_t) delay : 0;
881
882 if (u->memchunk.memblock)
883 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
884
885 return r;
886 }
887
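/* In other words (illustrative numbers): if we have written 2.000 s worth
 * of audio so far and the smoother estimates that 1.950 s of it has been
 * played back by 'now', the reported sink latency is the 50 ms difference,
 * plus whatever still sits in the partially written memchunk. */
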
888 static int build_pollfd(struct userdata *u) {
889 pa_assert(u);
890 pa_assert(u->pcm_handle);
891
892 if (u->alsa_rtpoll_item)
893 pa_rtpoll_item_free(u->alsa_rtpoll_item);
894
895 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
896 return -1;
897
898 return 0;
899 }
900
901 /* Called from IO context */
902 static int suspend(struct userdata *u) {
903 pa_assert(u);
904 pa_assert(u->pcm_handle);
905
906 pa_smoother_pause(u->smoother, pa_rtclock_now());
907
908 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
909 * take awfully long with our long buffer sizes today. */
910 snd_pcm_close(u->pcm_handle);
911 u->pcm_handle = NULL;
912
913 if (u->alsa_rtpoll_item) {
914 pa_rtpoll_item_free(u->alsa_rtpoll_item);
915 u->alsa_rtpoll_item = NULL;
916 }
917
918 /* We reset max_rewind/max_request here to make sure that the
919 * old max_request/max_rewind values set before the suspend
920 * cannot influence the per-stream buffers of streams created
921 * while we are suspended, and that those streams' requirements
922 * don't feed back into the suspended device either. */
923 pa_sink_set_max_rewind_within_thread(u->sink, 0);
924 pa_sink_set_max_request_within_thread(u->sink, 0);
925
926 pa_log_info("Device suspended...");
927
928 return 0;
929 }
930
931 /* Called from IO context */
932 static int update_sw_params(struct userdata *u) {
933 snd_pcm_uframes_t avail_min;
934 int err;
935
936 pa_assert(u);
937
938 /* Use the full buffer if no one asked us for anything specific */
939 u->hwbuf_unused = 0;
940
941 if (u->use_tsched) {
942 pa_usec_t latency;
943
944 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
945 size_t b;
946
947 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
948
949 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
950
951 /* We need at least one sample in our buffer */
952
953 if (PA_UNLIKELY(b < u->frame_size))
954 b = u->frame_size;
955
956 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
957 }
958
959 fix_min_sleep_wakeup(u);
960 fix_tsched_watermark(u);
961 }
962
963 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
964
965 /* We need at least one frame in the used part of the buffer */
966 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
967
968 if (u->use_tsched) {
969 pa_usec_t sleep_usec, process_usec;
970
971 hw_sleep_time(u, &sleep_usec, &process_usec);
972 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
973 }
974
975 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
976
977 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
978 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
979 return err;
980 }
981
982 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
983 if (pa_alsa_pcm_is_hw(u->pcm_handle))
984 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
985 else {
986 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
987 pa_sink_set_max_rewind_within_thread(u->sink, 0);
988 }
989
990 return 0;
991 }
992
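/* Disabled sketch of the avail_min arithmetic in update_sw_params(): with a
 * 10 ms unused buffer tail avail_min starts at one frame past that tail; in
 * tsched mode the expected sleep time is added on top so ALSA only wakes us
 * when we would want to run anyway. The helper name is illustrative. */
#if 0
static snd_pcm_uframes_t avail_min_example(const pa_sample_spec *ss, size_t hwbuf_unused, pa_usec_t sleep_usec) {
    size_t frame_size = pa_frame_size(ss);
    snd_pcm_uframes_t avail_min = (snd_pcm_uframes_t) hwbuf_unused / frame_size + 1;

    /* tsched only: don't wake up before the planned sleep is over */
    avail_min += pa_usec_to_bytes(sleep_usec, ss) / frame_size;

    return avail_min;
}
#endif
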
993 /* Called from IO Context on unsuspend or from main thread when creating sink */
994 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
995 pa_bool_t in_thread)
996 {
997 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
998 &u->sink->sample_spec);
999
1000 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1001 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1002
1003 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1004 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1005
1006 fix_min_sleep_wakeup(u);
1007 fix_tsched_watermark(u);
1008
1009 if (in_thread)
1010 pa_sink_set_latency_range_within_thread(u->sink,
1011 u->min_latency_ref,
1012 pa_bytes_to_usec(u->hwbuf_size, ss));
1013 else {
1014 pa_sink_set_latency_range(u->sink,
1015 0,
1016 pa_bytes_to_usec(u->hwbuf_size, ss));
1017
1018 /* Work around an assert in pa_sink_set_latency_range_within_thread():
1019 keep track of min_latency and reuse it when
1020 this routine is called from the IO context */
1021 u->min_latency_ref = u->sink->thread_info.min_latency;
1022 }
1023
1024 pa_log_info("Time scheduling watermark is %0.2fms",
1025 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
1026 }
1027
1028 /* Called from IO context */
1029 static int unsuspend(struct userdata *u) {
1030 pa_sample_spec ss;
1031 int err;
1032 pa_bool_t b, d;
1033 snd_pcm_uframes_t period_size, buffer_size;
1034 char *device_name = NULL;
1035
1036 pa_assert(u);
1037 pa_assert(!u->pcm_handle);
1038
1039 pa_log_info("Trying resume...");
1040
1041 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1042 /* Need to open device in NONAUDIO mode */
1043 int len = strlen(u->device_name) + 8;
1044
1045 device_name = pa_xmalloc(len);
1046 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1047 }
1048
1049 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1050 SND_PCM_NONBLOCK|
1051 SND_PCM_NO_AUTO_RESAMPLE|
1052 SND_PCM_NO_AUTO_CHANNELS|
1053 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1054 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1055 goto fail;
1056 }
1057
1058 ss = u->sink->sample_spec;
1059 period_size = u->fragment_size / u->frame_size;
1060 buffer_size = u->hwbuf_size / u->frame_size;
1061 b = u->use_mmap;
1062 d = u->use_tsched;
1063
1064 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1065 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1066 goto fail;
1067 }
1068
1069 if (b != u->use_mmap || d != u->use_tsched) {
1070 pa_log_warn("Resume failed, couldn't get original access mode.");
1071 goto fail;
1072 }
1073
1074 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1075 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1076 goto fail;
1077 }
1078
1079 if (period_size*u->frame_size != u->fragment_size ||
1080 buffer_size*u->frame_size != u->hwbuf_size) {
1081 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1082 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1083 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1084 goto fail;
1085 }
1086
1087 if (update_sw_params(u) < 0)
1088 goto fail;
1089
1090 if (build_pollfd(u) < 0)
1091 goto fail;
1092
1093 u->write_count = 0;
1094 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1095 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1096 u->last_smoother_update = 0;
1097
1098 u->first = TRUE;
1099 u->since_start = 0;
1100
1101 /* reset the watermark to the value defined when the sink was created */
1102 if (u->use_tsched)
1103 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
1104
1105 pa_log_info("Resumed successfully...");
1106
1107 pa_xfree(device_name);
1108 return 0;
1109
1110 fail:
1111 if (u->pcm_handle) {
1112 snd_pcm_close(u->pcm_handle);
1113 u->pcm_handle = NULL;
1114 }
1115
1116 pa_xfree(device_name);
1117
1118 return -PA_ERR_IO;
1119 }
1120
1121 /* Called from IO context */
1122 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1123 struct userdata *u = PA_SINK(o)->userdata;
1124
1125 switch (code) {
1126
1127 case PA_SINK_MESSAGE_GET_LATENCY: {
1128 pa_usec_t r = 0;
1129
1130 if (u->pcm_handle)
1131 r = sink_get_latency(u);
1132
1133 *((pa_usec_t*) data) = r;
1134
1135 return 0;
1136 }
1137
1138 case PA_SINK_MESSAGE_SET_STATE:
1139
1140 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1141
1142 case PA_SINK_SUSPENDED: {
1143 int r;
1144
1145 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1146
1147 if ((r = suspend(u)) < 0)
1148 return r;
1149
1150 break;
1151 }
1152
1153 case PA_SINK_IDLE:
1154 case PA_SINK_RUNNING: {
1155 int r;
1156
1157 if (u->sink->thread_info.state == PA_SINK_INIT) {
1158 if (build_pollfd(u) < 0)
1159 return -PA_ERR_IO;
1160 }
1161
1162 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1163 if ((r = unsuspend(u)) < 0)
1164 return r;
1165 }
1166
1167 break;
1168 }
1169
1170 case PA_SINK_UNLINKED:
1171 case PA_SINK_INIT:
1172 case PA_SINK_INVALID_STATE:
1173 ;
1174 }
1175
1176 break;
1177 }
1178
1179 return pa_sink_process_msg(o, code, data, offset, chunk);
1180 }
1181
1182 /* Called from main context */
1183 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1184 pa_sink_state_t old_state;
1185 struct userdata *u;
1186
1187 pa_sink_assert_ref(s);
1188 pa_assert_se(u = s->userdata);
1189
1190 old_state = pa_sink_get_state(u->sink);
1191
1192 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1193 reserve_done(u);
1194 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1195 if (reserve_init(u, u->device_name) < 0)
1196 return -PA_ERR_BUSY;
1197
1198 return 0;
1199 }
1200
1201 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1202 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1203
1204 pa_assert(u);
1205 pa_assert(u->mixer_handle);
1206
1207 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1208 return 0;
1209
1210 if (!PA_SINK_IS_LINKED(u->sink->state))
1211 return 0;
1212
1213 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1214 return 0;
1215
1216 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1217 pa_sink_get_volume(u->sink, TRUE);
1218 pa_sink_get_mute(u->sink, TRUE);
1219 }
1220
1221 return 0;
1222 }
1223
1224 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1225 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1226
1227 pa_assert(u);
1228 pa_assert(u->mixer_handle);
1229
1230 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1231 return 0;
1232
1233 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1234 return 0;
1235
1236 if (mask & SND_CTL_EVENT_MASK_VALUE)
1237 pa_sink_update_volume_and_mute(u->sink);
1238
1239 return 0;
1240 }
1241
1242 static void sink_get_volume_cb(pa_sink *s) {
1243 struct userdata *u = s->userdata;
1244 pa_cvolume r;
1245 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1246
1247 pa_assert(u);
1248 pa_assert(u->mixer_path);
1249 pa_assert(u->mixer_handle);
1250
1251 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1252 return;
1253
1254 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1255 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1256
1257 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1258
1259 if (u->mixer_path->has_dB) {
1260 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1261
1262 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1263 }
1264
1265 if (pa_cvolume_equal(&u->hardware_volume, &r))
1266 return;
1267
1268 s->real_volume = u->hardware_volume = r;
1269
1270 /* Hmm, so the hardware volume changed, let's reset our software volume */
1271 if (u->mixer_path->has_dB)
1272 pa_sink_set_soft_volume(s, NULL);
1273 }
1274
1275 static void sink_set_volume_cb(pa_sink *s) {
1276 struct userdata *u = s->userdata;
1277 pa_cvolume r;
1278 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1279 pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1280
1281 pa_assert(u);
1282 pa_assert(u->mixer_path);
1283 pa_assert(u->mixer_handle);
1284
1285 /* Shift up by the base volume */
1286 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1287
1288 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1289 return;
1290
1291 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1292 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1293
1294 u->hardware_volume = r;
1295
1296 if (u->mixer_path->has_dB) {
1297 pa_cvolume new_soft_volume;
1298 pa_bool_t accurate_enough;
1299 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1300
1301 /* Match exactly what the user requested by software */
1302 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1303
1304 /* If the adjustment to do in software is only minimal we
1305 * can skip it. That saves us CPU at the expense of a bit of
1306 * accuracy */
1307 accurate_enough =
1308 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1309 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1310
1311 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1312 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1313 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1314 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1315 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1316 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1317 pa_yes_no(accurate_enough));
1318 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1319
1320 if (!accurate_enough)
1321 s->soft_volume = new_soft_volume;
1322
1323 } else {
1324 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1325
1326 /* We can't match exactly what the user requested, hence let's
1327 * at least tell the user about it */
1328
1329 s->real_volume = r;
1330 }
1331 }
1332
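/* Disabled sketch of the hardware/software volume split performed above:
 * the hardware is set as close as its mixer steps allow and the remainder
 * is made up in software, unless it falls within the VOLUME_ACCURACY
 * window. pa_sw_cvolume_divide() and pa_cvolume_min()/pa_cvolume_max() are
 * the pulse/volume.h helpers used by sink_set_volume_cb(); the helper name
 * is illustrative. */
#if 0
static void volume_split_example(const pa_cvolume *requested, const pa_cvolume *hw_actual) {
    pa_cvolume soft;

    /* soft = requested / hw_actual, so that hw * soft == requested */
    pa_sw_cvolume_divide(&soft, requested, hw_actual);

    /* Skip the software correction if it is within +/- VOLUME_ACCURACY */
    if (pa_cvolume_min(&soft) >= (PA_VOLUME_NORM - VOLUME_ACCURACY) &&
        pa_cvolume_max(&soft) <= (PA_VOLUME_NORM + VOLUME_ACCURACY))
        pa_log_debug("Software correction not worth applying");
}
#endif
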
1333 static void sink_write_volume_cb(pa_sink *s) {
1334 struct userdata *u = s->userdata;
1335 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1336
1337 pa_assert(u);
1338 pa_assert(u->mixer_path);
1339 pa_assert(u->mixer_handle);
1340 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1341
1342 /* Shift up by the base volume */
1343 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1344
1345 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1346 pa_log_error("Writing HW volume failed");
1347 else {
1348 pa_cvolume tmp_vol;
1349 pa_bool_t accurate_enough;
1350
1351 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1352 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1353
1354 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1355 accurate_enough =
1356 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1357 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1358
1359 if (!accurate_enough) {
1360 union {
1361 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1362 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1363 } vol;
1364
1365 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1366 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1367 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1368 pa_log_debug(" in dB: %s (request) != %s",
1369 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1370 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1371 }
1372 }
1373 }
1374
1375 static void sink_get_mute_cb(pa_sink *s) {
1376 struct userdata *u = s->userdata;
1377 pa_bool_t b;
1378
1379 pa_assert(u);
1380 pa_assert(u->mixer_path);
1381 pa_assert(u->mixer_handle);
1382
1383 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1384 return;
1385
1386 s->muted = b;
1387 }
1388
1389 static void sink_set_mute_cb(pa_sink *s) {
1390 struct userdata *u = s->userdata;
1391
1392 pa_assert(u);
1393 pa_assert(u->mixer_path);
1394 pa_assert(u->mixer_handle);
1395
1396 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1397 }
1398
1399 static void mixer_volume_init(struct userdata *u) {
1400 pa_assert(u);
1401
1402 if (!u->mixer_path->has_volume) {
1403 pa_sink_set_write_volume_callback(u->sink, NULL);
1404 pa_sink_set_get_volume_callback(u->sink, NULL);
1405 pa_sink_set_set_volume_callback(u->sink, NULL);
1406
1407 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1408 } else {
1409 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1410 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1411
1412 if (u->mixer_path->has_dB && u->deferred_volume) {
1413 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1414 pa_log_info("Successfully enabled deferred volume.");
1415 } else
1416 pa_sink_set_write_volume_callback(u->sink, NULL);
1417
1418 if (u->mixer_path->has_dB) {
1419 pa_sink_enable_decibel_volume(u->sink, TRUE);
1420 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1421
1422 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1423 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1424
1425 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1426 } else {
1427 pa_sink_enable_decibel_volume(u->sink, FALSE);
1428 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1429
1430 u->sink->base_volume = PA_VOLUME_NORM;
1431 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1432 }
1433
1434 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1435 }
1436
1437 if (!u->mixer_path->has_mute) {
1438 pa_sink_set_get_mute_callback(u->sink, NULL);
1439 pa_sink_set_set_mute_callback(u->sink, NULL);
1440 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1441 } else {
1442 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1443 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1444 pa_log_info("Using hardware mute control.");
1445 }
1446 }
1447
1448 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1449 struct userdata *u = s->userdata;
1450 pa_alsa_port_data *data;
1451
1452 pa_assert(u);
1453 pa_assert(p);
1454 pa_assert(u->mixer_handle);
1455
1456 data = PA_DEVICE_PORT_DATA(p);
1457
1458 pa_assert_se(u->mixer_path = data->path);
1459 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1460
1461 mixer_volume_init(u);
1462
1463 if (data->setting)
1464 pa_alsa_setting_select(data->setting, u->mixer_handle);
1465
1466 if (s->set_mute)
1467 s->set_mute(s);
1468 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
1469 if (s->write_volume)
1470 s->write_volume(s);
1471 } else {
1472 if (s->set_volume)
1473 s->set_volume(s);
1474 }
1475
1476 return 0;
1477 }
1478
1479 static void sink_update_requested_latency_cb(pa_sink *s) {
1480 struct userdata *u = s->userdata;
1481 size_t before;
1482 pa_assert(u);
1483 pa_assert(u->use_tsched); /* only when timer scheduling is used
1484 * can we dynamically adjust the
1485 * latency */
1486
1487 if (!u->pcm_handle)
1488 return;
1489
1490 before = u->hwbuf_unused;
1491 update_sw_params(u);
1492
1493 /* Let's check whether we now use only a smaller part of the
1494 buffer than before. If so, we need to make sure that subsequent
1495 rewinds are relative to the new maximum fill level and not to the
1496 current fill level. Thus, let's do a full rewind once, to clear
1497 things up. */
1498
1499 if (u->hwbuf_unused > before) {
1500 pa_log_debug("Requesting rewind due to latency change.");
1501 pa_sink_request_rewind(s, (size_t) -1);
1502 }
1503 }
1504
1505 static pa_idxset* sink_get_formats(pa_sink *s) {
1506 struct userdata *u = s->userdata;
1507 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1508 pa_format_info *f;
1509 uint32_t idx;
1510
1511 pa_assert(u);
1512
1513 PA_IDXSET_FOREACH(f, u->formats, idx) {
1514 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1515 }
1516
1517 return ret;
1518 }
1519
1520 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1521 struct userdata *u = s->userdata;
1522 pa_format_info *f, *g;
1523 uint32_t idx, n;
1524
1525 pa_assert(u);
1526
1527 /* FIXME: also validate sample rates against what the device supports */
1528 PA_IDXSET_FOREACH(f, formats, idx) {
1529 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1530 /* EAC3 cannot be sent over S/PDIF */
1531 return FALSE;
1532 }
1533
1534 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
1535 u->formats = pa_idxset_new(NULL, NULL);
1536
1537 /* Note: the logic below won't apply if we're using software encoding.
1538 * This is fine for now since we don't support that via the passthrough
1539 * framework, but this must be changed if we do. */
1540
1541 /* Count how many sample rates we support */
1542 for (idx = 0, n = 0; u->rates[idx]; idx++)
1543 n++;
1544
1545 /* First insert non-PCM formats since we prefer those. */
1546 PA_IDXSET_FOREACH(f, formats, idx) {
1547 if (!pa_format_info_is_pcm(f)) {
1548 g = pa_format_info_copy(f);
1549 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1550 pa_idxset_put(u->formats, g, NULL);
1551 }
1552 }
1553
1554 /* Now add any PCM formats */
1555 PA_IDXSET_FOREACH(f, formats, idx) {
1556 if (pa_format_info_is_pcm(f)) {
1557 /* We don't set rates here since we'll just tack on a resampler for
1558 * unsupported rates */
1559 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1560 }
1561 }
1562
1563 return TRUE;
1564 }
1565
1566 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
1567 {
1568 struct userdata *u = s->userdata;
1569 int i;
1570 pa_bool_t supported = FALSE;
1571
1572 pa_assert(u);
1573
1574 for (i = 0; u->rates[i]; i++) {
1575 if (u->rates[i] == rate) {
1576 supported = TRUE;
1577 break;
1578 }
1579 }
1580
1581 if (!supported) {
1582 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1583 return FALSE;
1584 }
1585
1586 if (!PA_SINK_IS_OPENED(s->state)) {
1587 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1588 u->sink->sample_spec.rate = rate;
1589 return TRUE;
1590 }
1591
1592 return FALSE;
1593 }
1594
1595 static int process_rewind(struct userdata *u) {
1596 snd_pcm_sframes_t unused;
1597 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1598 pa_assert(u);
1599
1600 /* Figure out how much we shall rewind and reset the counter */
1601 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1602
1603 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1604
1605 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1606 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1607 return -1;
1608 }
1609
1610 unused_nbytes = (size_t) unused * u->frame_size;
1611
1612 /* Make sure the rewind doesn't go too far; rewinding too much can cause issues with DMAs */
1613 unused_nbytes += u->rewind_safeguard;
1614
1615 if (u->hwbuf_size > unused_nbytes)
1616 limit_nbytes = u->hwbuf_size - unused_nbytes;
1617 else
1618 limit_nbytes = 0;
1619
1620 if (rewind_nbytes > limit_nbytes)
1621 rewind_nbytes = limit_nbytes;
1622
1623 if (rewind_nbytes > 0) {
1624 snd_pcm_sframes_t in_frames, out_frames;
1625
1626 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1627
1628 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1629 pa_log_debug("before: %lu", (unsigned long) in_frames);
1630 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1631 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1632 if (try_recover(u, "process_rewind", out_frames) < 0)
1633 return -1;
1634 out_frames = 0;
1635 }
1636
1637 pa_log_debug("after: %lu", (unsigned long) out_frames);
1638
1639 rewind_nbytes = (size_t) out_frames * u->frame_size;
1640
1641 if (rewind_nbytes <= 0)
1642 pa_log_info("Tried rewind, but was apparently not possible.");
1643 else {
1644 u->write_count -= rewind_nbytes;
1645 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1646 pa_sink_process_rewind(u->sink, rewind_nbytes);
1647
1648 u->after_rewind = TRUE;
1649 return 0;
1650 }
1651 } else
1652 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1653
1654 pa_sink_process_rewind(u->sink, 0);
1655 return 0;
1656 }
1657
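/* Worked example (disabled) of the rewind limit in process_rewind(): with a
 * 384000 byte buffer (2 s at 48 kHz S16 stereo), 64000 writable bytes and
 * the 256 byte safeguard, at most 384000 - 64256 = 319744 bytes may be
 * rewound. All numbers are assumptions for illustration. */
#if 0
static size_t rewind_limit_example(size_t hwbuf_size, size_t unused_nbytes, size_t safeguard) {
    unused_nbytes += safeguard; /* keep clear of the DMA pointer */

    return hwbuf_size > unused_nbytes ? hwbuf_size - unused_nbytes : 0;
}
#endif
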
1658 static void thread_func(void *userdata) {
1659 struct userdata *u = userdata;
1660 unsigned short revents = 0;
1661
1662 pa_assert(u);
1663
1664 pa_log_debug("Thread starting up");
1665
1666 if (u->core->realtime_scheduling)
1667 pa_make_realtime(u->core->realtime_priority);
1668
1669 pa_thread_mq_install(&u->thread_mq);
1670
1671 for (;;) {
1672 int ret;
1673 pa_usec_t rtpoll_sleep = 0;
1674
1675 #ifdef DEBUG_TIMING
1676 pa_log_debug("Loop");
1677 #endif
1678
1679 /* Render some data and write it to the dsp */
1680 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1681 int work_done;
1682 pa_usec_t sleep_usec = 0;
1683 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1684
1685 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1686 if (process_rewind(u) < 0)
1687 goto fail;
1688
1689 if (u->use_mmap)
1690 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1691 else
1692 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1693
1694 if (work_done < 0)
1695 goto fail;
1696
1697 /* pa_log_debug("work_done = %i", work_done); */
1698
1699 if (work_done) {
1700
1701 if (u->first) {
1702 pa_log_info("Starting playback.");
1703 snd_pcm_start(u->pcm_handle);
1704
1705 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1706
1707 u->first = FALSE;
1708 }
1709
1710 update_smoother(u);
1711 }
1712
1713 if (u->use_tsched) {
1714 pa_usec_t cusec;
1715
1716 if (u->since_start <= u->hwbuf_size) {
1717
1718 /* USB devices on ALSA seem to hit a buffer
1719 * underrun during the first iterations much
1720 * quicker than we calculate here, probably due to
1721 * the transport latency. To accommodate that,
1722 * we artificially decrease the sleep time until
1723 * we have filled the buffer at least once
1724 * completely. */
1725
1726 if (pa_log_ratelimit(PA_LOG_DEBUG))
1727 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1728 sleep_usec /= 2;
1729 }
1730
1731 /* OK, the playback buffer is now full, let's
1732 * calculate when to wake up next */
1733 #ifdef DEBUG_TIMING
1734 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1735 #endif
1736
1737 /* Convert from the sound card time domain to the
1738 * system time domain */
1739 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1740
1741 #ifdef DEBUG_TIMING
1742 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1743 #endif
1744
1745 /* We don't trust the conversion, so we wake up whatever comes first */
1746 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1747 }
1748
1749 u->after_rewind = FALSE;
1750
1751 }
1752
1753 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1754 pa_usec_t volume_sleep;
1755 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1756 if (volume_sleep > 0) {
1757 if (rtpoll_sleep > 0)
1758 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1759 else
1760 rtpoll_sleep = volume_sleep;
1761 }
1762 }
1763
1764 if (rtpoll_sleep > 0)
1765 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1766 else
1767 pa_rtpoll_set_timer_disabled(u->rtpoll);
1768
1769 /* Hmm, nothing to do. Let's sleep */
1770 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1771 goto fail;
1772
1773 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1774 pa_sink_volume_change_apply(u->sink, NULL);
1775
1776 if (ret == 0)
1777 goto finish;
1778
1779 /* Tell ALSA about this and process its response */
1780 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1781 struct pollfd *pollfd;
1782 int err;
1783 unsigned n;
1784
1785 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1786
1787 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1788 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1789 goto fail;
1790 }
1791
1792 if (revents & ~POLLOUT) {
1793 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1794 goto fail;
1795
1796 u->first = TRUE;
1797 u->since_start = 0;
1798 revents = 0;
1799 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1800 pa_log_debug("Wakeup from ALSA!");
1801
1802 } else
1803 revents = 0;
1804 }
1805
1806 fail:
1807 /* If this was not a regular exit from the loop we have to continue
1808 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1809 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1810 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1811
1812 finish:
1813 pa_log_debug("Thread shutting down");
1814 }
1815
static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = TRUE;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = FALSE;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}

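/* Locate a usable mixer for the PCM device. An explicit control
 * element (control= module argument) takes precedence over the path
 * set that comes with the mapping. */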
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
    snd_hctl_t *hctl;

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->output_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}

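/* Activate the mixer path that matches the sink's active port (or the
 * only path we have) and hook up volume/mute change callbacks, either
 * on the IO thread's rtpoll (deferred volume) or on the main loop. */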
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* We have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    } else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}

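/* Create and set up the ALSA sink. Typically invoked from
 * module-alsa-sink or module-alsa-card, e.g. (illustrative):
 *
 *     load-module module-alsa-sink device_id=0 tsched=1
 */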
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;
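
    /* Example of the resulting geometry (illustrative, assuming the
     * stock defaults of 4 fragments of 25 ms each): at 44100 Hz S16LE
     * stereo, frame_size = 4 bytes, so frag_size comes out at ~4408
     * bytes (1102 frames) and buffer_size at ~17632 bytes, i.e.
     * roughly 100 ms of audio. */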

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

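    /* Timer-based scheduling requires working high-resolution timers;
     * pa_alsa_may_tsched() vetoes it on systems that cannot deliver
     * them. */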
    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

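    /* The smoother tracks how the sound card clock drifts relative to
     * the system clock, so that card-domain timestamps can be
     * translated into system-domain wakeup times. */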
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

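    /* Three ways to open the PCM device: a fully specified mapping, a
     * device_id that still needs profile probing, or a plain ALSA
     * device string. */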
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");

        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on underrun");
    }

    if (is_iec958(u) || is_hdmi(u))
        set_formats = TRUE;

    u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);
    pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set, card);

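    /* The sink is always hardware with known latency; dynamic latency
     * is only advertised with timer-based scheduling, and format
     * negotiation only for digital (IEC958/HDMI) outputs. */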
    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
                          (set_formats ? PA_SINK_SET_FORMATS : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

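    /* Hook up the sink callbacks; latency and sample rate updates are
     * only wired up when the respective feature is enabled. */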
    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    if (u->sink->alternate_sample_rate)
        u->sink->update_rate = sink_update_rate_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

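    /* With timer-based scheduling the latency is dynamic and governed
     * by the watermark; otherwise it is fixed to the length of the
     * hardware buffer. */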
    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

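    /* With deferred volume, set_volume()/set_mute() only queue the
     * change; write_volume() makes sure it actually reaches the
     * hardware. */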
    if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);

    if (set_formats) {
        /* For S/PDIF and HDMI, allow getting/setting custom formats */
        pa_format_info *format;

        /* To start with, we only support PCM formats. Other formats may be
         * added with pa_sink_set_formats(). */
        format = pa_format_info_new();
        format->encoding = PA_ENCODING_PCM;
        u->formats = pa_idxset_new(NULL, NULL);
        pa_idxset_put(u->formats, format, NULL);

        u->sink->get_formats = sink_get_formats;
        u->sink->set_formats = sink_set_formats;
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

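/* Tear down in dependency order: unlink the sink so no new data is
 * routed to it, stop the IO thread, then release ALSA and mixer
 * resources. */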
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path && !u->mixer_path_set)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->formats)
        pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);

    if (u->rates)
        pa_xfree(u->rates);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}