alsa: Give compressed formats preference over PCM
[pulseaudio] / src / modules / alsa / alsa-sink.c
/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the difference exceeds this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms; depending on channels/rate/sample format we may rewind more than the 256 bytes above */
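
/*
 * Worked example for the constants above (illustrative only, assuming
 * S16LE stereo at 48 kHz, i.e. a frame size of 4 bytes): the 2s tsched
 * buffer comes out as 2 * 48000 * 4 = 384000 bytes and the 20ms
 * watermark as 0.020 * 48000 * 4 = 3840 bytes. pa_usec_to_bytes()
 * performs exactly this usec-to-bytes conversion against the sink's
 * sample spec, which is how the byte-valued fields in struct userdata
 * below are derived from these usec constants at runtime.
 */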

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    uint32_t old_rate;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
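/* Example device strings these prefix checks are meant to catch
 * (illustrative names; the CARD/DEV suffix varies per system):
 * "iec958:CARD=PCH,DEV=0", "hdmi:CARD=HDMI,DEV=0". */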
static pa_bool_t is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static pa_bool_t is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_bool_t b;

    pa_assert(w);
    pa_assert(u);

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}

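/* Watermark growth policy (sketch of the arithmetic below): the new
 * watermark is PA_MIN(w * 2, w + watermark_inc_step), i.e. small
 * watermarks grow geometrically and large ones by at most
 * watermark_inc_step per adjustment. Illustrative numbers with a 10ms
 * step: a 4ms watermark doubles to 8ms, while a 50ms watermark only
 * grows to 60ms. Once the watermark cannot grow any further, the
 * sink's minimal latency is raised instead. */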
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

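/* Watermark shrink policy (mirror image of the above): the new
 * watermark is effectively PA_MAX(w / 2, w - watermark_dec_step), so
 * it never drops below half its previous value in one step. Shrinking
 * is additionally rate-limited via watermark_dec_not_before, i.e. at
 * most once per TSCHED_WATERMARK_VERIFY_AFTER_USEC (20s) of good
 * behaviour. */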
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

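/* Splits the requested latency into a sleep part and a processing
 * part. Illustrative numbers: with a 100ms requested latency and a
 * 20ms watermark we sleep for 80ms and keep 20ms as headroom for
 * rendering; if the watermark exceeds the latency it is clamped to
 * half of it, so *sleep_usec can never underflow. */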
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}

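/* mmap_write() follows the standard ALSA mmap transfer sequence:
 * pa_alsa_safe_avail() (a checked snd_pcm_avail()) reports the room in
 * the buffer, snd_pcm_mmap_begin() hands us a pointer into the DMA
 * buffer, pa_sink_render_into_full() renders straight into it, and
 * snd_pcm_mmap_commit() publishes the frames to the hardware. The
 * zero-copy trick is pa_memblock_new_fixed(), which wraps the mmap'ed
 * area in a memblock so the render path writes into the device buffer
 * directly. */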
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

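/* unix_write() is the fallback for devices that cannot do mmap: the
 * same avail/watermark bookkeeping as in mmap_write() above, but data
 * is first rendered into u->memchunk and then pushed to the device
 * with snd_pcm_writei(), i.e. with one extra copy per block. */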
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

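/* The smoother is fed pairs of (system time, playback clock), where
 * the playback position is reconstructed as write_count minus the
 * driver-reported delay, converted to usec. Updates are throttled by
 * smoother_interval, which doubles after every update up to
 * SMOOTHER_MAX_INTERVAL, so the smoother is sampled densely right
 * after a resume and only rarely in steady state. */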
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}

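/* Latency estimate (sketch): everything written so far, expressed in
 * usec, minus the smoothed playback clock gives the audio the device
 * still has queued; whatever sits in u->memchunk and has not yet been
 * handed to ALSA is added on top. */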
static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }
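
    /* A note on "AES0=6": assuming the usual <sound/asound.h> bit
     * values, 0x06 is IEC958_AES0_NONAUDIO | IEC958_AES0_CON_NOT_COPYRIGHT
     * for byte 0 of the IEC958 channel status, which marks the stream
     * as compressed (non-PCM) data so the receiver doesn't attempt to
     * play it back as raw samples. */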

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_FINISH_MOVE:
        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            u->old_rate = u->sink->sample_spec.rate;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == i->thread_info.sample_spec.rate)
                break;

            /* ... we do */
            if ((r = suspend(u)) < 0)
                return r;

            u->sink->sample_spec.rate = i->thread_info.sample_spec.rate;

            if ((r = unsuspend(u)) < 0)
                return r;

            break;
        }

        case PA_SINK_MESSAGE_START_MOVE:
        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == u->old_rate)
                break;

            /* ... we do */
            if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = suspend(u)) < 0))
                return r;

            u->sink->sample_spec.rate = u->old_rate;

            if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = unsuspend(u)) < 0))
                return r;

            break;
        }

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

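/* The volume callbacks below translate between the hardware mixer
 * scale and PulseAudio's, where PA_VOLUME_NORM is 0dB: s->base_volume
 * encodes how far the mixer's 0dB point sits below its maximum
 * amplification. Illustrative example (assuming PulseAudio's cubic
 * volume mapping): a mixer that can amplify up to +6dB gets
 * base_volume = pa_sw_volume_from_dB(-6) ~ 0.79 * PA_VOLUME_NORM, so
 * 100% volume lands on the mixer's 0dB rather than on full
 * amplification. */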
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug("       in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("       in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("       in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("       in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

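/* With PA_SINK_DEFERRED_VOLUME the actual mixer write happens here,
 * from the IO thread and timed to match the audio it applies to,
 * instead of immediately in sink_set_volume_cb() on the main thread.
 * The comparison below only checks whether the value the hardware
 * actually accepted matches the request to within VOLUME_ACCURACY,
 * and logs the difference when it does not. */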
static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("           in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled synchronous volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    mixer_volume_init(u);

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
     * buffer than before. If so, we need to make sure that subsequent
     * rewinds are relative to the new maximum fill level and not to the
     * current fill level. Thus, let's do a full rewind once, to clear
     * things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

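/* This is where compressed formats get preference over PCM: pa_idxset
 * hands entries back in insertion order, so by inserting the non-PCM
 * formats first, anything that walks u->formats (e.g. the passthrough
 * format negotiation) will try a compressed encoding such as AC3
 * before falling back to PCM. */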
static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f))
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f))
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
    }

    return TRUE;
}

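/* Rewind arithmetic (illustrative numbers): with a 384000 byte hw
 * buffer, 8000 bytes currently unused and a rewind_safeguard of 256,
 * at most 384000 - (8000 + 256) = 375744 bytes may be rewound;
 * anything beyond that could collide with data the DMA engine is
 * about to fetch. */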
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", (int) out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}

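/* IO thread main loop (overview): process any pending rewind, render
 * and write data via mmap_write()/unix_write(), feed the smoother,
 * work out how long we may sleep, then block in pa_rtpoll_run() until
 * either the timer fires (tsched) or ALSA signals POLLOUT; poll
 * revents other than POLLOUT are treated as errors and recovered
 * from. */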
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;

        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0)
                rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

1769 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1770 const char *n;
1771 char *t;
1772
1773 pa_assert(data);
1774 pa_assert(ma);
1775 pa_assert(device_name);
1776
1777 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1778 pa_sink_new_data_set_name(data, n);
1779 data->namereg_fail = TRUE;
1780 return;
1781 }
1782
1783 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1784 data->namereg_fail = TRUE;
1785 else {
1786 n = device_id ? device_id : device_name;
1787 data->namereg_fail = FALSE;
1788 }
1789
1790 if (mapping)
1791 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1792 else
1793 t = pa_sprintf_malloc("alsa_output.%s", n);
1794
1795 pa_sink_new_data_set_name(data, t);
1796 pa_xfree(t);
1797 }
1798
1799 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1800
1801 if (!mapping && !element)
1802 return;
1803
1804 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1805 pa_log_info("Failed to find a working mixer device.");
1806 return;
1807 }
1808
1809 if (element) {
1810
1811 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1812 goto fail;
1813
1814 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1815 goto fail;
1816
1817 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1818 pa_alsa_path_dump(u->mixer_path);
1819 } else {
1820
1821 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1822 goto fail;
1823
1824 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1825 }
1826
1827 return;
1828
1829 fail:
1830
1831 if (u->mixer_path_set) {
1832 pa_alsa_path_set_free(u->mixer_path_set);
1833 u->mixer_path_set = NULL;
1834 } else if (u->mixer_path) {
1835 pa_alsa_path_free(u->mixer_path);
1836 u->mixer_path = NULL;
1837 }
1838
1839 if (u->mixer_handle) {
1840 snd_mixer_close(u->mixer_handle);
1841 u->mixer_handle = NULL;
1842 }
1843 }
1844
1845
1846 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1847 pa_bool_t need_mixer_callback = FALSE;
1848
1849 pa_assert(u);
1850
1851 if (!u->mixer_handle)
1852 return 0;
1853
    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* We have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;

        PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    } else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

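    /* Mixer changes made behind our back (e.g. by other applications) must
     * be picked up via a callback: with deferred volume the mixer is
     * watched from the IO thread through the rtpoll, otherwise ctl events
     * are dispatched from the main loop via an fd list. */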
    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}

pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size == 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

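    /* Worked example (hypothetical defaults): with S16LE stereo at 48 kHz
     * (frame_size = 4 bytes), 4 fragments of 25 ms each give
     * frag_size = 4800 bytes and buffer_size = 19200 bytes, and thus
     * period_frames = 1200 and buffer_frames = 4800. */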
    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

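    /* Three ways to open the PCM device: by device_id plus an explicit
     * mapping, by device_id with automatic profile/mapping detection, or
     * by a raw device string. In each case b/d report back whether mmap
     * and tsched were actually available. */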
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    if (is_iec958(u) || is_hdmi(u))
        set_formats = TRUE;

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
                          (set_formats ? PA_SINK_SET_FORMATS : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

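    /* With timer-based scheduling the sink offers a dynamic latency range
     * up to the full hardware buffer, governed by the watermark computed
     * below; without it the latency is fixed at the buffer size. */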
    if (u->use_tsched) {
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);

    if (set_formats) {
        /* For S/PDIF and HDMI, allow getting/setting custom formats */
        pa_format_info *format;

        /* To start with, we only support PCM formats. Other formats may be
         * added with pa_sink_set_formats(). */
        format = pa_format_info_new();
        format->encoding = PA_ENCODING_PCM;
        u->formats = pa_idxset_new(NULL, NULL);
        pa_idxset_put(u->formats, format, NULL);

        u->sink->get_formats = sink_get_formats;
        u->sink->set_formats = sink_set_formats;
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

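/* Usage sketch (illustrative, not part of this file): a loadable module
 * such as module-alsa-sink would create the sink from its pa__init()
 * roughly like this, with "m" the pa_module and "ma" its parsed module
 * arguments:
 *
 *     pa_sink *sink;
 *     if (!(sink = pa_alsa_sink_new(m, ma, __FILE__, NULL, NULL)))
 *         goto fail;
 *
 * Passing a NULL card and NULL mapping selects the device purely from the
 * device=/device_id= module arguments. */
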
static void userdata_free(struct userdata *u) {
    pa_assert(u);

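    /* Teardown mirrors setup in reverse: unlink the sink so no new data is
     * routed to it, stop the IO thread, then release the ALSA and mixer
     * resources. */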
    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->formats)
        pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}