/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level is ever below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)                       /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U)                      /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)                       /* 1.33ms; depending on channels/rate/sample format we may rewind more than the 256 bytes above */
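
/* For illustration (assuming 48 kHz, 2ch, S16LE, i.e. 4 bytes per frame and
 * 192000 bytes per second): 1330 usec corresponds to roughly
 * 192000 * 0.00133 ~= 255 bytes, which is why the two safeguard defaults
 * above are of the same order. */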

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static pa_bool_t is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static pa_bool_t is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void *busy, struct userdata *u) {
    pa_bool_t b;

    pa_assert(w);
    pa_assert(u);

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
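
/* Worked example (illustrative; assuming 48 kHz, 2ch, S16LE, i.e. 192000
 * bytes/s, and a fully usable 2s hardware buffer of 384000 bytes):
 * max_use_2 = 192000 bytes, min_sleep = 10ms -> 1920 bytes and
 * min_wakeup = 4ms -> 768 bytes, both comfortably inside the clamp range
 * [frame_size, max_use_2]. */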

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency, unless doing so was disabled in the
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}
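
/* Worked example (illustrative): with a 20ms watermark and
 * watermark_inc_step = 10ms, PA_MIN(2*20ms, 20ms+10ms) raises the watermark
 * to 30ms; from 5ms it would double to 10ms instead. Only once the watermark
 * is pinned against max_use - min_sleep does the code fall through to
 * growing the sink's minimal latency. */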

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
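
/* Worked example (illustrative): with a 30ms watermark and
 * watermark_dec_step = 5ms, PA_MAX(30ms/2, 30ms-5ms) lowers the watermark to
 * 25ms, and the watermark_dec_not_before stamp then blocks further decreases
 * for TSCHED_WATERMARK_VERIFY_AFTER_USEC (20s). Increases are aggressive,
 * decreases deliberately slow. */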

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
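
/* Worked example (illustrative): with a requested latency of 25ms and a 20ms
 * watermark, we sleep 5ms and reserve 20ms for processing; if the watermark
 * exceeded the requested latency (say 20ms watermark vs. 10ms latency), it
 * is cut down to usec/2 = 5ms so that some sleep time always remains. */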

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not merely when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#if 0
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
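
/* Illustration of the growth above: starting from SMOOTHER_MIN_INTERVAL the
 * update interval doubles on every accepted update, 2ms -> 4ms -> 8ms -> ...
 * and is capped at SMOOTHER_MAX_INTERVAL after about seven doublings, so in
 * steady state the smoother is fed roughly once per 200ms. */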

static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
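
/* In other words: latency = time worth of all bytes handed to ALSA
 * (write_count) minus the smoother's estimate of what has already been
 * played back, plus whatever is still queued in our own partial memchunk. */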

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}

/* Called from IO Context on unsuspend or from main thread when creating sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work around an assert in pa_sink_set_latency_range_within_thread():
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }
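
    /* Gloss on the "AES0=6" above: it sets IEC958 channel-status byte 0 to
     * IEC958_AES0_NONAUDIO|IEC958_AES0_CON_NOT_COPYRIGHT, i.e. it marks the
     * S/PDIF/HDMI stream as compressed non-PCM data so receivers don't try
     * to play the bitstream as raw samples. */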

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    /* reset the watermark to the value defined when sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("                                  in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled synchronous volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
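
/* Gloss on the dB case above: base_volume = pa_sw_volume_from_dB(-max_dB)
 * makes the path's maximum gain correspond to 100% while 0dB lands below it;
 * e.g. a path reaching +6dB would yield a base volume of roughly 79% of
 * PA_VOLUME_NORM under PulseAudio's cubic volume mapping. */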

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    mixer_volume_init(u);

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f, *g;
    uint32_t idx, n;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* Count how many sample rates we support */
    for (idx = 0, n = 0; u->rates[idx]; idx++)
        n++;

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f)) {
            g = pa_format_info_copy(f);
            pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
            pa_idxset_put(u->formats, g, NULL);
        }
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f)) {
            /* We don't set rates here since we'll just tack on a resampler for
             * unsupported rates */
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
        }
    }

    return TRUE;
}

static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
{
    struct userdata *u = s->userdata;
    int i;
    pa_bool_t supported = FALSE;

    pa_assert(u);

    for (i = 0; u->rates[i]; i++) {
        if (u->rates[i] == rate) {
            supported = TRUE;
            break;
        }
    }

    if (!supported) {
        pa_log_info("Sink does not support sample rate of %d Hz", rate);
        return FALSE;
    }

    if (!PA_SINK_IS_OPENED(s->state)) {
        pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
        u->sink->sample_spec.rate = rate;
        return TRUE;
    }

    return FALSE;
}

static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure the rewind doesn't go too far, as that can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
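
/* Worked example (illustrative): with a 384000-byte hardware buffer, 100000
 * bytes reported available (i.e. already consumed) and a 256-byte safeguard,
 * at most 384000 - (100000 + 256) = 283744 bytes may be rewound, no matter
 * how much the sink asked for. */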

static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate that,
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
#endif

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
#endif

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;

        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = TRUE;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = FALSE;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}
1842
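/* Locate a mixer for the PCM. With an explicit control= element (say,
 * control=Master -- an illustrative value), a single mixer path is
 * synthesized and probed; otherwise the whole path set belonging to the
 * mapping is probed. */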
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT, u->paths_dir)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
    }

    return;

fail:

    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}

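/* Activate the mixer path chosen for the sink: the active port's path if
 * ports exist, otherwise the first (or only) probed path. Registers mixer
 * event callbacks when any path controls volume or mute. */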
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;

        PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    } else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}

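/* Create an ALSA sink. Called from module-alsa-sink and, via the card
 * machinery, module-alsa-card. An illustrative invocation, with
 * hypothetical values, might be:
 *
 *   load-module module-alsa-sink device_id=0 sink_name=my_sink tsched=yes
 */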
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

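    /* Illustrative example (hypothetical defaults): for S16LE stereo at
     * 44.1 kHz, frame_size is 4 bytes; 4 fragments of 25 ms are roughly
     * 4.4 kB each, giving a buffer_size of about 17.6 kB (~100 ms). */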
    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

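    /* Open the PCM device. Three cases: a mapping handed in by the card
     * machinery, a device_id= to probe against a profile set, or a raw
     * device= string passed straight to ALSA. */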
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");

        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on underrun");
    }

    if (is_iec958(u) || is_hdmi(u))
        set_formats = TRUE;

    u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);
    pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(u->core, &data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
                          (set_formats ? PA_SINK_SET_FORMATS : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    if (u->sink->alternate_sample_rate)
        u->sink->update_rate = sink_update_rate_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

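    /* With timer-based scheduling the sink advertises a dynamic latency
     * range governed by the watermark; otherwise the latency is fixed to
     * the full hardware buffer. */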
    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);

    if (set_formats) {
        /* For S/PDIF and HDMI, allow getting/setting custom formats */
        pa_format_info *format;

        /* To start with, we only support PCM formats. Other formats may be added
         * with pa_sink_set_formats(). */
        format = pa_format_info_new();
        format->encoding = PA_ENCODING_PCM;
        u->formats = pa_idxset_new(NULL, NULL);
        pa_idxset_put(u->formats, format, NULL);

        u->sink->get_formats = sink_get_formats;
        u->sink->set_formats = sink_set_formats;
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

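/* Tear everything down in roughly the reverse order of construction:
 * unlink the sink so no new data is routed to it, shut down the IO
 * thread, then release the ALSA handles and remaining resources. */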
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->formats)
        pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);

    if (u->rates)
        pa_xfree(u->rates);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}