1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39 #include <pulse/internal.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/module.h>
44 #include <pulsecore/memchunk.h>
45 #include <pulsecore/sink.h>
46 #include <pulsecore/modargs.h>
47 #include <pulsecore/core-rtclock.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/sample-util.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/macro.h>
52 #include <pulsecore/thread.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to recheck whether things are good again */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
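/* A worked example with the defaults above, assuming 44.1kHz stereo
 * S16 (176400 bytes/s): the 2s tsched buffer holds ~352800 bytes and we
 * aim to wake up when ~20ms (~3528 bytes) are left. On an underrun the
 * watermark grows by 10ms; once 20s pass without the level dropping
 * below 100ms it shrinks again by 5ms. */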
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
80
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
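/* The update interval starts at SMOOTHER_MIN_INTERVAL and is doubled on
 * every smoother update (see update_smoother()) until it caps out at
 * SMOOTHER_MAX_INTERVAL: 2ms, 4ms, 8ms, ..., 200ms. */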
86
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
88
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz; the rewind safeguard will never be smaller than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms; depending on channels/rate/sample format this may amount to more than the 256 bytes above */
91
92 struct userdata {
93 pa_core *core;
94 pa_module *module;
95 pa_sink *sink;
96
97 pa_thread *thread;
98 pa_thread_mq thread_mq;
99 pa_rtpoll *rtpoll;
100
101 snd_pcm_t *pcm_handle;
102
103 char *paths_dir;
104 pa_alsa_fdlist *mixer_fdl;
105 pa_alsa_mixer_pdata *mixer_pd;
106 snd_mixer_t *mixer_handle;
107 pa_alsa_path_set *mixer_path_set;
108 pa_alsa_path *mixer_path;
109
110 pa_cvolume hardware_volume;
111
112 uint32_t old_rate;
113
114 size_t
115 frame_size,
116 fragment_size,
117 hwbuf_size,
118 tsched_watermark,
119 tsched_watermark_ref,
120 hwbuf_unused,
121 min_sleep,
122 min_wakeup,
123 watermark_inc_step,
124 watermark_dec_step,
125 watermark_inc_threshold,
126 watermark_dec_threshold,
127 rewind_safeguard;
128
129 pa_usec_t watermark_dec_not_before;
130 pa_usec_t min_latency_ref;
131
132 pa_memchunk memchunk;
133
134 char *device_name; /* name of the PCM device */
135 char *control_device; /* name of the control device */
136
137 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;
138
139 pa_bool_t first, after_rewind;
140
141 pa_rtpoll_item *alsa_rtpoll_item;
142
143 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
144
145 pa_smoother *smoother;
146 uint64_t write_count;
147 uint64_t since_start;
148 pa_usec_t smoother_interval;
149 pa_usec_t last_smoother_update;
150
151 pa_idxset *formats;
152
153 pa_reserve_wrapper *reserve;
154 pa_hook_slot *reserve_slot;
155 pa_reserve_monitor_wrapper *monitor;
156 pa_hook_slot *monitor_slot;
157 };
158
159 static void userdata_free(struct userdata *u);
160
161 /* FIXME: Is there a better way to do this than device names? */
162 static pa_bool_t is_iec958(struct userdata *u) {
163 return (strncmp("iec958", u->device_name, 6) == 0);
164 }
165
166 static pa_bool_t is_hdmi(struct userdata *u) {
167 return (strncmp("hdmi", u->device_name, 4) == 0);
168 }
169
170 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
171 pa_assert(r);
172 pa_assert(u);
173
174 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
175 return PA_HOOK_CANCEL;
176
177 return PA_HOOK_OK;
178 }
179
180 static void reserve_done(struct userdata *u) {
181 pa_assert(u);
182
183 if (u->reserve_slot) {
184 pa_hook_slot_free(u->reserve_slot);
185 u->reserve_slot = NULL;
186 }
187
188 if (u->reserve) {
189 pa_reserve_wrapper_unref(u->reserve);
190 u->reserve = NULL;
191 }
192 }
193
194 static void reserve_update(struct userdata *u) {
195 const char *description;
196 pa_assert(u);
197
198 if (!u->sink || !u->reserve)
199 return;
200
201 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
202 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
203 }
204
205 static int reserve_init(struct userdata *u, const char *dname) {
206 char *rname;
207
208 pa_assert(u);
209 pa_assert(dname);
210
211 if (u->reserve)
212 return 0;
213
214 if (pa_in_system_mode())
215 return 0;
216
217 if (!(rname = pa_alsa_get_reserve_name(dname)))
218 return 0;
219
220 /* We are resuming, try to lock the device */
221 u->reserve = pa_reserve_wrapper_get(u->core, rname);
222 pa_xfree(rname);
223
224 if (!(u->reserve))
225 return -1;
226
227 reserve_update(u);
228
229 pa_assert(!u->reserve_slot);
230 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
231
232 return 0;
233 }
234
235 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
236 pa_bool_t b;
237
238 pa_assert(w);
239 pa_assert(u);
240
241 b = PA_PTR_TO_UINT(busy) && !u->reserve;
242
243 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
244 return PA_HOOK_OK;
245 }
246
247 static void monitor_done(struct userdata *u) {
248 pa_assert(u);
249
250 if (u->monitor_slot) {
251 pa_hook_slot_free(u->monitor_slot);
252 u->monitor_slot = NULL;
253 }
254
255 if (u->monitor) {
256 pa_reserve_monitor_wrapper_unref(u->monitor);
257 u->monitor = NULL;
258 }
259 }
260
261 static int reserve_monitor_init(struct userdata *u, const char *dname) {
262 char *rname;
263
264 pa_assert(u);
265 pa_assert(dname);
266
267 if (pa_in_system_mode())
268 return 0;
269
270 if (!(rname = pa_alsa_get_reserve_name(dname)))
271 return 0;
272
273 /* We don't lock the device here, we just monitor whether somebody else holds the reservation */
274 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
275 pa_xfree(rname);
276
277 if (!(u->monitor))
278 return -1;
279
280 pa_assert(!u->monitor_slot);
281 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
282
283 return 0;
284 }
285
286 static void fix_min_sleep_wakeup(struct userdata *u) {
287 size_t max_use, max_use_2;
288
289 pa_assert(u);
290 pa_assert(u->use_tsched);
291
292 max_use = u->hwbuf_size - u->hwbuf_unused;
293 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
294
295 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
296 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
297
298 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
299 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
300 }
301
302 static void fix_tsched_watermark(struct userdata *u) {
303 size_t max_use;
304 pa_assert(u);
305 pa_assert(u->use_tsched);
306
307 max_use = u->hwbuf_size - u->hwbuf_unused;
308
309 if (u->tsched_watermark > max_use - u->min_sleep)
310 u->tsched_watermark = max_use - u->min_sleep;
311
312 if (u->tsched_watermark < u->min_wakeup)
313 u->tsched_watermark = u->min_wakeup;
314 }
315
316 static void increase_watermark(struct userdata *u) {
317 size_t old_watermark;
318 pa_usec_t old_min_latency, new_min_latency;
319
320 pa_assert(u);
321 pa_assert(u->use_tsched);
322
323 /* First, just try to increase the watermark */
324 old_watermark = u->tsched_watermark;
325 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
326 fix_tsched_watermark(u);
327
328 if (old_watermark != u->tsched_watermark) {
329 pa_log_info("Increasing wakeup watermark to %0.2f ms",
330 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
331 return;
332 }
333
334 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
335 old_min_latency = u->sink->thread_info.min_latency;
336 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
337 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
338
339 if (old_min_latency != new_min_latency) {
340 pa_log_info("Increasing minimal latency to %0.2f ms",
341 (double) new_min_latency / PA_USEC_PER_MSEC);
342
343 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
344 }
345
346 /* When we reach this we're officially fucked! */
347 }
348
349 static void decrease_watermark(struct userdata *u) {
350 size_t old_watermark;
351 pa_usec_t now;
352
353 pa_assert(u);
354 pa_assert(u->use_tsched);
355
356 now = pa_rtclock_now();
357
358 if (u->watermark_dec_not_before <= 0)
359 goto restart;
360
361 if (u->watermark_dec_not_before > now)
362 return;
363
364 old_watermark = u->tsched_watermark;
365
366 if (u->tsched_watermark < u->watermark_dec_step)
367 u->tsched_watermark = u->tsched_watermark / 2;
368 else
369 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
370
371 fix_tsched_watermark(u);
372
373 if (old_watermark != u->tsched_watermark)
374 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
375 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
376
377 /* We don't change the latency range */
378
379 restart:
380 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
381 }
382
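/* Split the effective latency into the time we may sleep and the time
 * reserved for processing. For example, with a requested latency of
 * 50ms and a 20ms watermark we sleep for 30ms and keep 20ms as
 * processing headroom; if the watermark exceeds the latency, half the
 * latency is used as the watermark instead. */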
383 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
384 pa_usec_t usec, wm;
385
386 pa_assert(sleep_usec);
387 pa_assert(process_usec);
388
389 pa_assert(u);
390 pa_assert(u->use_tsched);
391
392 usec = pa_sink_get_requested_latency_within_thread(u->sink);
393
394 if (usec == (pa_usec_t) -1)
395 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
396
397 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
398
399 if (wm > usec)
400 wm = usec/2;
401
402 *sleep_usec = usec - wm;
403 *process_usec = wm;
404
405 #ifdef DEBUG_TIMING
406 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
407 (unsigned long) (usec / PA_USEC_PER_MSEC),
408 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
409 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
410 #endif
411 }
412
413 static int try_recover(struct userdata *u, const char *call, int err) {
414 pa_assert(u);
415 pa_assert(call);
416 pa_assert(err < 0);
417
418 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
419
420 pa_assert(err != -EAGAIN);
421
422 if (err == -EPIPE)
423 pa_log_debug("%s: Buffer underrun!", call);
424
425 if (err == -ESTRPIPE)
426 pa_log_debug("%s: System suspended!", call);
427
428 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
429 pa_log("%s: %s", call, pa_alsa_strerror(err));
430 return -1;
431 }
432
433 u->first = TRUE;
434 u->since_start = 0;
435 return 0;
436 }
437
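/* Given how much of the hw buffer is currently writable (n_bytes),
 * compute how much audio is still queued for playback, detect
 * underruns, and drive the adaptive watermark up or down accordingly. */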
438 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
439 size_t left_to_play;
440 pa_bool_t underrun = FALSE;
441
442 /* We use <= instead of < for this check here because an underrun
443 * only happens after the last sample was processed, not as soon as
444 * it is removed from the buffer. This is particularly important
445 * when block transfer is used. */
446
447 if (n_bytes <= u->hwbuf_size)
448 left_to_play = u->hwbuf_size - n_bytes;
449 else {
450
451 /* We got a dropout. What a mess! */
452 left_to_play = 0;
453 underrun = TRUE;
454
455 #ifdef DEBUG_TIMING
456 PA_DEBUG_TRAP;
457 #endif
458
459 if (!u->first && !u->after_rewind)
460 if (pa_log_ratelimit(PA_LOG_INFO))
461 pa_log_info("Underrun!");
462 }
463
464 #ifdef DEBUG_TIMING
465 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
466 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
467 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
468 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
469 #endif
470
471 if (u->use_tsched) {
472 pa_bool_t reset_not_before = TRUE;
473
474 if (!u->first && !u->after_rewind) {
475 if (underrun || left_to_play < u->watermark_inc_threshold)
476 increase_watermark(u);
477 else if (left_to_play > u->watermark_dec_threshold) {
478 reset_not_before = FALSE;
479
480 /* We decrease the watermark only if we have actually
481 * been woken up by a timeout. If something else woke
482 * us up it's too easy to fulfill the deadlines... */
483
484 if (on_timeout)
485 decrease_watermark(u);
486 }
487 }
488
489 if (reset_not_before)
490 u->watermark_dec_not_before = 0;
491 }
492
493 return left_to_play;
494 }
495
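/* Fill the hw buffer via ALSA's mmap transfer: query the writable
 * space, map it with snd_pcm_mmap_begin(), let the sink render directly
 * into the mapped area and commit it with snd_pcm_mmap_commit().
 * Returns 1 if data was written, 0 if not, negative on error. */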
496 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
497 pa_bool_t work_done = FALSE;
498 pa_usec_t max_sleep_usec = 0, process_usec = 0;
499 size_t left_to_play;
500 unsigned j = 0;
501
502 pa_assert(u);
503 pa_sink_assert_ref(u->sink);
504
505 if (u->use_tsched)
506 hw_sleep_time(u, &max_sleep_usec, &process_usec);
507
508 for (;;) {
509 snd_pcm_sframes_t n;
510 size_t n_bytes;
511 int r;
512 pa_bool_t after_avail = TRUE;
513
514 /* First we determine how many samples are missing to fill the
515 * buffer up to 100% */
516
517 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
518
519 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
520 continue;
521
522 return r;
523 }
524
525 n_bytes = (size_t) n * u->frame_size;
526
527 #ifdef DEBUG_TIMING
528 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
529 #endif
530
531 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
532 on_timeout = FALSE;
533
534 if (u->use_tsched)
535
536 /* We won't fill up the playback buffer before at least
537 * half the sleep time is over because otherwise we might
538 * ask for more data from the clients than they expect. We
539 * need to guarantee that clients only have to keep around
540 * a single hw buffer length. */
541
542 if (!polled &&
543 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
544 #ifdef DEBUG_TIMING
545 pa_log_debug("Not filling up, because too early.");
546 #endif
547 break;
548 }
549
550 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
551
552 if (polled)
553 PA_ONCE_BEGIN {
554 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
555 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
556 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
557 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
558 pa_strnull(dn));
559 pa_xfree(dn);
560 } PA_ONCE_END;
561
562 #ifdef DEBUG_TIMING
563 pa_log_debug("Not filling up, because not necessary.");
564 #endif
565 break;
566 }
567
568
569 if (++j > 10) {
570 #ifdef DEBUG_TIMING
571 pa_log_debug("Not filling up, because already too many iterations.");
572 #endif
573
574 break;
575 }
576
577 n_bytes -= u->hwbuf_unused;
578 polled = FALSE;
579
580 #ifdef DEBUG_TIMING
581 pa_log_debug("Filling up");
582 #endif
583
584 for (;;) {
585 pa_memchunk chunk;
586 void *p;
587 int err;
588 const snd_pcm_channel_area_t *areas;
589 snd_pcm_uframes_t offset, frames;
590 snd_pcm_sframes_t sframes;
591
592 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
593 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
594
595 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
596
597 if (!after_avail && err == -EAGAIN)
598 break;
599
600 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
601 continue;
602
603 return r;
604 }
605
606 /* Make sure that if these memblocks need to be copied they will fit into one slot */
607 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
608 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
609
610 if (!after_avail && frames == 0)
611 break;
612
613 pa_assert(frames > 0);
614 after_avail = FALSE;
615
616 /* Check these are multiples of 8 bits */
617 pa_assert((areas[0].first & 7) == 0);
618 pa_assert((areas[0].step & 7) == 0);
619
620 /* We assume a single interleaved memory buffer */
621 pa_assert((areas[0].first >> 3) == 0);
622 pa_assert((areas[0].step >> 3) == u->frame_size);
623
624 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
625
626 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
627 chunk.length = pa_memblock_get_length(chunk.memblock);
628 chunk.index = 0;
629
630 pa_sink_render_into_full(u->sink, &chunk);
631 pa_memblock_unref_fixed(chunk.memblock);
632
633 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
634
635 if (!after_avail && (int) sframes == -EAGAIN)
636 break;
637
638 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
639 continue;
640
641 return r;
642 }
643
644 work_done = TRUE;
645
646 u->write_count += frames * u->frame_size;
647 u->since_start += frames * u->frame_size;
648
649 #ifdef DEBUG_TIMING
650 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
651 #endif
652
653 if ((size_t) frames * u->frame_size >= n_bytes)
654 break;
655
656 n_bytes -= (size_t) frames * u->frame_size;
657 }
658 }
659
660 if (u->use_tsched) {
661 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
662 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
663
664 if (*sleep_usec > process_usec)
665 *sleep_usec -= process_usec;
666 else
667 *sleep_usec = 0;
668 } else
669 *sleep_usec = 0;
670
671 return work_done ? 1 : 0;
672 }
673
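/* Like mmap_write(), but for devices that don't support mmap: render
 * into an intermediate memchunk and push it out with snd_pcm_writei(). */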
674 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
675 pa_bool_t work_done = FALSE;
676 pa_usec_t max_sleep_usec = 0, process_usec = 0;
677 size_t left_to_play;
678 unsigned j = 0;
679
680 pa_assert(u);
681 pa_sink_assert_ref(u->sink);
682
683 if (u->use_tsched)
684 hw_sleep_time(u, &max_sleep_usec, &process_usec);
685
686 for (;;) {
687 snd_pcm_sframes_t n;
688 size_t n_bytes;
689 int r;
690 pa_bool_t after_avail = TRUE;
691
692 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
693
694 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
695 continue;
696
697 return r;
698 }
699
700 n_bytes = (size_t) n * u->frame_size;
701 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
702 on_timeout = FALSE;
703
704 if (u->use_tsched)
705
706 /* We won't fill up the playback buffer before at least
707 * half the sleep time is over because otherwise we might
708 * ask for more data from the clients than they expect. We
709 * need to guarantee that clients only have to keep around
710 * a single hw buffer length. */
711
712 if (!polled &&
713 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
714 break;
715
716 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
717
718 if (polled)
719 PA_ONCE_BEGIN {
720 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
721 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
722 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
723 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
724 pa_strnull(dn));
725 pa_xfree(dn);
726 } PA_ONCE_END;
727
728 break;
729 }
730
731 if (++j > 10) {
732 #ifdef DEBUG_TIMING
733 pa_log_debug("Not filling up, because already too many iterations.");
734 #endif
735
736 break;
737 }
738
739 n_bytes -= u->hwbuf_unused;
740 polled = FALSE;
741
742 for (;;) {
743 snd_pcm_sframes_t frames;
744 void *p;
745
746 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
747
748 if (u->memchunk.length <= 0)
749 pa_sink_render(u->sink, n_bytes, &u->memchunk);
750
751 pa_assert(u->memchunk.length > 0);
752
753 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
754
755 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
756 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
757
758 p = pa_memblock_acquire(u->memchunk.memblock);
759 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
760 pa_memblock_release(u->memchunk.memblock);
761
762 if (PA_UNLIKELY(frames < 0)) {
763
764 if (!after_avail && (int) frames == -EAGAIN)
765 break;
766
767 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
768 continue;
769
770 return r;
771 }
772
773 if (!after_avail && frames == 0)
774 break;
775
776 pa_assert(frames > 0);
777 after_avail = FALSE;
778
779 u->memchunk.index += (size_t) frames * u->frame_size;
780 u->memchunk.length -= (size_t) frames * u->frame_size;
781
782 if (u->memchunk.length <= 0) {
783 pa_memblock_unref(u->memchunk.memblock);
784 pa_memchunk_reset(&u->memchunk);
785 }
786
787 work_done = TRUE;
788
789 u->write_count += frames * u->frame_size;
790 u->since_start += frames * u->frame_size;
791
792 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
793
794 if ((size_t) frames * u->frame_size >= n_bytes)
795 break;
796
797 n_bytes -= (size_t) frames * u->frame_size;
798 }
799 }
800
801 if (u->use_tsched) {
802 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
803 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
804
805 if (*sleep_usec > process_usec)
806 *sleep_usec -= process_usec;
807 else
808 *sleep_usec = 0;
809 } else
810 *sleep_usec = 0;
811
812 return work_done ? 1 : 0;
813 }
814
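/* Feed the time smoother a new (system time, playback time) sample:
 * the playback position is the bytes we have written minus what ALSA
 * reports as still queued (the delay). Updates are rate-limited by the
 * exponentially growing smoother_interval. */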
815 static void update_smoother(struct userdata *u) {
816 snd_pcm_sframes_t delay = 0;
817 int64_t position;
818 int err;
819 pa_usec_t now1 = 0, now2;
820 snd_pcm_status_t *status;
821
822 snd_pcm_status_alloca(&status);
823
824 pa_assert(u);
825 pa_assert(u->pcm_handle);
826
827 /* Let's update the time smoother */
828
829 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
830 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
831 return;
832 }
833
834 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
835 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
836 else {
837 snd_htimestamp_t htstamp = { 0, 0 };
838 snd_pcm_status_get_htstamp(status, &htstamp);
839 now1 = pa_timespec_load(&htstamp);
840 }
841
842 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
843 if (now1 <= 0)
844 now1 = pa_rtclock_now();
845
846 /* check whether the time since the last update is longer than the update interval */
847 if (u->last_smoother_update > 0)
848 if (u->last_smoother_update + u->smoother_interval > now1)
849 return;
850
851 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
852
853 if (PA_UNLIKELY(position < 0))
854 position = 0;
855
856 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
857
858 pa_smoother_put(u->smoother, now1, now2);
859
860 u->last_smoother_update = now1;
861 /* exponentially increase the update interval up to the MAX limit */
862 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
863 }
864
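/* Estimate the current latency from the smoother: the difference
 * between what we have written so far and the smoothed playback
 * position, plus whatever still sits in our partial memchunk. */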
865 static pa_usec_t sink_get_latency(struct userdata *u) {
866 pa_usec_t r;
867 int64_t delay;
868 pa_usec_t now1, now2;
869
870 pa_assert(u);
871
872 now1 = pa_rtclock_now();
873 now2 = pa_smoother_get(u->smoother, now1);
874
875 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
876
877 r = delay >= 0 ? (pa_usec_t) delay : 0;
878
879 if (u->memchunk.memblock)
880 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
881
882 return r;
883 }
884
885 static int build_pollfd(struct userdata *u) {
886 pa_assert(u);
887 pa_assert(u->pcm_handle);
888
889 if (u->alsa_rtpoll_item)
890 pa_rtpoll_item_free(u->alsa_rtpoll_item);
891
892 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
893 return -1;
894
895 return 0;
896 }
897
898 /* Called from IO context */
899 static int suspend(struct userdata *u) {
900 pa_assert(u);
901 pa_assert(u->pcm_handle);
902
903 pa_smoother_pause(u->smoother, pa_rtclock_now());
904
905 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
906 * take awfully long with our long buffer sizes today. */
907 snd_pcm_close(u->pcm_handle);
908 u->pcm_handle = NULL;
909
910 if (u->alsa_rtpoll_item) {
911 pa_rtpoll_item_free(u->alsa_rtpoll_item);
912 u->alsa_rtpoll_item = NULL;
913 }
914
915 /* We reset max_rewind/max_request here to make sure that the old
916 * max_request/max_rewind values set before the suspend don't
917 * influence the per-stream buffers of streams created while we
918 * are suspended, and that those streams' requirements don't
919 * influence us in turn. */
920 pa_sink_set_max_rewind_within_thread(u->sink, 0);
921 pa_sink_set_max_request_within_thread(u->sink, 0);
922
923 pa_log_info("Device suspended...");
924
925 return 0;
926 }
927
928 /* Called from IO context */
929 static int update_sw_params(struct userdata *u) {
930 snd_pcm_uframes_t avail_min;
931 int err;
932
933 pa_assert(u);
934
935 /* Use the full buffer if no one asked us for anything specific */
936 u->hwbuf_unused = 0;
937
938 if (u->use_tsched) {
939 pa_usec_t latency;
940
941 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
942 size_t b;
943
944 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
945
946 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
947
948 /* We need at least one sample in our buffer */
949
950 if (PA_UNLIKELY(b < u->frame_size))
951 b = u->frame_size;
952
953 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
954 }
955
956 fix_min_sleep_wakeup(u);
957 fix_tsched_watermark(u);
958 }
959
960 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
961
962 /* We need at least one frame in the used part of the buffer */
963 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
964
965 if (u->use_tsched) {
966 pa_usec_t sleep_usec, process_usec;
967
968 hw_sleep_time(u, &sleep_usec, &process_usec);
969 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
970 }
971
972 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
973
974 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
975 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
976 return err;
977 }
978
979 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
980 if (pa_alsa_pcm_is_hw(u->pcm_handle))
981 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
982 else {
983 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
984 pa_sink_set_max_rewind_within_thread(u->sink, 0);
985 }
986
987 return 0;
988 }
989
990 /* Called from IO Context on unsuspend or from main thread when creating sink */
991 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
992 pa_bool_t in_thread)
993 {
994 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
995 &u->sink->sample_spec);
996
997 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
998 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
999
1000 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1001 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1002
1003 fix_min_sleep_wakeup(u);
1004 fix_tsched_watermark(u);
1005
1006 if (in_thread)
1007 pa_sink_set_latency_range_within_thread(u->sink,
1008 u->min_latency_ref,
1009 pa_bytes_to_usec(u->hwbuf_size, ss));
1010 else {
1011 pa_sink_set_latency_range(u->sink,
1012 0,
1013 pa_bytes_to_usec(u->hwbuf_size, ss));
1014
1015 /* work-around for the assert in pa_sink_set_latency_range_within_thread():
1016 keep track of min_latency and reuse it when
1017 this routine is called from IO context */
1018 u->min_latency_ref = u->sink->thread_info.min_latency;
1019 }
1020
1021 pa_log_info("Time scheduling watermark is %0.2fms",
1022 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
1023 }
1024
1025 /* Called from IO context */
1026 static int unsuspend(struct userdata *u) {
1027 pa_sample_spec ss;
1028 int err;
1029 pa_bool_t b, d;
1030 snd_pcm_uframes_t period_size, buffer_size;
1031 char *device_name = NULL;
1032
1033 pa_assert(u);
1034 pa_assert(!u->pcm_handle);
1035
1036 pa_log_info("Trying resume...");
1037
1038 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1039 /* Need to open device in NONAUDIO mode */
1040 int len = strlen(u->device_name) + 8;
1041
1042 device_name = pa_xmalloc(len);
1043 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1044 }
1045
1046 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1047 SND_PCM_NONBLOCK|
1048 SND_PCM_NO_AUTO_RESAMPLE|
1049 SND_PCM_NO_AUTO_CHANNELS|
1050 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1051 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1052 goto fail;
1053 }
1054
1055 ss = u->sink->sample_spec;
1056 period_size = u->fragment_size / u->frame_size;
1057 buffer_size = u->hwbuf_size / u->frame_size;
1058 b = u->use_mmap;
1059 d = u->use_tsched;
1060
1061 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1062 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1063 goto fail;
1064 }
1065
1066 if (b != u->use_mmap || d != u->use_tsched) {
1067 pa_log_warn("Resume failed, couldn't get original access mode.");
1068 goto fail;
1069 }
1070
1071 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1072 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1073 goto fail;
1074 }
1075
1076 if (period_size*u->frame_size != u->fragment_size ||
1077 buffer_size*u->frame_size != u->hwbuf_size) {
1078 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1079 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1080 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1081 goto fail;
1082 }
1083
1084 if (update_sw_params(u) < 0)
1085 goto fail;
1086
1087 if (build_pollfd(u) < 0)
1088 goto fail;
1089
1090 u->write_count = 0;
1091 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1092 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1093 u->last_smoother_update = 0;
1094
1095 u->first = TRUE;
1096 u->since_start = 0;
1097
1098 /* reset the watermark to the value defined when the sink was created */
1099 if (u->use_tsched)
1100 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
1101
1102 pa_log_info("Resumed successfully...");
1103
1104 pa_xfree(device_name);
1105 return 0;
1106
1107 fail:
1108 if (u->pcm_handle) {
1109 snd_pcm_close(u->pcm_handle);
1110 u->pcm_handle = NULL;
1111 }
1112
1113 pa_xfree(device_name);
1114
1115 return -PA_ERR_IO;
1116 }
1117
1118 /* Called from IO context */
1119 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1120 struct userdata *u = PA_SINK(o)->userdata;
1121
1122 switch (code) {
1123
1124 case PA_SINK_MESSAGE_FINISH_MOVE:
1125 case PA_SINK_MESSAGE_ADD_INPUT: {
1126 pa_sink_input *i = PA_SINK_INPUT(data);
1127 int r = 0;
1128
1129 if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
1130 break;
1131
1132 u->old_rate = u->sink->sample_spec.rate;
1133
1134 /* Passthrough format, see if we need to reset sink sample rate */
1135 if (u->sink->sample_spec.rate == i->thread_info.sample_spec.rate)
1136 break;
1137
1138 /* .. we do */
1139 if ((r = suspend(u)) < 0)
1140 return r;
1141
1142 u->sink->sample_spec.rate = i->thread_info.sample_spec.rate;
1143
1144 if ((r = unsuspend(u)) < 0)
1145 return r;
1146
1147 break;
1148 }
1149
1150 case PA_SINK_MESSAGE_START_MOVE:
1151 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1152 pa_sink_input *i = PA_SINK_INPUT(data);
1153 int r = 0;
1154
1155 if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
1156 break;
1157
1158 /* Passthrough format, see if we need to reset sink sample rate */
1159 if (u->sink->sample_spec.rate == u->old_rate)
1160 break;
1161
1162 /* .. we do */
1163 if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = suspend(u)) < 0))
1164 return r;
1165
1166 u->sink->sample_spec.rate = u->old_rate;
1167
1168 if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = unsuspend(u)) < 0))
1169 return r;
1170
1171 break;
1172 }
1173
1174 case PA_SINK_MESSAGE_GET_LATENCY: {
1175 pa_usec_t r = 0;
1176
1177 if (u->pcm_handle)
1178 r = sink_get_latency(u);
1179
1180 *((pa_usec_t*) data) = r;
1181
1182 return 0;
1183 }
1184
1185 case PA_SINK_MESSAGE_SET_STATE:
1186
1187 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1188
1189 case PA_SINK_SUSPENDED: {
1190 int r;
1191
1192 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1193
1194 if ((r = suspend(u)) < 0)
1195 return r;
1196
1197 break;
1198 }
1199
1200 case PA_SINK_IDLE:
1201 case PA_SINK_RUNNING: {
1202 int r;
1203
1204 if (u->sink->thread_info.state == PA_SINK_INIT) {
1205 if (build_pollfd(u) < 0)
1206 return -PA_ERR_IO;
1207 }
1208
1209 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1210 if ((r = unsuspend(u)) < 0)
1211 return r;
1212 }
1213
1214 break;
1215 }
1216
1217 case PA_SINK_UNLINKED:
1218 case PA_SINK_INIT:
1219 case PA_SINK_INVALID_STATE:
1220 ;
1221 }
1222
1223 break;
1224 }
1225
1226 return pa_sink_process_msg(o, code, data, offset, chunk);
1227 }
1228
1229 /* Called from main context */
1230 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1231 pa_sink_state_t old_state;
1232 struct userdata *u;
1233
1234 pa_sink_assert_ref(s);
1235 pa_assert_se(u = s->userdata);
1236
1237 old_state = pa_sink_get_state(u->sink);
1238
1239 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1240 reserve_done(u);
1241 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1242 if (reserve_init(u, u->device_name) < 0)
1243 return -PA_ERR_BUSY;
1244
1245 return 0;
1246 }
1247
1248 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1249 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1250
1251 pa_assert(u);
1252 pa_assert(u->mixer_handle);
1253
1254 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1255 return 0;
1256
1257 if (!PA_SINK_IS_LINKED(u->sink->state))
1258 return 0;
1259
1260 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1261 return 0;
1262
1263 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1264 pa_sink_get_volume(u->sink, TRUE);
1265 pa_sink_get_mute(u->sink, TRUE);
1266 }
1267
1268 return 0;
1269 }
1270
1271 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1272 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1273
1274 pa_assert(u);
1275 pa_assert(u->mixer_handle);
1276
1277 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1278 return 0;
1279
1280 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1281 return 0;
1282
1283 if (mask & SND_CTL_EVENT_MASK_VALUE)
1284 pa_sink_update_volume_and_mute(u->sink);
1285
1286 return 0;
1287 }
1288
1289 static void sink_get_volume_cb(pa_sink *s) {
1290 struct userdata *u = s->userdata;
1291 pa_cvolume r;
1292 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1293
1294 pa_assert(u);
1295 pa_assert(u->mixer_path);
1296 pa_assert(u->mixer_handle);
1297
1298 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1299 return;
1300
1301 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1302 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1303
1304 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1305
1306 if (u->mixer_path->has_dB) {
1307 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1308
1309 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1310 }
1311
1312 if (pa_cvolume_equal(&u->hardware_volume, &r))
1313 return;
1314
1315 s->real_volume = u->hardware_volume = r;
1316
1317 /* Hmm, so the hardware volume changed, let's reset our software volume */
1318 if (u->mixer_path->has_dB)
1319 pa_sink_set_soft_volume(s, NULL);
1320 }
1321
1322 static void sink_set_volume_cb(pa_sink *s) {
1323 struct userdata *u = s->userdata;
1324 pa_cvolume r;
1325 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1326 pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1327
1328 pa_assert(u);
1329 pa_assert(u->mixer_path);
1330 pa_assert(u->mixer_handle);
1331
1332 /* Shift up by the base volume */
1333 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1334
1335 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1336 return;
1337
1338 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1339 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1340
1341 u->hardware_volume = r;
1342
1343 if (u->mixer_path->has_dB) {
1344 pa_cvolume new_soft_volume;
1345 pa_bool_t accurate_enough;
1346 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1347
1348 /* Match exactly what the user requested by software */
1349 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1350
1351 /* If the adjustment we'd have to do in software is only
1352 * minimal we can skip it. That saves us CPU at the expense of
1353 * a bit of accuracy */
1354 accurate_enough =
1355 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1356 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1357
1358 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1359 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1360 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1361 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1362 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1363 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1364 pa_yes_no(accurate_enough));
1365 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1366
1367 if (!accurate_enough)
1368 s->soft_volume = new_soft_volume;
1369
1370 } else {
1371 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1372
1373 /* We can't match exactly what the user requested, hence let's
1374 * at least tell the user about it */
1375
1376 s->real_volume = r;
1377 }
1378 }
1379
1380 static void sink_write_volume_cb(pa_sink *s) {
1381 struct userdata *u = s->userdata;
1382 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1383
1384 pa_assert(u);
1385 pa_assert(u->mixer_path);
1386 pa_assert(u->mixer_handle);
1387 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1388
1389 /* Shift up by the base volume */
1390 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1391
1392 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1393 pa_log_error("Writing HW volume failed");
1394 else {
1395 pa_cvolume tmp_vol;
1396 pa_bool_t accurate_enough;
1397
1398 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1399 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1400
1401 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1402 accurate_enough =
1403 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1404 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1405
1406 if (!accurate_enough) {
1407 union {
1408 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1409 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1410 } vol;
1411
1412 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1413 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1414 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1415 pa_log_debug(" in dB: %s (request) != %s",
1416 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1417 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1418 }
1419 }
1420 }
1421
1422 static void sink_get_mute_cb(pa_sink *s) {
1423 struct userdata *u = s->userdata;
1424 pa_bool_t b;
1425
1426 pa_assert(u);
1427 pa_assert(u->mixer_path);
1428 pa_assert(u->mixer_handle);
1429
1430 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1431 return;
1432
1433 s->muted = b;
1434 }
1435
1436 static void sink_set_mute_cb(pa_sink *s) {
1437 struct userdata *u = s->userdata;
1438
1439 pa_assert(u);
1440 pa_assert(u->mixer_path);
1441 pa_assert(u->mixer_handle);
1442
1443 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1444 }
1445
1446 static void mixer_volume_init(struct userdata *u) {
1447 pa_assert(u);
1448
1449 if (!u->mixer_path->has_volume) {
1450 pa_sink_set_write_volume_callback(u->sink, NULL);
1451 pa_sink_set_get_volume_callback(u->sink, NULL);
1452 pa_sink_set_set_volume_callback(u->sink, NULL);
1453
1454 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1455 } else {
1456 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1457 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1458
1459 if (u->mixer_path->has_dB && u->deferred_volume) {
1460 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1461 pa_log_info("Successfully enabled synchronous volume.");
1462 } else
1463 pa_sink_set_write_volume_callback(u->sink, NULL);
1464
1465 if (u->mixer_path->has_dB) {
1466 pa_sink_enable_decibel_volume(u->sink, TRUE);
1467 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1468
1469 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1470 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1471
1472 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1473 } else {
1474 pa_sink_enable_decibel_volume(u->sink, FALSE);
1475 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1476
1477 u->sink->base_volume = PA_VOLUME_NORM;
1478 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1479 }
1480
1481 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1482 }
1483
1484 if (!u->mixer_path->has_mute) {
1485 pa_sink_set_get_mute_callback(u->sink, NULL);
1486 pa_sink_set_set_mute_callback(u->sink, NULL);
1487 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1488 } else {
1489 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1490 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1491 pa_log_info("Using hardware mute control.");
1492 }
1493 }
1494
1495 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1496 struct userdata *u = s->userdata;
1497 pa_alsa_port_data *data;
1498
1499 pa_assert(u);
1500 pa_assert(p);
1501 pa_assert(u->mixer_handle);
1502
1503 data = PA_DEVICE_PORT_DATA(p);
1504
1505 pa_assert_se(u->mixer_path = data->path);
1506 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1507
1508 mixer_volume_init(u);
1509
1510 if (data->setting)
1511 pa_alsa_setting_select(data->setting, u->mixer_handle);
1512
1513 if (s->set_mute)
1514 s->set_mute(s);
1515 if (s->set_volume)
1516 s->set_volume(s);
1517
1518 return 0;
1519 }
1520
1521 static void sink_update_requested_latency_cb(pa_sink *s) {
1522 struct userdata *u = s->userdata;
1523 size_t before;
1524 pa_assert(u);
1525 pa_assert(u->use_tsched); /* only when timer scheduling is used
1526 * we can dynamically adjust the
1527 * latency */
1528
1529 if (!u->pcm_handle)
1530 return;
1531
1532 before = u->hwbuf_unused;
1533 update_sw_params(u);
1534
1535 /* Let's check whether we now use only a smaller part of the
1536 buffer than before. If so, we need to make sure that subsequent
1537 rewinds are relative to the new maximum fill level and not to the
1538 current fill level. Thus, let's do a full rewind once, to clear
1539 things up. */
1540
1541 if (u->hwbuf_unused > before) {
1542 pa_log_debug("Requesting rewind due to latency change.");
1543 pa_sink_request_rewind(s, (size_t) -1);
1544 }
1545 }
1546
1547 static pa_idxset* sink_get_formats(pa_sink *s) {
1548 struct userdata *u = s->userdata;
1549 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1550 pa_format_info *f;
1551 uint32_t idx;
1552
1553 pa_assert(u);
1554
1555 PA_IDXSET_FOREACH(f, u->formats, idx) {
1556 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1557 }
1558
1559 return ret;
1560 }
1561
1562 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1563 struct userdata *u = s->userdata;
1564 pa_format_info *f;
1565 uint32_t idx;
1566
1567 pa_assert(u);
1568
1569 /* FIXME: also validate sample rates against what the device supports */
1570 PA_IDXSET_FOREACH(f, formats, idx) {
1571 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1572 /* EAC3 cannot be sent over S/PDIF */
1573 return FALSE;
1574 }
1575
1576 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
1577 u->formats = pa_idxset_new(NULL, NULL);
1578
1579 /* Note: the logic below won't apply if we're using software encoding.
1580 * This is fine for now since we don't support that via the passthrough
1581 * framework, but this must be changed if we do. */
1582
1583 /* First insert non-PCM formats since we prefer those. */
1584 PA_IDXSET_FOREACH(f, formats, idx) {
1585 if (!pa_format_info_is_pcm(f))
1586 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1587 }
1588
1589 /* Now add any PCM formats */
1590 PA_IDXSET_FOREACH(f, formats, idx) {
1591 if (pa_format_info_is_pcm(f))
1592 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1593 }
1594
1595 return TRUE;
1596 }
1597
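/* Rewind the hw buffer as far as requested, but never by more than
 * what is actually still queued minus rewind_safeguard (so we stay
 * clear of the hardware read pointer), then tell the sink how much was
 * actually rewound. */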
1598 static int process_rewind(struct userdata *u) {
1599 snd_pcm_sframes_t unused;
1600 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1601 pa_assert(u);
1602
1603 /* Figure out how much we shall rewind and reset the counter */
1604 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1605
1606 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1607
1608 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1609 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1610 return -1;
1611 }
1612
1613 unused_nbytes = (size_t) unused * u->frame_size;
1614
1615 /* make sure the rewind doesn't go too far; rewinding too much can cause issues with DMAs */
1616 unused_nbytes += u->rewind_safeguard;
1617
1618 if (u->hwbuf_size > unused_nbytes)
1619 limit_nbytes = u->hwbuf_size - unused_nbytes;
1620 else
1621 limit_nbytes = 0;
1622
1623 if (rewind_nbytes > limit_nbytes)
1624 rewind_nbytes = limit_nbytes;
1625
1626 if (rewind_nbytes > 0) {
1627 snd_pcm_sframes_t in_frames, out_frames;
1628
1629 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1630
1631 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1632 pa_log_debug("before: %lu", (unsigned long) in_frames);
1633 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1634 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1635 if (try_recover(u, "process_rewind", out_frames) < 0)
1636 return -1;
1637 out_frames = 0;
1638 }
1639
1640 pa_log_debug("after: %lu", (unsigned long) out_frames);
1641
1642 rewind_nbytes = (size_t) out_frames * u->frame_size;
1643
1644 if (rewind_nbytes <= 0)
1645 pa_log_info("Tried rewind, but was apparently not possible.");
1646 else {
1647 u->write_count -= rewind_nbytes;
1648 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1649 pa_sink_process_rewind(u->sink, rewind_nbytes);
1650
1651 u->after_rewind = TRUE;
1652 return 0;
1653 }
1654 } else
1655 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1656
1657 pa_sink_process_rewind(u->sink, 0);
1658 return 0;
1659 }
1660
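/* The IO thread's main loop: process rewind requests, render and write
 * data, update the smoother, program the rtpoll timer for the next
 * wakeup and react to ALSA's poll events. */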
1661 static void thread_func(void *userdata) {
1662 struct userdata *u = userdata;
1663 unsigned short revents = 0;
1664
1665 pa_assert(u);
1666
1667 pa_log_debug("Thread starting up");
1668
1669 if (u->core->realtime_scheduling)
1670 pa_make_realtime(u->core->realtime_priority);
1671
1672 pa_thread_mq_install(&u->thread_mq);
1673
1674 for (;;) {
1675 int ret;
1676 pa_usec_t rtpoll_sleep = 0;
1677
1678 #ifdef DEBUG_TIMING
1679 pa_log_debug("Loop");
1680 #endif
1681
1682 /* Render some data and write it to the dsp */
1683 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1684 int work_done;
1685 pa_usec_t sleep_usec = 0;
1686 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1687
1688 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1689 if (process_rewind(u) < 0)
1690 goto fail;
1691
1692 if (u->use_mmap)
1693 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1694 else
1695 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1696
1697 if (work_done < 0)
1698 goto fail;
1699
1700 /* pa_log_debug("work_done = %i", work_done); */
1701
1702 if (work_done) {
1703
1704 if (u->first) {
1705 pa_log_info("Starting playback.");
1706 snd_pcm_start(u->pcm_handle);
1707
1708 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1709
1710 u->first = FALSE;
1711 }
1712
1713 update_smoother(u);
1714 }
1715
1716 if (u->use_tsched) {
1717 pa_usec_t cusec;
1718
1719 if (u->since_start <= u->hwbuf_size) {
1720
1721 /* USB devices on ALSA seem to hit a buffer
1722 * underrun during the first iterations much
1723 * sooner than we calculate here, probably due to
1724 * the transport latency. To compensate for that
1725 * we artificially decrease the sleep time until
1726 * we have filled the buffer at least once
1727 * completely.*/
1728
1729 if (pa_log_ratelimit(PA_LOG_DEBUG))
1730 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1731 sleep_usec /= 2;
1732 }
1733
1734 /* OK, the playback buffer is now full, let's
1735 * calculate when to wake up next */
1736 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1737
1738 /* Convert from the sound card time domain to the
1739 * system time domain */
1740 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1741
1742 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1743
1744 /* We don't trust the conversion, so we wake up whatever comes first */
1745 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1746 }
1747
1748 u->after_rewind = FALSE;
1749
1750 }
1751
1752 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1753 pa_usec_t volume_sleep;
1754 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1755 if (volume_sleep > 0) {
1756 if (rtpoll_sleep > 0)
1757 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1758 else
1759 rtpoll_sleep = volume_sleep;
1760 }
1761 }
1762
1763 if (rtpoll_sleep > 0)
1764 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1765 else
1766 pa_rtpoll_set_timer_disabled(u->rtpoll);
1767
1768 /* Hmm, nothing to do. Let's sleep */
1769 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1770 goto fail;
1771
1772 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1773 pa_sink_volume_change_apply(u->sink, NULL);
1774
1775 if (ret == 0)
1776 goto finish;
1777
1778 /* Tell ALSA about this and process its response */
1779 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1780 struct pollfd *pollfd;
1781 int err;
1782 unsigned n;
1783
1784 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1785
1786 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1787 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1788 goto fail;
1789 }
1790
1791 if (revents & ~POLLOUT) {
1792 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1793 goto fail;
1794
1795 u->first = TRUE;
1796 u->since_start = 0;
1797 revents = 0;
1798 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1799 pa_log_debug("Wakeup from ALSA!");
1800
1801 } else
1802 revents = 0;
1803 }
1804
1805 fail:
1806 /* If this was not a regular exit from the loop we have to continue
1807 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1808 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1809 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1810
1811 finish:
1812 pa_log_debug("Thread shutting down");
1813 }
1814
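/* Pick the sink name: an explicit "sink_name"/"name" modarg wins;
 * otherwise one is synthesized from the device id/name and the mapping,
 * yielding something like "alsa_output.pci-0000_00_1b.0.analog-stereo"
 * (illustrative). */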
1815 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1816 const char *n;
1817 char *t;
1818
1819 pa_assert(data);
1820 pa_assert(ma);
1821 pa_assert(device_name);
1822
1823 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1824 pa_sink_new_data_set_name(data, n);
1825 data->namereg_fail = TRUE;
1826 return;
1827 }
1828
1829 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1830 data->namereg_fail = TRUE;
1831 else {
1832 n = device_id ? device_id : device_name;
1833 data->namereg_fail = FALSE;
1834 }
1835
1836 if (mapping)
1837 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1838 else
1839 t = pa_sprintf_malloc("alsa_output.%s", n);
1840
1841 pa_sink_new_data_set_name(data, t);
1842 pa_xfree(t);
1843 }
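/* Annotation (hypothetical devices): examples of the names this produces.
 * With device_id "0" and a mapping named "analog-stereo" the sink becomes
 * "alsa_output.0.analog-stereo"; with no mapping and device "hw:0" it becomes
 * "alsa_output.hw:0". An explicit sink_name= modarg is used verbatim, and
 * setting namereg_fail makes registration fail outright rather than fall back
 * to an alternative name if the chosen one is already taken. */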
1844
1845 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1846
1847 if (!mapping && !element)
1848 return;
1849
1850 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1851 pa_log_info("Failed to find a working mixer device.");
1852 return;
1853 }
1854
1855 if (element) {
1856
1857 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1858 goto fail;
1859
1860 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1861 goto fail;
1862
1863 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1864 pa_alsa_path_dump(u->mixer_path);
1865 } else {
1866
1867 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT, u->paths_dir)))
1868 goto fail;
1869
1870 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1871 }
1872
1873 return;
1874
1875 fail:
1876
1877 if (u->mixer_path_set) {
1878 pa_alsa_path_set_free(u->mixer_path_set);
1879 u->mixer_path_set = NULL;
1880 } else if (u->mixer_path) {
1881 pa_alsa_path_free(u->mixer_path);
1882 u->mixer_path = NULL;
1883 }
1884
1885 if (u->mixer_handle) {
1886 snd_mixer_close(u->mixer_handle);
1887 u->mixer_handle = NULL;
1888 }
1889 }
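/* Annotation (usage sketch): passing e.g. control=PCM as a module argument
 * takes the "element" branch above and synthesizes a single output volume
 * path for that ALSA mixer element; without it, the full path set for the
 * mapping is probed, optionally loaded from the directory given by the
 * paths_dir= modarg instead of the built-in path definitions. */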
1890
1891
1892 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1893 pa_bool_t need_mixer_callback = FALSE;
1894
1895 pa_assert(u);
1896
1897 if (!u->mixer_handle)
1898 return 0;
1899
1900 if (u->sink->active_port) {
1901 pa_alsa_port_data *data;
1902
1903 /* We have a list of supported paths, so let's activate the
1904 * one that has been chosen as active */
1905
1906 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1907 u->mixer_path = data->path;
1908
1909 pa_alsa_path_select(data->path, u->mixer_handle);
1910
1911 if (data->setting)
1912 pa_alsa_setting_select(data->setting, u->mixer_handle);
1913
1914 } else {
1915
1916 if (!u->mixer_path && u->mixer_path_set)
1917 u->mixer_path = u->mixer_path_set->paths;
1918
1919 if (u->mixer_path) {
1920 /* Hmm, we have only a single path, so let's activate it */
1921
1922 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1923
1924 if (u->mixer_path->settings)
1925 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1926 } else
1927 return 0;
1928 }
1929
1930 mixer_volume_init(u);
1931
1932 /* Will we need to register callbacks? */
1933 if (u->mixer_path_set && u->mixer_path_set->paths) {
1934 pa_alsa_path *p;
1935
1936 PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1937 if (p->has_volume || p->has_mute)
1938 need_mixer_callback = TRUE;
1939 }
1940 }
1941 else if (u->mixer_path)
1942 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1943
1944 if (need_mixer_callback) {
1945 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1946 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1947 u->mixer_pd = pa_alsa_mixer_pdata_new();
1948 mixer_callback = io_mixer_callback;
1949
1950 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1951 pa_log("Failed to initialize file descriptor monitoring");
1952 return -1;
1953 }
1954 } else {
1955 u->mixer_fdl = pa_alsa_fdlist_new();
1956 mixer_callback = ctl_mixer_callback;
1957
1958 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1959 pa_log("Failed to initialize file descriptor monitoring");
1960 return -1;
1961 }
1962 }
1963
1964 if (u->mixer_path_set)
1965 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1966 else
1967 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1968 }
1969
1970 return 0;
1971 }
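/* Annotation (design note): with PA_SINK_DEFERRED_VOLUME the mixer is watched
 * from the IO thread through the rtpoll (io_mixer_callback), so volume changes
 * can be applied in sync with the audio timing; otherwise the mixer fds are
 * watched from the main loop (ctl_mixer_callback). Either way the callback is
 * attached to every probed path, or to the single synthesized path. */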
1972
1973 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1974
1975 struct userdata *u = NULL;
1976 const char *dev_id = NULL;
1977 pa_sample_spec ss;
1978 pa_channel_map map;
1979 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1980 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1981 size_t frame_size;
1982 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE;
1983 pa_sink_new_data data;
1984 pa_alsa_profile_set *profile_set = NULL;
1985
1986 pa_assert(m);
1987 pa_assert(ma);
1988
1989 ss = m->core->default_sample_spec;
1990 map = m->core->default_channel_map;
1991 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1992 pa_log("Failed to parse sample specification and channel map");
1993 goto fail;
1994 }
1995
1996 frame_size = pa_frame_size(&ss);
1997
1998 nfrags = m->core->default_n_fragments;
1999 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2000 if (frag_size == 0)
2001 frag_size = (uint32_t) frame_size;
2002 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2003 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2004
2005 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2006 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2007 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2008 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2009 pa_log("Failed to parse buffer metrics");
2010 goto fail;
2011 }
2012
2013 buffer_size = nfrags * frag_size;
2014
2015 period_frames = frag_size/frame_size;
2016 buffer_frames = buffer_size/frame_size;
2017 tsched_frames = tsched_size/frame_size;
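/* Annotation (worked example, assuming the usual defaults of 4 fragments of
 * 25 ms each, S16LE stereo at 48 kHz, i.e. frame_size = 4 bytes):
 * frag_size = 25 ms -> 1200 frames -> 4800 bytes, so buffer_size =
 * 4 * 4800 = 19200 bytes, period_frames = 1200 and buffer_frames = 4800.
 * The tsched sizes are computed the same way from the 2 s buffer and
 * 20 ms watermark defaults. */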
2018
2019 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2020 pa_log("Failed to parse mmap argument.");
2021 goto fail;
2022 }
2023
2024 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2025 pa_log("Failed to parse tsched argument.");
2026 goto fail;
2027 }
2028
2029 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2030 pa_log("Failed to parse ignore_dB argument.");
2031 goto fail;
2032 }
2033
2034 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2035 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2036 pa_log("Failed to parse rewind_safeguard argument");
2037 goto fail;
2038 }
2039
2040 deferred_volume = m->core->deferred_volume;
2041 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2042 pa_log("Failed to parse deferred_volume argument.");
2043 goto fail;
2044 }
2045
2046 use_tsched = pa_alsa_may_tsched(use_tsched);
2047
2048 u = pa_xnew0(struct userdata, 1);
2049 u->core = m->core;
2050 u->module = m;
2051 u->use_mmap = use_mmap;
2052 u->use_tsched = use_tsched;
2053 u->deferred_volume = deferred_volume;
2054 u->first = TRUE;
2055 u->rewind_safeguard = rewind_safeguard;
2056 u->rtpoll = pa_rtpoll_new();
2057 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2058
2059 u->smoother = pa_smoother_new(
2060 SMOOTHER_ADJUST_USEC,
2061 SMOOTHER_WINDOW_USEC,
2062 TRUE,
2063 TRUE,
2064 5,
2065 pa_rtclock_now(),
2066 TRUE);
2067 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2068
2069 dev_id = pa_modargs_get_value(
2070 ma, "device_id",
2071 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2072
2073 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2074
2075 if (reserve_init(u, dev_id) < 0)
2076 goto fail;
2077
2078 if (reserve_monitor_init(u, dev_id) < 0)
2079 goto fail;
2080
2081 b = use_mmap;
2082 d = use_tsched;
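/* Annotation: b and d are in/out flags; on return from the open helpers
 * below they report whether mmap and timer-based scheduling, respectively,
 * could actually be enabled, which is checked further down. */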
2083
2084 if (mapping) {
2085
2086 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2087 pa_log("device_id= not set");
2088 goto fail;
2089 }
2090
2091 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2092 dev_id,
2093 &u->device_name,
2094 &ss, &map,
2095 SND_PCM_STREAM_PLAYBACK,
2096 &period_frames, &buffer_frames, tsched_frames,
2097 &b, &d, mapping)))
2098 goto fail;
2099
2100 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2101
2102 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2103 goto fail;
2104
2105 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2106 dev_id,
2107 &u->device_name,
2108 &ss, &map,
2109 SND_PCM_STREAM_PLAYBACK,
2110 &period_frames, &buffer_frames, tsched_frames,
2111 &b, &d, profile_set, &mapping)))
2112 goto fail;
2113
2114 } else {
2115
2116 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2117 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2118 &u->device_name,
2119 &ss, &map,
2120 SND_PCM_STREAM_PLAYBACK,
2121 &period_frames, &buffer_frames, tsched_frames,
2122 &b, &d, FALSE)))
2123 goto fail;
2124 }
2125
2126 pa_assert(u->device_name);
2127 pa_log_info("Successfully opened device %s.", u->device_name);
2128
2129 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2130 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
2131 goto fail;
2132 }
2133
2134 if (mapping)
2135 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2136
2137 if (use_mmap && !b) {
2138 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2139 u->use_mmap = use_mmap = FALSE;
2140 }
2141
2142 if (use_tsched && (!b || !d)) {
2143 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2144 u->use_tsched = use_tsched = FALSE;
2145 }
2146
2147 if (u->use_mmap)
2148 pa_log_info("Successfully enabled mmap() mode.");
2149
2150 if (u->use_tsched)
2151 pa_log_info("Successfully enabled timer-based scheduling mode.");
2152
2153 if (is_iec958(u) || is_hdmi(u))
2154 set_formats = TRUE;
2155
2156 /* ALSA might tweak the sample spec, so recalculate the frame size */
2157 frame_size = pa_frame_size(&ss);
2158
2159 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2160
2161 pa_sink_new_data_init(&data);
2162 data.driver = driver;
2163 data.module = m;
2164 data.card = card;
2165 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2166
2167 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2168 * variable instead of using &data.namereg_fail directly, because
2169 * data.namereg_fail is a bitfield, and taking the address of a bitfield
2170 * is not allowed in C. */
2171 namereg_fail = data.namereg_fail;
2172 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2173 pa_log("Failed to parse namereg_fail argument.");
2174 pa_sink_new_data_done(&data);
2175 goto fail;
2176 }
2177 data.namereg_fail = namereg_fail;
2178
2179 pa_sink_new_data_set_sample_spec(&data, &ss);
2180 pa_sink_new_data_set_channel_map(&data, &map);
2181
2182 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2183 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2184 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2185 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2186 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2187
2188 if (mapping) {
2189 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2190 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2191 }
2192
2193 pa_alsa_init_description(data.proplist);
2194
2195 if (u->control_device)
2196 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2197
2198 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2199 pa_log("Invalid properties");
2200 pa_sink_new_data_done(&data);
2201 goto fail;
2202 }
2203
2204 if (u->mixer_path_set)
2205 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
2206
2207 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2208 (set_formats ? PA_SINK_SET_FORMATS : 0));
2209 pa_sink_new_data_done(&data);
2210
2211 if (!u->sink) {
2212 pa_log("Failed to create sink object");
2213 goto fail;
2214 }
2215
2216 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2217 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2218 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2219 goto fail;
2220 }
2221
2222 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2223 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2224 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2225 goto fail;
2226 }
2227
2228 u->sink->parent.process_msg = sink_process_msg;
2229 if (u->use_tsched)
2230 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2231 u->sink->set_state = sink_set_state_cb;
2232 u->sink->set_port = sink_set_port_cb;
2233 u->sink->userdata = u;
2234
2235 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2236 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2237
2238 u->frame_size = frame_size;
2239 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2240 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2241 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2242
2243 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2244 (double) u->hwbuf_size / (double) u->fragment_size,
2245 (long unsigned) u->fragment_size,
2246 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2247 (long unsigned) u->hwbuf_size,
2248 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2249
2250 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2251 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2252 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2253 else {
2254 pa_log_info("Disabling rewind for device %s", u->device_name);
2255 pa_sink_set_max_rewind(u->sink, 0);
2256 }
2257
2258 if (u->use_tsched) {
2259 u->tsched_watermark_ref = tsched_watermark;
2260 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2261 } else
2262 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
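/* Annotation (continuing the illustrative 19200-byte example above): without
 * timer-based scheduling the sink reports a fixed latency of
 * 4800 frames / 48000 Hz = 100 ms; with tsched the latency is dynamic and
 * bounded by the watermark logic instead. */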
2263
2264 reserve_update(u);
2265
2266 if (update_sw_params(u) < 0)
2267 goto fail;
2268
2269 if (setup_mixer(u, ignore_dB) < 0)
2270 goto fail;
2271
2272 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2273
2274 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2275 pa_log("Failed to create thread.");
2276 goto fail;
2277 }
2278
2279 /* Get initial mixer settings */
2280 if (data.volume_is_set) {
2281 if (u->sink->set_volume)
2282 u->sink->set_volume(u->sink);
2283 } else {
2284 if (u->sink->get_volume)
2285 u->sink->get_volume(u->sink);
2286 }
2287
2288 if (data.muted_is_set) {
2289 if (u->sink->set_mute)
2290 u->sink->set_mute(u->sink);
2291 } else {
2292 if (u->sink->get_mute)
2293 u->sink->get_mute(u->sink);
2294 }
2295
2296 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2297 u->sink->write_volume(u->sink);
2298
2299 if (set_formats) {
2300 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2301 pa_format_info *format;
2302
2303 /* To start with, we only support PCM formats. Other formats may be added
2304 * with pa_sink_set_formats(). */
2305 format = pa_format_info_new();
2306 format->encoding = PA_ENCODING_PCM;
2307 u->formats = pa_idxset_new(NULL, NULL);
2308 pa_idxset_put(u->formats, format, NULL);
2309
2310 u->sink->get_formats = sink_get_formats;
2311 u->sink->set_formats = sink_set_formats;
2312 }
2313
2314 pa_sink_put(u->sink);
2315
2316 if (profile_set)
2317 pa_alsa_profile_set_free(profile_set);
2318
2319 return u->sink;
2320
2321 fail:
2322
2323 if (u)
2324 userdata_free(u);
2325
2326 if (profile_set)
2327 pa_alsa_profile_set_free(profile_set);
2328
2329 return NULL;
2330 }
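/* Annotation (usage sketch with hypothetical values; the accepted argument
 * set is defined by the loading module, e.g. module-alsa-sink or
 * module-alsa-card):
 *
 *   load-module module-alsa-sink device_id=0 sink_name=my_sink tsched=yes fragments=4 fragment_size=4800 paths_dir=/etc/pulse/paths
 *
 * Every modarg read above -- device, device_id, mmap, tsched, ignore_dB,
 * rewind_safeguard, deferred_volume, control, sink_properties, ... --
 * can be passed the same way. */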
2331
2332 static void userdata_free(struct userdata *u) {
2333 pa_assert(u);
2334
2335 if (u->sink)
2336 pa_sink_unlink(u->sink);
2337
2338 if (u->thread) {
2339 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2340 pa_thread_free(u->thread);
2341 }
2342
2343 pa_thread_mq_done(&u->thread_mq);
2344
2345 if (u->sink)
2346 pa_sink_unref(u->sink);
2347
2348 if (u->memchunk.memblock)
2349 pa_memblock_unref(u->memchunk.memblock);
2350
2351 if (u->mixer_pd)
2352 pa_alsa_mixer_pdata_free(u->mixer_pd);
2353
2354 if (u->alsa_rtpoll_item)
2355 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2356
2357 if (u->rtpoll)
2358 pa_rtpoll_free(u->rtpoll);
2359
2360 if (u->pcm_handle) {
2361 snd_pcm_drop(u->pcm_handle);
2362 snd_pcm_close(u->pcm_handle);
2363 }
2364
2365 if (u->mixer_fdl)
2366 pa_alsa_fdlist_free(u->mixer_fdl);
2367
2368 if (u->mixer_path_set)
2369 pa_alsa_path_set_free(u->mixer_path_set);
2370 else if (u->mixer_path)
2371 pa_alsa_path_free(u->mixer_path);
2372
2373 if (u->mixer_handle)
2374 snd_mixer_close(u->mixer_handle);
2375
2376 if (u->smoother)
2377 pa_smoother_free(u->smoother);
2378
2379 if (u->formats)
2380 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2381
2382 reserve_done(u);
2383 monitor_done(u);
2384
2385 pa_xfree(u->device_name);
2386 pa_xfree(u->control_device);
2387 pa_xfree(u->paths_dir);
2388 pa_xfree(u);
2389 }
2390
2391 void pa_alsa_sink_free(pa_sink *s) {
2392 struct userdata *u;
2393
2394 pa_sink_assert_ref(s);
2395 pa_assert_se(u = s->userdata);
2396
2397 userdata_free(u);
2398 }