/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* Don't require volume adjustments to be perfectly correct. Don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms; depending on channels/rate/sample format we may rewind more than the 256 bytes above */
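
/*
 * Worked example for the conversions above -- illustrative only, assuming a
 * hypothetical 48 kHz, 2-channel, S16LE stream (4 bytes per frame):
 *
 *     pa_sample_spec ss = { .format = PA_SAMPLE_S16LE, .rate = 48000, .channels = 2 };
 *
 *     pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss)    == 384000 bytes (2 s)
 *     pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss) ==   3840 bytes (20 ms)
 *     pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &ss)         ==   1920 bytes (10 ms)
 *     pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &ss)        ==    768 bytes (4 ms)
 */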

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static pa_bool_t is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static pa_bool_t is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}
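
/* Both helpers are plain prefix matches on the ALSA device string, so
 * hypothetical names like "iec958:CARD=0,DEV=0" or "hdmi:CARD=0" match,
 * while e.g. "front:CARD=0" does not. */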

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_bool_t b;

    pa_assert(w);
    pa_assert(u);

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}
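
/*
 * Sketch of the clamping above with assumed numbers: for the hypothetical
 * 48 kHz/2ch/S16LE stream from before, hwbuf_size = 384000 bytes and
 * hwbuf_unused = 0 give max_use = 384000, so the watermark may move within
 * [min_wakeup, max_use - min_sleep] = [768, 382080] bytes, i.e. roughly
 * between 4 ms and 1.99 s.
 */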

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially screwed! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
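
/*
 * Example split (assumed values): a requested latency of 2 s and a 20 ms
 * watermark yield *sleep_usec = 1.98 s and *process_usec = 20 ms -- we sleep
 * until the buffer is down to the watermark and reserve the rest of the
 * budget for refilling it.
 */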

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
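
/*
 * Example (assumed values): with hwbuf_size = 384000 bytes, an avail of
 * n_bytes = 380160 bytes means left_to_play = 3840 bytes, i.e. 20 ms of
 * audio still queued; an avail larger than the buffer itself can only mean
 * the hardware has already consumed everything, i.e. an underrun.
 */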

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }


        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
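
/*
 * The inner loop above is the standard ALSA mmap transfer cycle; stripped of
 * the recovery and bookkeeping, a minimal sketch of one pass looks like this:
 *
 *     snd_pcm_sframes_t n = snd_pcm_avail(pcm);            // room in the hw buffer
 *     const snd_pcm_channel_area_t *areas;
 *     snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
 *
 *     snd_pcm_mmap_begin(pcm, &areas, &offset, &frames);   // map a writable region
 *     // render into (uint8_t*) areas[0].addr + offset * frame_size
 *     snd_pcm_mmap_commit(pcm, offset, frames);            // hand it to the hardware
 */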

static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}

static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
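
/*
 * Example (assumed values): if 5.5 s worth of samples have been written since
 * start (write_count) and the smoother estimates that 4.5 s of them have been
 * played by now, the reported latency is the 1 s difference, plus whatever is
 * still sitting in u->memchunk on the non-mmap path.
 */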

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can't influence the per-stream buffers of newly
     * created streams, and that their requirements in turn have no
     * influence on these values. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}

/* Called from IO context on unsuspend or from main thread when creating sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work around an assert in pa_sink_set_latency_range_within_thread:
           keep track of min_latency and reuse it when
           this routine is called from the IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    /* reset the watermark to the value defined when sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
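
/*
 * Example for the dB case above (assumed numbers): if the user requests
 * -10.0 dB but the closest mixer step is -9.0 dB, the remaining -1.0 dB goes
 * into s->soft_volume; if the required software correction lies within
 * VOLUME_ACCURACY (1% of PA_VOLUME_NORM) of unity gain, it is skipped
 * entirely.
 */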

static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("                                           in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled synchronous volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    mixer_volume_init(u);

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* we can adjust the latency dynamically
                               * only when timer scheduling is used */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f))
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f))
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
    }

    return TRUE;
}

static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
{
    struct userdata *u = s->userdata;
    pa_assert(u);

    if (!PA_SINK_IS_OPENED(s->state)) {
        pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
        u->sink->sample_spec.rate = rate;
        return TRUE;
    }
    return FALSE;
}

static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure the rewind doesn't go too far; that can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
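
/*
 * Example of the limit computation (assumed values): with a 384000 byte
 * buffer of which 192000 bytes are currently unused and a 256 byte
 * safeguard, at most 384000 - (192000 + 256) = 191744 bytes may be rewound,
 * no matter how much the sink asked for.
 */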

static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To compensate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;

        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = TRUE;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = FALSE;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}
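
/*
 * Example (hypothetical values): for device_id "pci-0000_00_1b.0" and a
 * mapping named "analog-stereo" this yields the familiar
 * "alsa_output.pci-0000_00_1b.0.analog-stereo"; with sink_name=foo given as
 * a module argument, "foo" is used verbatim instead.
 */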
1805
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT, u->paths_dir)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
    }

    return;

fail:

    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}


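/* Activate the mixer path belonging to the sink's active port (or the
 * only path we have), initialize volume/mute handling, and register the
 * mixer event callback: with deferred volume the mixer is polled from
 * the IO thread's rtpoll, otherwise from the main loop via an fd list. */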
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;

        PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    } else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}

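/* Create a new ALSA sink from the given module arguments. A minimal
 * sketch of a typical invocation (illustrative values only):
 *
 *   load-module module-alsa-sink device=hw:0 tsched=yes fragments=4 fragment_size=4096
 *
 * All of these arguments are optional; unset values fall back to the
 * core defaults resolved below. */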
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size == 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

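    /* Illustrative arithmetic, assuming S16LE stereo at 48 kHz (4 bytes
     * per frame) and 4 fragments of 25 ms each: frag_size = 4800 bytes,
     * so buffer_size = 4 * 4800 = 19200 bytes, period_frames = 1200 and
     * buffer_frames = 4800, i.e. a 100 ms hardware buffer. */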
    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

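    /* Three ways to open the PCM device: a concrete mapping from a card
     * profile, automatic mapping selection for a bare device_id=, or a
     * plain ALSA device string. The open helpers may clear b/d if mmap
     * or tsched turn out to be unsupported. */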
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    if (is_iec958(u) || is_hdmi(u))
        set_formats = TRUE;

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);
    pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
                          (set_formats ? PA_SINK_SET_FORMATS : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    if (u->sink->alternate_sample_rate)
        u->sink->update_rate = sink_update_rate_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

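    /* With timer-based scheduling latency is dynamic and governed by the
     * watermark; without it the sink's latency is fixed at the size of
     * the whole hardware buffer. */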
    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);

    if (set_formats) {
        /* For S/PDIF and HDMI, allow getting/setting custom formats */
        pa_format_info *format;

        /* To start with, we only support PCM formats. Other formats may be added
         * with pa_sink_set_formats(). */
        format = pa_format_info_new();
        format->encoding = PA_ENCODING_PCM;
        u->formats = pa_idxset_new(NULL, NULL);
        pa_idxset_put(u->formats, format, NULL);

        u->sink->get_formats = sink_get_formats;
        u->sink->set_formats = sink_set_formats;
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

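/* Tear down in dependency order: unlink the sink so no new data is
 * routed to it, shut down the IO thread, then free the remaining
 * ALSA and PulseAudio resources. */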
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->formats)
        pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}