/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold during the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */
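
/* A rough worked example of these dynamics with the defaults above (see
 * increase_watermark()/decrease_watermark() below): starting at the 20ms
 * default, an underrun moves the watermark to MIN(2*20ms, 20ms+10ms) = 30ms,
 * a second one to 40ms, and so on. If the buffer level then stays above
 * 100ms for the full 20s verification window, the watermark shrinks again
 * to MAX(40ms/2, 40ms-5ms) = 35ms -- growth is fast, decay deliberately
 * slow. */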

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
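
/* The byte figure above assumes S16LE stereo: 256 bytes / 4 bytes per frame
 * = 64 frames, and 64/48000 s = ~1.33ms. For fatter frames (more channels,
 * wider samples) the usec-based bound is the larger of the two;
 * pa_alsa_sink_new() below takes the maximum of both values. */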

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    uint32_t old_rate;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, sync_volume:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static pa_bool_t is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static pa_bool_t is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_bool_t b;

    pa_assert(w);
    pa_assert(u);

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}
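
/* Together, fix_min_sleep_wakeup() and fix_tsched_watermark() maintain the
 * invariant (with max_use = hwbuf_size - hwbuf_unused):
 *
 *     min_wakeup <= tsched_watermark <= max_use - min_sleep
 *
 * so the timer path always has at least min_wakeup of headroom before an
 * underrun and can always sleep for at least min_sleep. */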

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
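
/* A quick numeric sketch: with no client latency request the full 2s buffer
 * is used, so with a 20ms watermark this yields *sleep_usec = 1980ms and
 * *process_usec = 20ms, i.e. we plan to sleep until 20ms before the buffer
 * would run dry. If the watermark ever exceeds the requested latency it is
 * capped at half of it instead. */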

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
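
/* The asymmetry above is deliberate: the watermark is raised the moment we
 * underrun (or dip below watermark_inc_threshold, which is 0 by default, so
 * only on real underruns), but it is lowered only after the fill level has
 * stayed above watermark_dec_threshold for a whole
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC window, and only on genuine timer
 * wakeups -- a simple hysteresis against oscillation. */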

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
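
/* Note how the final sleep time is derived from the actual fill level rather
 * than from hw_sleep_time(): we sleep for left_to_play minus the watermark,
 * so the next wakeup lands when the buffer is expected to have drained down
 * to exactly the watermark. */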

static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
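
/* In other words, the smoother is fed pairs of (system time, playback time),
 * where playback time is write_count minus whatever ALSA still holds:
 * position = write_count - delay * frame_size. Updates start
 * SMOOTHER_MIN_INTERVAL (2ms) apart and back off exponentially to
 * SMOOTHER_MAX_INTERVAL (200ms) once the estimate has settled. */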

static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
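
/* That is: latency = (total usec written) - (smoothed usec played), clamped
 * at zero, plus anything still sitting in the partial memchunk that
 * unix_write() has rendered but not yet handed to ALSA. */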

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend don't influence the per-stream buffer of newly
     * created streams, and that those streams' requirements in turn
     * have no influence on us while the device is closed. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
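
/* Example of the avail_min arithmetic above, assuming a 2s hw buffer and a
 * client asking for 500ms of latency: hwbuf_unused then covers the remaining
 * 1.5s, avail_min starts at that much plus one frame, and in tsched mode the
 * expected sleep time (latency minus watermark) is added on top -- so the
 * hardware only raises POLLOUT when the fill level has dropped to roughly
 * the watermark, acting as a safety net under the timer. */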

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_FINISH_MOVE:
        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            u->old_rate = u->sink->sample_spec.rate;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == i->thread_info.sample_spec.rate)
                break;

            /* .. we do */
            if ((r = suspend(u)) < 0)
                return r;

            u->sink->sample_spec.rate = i->thread_info.sample_spec.rate;

            if ((r = unsuspend(u)) < 0)
                return r;

            break;
        }

        case PA_SINK_MESSAGE_START_MOVE:
        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == u->old_rate)
                break;

            /* .. we do */
            if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = suspend(u)) < 0))
                return r;

            u->sink->sample_spec.rate = u->old_rate;

            if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = unsuspend(u)) < 0))
                return r;

            break;
        }

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t sync_volume = !!(s->flags & PA_SINK_SYNC_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_SYNC_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->sync_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled synchronous volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
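
/* A worked example of the base-volume convention above: if the mixer path
 * spans, say, -60dB..+10dB, base_volume becomes pa_sw_volume_from_dB(-10).
 * After the "shift down" in the volume callbacks, the hardware maximum
 * (+10dB) maps to PA_VOLUME_NORM and the hardware 0dB point is reported as
 * the base volume, 10dB below that. (The range here is illustrative only,
 * not something this driver assumes.) */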

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    mixer_volume_init(u);

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
    u->formats = pa_idxset_new(NULL, NULL);

    PA_IDXSET_FOREACH(f, formats, idx) {
        pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
    }

    return TRUE;
}

static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
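
/* Rewind arithmetic in numbers: with a 64KB hw buffer, 16KB of free space
 * reported by snd_pcm_avail() and the 256-byte safeguard, at most
 * 64KB - (16KB + 256B) of the still-queued audio may be taken back, so the
 * bytes closest to the DMA read pointer are never touched. */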
1591
1592 static void thread_func(void *userdata) {
1593 struct userdata *u = userdata;
1594 unsigned short revents = 0;
1595
1596 pa_assert(u);
1597
1598 pa_log_debug("Thread starting up");
1599
1600 if (u->core->realtime_scheduling)
1601 pa_make_realtime(u->core->realtime_priority);
1602
1603 pa_thread_mq_install(&u->thread_mq);
1604
1605 for (;;) {
1606 int ret;
1607 pa_usec_t rtpoll_sleep = 0;
1608
1609 #ifdef DEBUG_TIMING
1610 pa_log_debug("Loop");
1611 #endif
1612
1613 /* Render some data and write it to the dsp */
1614 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1615 int work_done;
1616 pa_usec_t sleep_usec = 0;
1617 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1618
1619 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1620 if (process_rewind(u) < 0)
1621 goto fail;
1622
1623 if (u->use_mmap)
1624 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1625 else
1626 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1627
1628 if (work_done < 0)
1629 goto fail;
1630
1631 /* pa_log_debug("work_done = %i", work_done); */
1632
1633 if (work_done) {
1634
1635 if (u->first) {
1636 pa_log_info("Starting playback.");
1637 snd_pcm_start(u->pcm_handle);
1638
1639 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1640
1641 u->first = FALSE;
1642 }
1643
1644 update_smoother(u);
1645 }
1646
1647 if (u->use_tsched) {
1648 pa_usec_t cusec;
1649
1650 if (u->since_start <= u->hwbuf_size) {
1651
1652 /* USB devices on ALSA seem to hit a buffer
1653 * underrun during the first iterations much
1654 * quicker then we calculate here, probably due to
1655 * the transport latency. To accommodate for that
1656 * we artificially decrease the sleep time until
1657 * we have filled the buffer at least once
1658 * completely.*/
1659
1660 if (pa_log_ratelimit(PA_LOG_DEBUG))
1661 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1662 sleep_usec /= 2;
1663 }
1664
1665 /* OK, the playback buffer is now full, let's
1666 * calculate when to wake up next */
1667 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1668
1669 /* Convert from the sound card time domain to the
1670 * system time domain */
1671 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1672
1673 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1674
1675 /* We don't trust the conversion, so we wake up whatever comes first */
1676 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1677 }
1678
1679 u->after_rewind = FALSE;
1680
1681 }
1682
1683 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1684 pa_usec_t volume_sleep;
1685 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1686 if (volume_sleep > 0)
1687 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1688 }
1689
1690 if (rtpoll_sleep > 0)
1691 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1692 else
1693 pa_rtpoll_set_timer_disabled(u->rtpoll);
1694
1695 /* Hmm, nothing to do. Let's sleep */
1696 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1697 goto fail;
1698
1699 if (u->sink->flags & PA_SINK_SYNC_VOLUME)
1700 pa_sink_volume_change_apply(u->sink, NULL);
1701
1702 if (ret == 0)
1703 goto finish;
1704
1705 /* Tell ALSA about this and process its response */
1706 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1707 struct pollfd *pollfd;
1708 int err;
1709 unsigned n;
1710
1711 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1712
1713 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1714 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1715 goto fail;
1716 }
1717
1718 if (revents & ~POLLOUT) {
1719 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1720 goto fail;
1721
1722 u->first = TRUE;
1723 u->since_start = 0;
1724 revents = 0;
1725 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1726 pa_log_debug("Wakeup from ALSA!");
1727
1728 } else
1729 revents = 0;
1730 }
1731
1732 fail:
1733 /* If this was no regular exit from the loop we have to continue
1734 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1735 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1736 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1737
1738 finish:
1739 pa_log_debug("Thread shutting down");
1740 }
1741
1742 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1743 const char *n;
1744 char *t;
1745
1746 pa_assert(data);
1747 pa_assert(ma);
1748 pa_assert(device_name);
1749
1750 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1751 pa_sink_new_data_set_name(data, n);
1752 data->namereg_fail = TRUE;
1753 return;
1754 }
1755
1756 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1757 data->namereg_fail = TRUE;
1758 else {
1759 n = device_id ? device_id : device_name;
1760 data->namereg_fail = FALSE;
1761 }
1762
1763 if (mapping)
1764 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1765 else
1766 t = pa_sprintf_malloc("alsa_output.%s", n);
1767
1768 pa_sink_new_data_set_name(data, t);
1769 pa_xfree(t);
1770 }
1771
1772 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1773
1774 if (!mapping && !element)
1775 return;
1776
1777 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1778 pa_log_info("Failed to find a working mixer device.");
1779 return;
1780 }
1781
1782 if (element) {
1783
1784 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1785 goto fail;
1786
1787 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1788 goto fail;
1789
1790 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1791 pa_alsa_path_dump(u->mixer_path);
1792 } else {
1793
1794 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1795 goto fail;
1796
1797 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1798 }
1799
1800 return;
1801
1802 fail:
1803
1804 if (u->mixer_path_set) {
1805 pa_alsa_path_set_free(u->mixer_path_set);
1806 u->mixer_path_set = NULL;
1807 } else if (u->mixer_path) {
1808 pa_alsa_path_free(u->mixer_path);
1809 u->mixer_path = NULL;
1810 }
1811
1812 if (u->mixer_handle) {
1813 snd_mixer_close(u->mixer_handle);
1814 u->mixer_handle = NULL;
1815 }
1816 }
1817
1818
1819 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1820 pa_bool_t need_mixer_callback = FALSE;
1821
1822 pa_assert(u);
1823
1824 if (!u->mixer_handle)
1825 return 0;
1826
1827 if (u->sink->active_port) {
1828 pa_alsa_port_data *data;
1829
1830 /* We have a list of supported paths, so let's activate the
1831 * one that has been chosen as active */
1832
1833 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1834 u->mixer_path = data->path;
1835
1836 pa_alsa_path_select(data->path, u->mixer_handle);
1837
1838 if (data->setting)
1839 pa_alsa_setting_select(data->setting, u->mixer_handle);
1840
1841 } else {
1842
1843 if (!u->mixer_path && u->mixer_path_set)
1844 u->mixer_path = u->mixer_path_set->paths;
1845
1846 if (u->mixer_path) {
1847 /* Hmm, we have only a single path, then let's activate it */
1848
1849 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1850
1851 if (u->mixer_path->settings)
1852 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1853 } else
1854 return 0;
1855 }
1856
1857 mixer_volume_init(u);
1858
1859 /* Will we need to register callbacks? */
1860 if (u->mixer_path_set && u->mixer_path_set->paths) {
1861 pa_alsa_path *p;
1862
1863 PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1864 if (p->has_volume || p->has_mute)
1865 need_mixer_callback = TRUE;
1866 }
1867 }
1868 else if (u->mixer_path)
1869 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1870
1871 if (need_mixer_callback) {
1872 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1873 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1874 u->mixer_pd = pa_alsa_mixer_pdata_new();
1875 mixer_callback = io_mixer_callback;
1876
1877 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1878 pa_log("Failed to initialize file descriptor monitoring");
1879 return -1;
1880 }
1881 } else {
1882 u->mixer_fdl = pa_alsa_fdlist_new();
1883 mixer_callback = ctl_mixer_callback;
1884
1885 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1886 pa_log("Failed to initialize file descriptor monitoring");
1887 return -1;
1888 }
1889 }
1890
1891 if (u->mixer_path_set)
1892 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1893 else
1894 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1895 }
1896
1897 return 0;
1898 }
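
/* A sketch (illustration only) of the callback shape registered above; the
 * real io_mixer_callback/ctl_mixer_callback are defined earlier in this
 * file: */
#if 0
static int example_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        /* The element changed behind our back (e.g. alsamixer moved the
         * hardware volume) -- re-read volume/mute from the element here. */
    }

    return 0;
}
#endif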
1899
1900 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1901
1902 struct userdata *u = NULL;
1903 const char *dev_id = NULL;
1904 pa_sample_spec ss, requested_ss;
1905 pa_channel_map map;
1906 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1907 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1908 size_t frame_size;
1909 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE, set_formats = FALSE;
1910 pa_sink_new_data data;
1911 pa_alsa_profile_set *profile_set = NULL;
1912
1913 pa_assert(m);
1914 pa_assert(ma);
1915
1916 ss = m->core->default_sample_spec;
1917 map = m->core->default_channel_map;
1918 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1919 pa_log("Failed to parse sample specification and channel map");
1920 goto fail;
1921 }
1922
1923 requested_ss = ss;
1924 frame_size = pa_frame_size(&ss);
1925
1926 nfrags = m->core->default_n_fragments;
1927 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1928     if (frag_size == 0)
1929 frag_size = (uint32_t) frame_size;
1930 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1931 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1932
1933 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1934 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1935 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1936 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1937 pa_log("Failed to parse buffer metrics");
1938 goto fail;
1939 }
1940
1941 buffer_size = nfrags * frag_size;
1942
1943 period_frames = frag_size/frame_size;
1944 buffer_frames = buffer_size/frame_size;
1945 tsched_frames = tsched_size/frame_size;
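
     /* Worked example (purely illustrative, assuming S16LE 44100 Hz stereo,
      * i.e. frame_size = 4 bytes, with 4 fragments of 25 ms):
      *   frag_size   = 4410 bytes  -> period_frames = 1102
      *   buffer_size = 17640 bytes -> buffer_frames = 4410 (100 ms)
      *   tsched_size = 2 s = 352800 bytes -> tsched_frames = 88200 */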
1946
1947 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1948 pa_log("Failed to parse mmap argument.");
1949 goto fail;
1950 }
1951
1952 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1953 pa_log("Failed to parse tsched argument.");
1954 goto fail;
1955 }
1956
1957 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1958 pa_log("Failed to parse ignore_dB argument.");
1959 goto fail;
1960 }
1961
1962 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
1963 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
1964         pa_log("Failed to parse rewind_safeguard argument.");
1965 goto fail;
1966 }
1967
1968 sync_volume = m->core->sync_volume;
1969 if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
1970 pa_log("Failed to parse sync_volume argument.");
1971 goto fail;
1972 }
1973
1974 use_tsched = pa_alsa_may_tsched(use_tsched);
1975
1976 u = pa_xnew0(struct userdata, 1);
1977 u->core = m->core;
1978 u->module = m;
1979 u->use_mmap = use_mmap;
1980 u->use_tsched = use_tsched;
1981 u->sync_volume = sync_volume;
1982 u->first = TRUE;
1983 u->rewind_safeguard = rewind_safeguard;
1984 u->rtpoll = pa_rtpoll_new();
1985 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1986
1987 u->smoother = pa_smoother_new(
1988 SMOOTHER_ADJUST_USEC,
1989 SMOOTHER_WINDOW_USEC,
1990 TRUE,
1991 TRUE,
1992 5,
1993 pa_rtclock_now(),
1994 TRUE);
1995 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1996
1997 dev_id = pa_modargs_get_value(
1998 ma, "device_id",
1999 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2000
2001 if (reserve_init(u, dev_id) < 0)
2002 goto fail;
2003
2004 if (reserve_monitor_init(u, dev_id) < 0)
2005 goto fail;
2006
2007 b = use_mmap;
2008 d = use_tsched;
2009
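     /* Open the PCM device: an explicit mapping wins; otherwise a given
      * device_id triggers profile probing; failing both, fall back to the
      * literal device string. */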
2010 if (mapping) {
2011
2012 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2013 pa_log("device_id= not set");
2014 goto fail;
2015 }
2016
2017 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2018 dev_id,
2019 &u->device_name,
2020 &ss, &map,
2021 SND_PCM_STREAM_PLAYBACK,
2022 &period_frames, &buffer_frames, tsched_frames,
2023 &b, &d, mapping)))
2024 goto fail;
2025
2026 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2027
2028 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2029 goto fail;
2030
2031 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2032 dev_id,
2033 &u->device_name,
2034 &ss, &map,
2035 SND_PCM_STREAM_PLAYBACK,
2036 &period_frames, &buffer_frames, tsched_frames,
2037 &b, &d, profile_set, &mapping)))
2038 goto fail;
2039
2040 } else {
2041
2042 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2043 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2044 &u->device_name,
2045 &ss, &map,
2046 SND_PCM_STREAM_PLAYBACK,
2047 &period_frames, &buffer_frames, tsched_frames,
2048 &b, &d, FALSE)))
2049 goto fail;
2050 }
2051
2052 pa_assert(u->device_name);
2053 pa_log_info("Successfully opened device %s.", u->device_name);
2054
2055 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2056         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2057 goto fail;
2058 }
2059
2060 if (mapping)
2061 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2062
2063 if (use_mmap && !b) {
2064 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2065 u->use_mmap = use_mmap = FALSE;
2066 }
2067
2068 if (use_tsched && (!b || !d)) {
2069 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2070 u->use_tsched = use_tsched = FALSE;
2071 }
2072
2073 if (u->use_mmap)
2074 pa_log_info("Successfully enabled mmap() mode.");
2075
2076 if (u->use_tsched)
2077 pa_log_info("Successfully enabled timer-based scheduling mode.");
2078
2079 if (is_iec958(u) || is_hdmi(u))
2080 set_formats = TRUE;
2081
2082 /* ALSA might tweak the sample spec, so recalculate the frame size */
2083 frame_size = pa_frame_size(&ss);
2084
2085 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2086
2087 pa_sink_new_data_init(&data);
2088 data.driver = driver;
2089 data.module = m;
2090 data.card = card;
2091 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2092
2093 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2094 * variable instead of using &data.namereg_fail directly, because
2095 * data.namereg_fail is a bitfield and taking the address of a bitfield
2096 * variable is impossible. */
2097 namereg_fail = data.namereg_fail;
2098 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2099 pa_log("Failed to parse boolean argument namereg_fail.");
2100 pa_sink_new_data_done(&data);
2101 goto fail;
2102 }
2103 data.namereg_fail = namereg_fail;
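
     /* For illustration (guarded out): the reason for the temporary above
      * is that C forbids taking the address of a bit-field member: */
#if 0
     struct example { pa_bool_t namereg_fail:1; } e;
     pa_bool_t *p = &e.namereg_fail; /* error: cannot take address of bit-field */
#endif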
2104
2105 pa_sink_new_data_set_sample_spec(&data, &ss);
2106 pa_sink_new_data_set_channel_map(&data, &map);
2107
2108 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2109 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2110 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2111 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2112 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2113
2114 if (mapping) {
2115 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2116 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2117 }
2118
2119 pa_alsa_init_description(data.proplist);
2120
2121 if (u->control_device)
2122 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2123
2124 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2125 pa_log("Invalid properties");
2126 pa_sink_new_data_done(&data);
2127 goto fail;
2128 }
2129
2130 if (u->mixer_path_set)
2131 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
2132
2133 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2134 (set_formats ? PA_SINK_SET_FORMATS : 0));
2135 pa_sink_new_data_done(&data);
2136
2137 if (!u->sink) {
2138 pa_log("Failed to create sink object");
2139 goto fail;
2140 }
2141
2142 if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
2143 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2144 pa_log("Failed to parse sync_volume_safety_margin parameter");
2145 goto fail;
2146 }
2147
2148 if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
2149 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2150 pa_log("Failed to parse sync_volume_extra_delay parameter");
2151 goto fail;
2152 }
2153
2154 u->sink->parent.process_msg = sink_process_msg;
2155 if (u->use_tsched)
2156 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2157 u->sink->set_state = sink_set_state_cb;
2158 u->sink->set_port = sink_set_port_cb;
2159 u->sink->userdata = u;
2160
2161 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2162 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2163
2164 u->frame_size = frame_size;
2165 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2166 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2167 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2168
2169 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2170 (double) u->hwbuf_size / (double) u->fragment_size,
2171 (long unsigned) u->fragment_size,
2172 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2173 (long unsigned) u->hwbuf_size,
2174 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
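
     /* With the illustrative metrics from the worked example above, this
      * would log something like:
      *   Using 4.0 fragments of size 4410 bytes (25.00ms), buffer size is 17640 bytes (100.00ms)
      * (ALSA may have adjusted the requested period/buffer sizes, so the
      * real numbers can differ). */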
2175
2176 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2177 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2178 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2179 else {
2180 pa_log_info("Disabling rewind for device %s", u->device_name);
2181 pa_sink_set_max_rewind(u->sink, 0);
2182 }
2183
2184 if (u->use_tsched) {
2185 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
2186
2187 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
2188 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
2189
2190 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
2191 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
2192
2193 fix_min_sleep_wakeup(u);
2194 fix_tsched_watermark(u);
2195
2196 pa_sink_set_latency_range(u->sink,
2197 0,
2198 pa_bytes_to_usec(u->hwbuf_size, &ss));
2199
2200 pa_log_info("Time scheduling watermark is %0.2fms",
2201 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
2202 } else
2203 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
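
     /* Worked example (same illustrative 44100 Hz S16LE stereo spec as
      * above): the 20 ms default watermark is 3528 bytes, the 10 ms/5 ms
      * inc/dec steps are 1764/882 bytes, and the latency range tops out at
      * the 100 ms (17640 byte) hardware buffer. */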
2204
2205 reserve_update(u);
2206
2207 if (update_sw_params(u) < 0)
2208 goto fail;
2209
2210 if (setup_mixer(u, ignore_dB) < 0)
2211 goto fail;
2212
2213 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2214
2215 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2216 pa_log("Failed to create thread.");
2217 goto fail;
2218 }
2219
2220 /* Get initial mixer settings */
2221 if (data.volume_is_set) {
2222 if (u->sink->set_volume)
2223 u->sink->set_volume(u->sink);
2224 } else {
2225 if (u->sink->get_volume)
2226 u->sink->get_volume(u->sink);
2227 }
2228
2229 if (data.muted_is_set) {
2230 if (u->sink->set_mute)
2231 u->sink->set_mute(u->sink);
2232 } else {
2233 if (u->sink->get_mute)
2234 u->sink->get_mute(u->sink);
2235 }
2236
2237 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2238 u->sink->write_volume(u->sink);
2239
2240 if (set_formats) {
2241 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2242 pa_format_info *format;
2243
2244 /* To start with, we only support PCM formats. Other formats may be added
2245          * with pa_sink_set_formats(). */
2246 format = pa_format_info_new();
2247 format->encoding = PA_ENCODING_PCM;
2248 u->formats = pa_idxset_new(NULL, NULL);
2249 pa_idxset_put(u->formats, format, NULL);
2250
2251 u->sink->get_formats = sink_get_formats;
2252 u->sink->set_formats = sink_set_formats;
2253 }
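
     /* A sketch (illustration only) of how a passthrough format could later
      * be advertised on such a sink; "s" stands for a sink created with
      * PA_SINK_SET_FORMATS: */
#if 0
     pa_idxset *formats = pa_idxset_new(NULL, NULL);
     pa_format_info *f = pa_format_info_new();

     f->encoding = PA_ENCODING_AC3_IEC61937;
     pa_format_info_set_rate(f, 48000);
     pa_idxset_put(formats, f, NULL);

     pa_sink_set_formats(s, formats);
#endif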
2254
2255 pa_sink_put(u->sink);
2256
2257 if (profile_set)
2258 pa_alsa_profile_set_free(profile_set);
2259
2260 return u->sink;
2261
2262 fail:
2263
2264 if (u)
2265 userdata_free(u);
2266
2267 if (profile_set)
2268 pa_alsa_profile_set_free(profile_set);
2269
2270 return NULL;
2271 }
2272
2273 static void userdata_free(struct userdata *u) {
2274 pa_assert(u);
2275
2276 if (u->sink)
2277 pa_sink_unlink(u->sink);
2278
2279 if (u->thread) {
2280 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2281 pa_thread_free(u->thread);
2282 }
2283
2284 pa_thread_mq_done(&u->thread_mq);
2285
2286 if (u->sink)
2287 pa_sink_unref(u->sink);
2288
2289 if (u->memchunk.memblock)
2290 pa_memblock_unref(u->memchunk.memblock);
2291
2292 if (u->mixer_pd)
2293 pa_alsa_mixer_pdata_free(u->mixer_pd);
2294
2295 if (u->alsa_rtpoll_item)
2296 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2297
2298 if (u->rtpoll)
2299 pa_rtpoll_free(u->rtpoll);
2300
2301 if (u->pcm_handle) {
2302 snd_pcm_drop(u->pcm_handle);
2303 snd_pcm_close(u->pcm_handle);
2304 }
2305
2306 if (u->mixer_fdl)
2307 pa_alsa_fdlist_free(u->mixer_fdl);
2308
2309 if (u->mixer_path_set)
2310 pa_alsa_path_set_free(u->mixer_path_set);
2311 else if (u->mixer_path)
2312 pa_alsa_path_free(u->mixer_path);
2313
2314 if (u->mixer_handle)
2315 snd_mixer_close(u->mixer_handle);
2316
2317 if (u->smoother)
2318 pa_smoother_free(u->smoother);
2319
2320 if (u->formats)
2321 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2322
2323 reserve_done(u);
2324 monitor_done(u);
2325
2326 pa_xfree(u->device_name);
2327 pa_xfree(u->control_device);
2328 pa_xfree(u);
2329 }
2330
2331 void pa_alsa_sink_free(pa_sink *s) {
2332 struct userdata *u;
2333
2334 pa_sink_assert_ref(s);
2335 pa_assert_se(u = s->userdata);
2336
2337 userdata_free(u);
2338 }