]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
Move i18n.[ch] to src/pulsecore
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/thread-mq.h>
53 #include <pulsecore/rtpoll.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include <modules/reserve-wrap.h>
57
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
60
61 /* #define DEBUG_TIMING */
62
63 #define DEFAULT_DEVICE "default"
64
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67
68 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
69 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
70 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
71 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this theshold, increase the watermark */
72 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this theshold in the verification time, decrease the watermark */
73
/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */
76
77 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
78 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
79
80 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother windows size */
81 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
82
83 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
84 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
85
86 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
87
88 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
89 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
90
/* Per-sink instance data, shared between the main thread and the IO
 * thread of this ALSA sink. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;      /* NULL while the device is suspended */

    /* Mixer (hardware volume/mute) handling */
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    uint32_t old_rate;          /* rate to restore when a passthrough stream goes away */

    /* All sizes below are in bytes */
    size_t
        frame_size,             /* size of one audio frame */
        fragment_size,          /* ALSA period size */
        hwbuf_size,             /* total hardware buffer size */
        tsched_watermark,       /* refill when only this much is left in the buffer */
        hwbuf_unused,           /* part of the hw buffer deliberately left unused to honour latency requests */
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before; /* earliest time the watermark may be lowered again; 0 = period was reset */

    pa_memchunk memchunk;       /* pending render data for non-mmap writes */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, sync_volume:1;

    pa_bool_t first, after_rewind; /* suppress underrun handling right after start/rewind */

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation state */
    pa_smoother *smoother;
    uint64_t write_count;       /* total bytes written to the hw buffer since resume */
    uint64_t since_start;       /* bytes written since start or last recovery */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Session-wide device reservation */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
152
153 static void userdata_free(struct userdata *u);
154
155 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
156 pa_assert(r);
157 pa_assert(u);
158
159 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
160 return PA_HOOK_CANCEL;
161
162 return PA_HOOK_OK;
163 }
164
165 static void reserve_done(struct userdata *u) {
166 pa_assert(u);
167
168 if (u->reserve_slot) {
169 pa_hook_slot_free(u->reserve_slot);
170 u->reserve_slot = NULL;
171 }
172
173 if (u->reserve) {
174 pa_reserve_wrapper_unref(u->reserve);
175 u->reserve = NULL;
176 }
177 }
178
179 static void reserve_update(struct userdata *u) {
180 const char *description;
181 pa_assert(u);
182
183 if (!u->sink || !u->reserve)
184 return;
185
186 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
187 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
188 }
189
/* Acquire (or ref) the session-wide reservation for the given ALSA
 * device and connect reserve_cb() so we are told when another
 * application wants the device. Returns 0 on success or when no
 * reservation is applicable (system mode, unreservable device),
 * -1 if the reservation could not be obtained. */
static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* Try to lock the device via the session reservation service */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}
219
220 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
221 pa_bool_t b;
222
223 pa_assert(w);
224 pa_assert(u);
225
226 b = PA_PTR_TO_UINT(busy) && !u->reserve;
227
228 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
229 return PA_HOOK_OK;
230 }
231
232 static void monitor_done(struct userdata *u) {
233 pa_assert(u);
234
235 if (u->monitor_slot) {
236 pa_hook_slot_free(u->monitor_slot);
237 u->monitor_slot = NULL;
238 }
239
240 if (u->monitor) {
241 pa_reserve_monitor_wrapper_unref(u->monitor);
242 u->monitor = NULL;
243 }
244 }
245
/* Set up a monitor that watches whether some other application holds
 * the device, and connect monitor_cb() to react to changes. Returns 0
 * on success or when monitoring is not applicable, -1 on failure. */
static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* Watch the reservation state of the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}
270
271 static void fix_min_sleep_wakeup(struct userdata *u) {
272 size_t max_use, max_use_2;
273
274 pa_assert(u);
275 pa_assert(u->use_tsched);
276
277 max_use = u->hwbuf_size - u->hwbuf_unused;
278 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
279
280 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
281 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
282
283 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
284 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
285 }
286
287 static void fix_tsched_watermark(struct userdata *u) {
288 size_t max_use;
289 pa_assert(u);
290 pa_assert(u->use_tsched);
291
292 max_use = u->hwbuf_size - u->hwbuf_unused;
293
294 if (u->tsched_watermark > max_use - u->min_sleep)
295 u->tsched_watermark = max_use - u->min_sleep;
296
297 if (u->tsched_watermark < u->min_wakeup)
298 u->tsched_watermark = u->min_wakeup;
299 }
300
/* React on an (imminent) underrun: first try to raise the wakeup
 * watermark; if that is already maxed out, raise the sink's minimal
 * latency instead. */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* If we reach this point neither the watermark nor the latency can
     * be raised any further -- underruns are unavoidable. */
}
333
/* Lower the wakeup watermark by one step, but only after things have
 * been stable for at least TSCHED_WATERMARK_VERIFY_AFTER_USEC. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* not_before == 0 means the verification period was reset; start a
     * new one instead of decreasing right away */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Still inside the verification period? Then don't touch anything */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    /* Decrease by one step, but never by more than half */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
367
/* Compute how long the IO thread may sleep (*sleep_usec) and how much
 * time is reserved for refilling once the buffer level reaches the
 * watermark (*process_usec), based on the currently requested latency. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    /* No latency requested? Then use the full buffer time */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    /* The watermark must not exceed the requested latency; fall back
     * to half of it in that case */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
397
398 static int try_recover(struct userdata *u, const char *call, int err) {
399 pa_assert(u);
400 pa_assert(call);
401 pa_assert(err < 0);
402
403 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
404
405 pa_assert(err != -EAGAIN);
406
407 if (err == -EPIPE)
408 pa_log_debug("%s: Buffer underrun!", call);
409
410 if (err == -ESTRPIPE)
411 pa_log_debug("%s: System suspended!", call);
412
413 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
414 pa_log("%s: %s", call, pa_alsa_strerror(err));
415 return -1;
416 }
417
418 u->first = TRUE;
419 u->since_start = 0;
420 return 0;
421 }
422
/* Convert the number of writable bytes in the hw buffer (n_bytes) into
 * the number of bytes still queued for playback, detect underruns, and
 * adjust the timer-scheduling watermark accordingly. on_timeout says
 * whether this wakeup was caused by the timer -- only then may the
 * watermark be decreased. Returns the number of bytes left to play. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Underruns right after startup or a rewind are expected and
         * not worth logging */
        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        /* Buffer level dipped below the dec threshold: restart the
         * verification period for watermark decreases */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
480
/* Refill the hardware buffer using mmap access. 'polled' says whether
 * this wakeup came from poll() (POLLOUT), 'on_timeout' whether it came
 * from the timer. On return *sleep_usec holds how long the caller may
 * sleep before the next refill (tsched mode only). Returns 1 if data
 * was written, 0 if not, negative on unrecoverable error. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* poll() woke us but there is nothing to write -- a driver
             * bug; complain once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        /* Safety cap: never loop indefinitely if the device keeps
         * offering small amounts */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed region in a fixed memblock and render
             * the sink's output directly into it */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the buffer drains down to the watermark, minus
         * the time reserved for processing */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
657
/* Refill the hardware buffer using plain snd_pcm_writei(). Same
 * contract as mmap_write(): returns 1 if data was written, 0 if not,
 * negative on unrecoverable error; *sleep_usec is set in tsched mode. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* Determine how much space is available in the hw buffer */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* poll() woke us but there is nothing to write -- a driver
             * bug; complain once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety cap: never loop indefinitely */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render fresh data only when the carried-over chunk is
             * exhausted */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Consume the written part of the chunk; keep the rest for
             * the next iteration */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the buffer drains down to the watermark, minus
         * the time reserved for processing */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
797
/* Feed a fresh (system time, playback time) sample into the latency
 * smoother, rate-limited by an exponentially growing update interval. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver's own timestamp for the delay measurement */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Playback position = bytes written minus what still sits in the
     * hw buffer */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
847
848 static pa_usec_t sink_get_latency(struct userdata *u) {
849 pa_usec_t r;
850 int64_t delay;
851 pa_usec_t now1, now2;
852
853 pa_assert(u);
854
855 now1 = pa_rtclock_now();
856 now2 = pa_smoother_get(u->smoother, now1);
857
858 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
859
860 r = delay >= 0 ? (pa_usec_t) delay : 0;
861
862 if (u->memchunk.memblock)
863 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
864
865 return r;
866 }
867
868 static int build_pollfd(struct userdata *u) {
869 pa_assert(u);
870 pa_assert(u->pcm_handle);
871
872 if (u->alsa_rtpoll_item)
873 pa_rtpoll_item_free(u->alsa_rtpoll_item);
874
875 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
876 return -1;
877
878 return 0;
879 }
880
881 /* Called from IO context */
/* Called from IO context: close the PCM device and tear down its poll
 * item so the hardware is released while we are suspended. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}
910
911 /* Called from IO context */
/* Called from IO context: recompute hwbuf_unused and avail_min from
 * the currently requested latency and push the resulting software
 * parameters to ALSA. Returns 0 on success, negative ALSA error
 * otherwise. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything above the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode the kernel should only wake us shortly before
         * the buffer runs down to the watermark */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        /* Software/plug devices cannot rewind reliably */
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
972
973 /* Called from IO context */
/* Called from IO context: reopen the PCM device after a suspend and
 * restore the exact hardware configuration we had before. Fails (and
 * closes the device again) if the original parameters cannot be
 * re-established. Returns 0 on success, -PA_ERR_IO on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before the suspend */
    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart latency tracking from scratch */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1049
1050 /* Called from IO context */
/* Called from IO context: message handler for the sink. Handles
 * passthrough rate switching on stream add/remove, latency queries and
 * state changes; everything else is delegated to pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_FINISH_MOVE:
        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            /* Remember the current rate so we can restore it when the
             * passthrough stream goes away again */
            u->old_rate = u->sink->sample_spec.rate;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == i->thread_info.sample_spec.rate)
                break;

            /* .. we do: reconfigure via a suspend/unsuspend cycle */
            if ((r = suspend(u)) < 0)
                return r;

            u->sink->sample_spec.rate = i->thread_info.sample_spec.rate;

            if ((r = unsuspend(u)) < 0)
                return r;

            break;
        }

        case PA_SINK_MESSAGE_START_MOVE:
        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == u->old_rate)
                break;

            /* .. we do: restore the pre-passthrough rate */
            if ((r = suspend(u)) < 0)
                return r;

            u->sink->sample_spec.rate = u->old_rate;

            if ((r = unsuspend(u)) < 0)
                return r;

            break;
        }

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended we report zero latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    /* First transition out of INIT: register the poll fds */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
1160
1161 /* Called from main context */
1162 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1163 pa_sink_state_t old_state;
1164 struct userdata *u;
1165
1166 pa_sink_assert_ref(s);
1167 pa_assert_se(u = s->userdata);
1168
1169 old_state = pa_sink_get_state(u->sink);
1170
1171 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1172 reserve_done(u);
1173 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1174 if (reserve_init(u, u->device_name) < 0)
1175 return -PA_ERR_BUSY;
1176
1177 return 0;
1178 }
1179
1180 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1181 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1182
1183 pa_assert(u);
1184 pa_assert(u->mixer_handle);
1185
1186 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1187 return 0;
1188
1189 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1190 return 0;
1191
1192 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1193 pa_sink_get_volume(u->sink, TRUE);
1194 pa_sink_get_mute(u->sink, TRUE);
1195 }
1196
1197 return 0;
1198 }
1199
1200 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1201 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1202
1203 pa_assert(u);
1204 pa_assert(u->mixer_handle);
1205
1206 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1207 return 0;
1208
1209 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1210 return 0;
1211
1212 if (mask & SND_CTL_EVENT_MASK_VALUE)
1213 pa_sink_update_volume_and_mute(u->sink);
1214
1215 return 0;
1216 }
1217
/* Read the current hardware volume from the mixer and propagate it to
 * the sink, resetting the software volume if the hardware moved. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* On read failure keep the previously cached volume */
    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    /* Unchanged since the last read: nothing to propagate */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}
1250
/* Write the requested sink volume to the hardware mixer. With dB
 * support, any residual between the requested volume and what the
 * hardware actually accepted is compensated in software. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    /* With PA_SINK_SYNC_VOLUME the actual hardware write is deferred
     * to the IO thread (see sink_write_volume_cb) */
    pa_bool_t sync_volume = !!(s->flags & PA_SINK_SYNC_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1308
/* Deferred (sync) volume writer, invoked from the IO thread: pushes
 * the pending hardware volume to the mixer and logs if the hardware
 * rounded it further than VOLUME_ACCURACY allows. */
static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_SYNC_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Ratio of written vs. requested volume; close to NORM means
         * the hardware applied what we asked for */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            /* Buffers for percent and dB renderings; a union since the
             * two formats are never needed at the same time */
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("           in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1350
1351 static void sink_get_mute_cb(pa_sink *s) {
1352 struct userdata *u = s->userdata;
1353 pa_bool_t b;
1354
1355 pa_assert(u);
1356 pa_assert(u->mixer_path);
1357 pa_assert(u->mixer_handle);
1358
1359 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1360 return;
1361
1362 s->muted = b;
1363 }
1364
1365 static void sink_set_mute_cb(pa_sink *s) {
1366 struct userdata *u = s->userdata;
1367
1368 pa_assert(u);
1369 pa_assert(u->mixer_path);
1370 pa_assert(u->mixer_handle);
1371
1372 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1373 }
1374
/* Install or clear the sink's volume/mute callbacks depending on what
 * the selected mixer path supports, and derive base volume and step
 * count from the path's range. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        /* No hardware volume on this path: clear all volume callbacks */
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        /* Deferred (IO-thread) volume writes need a dB scale */
        if (u->mixer_path->has_dB && u->sync_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled synchronous volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Base volume maps the path's maximum dB to PA's 0dB point */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            /* Without dB info, expose the raw mixer range as steps */
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1423
1424 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1425 struct userdata *u = s->userdata;
1426 pa_alsa_port_data *data;
1427
1428 pa_assert(u);
1429 pa_assert(p);
1430 pa_assert(u->mixer_handle);
1431
1432 data = PA_DEVICE_PORT_DATA(p);
1433
1434 pa_assert_se(u->mixer_path = data->path);
1435 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1436
1437 mixer_volume_init(u);
1438
1439 if (data->setting)
1440 pa_alsa_setting_select(data->setting, u->mixer_handle);
1441
1442 if (s->set_mute)
1443 s->set_mute(s);
1444 if (s->set_volume)
1445 s->set_volume(s);
1446
1447 return 0;
1448 }
1449
1450 static void sink_update_requested_latency_cb(pa_sink *s) {
1451 struct userdata *u = s->userdata;
1452 size_t before;
1453 pa_assert(u);
1454 pa_assert(u->use_tsched); /* only when timer scheduling is used
1455 * we can dynamically adjust the
1456 * latency */
1457
1458 if (!u->pcm_handle)
1459 return;
1460
1461 before = u->hwbuf_unused;
1462 update_sw_params(u);
1463
1464 /* Let's check whether we now use only a smaller part of the
1465 buffer then before. If so, we need to make sure that subsequent
1466 rewinds are relative to the new maximum fill level and not to the
1467 current fill level. Thus, let's do a full rewind once, to clear
1468 things up. */
1469
1470 if (u->hwbuf_unused > before) {
1471 pa_log_debug("Requesting rewind due to latency change.");
1472 pa_sink_request_rewind(s, (size_t) -1);
1473 }
1474 }
1475
1476 static int process_rewind(struct userdata *u) {
1477 snd_pcm_sframes_t unused;
1478 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1479 pa_assert(u);
1480
1481 /* Figure out how much we shall rewind and reset the counter */
1482 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1483
1484 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1485
1486 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1487 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1488 return -1;
1489 }
1490
1491 unused_nbytes = (size_t) unused * u->frame_size;
1492
1493 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1494 unused_nbytes += u->rewind_safeguard;
1495
1496 if (u->hwbuf_size > unused_nbytes)
1497 limit_nbytes = u->hwbuf_size - unused_nbytes;
1498 else
1499 limit_nbytes = 0;
1500
1501 if (rewind_nbytes > limit_nbytes)
1502 rewind_nbytes = limit_nbytes;
1503
1504 if (rewind_nbytes > 0) {
1505 snd_pcm_sframes_t in_frames, out_frames;
1506
1507 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1508
1509 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1510 pa_log_debug("before: %lu", (unsigned long) in_frames);
1511 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1512 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1513 if (try_recover(u, "process_rewind", out_frames) < 0)
1514 return -1;
1515 out_frames = 0;
1516 }
1517
1518 pa_log_debug("after: %lu", (unsigned long) out_frames);
1519
1520 rewind_nbytes = (size_t) out_frames * u->frame_size;
1521
1522 if (rewind_nbytes <= 0)
1523 pa_log_info("Tried rewind, but was apparently not possible.");
1524 else {
1525 u->write_count -= rewind_nbytes;
1526 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1527 pa_sink_process_rewind(u->sink, rewind_nbytes);
1528
1529 u->after_rewind = TRUE;
1530 return 0;
1531 }
1532 } else
1533 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1534
1535 pa_sink_process_rewind(u->sink, 0);
1536 return 0;
1537 }
1538
1539 static void thread_func(void *userdata) {
1540 struct userdata *u = userdata;
1541 unsigned short revents = 0;
1542
1543 pa_assert(u);
1544
1545 pa_log_debug("Thread starting up");
1546
1547 if (u->core->realtime_scheduling)
1548 pa_make_realtime(u->core->realtime_priority);
1549
1550 pa_thread_mq_install(&u->thread_mq);
1551
1552 for (;;) {
1553 int ret;
1554 pa_usec_t rtpoll_sleep = 0;
1555
1556 #ifdef DEBUG_TIMING
1557 pa_log_debug("Loop");
1558 #endif
1559
1560 /* Render some data and write it to the dsp */
1561 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1562 int work_done;
1563 pa_usec_t sleep_usec = 0;
1564 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1565
1566 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1567 if (process_rewind(u) < 0)
1568 goto fail;
1569
1570 if (u->use_mmap)
1571 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1572 else
1573 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1574
1575 if (work_done < 0)
1576 goto fail;
1577
1578 /* pa_log_debug("work_done = %i", work_done); */
1579
1580 if (work_done) {
1581
1582 if (u->first) {
1583 pa_log_info("Starting playback.");
1584 snd_pcm_start(u->pcm_handle);
1585
1586 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1587
1588 u->first = FALSE;
1589 }
1590
1591 update_smoother(u);
1592 }
1593
1594 if (u->use_tsched) {
1595 pa_usec_t cusec;
1596
1597 if (u->since_start <= u->hwbuf_size) {
1598
1599 /* USB devices on ALSA seem to hit a buffer
1600 * underrun during the first iterations much
1601 * quicker then we calculate here, probably due to
1602 * the transport latency. To accommodate for that
1603 * we artificially decrease the sleep time until
1604 * we have filled the buffer at least once
1605 * completely.*/
1606
1607 if (pa_log_ratelimit(PA_LOG_DEBUG))
1608 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1609 sleep_usec /= 2;
1610 }
1611
1612 /* OK, the playback buffer is now full, let's
1613 * calculate when to wake up next */
1614 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1615
1616 /* Convert from the sound card time domain to the
1617 * system time domain */
1618 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1619
1620 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1621
1622 /* We don't trust the conversion, so we wake up whatever comes first */
1623 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1624 }
1625
1626 u->after_rewind = FALSE;
1627
1628 }
1629
1630 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1631 pa_usec_t volume_sleep;
1632 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1633 if (volume_sleep > 0)
1634 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1635 }
1636
1637 if (rtpoll_sleep > 0)
1638 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1639 else
1640 pa_rtpoll_set_timer_disabled(u->rtpoll);
1641
1642 /* Hmm, nothing to do. Let's sleep */
1643 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1644 goto fail;
1645
1646 if (u->sink->flags & PA_SINK_SYNC_VOLUME)
1647 pa_sink_volume_change_apply(u->sink, NULL);
1648
1649 if (ret == 0)
1650 goto finish;
1651
1652 /* Tell ALSA about this and process its response */
1653 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1654 struct pollfd *pollfd;
1655 int err;
1656 unsigned n;
1657
1658 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1659
1660 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1661 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1662 goto fail;
1663 }
1664
1665 if (revents & ~POLLOUT) {
1666 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1667 goto fail;
1668
1669 u->first = TRUE;
1670 u->since_start = 0;
1671 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1672 pa_log_debug("Wakeup from ALSA!");
1673
1674 } else
1675 revents = 0;
1676 }
1677
1678 fail:
1679 /* If this was no regular exit from the loop we have to continue
1680 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1681 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1682 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1683
1684 finish:
1685 pa_log_debug("Thread shutting down");
1686 }
1687
1688 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1689 const char *n;
1690 char *t;
1691
1692 pa_assert(data);
1693 pa_assert(ma);
1694 pa_assert(device_name);
1695
1696 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1697 pa_sink_new_data_set_name(data, n);
1698 data->namereg_fail = TRUE;
1699 return;
1700 }
1701
1702 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1703 data->namereg_fail = TRUE;
1704 else {
1705 n = device_id ? device_id : device_name;
1706 data->namereg_fail = FALSE;
1707 }
1708
1709 if (mapping)
1710 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1711 else
1712 t = pa_sprintf_malloc("alsa_output.%s", n);
1713
1714 pa_sink_new_data_set_name(data, t);
1715 pa_xfree(t);
1716 }
1717
/* Open the mixer that belongs to the PCM device and probe either the
 * single path named by the control= element or the full path set of
 * the mapping. On any failure all mixer resources are released again;
 * the sink then simply runs without hardware volume/mute. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Nothing to look for without a mapping or an explicit element */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* User named a specific mixer element: build a single path for it */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Otherwise probe the whole path set defined by the mapping */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
    }

    return;

fail:

    /* Undo whatever was allocated above; path set and single path are
     * mutually exclusive */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1763
1764
/* Activate the mixer path belonging to the sink's active port (or the
 * only/first probed path), install volume/mute callbacks, and register
 * for mixer change events. Returns 0 on success (including the
 * no-mixer case), -1 on setup failure. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* No mixer found earlier: software volume only, nothing to do */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;

        PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
            /* Deferred volume: watch the mixer fds from the IO thread */
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            /* Classic volume: watch the mixer fds from the main loop */
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1845
1846 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1847
1848 struct userdata *u = NULL;
1849 const char *dev_id = NULL;
1850 pa_sample_spec ss, requested_ss;
1851 pa_channel_map map;
1852 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1853 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1854 size_t frame_size;
1855 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
1856 pa_sink_new_data data;
1857 pa_alsa_profile_set *profile_set = NULL;
1858
1859 pa_assert(m);
1860 pa_assert(ma);
1861
1862 ss = m->core->default_sample_spec;
1863 map = m->core->default_channel_map;
1864 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1865 pa_log("Failed to parse sample specification and channel map");
1866 goto fail;
1867 }
1868
1869 requested_ss = ss;
1870 frame_size = pa_frame_size(&ss);
1871
1872 nfrags = m->core->default_n_fragments;
1873 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1874 if (frag_size <= 0)
1875 frag_size = (uint32_t) frame_size;
1876 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1877 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1878
1879 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1880 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1881 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1882 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1883 pa_log("Failed to parse buffer metrics");
1884 goto fail;
1885 }
1886
1887 buffer_size = nfrags * frag_size;
1888
1889 period_frames = frag_size/frame_size;
1890 buffer_frames = buffer_size/frame_size;
1891 tsched_frames = tsched_size/frame_size;
1892
1893 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1894 pa_log("Failed to parse mmap argument.");
1895 goto fail;
1896 }
1897
1898 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1899 pa_log("Failed to parse tsched argument.");
1900 goto fail;
1901 }
1902
1903 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1904 pa_log("Failed to parse ignore_dB argument.");
1905 goto fail;
1906 }
1907
1908 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
1909 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
1910 pa_log("Failed to parse rewind_safeguard argument");
1911 goto fail;
1912 }
1913
1914 sync_volume = m->core->sync_volume;
1915 if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
1916 pa_log("Failed to parse sync_volume argument.");
1917 goto fail;
1918 }
1919
1920 use_tsched = pa_alsa_may_tsched(use_tsched);
1921
1922 u = pa_xnew0(struct userdata, 1);
1923 u->core = m->core;
1924 u->module = m;
1925 u->use_mmap = use_mmap;
1926 u->use_tsched = use_tsched;
1927 u->sync_volume = sync_volume;
1928 u->first = TRUE;
1929 u->rewind_safeguard = rewind_safeguard;
1930 u->rtpoll = pa_rtpoll_new();
1931 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1932
1933 u->smoother = pa_smoother_new(
1934 SMOOTHER_ADJUST_USEC,
1935 SMOOTHER_WINDOW_USEC,
1936 TRUE,
1937 TRUE,
1938 5,
1939 pa_rtclock_now(),
1940 TRUE);
1941 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1942
1943 dev_id = pa_modargs_get_value(
1944 ma, "device_id",
1945 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1946
1947 if (reserve_init(u, dev_id) < 0)
1948 goto fail;
1949
1950 if (reserve_monitor_init(u, dev_id) < 0)
1951 goto fail;
1952
1953 b = use_mmap;
1954 d = use_tsched;
1955
1956 if (mapping) {
1957
1958 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1959 pa_log("device_id= not set");
1960 goto fail;
1961 }
1962
1963 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1964 dev_id,
1965 &u->device_name,
1966 &ss, &map,
1967 SND_PCM_STREAM_PLAYBACK,
1968 &period_frames, &buffer_frames, tsched_frames,
1969 &b, &d, mapping)))
1970 goto fail;
1971
1972 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1973
1974 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1975 goto fail;
1976
1977 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1978 dev_id,
1979 &u->device_name,
1980 &ss, &map,
1981 SND_PCM_STREAM_PLAYBACK,
1982 &period_frames, &buffer_frames, tsched_frames,
1983 &b, &d, profile_set, &mapping)))
1984 goto fail;
1985
1986 } else {
1987
1988 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1989 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1990 &u->device_name,
1991 &ss, &map,
1992 SND_PCM_STREAM_PLAYBACK,
1993 &period_frames, &buffer_frames, tsched_frames,
1994 &b, &d, FALSE)))
1995 goto fail;
1996 }
1997
1998 pa_assert(u->device_name);
1999 pa_log_info("Successfully opened device %s.", u->device_name);
2000
2001 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2002 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
2003 goto fail;
2004 }
2005
2006 if (mapping)
2007 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2008
2009 if (use_mmap && !b) {
2010 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2011 u->use_mmap = use_mmap = FALSE;
2012 }
2013
2014 if (use_tsched && (!b || !d)) {
2015 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2016 u->use_tsched = use_tsched = FALSE;
2017 }
2018
2019 if (u->use_mmap)
2020 pa_log_info("Successfully enabled mmap() mode.");
2021
2022 if (u->use_tsched)
2023 pa_log_info("Successfully enabled timer-based scheduling mode.");
2024
2025 /* ALSA might tweak the sample spec, so recalculate the frame size */
2026 frame_size = pa_frame_size(&ss);
2027
2028 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2029
2030 pa_sink_new_data_init(&data);
2031 data.driver = driver;
2032 data.module = m;
2033 data.card = card;
2034 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2035
2036 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2037 * variable instead of using &data.namereg_fail directly, because
2038 * data.namereg_fail is a bitfield and taking the address of a bitfield
2039 * variable is impossible. */
2040 namereg_fail = data.namereg_fail;
2041 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2042 pa_log("Failed to parse boolean argument namereg_fail.");
2043 pa_sink_new_data_done(&data);
2044 goto fail;
2045 }
2046 data.namereg_fail = namereg_fail;
2047
2048 pa_sink_new_data_set_sample_spec(&data, &ss);
2049 pa_sink_new_data_set_channel_map(&data, &map);
2050
2051 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2052 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2053 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2054 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2055 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2056
2057 if (mapping) {
2058 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2059 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2060 }
2061
2062 pa_alsa_init_description(data.proplist);
2063
2064 if (u->control_device)
2065 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2066
2067 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2068 pa_log("Invalid properties");
2069 pa_sink_new_data_done(&data);
2070 goto fail;
2071 }
2072
2073 if (u->mixer_path_set)
2074 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
2075
2076 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
2077 pa_sink_new_data_done(&data);
2078
2079 if (!u->sink) {
2080 pa_log("Failed to create sink object");
2081 goto fail;
2082 }
2083
2084 if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
2085 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2086 pa_log("Failed to parse sync_volume_safety_margin parameter");
2087 goto fail;
2088 }
2089
2090 if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
2091 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2092 pa_log("Failed to parse sync_volume_extra_delay parameter");
2093 goto fail;
2094 }
2095
2096 u->sink->parent.process_msg = sink_process_msg;
2097 if (u->use_tsched)
2098 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2099 u->sink->set_state = sink_set_state_cb;
2100 u->sink->set_port = sink_set_port_cb;
2101 u->sink->userdata = u;
2102
2103 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2104 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2105
2106 u->frame_size = frame_size;
2107 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2108 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2109 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2110
2111 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2112 (double) u->hwbuf_size / (double) u->fragment_size,
2113 (long unsigned) u->fragment_size,
2114 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2115 (long unsigned) u->hwbuf_size,
2116 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2117
2118 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2119 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2120 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2121 else {
2122 pa_log_info("Disabling rewind for device %s", u->device_name);
2123 pa_sink_set_max_rewind(u->sink, 0);
2124 }
2125
2126 if (u->use_tsched) {
2127 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
2128
2129 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
2130 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
2131
2132 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
2133 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
2134
2135 fix_min_sleep_wakeup(u);
2136 fix_tsched_watermark(u);
2137
2138 pa_sink_set_latency_range(u->sink,
2139 0,
2140 pa_bytes_to_usec(u->hwbuf_size, &ss));
2141
2142 pa_log_info("Time scheduling watermark is %0.2fms",
2143 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
2144 } else
2145 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2146
2147 reserve_update(u);
2148
2149 if (update_sw_params(u) < 0)
2150 goto fail;
2151
2152 if (setup_mixer(u, ignore_dB) < 0)
2153 goto fail;
2154
2155 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2156
2157 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2158 pa_log("Failed to create thread.");
2159 goto fail;
2160 }
2161
2162 /* Get initial mixer settings */
2163 if (data.volume_is_set) {
2164 if (u->sink->set_volume)
2165 u->sink->set_volume(u->sink);
2166 } else {
2167 if (u->sink->get_volume)
2168 u->sink->get_volume(u->sink);
2169 }
2170
2171 if (data.muted_is_set) {
2172 if (u->sink->set_mute)
2173 u->sink->set_mute(u->sink);
2174 } else {
2175 if (u->sink->get_mute)
2176 u->sink->get_mute(u->sink);
2177 }
2178
2179 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2180 u->sink->write_volume(u->sink);
2181
2182 pa_sink_put(u->sink);
2183
2184 if (profile_set)
2185 pa_alsa_profile_set_free(profile_set);
2186
2187 return u->sink;
2188
2189 fail:
2190
2191 if (u)
2192 userdata_free(u);
2193
2194 if (profile_set)
2195 pa_alsa_profile_set_free(profile_set);
2196
2197 return NULL;
2198 }
2199
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Tear-down order matters here: unlink the sink from the core first so
     * that no new streams or messages get routed to it while the rest of
     * the state is being destroyed. */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Shut down the IO thread synchronously before freeing anything the
     * thread might still be touching (rtpoll, pcm handle, memchunk). */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    /* Drop our own reference only after the thread is gone; the sink was
     * already unlinked above. */
    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    /* Free the rtpoll item before the rtpoll it is attached to. */
    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop playback immediately (drop, not drain) and close the device. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    /* The fd list watches the mixer; free it before closing the mixer. */
    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* A path set owns its paths, so free either the set or the single
     * standalone path, never both. */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
2254
2255 void pa_alsa_sink_free(pa_sink *s) {
2256 struct userdata *u;
2257
2258 pa_sink_assert_ref(s);
2259 pa_assert_se(u = s->userdata);
2260
2261 userdata_free(u);
2262 }