code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
alsa: resets POLLOUT event
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/thread-mq.h>
53 #include <pulsecore/rtpoll.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include <modules/reserve-wrap.h>
57
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
60
61 /* #define DEBUG_TIMING */
62
63 #define DEFAULT_DEVICE "default"
64
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67
68 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
69 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
70 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to recheck whether things are good now */
71 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
72 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold within the verification time, decrease the watermark */
73
74 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
75 * will increase the watermark only if we hit a real underrun. */
76
77 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
78 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
79
80 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
81 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
82
83 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
84 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
85
86 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't extend granularity in software unless the difference exceeds this level */
87
88 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
89 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms; depending on channels/rate/sample format this may exceed the 256 bytes above */
90
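/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * How the usec-based tunables above map to byte counts for a concrete sample
 * spec. For S16LE stereo at 44.1 kHz a frame is 4 bytes, so one second of
 * audio is 176400 bytes: the 2s buffer becomes 352800 bytes and the 20ms
 * watermark 3528 bytes. */
static PA_GCC_UNUSED void example_tsched_byte_sizes(void) {
    pa_sample_spec ss;

    ss.format = PA_SAMPLE_S16LE;
    ss.rate = 44100;
    ss.channels = 2;

    /* pa_usec_to_bytes() rounds down to a whole frame */
    pa_log_debug("hw buffer: %lu bytes, watermark: %lu bytes",
                 (unsigned long) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss),
                 (unsigned long) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss));
}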
91 struct userdata {
92 pa_core *core;
93 pa_module *module;
94 pa_sink *sink;
95
96 pa_thread *thread;
97 pa_thread_mq thread_mq;
98 pa_rtpoll *rtpoll;
99
100 snd_pcm_t *pcm_handle;
101
102 pa_alsa_fdlist *mixer_fdl;
103 pa_alsa_mixer_pdata *mixer_pd;
104 snd_mixer_t *mixer_handle;
105 pa_alsa_path_set *mixer_path_set;
106 pa_alsa_path *mixer_path;
107
108 pa_cvolume hardware_volume;
109
110 uint32_t old_rate;
111
112 size_t
113 frame_size,
114 fragment_size,
115 hwbuf_size,
116 tsched_watermark,
117 hwbuf_unused,
118 min_sleep,
119 min_wakeup,
120 watermark_inc_step,
121 watermark_dec_step,
122 watermark_inc_threshold,
123 watermark_dec_threshold,
124 rewind_safeguard;
125
126 pa_usec_t watermark_dec_not_before;
127
128 pa_memchunk memchunk;
129
130 char *device_name; /* name of the PCM device */
131 char *control_device; /* name of the control device */
132
133 pa_bool_t use_mmap:1, use_tsched:1, sync_volume:1;
134
135 pa_bool_t first, after_rewind;
136
137 pa_rtpoll_item *alsa_rtpoll_item;
138
139 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
140
141 pa_smoother *smoother;
142 uint64_t write_count;
143 uint64_t since_start;
144 pa_usec_t smoother_interval;
145 pa_usec_t last_smoother_update;
146
147 pa_reserve_wrapper *reserve;
148 pa_hook_slot *reserve_slot;
149 pa_reserve_monitor_wrapper *monitor;
150 pa_hook_slot *monitor_slot;
151 };
152
153 static void userdata_free(struct userdata *u);
154
155 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
156 pa_assert(r);
157 pa_assert(u);
158
159 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
160 return PA_HOOK_CANCEL;
161
162 return PA_HOOK_OK;
163 }
164
165 static void reserve_done(struct userdata *u) {
166 pa_assert(u);
167
168 if (u->reserve_slot) {
169 pa_hook_slot_free(u->reserve_slot);
170 u->reserve_slot = NULL;
171 }
172
173 if (u->reserve) {
174 pa_reserve_wrapper_unref(u->reserve);
175 u->reserve = NULL;
176 }
177 }
178
179 static void reserve_update(struct userdata *u) {
180 const char *description;
181 pa_assert(u);
182
183 if (!u->sink || !u->reserve)
184 return;
185
186 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
187 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
188 }
189
190 static int reserve_init(struct userdata *u, const char *dname) {
191 char *rname;
192
193 pa_assert(u);
194 pa_assert(dname);
195
196 if (u->reserve)
197 return 0;
198
199 if (pa_in_system_mode())
200 return 0;
201
202 if (!(rname = pa_alsa_get_reserve_name(dname)))
203 return 0;
204
205 /* We are resuming, try to lock the device */
206 u->reserve = pa_reserve_wrapper_get(u->core, rname);
207 pa_xfree(rname);
208
209 if (!(u->reserve))
210 return -1;
211
212 reserve_update(u);
213
214 pa_assert(!u->reserve_slot);
215 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
216
217 return 0;
218 }
219
220 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
221 pa_bool_t b;
222
223 pa_assert(w);
224 pa_assert(u);
225
226 b = PA_PTR_TO_UINT(busy) && !u->reserve;
227
228 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
229 return PA_HOOK_OK;
230 }
231
232 static void monitor_done(struct userdata *u) {
233 pa_assert(u);
234
235 if (u->monitor_slot) {
236 pa_hook_slot_free(u->monitor_slot);
237 u->monitor_slot = NULL;
238 }
239
240 if (u->monitor) {
241 pa_reserve_monitor_wrapper_unref(u->monitor);
242 u->monitor = NULL;
243 }
244 }
245
246 static int reserve_monitor_init(struct userdata *u, const char *dname) {
247 char *rname;
248
249 pa_assert(u);
250 pa_assert(dname);
251
252 if (pa_in_system_mode())
253 return 0;
254
255 if (!(rname = pa_alsa_get_reserve_name(dname)))
256 return 0;
257
258 /* We are resuming, try to lock the device */
259 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
260 pa_xfree(rname);
261
262 if (!(u->monitor))
263 return -1;
264
265 pa_assert(!u->monitor_slot);
266 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
267
268 return 0;
269 }
270
271 static void fix_min_sleep_wakeup(struct userdata *u) {
272 size_t max_use, max_use_2;
273
274 pa_assert(u);
275 pa_assert(u->use_tsched);
276
277 max_use = u->hwbuf_size - u->hwbuf_unused;
278 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
279
280 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
281 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
282
283 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
284 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
285 }
286
287 static void fix_tsched_watermark(struct userdata *u) {
288 size_t max_use;
289 pa_assert(u);
290 pa_assert(u->use_tsched);
291
292 max_use = u->hwbuf_size - u->hwbuf_unused;
293
294 if (u->tsched_watermark > max_use - u->min_sleep)
295 u->tsched_watermark = max_use - u->min_sleep;
296
297 if (u->tsched_watermark < u->min_wakeup)
298 u->tsched_watermark = u->min_wakeup;
299 }
300
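/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * The invariant that fix_min_sleep_wakeup() and fix_tsched_watermark()
 * establish together, assuming the usable buffer is big enough for min_sleep
 * and min_wakeup to coexist: the watermark ends up clamped into the usable
 * part of the hw buffer. */
static PA_GCC_UNUSED void example_watermark_invariant(struct userdata *u) {
    size_t max_use = u->hwbuf_size - u->hwbuf_unused;

    /* after both fixups: min_wakeup <= watermark <= max_use - min_sleep */
    pa_assert(u->tsched_watermark >= u->min_wakeup);
    pa_assert(u->tsched_watermark <= max_use - u->min_sleep);
}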
301 static void increase_watermark(struct userdata *u) {
302 size_t old_watermark;
303 pa_usec_t old_min_latency, new_min_latency;
304
305 pa_assert(u);
306 pa_assert(u->use_tsched);
307
308 /* First, just try to increase the watermark */
309 old_watermark = u->tsched_watermark;
310 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
311 fix_tsched_watermark(u);
312
313 if (old_watermark != u->tsched_watermark) {
314 pa_log_info("Increasing wakeup watermark to %0.2f ms",
315 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
316 return;
317 }
318
319 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
320 old_min_latency = u->sink->thread_info.min_latency;
321 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
322 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
323
324 if (old_min_latency != new_min_latency) {
325 pa_log_info("Increasing minimal latency to %0.2f ms",
326 (double) new_min_latency / PA_USEC_PER_MSEC);
327
328 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
329 }
330
331 /* When we reach this we're officially fucked! */
332 }
333
334 static void decrease_watermark(struct userdata *u) {
335 size_t old_watermark;
336 pa_usec_t now;
337
338 pa_assert(u);
339 pa_assert(u->use_tsched);
340
341 now = pa_rtclock_now();
342
343 if (u->watermark_dec_not_before <= 0)
344 goto restart;
345
346 if (u->watermark_dec_not_before > now)
347 return;
348
349 old_watermark = u->tsched_watermark;
350
351 if (u->tsched_watermark < u->watermark_dec_step)
352 u->tsched_watermark = u->tsched_watermark / 2;
353 else
354 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
355
356 fix_tsched_watermark(u);
357
358 if (old_watermark != u->tsched_watermark)
359 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
360 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
361
362 /* We don't change the latency range */
363
364 restart:
365 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
366 }
367
368 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
369 pa_usec_t usec, wm;
370
371 pa_assert(sleep_usec);
372 pa_assert(process_usec);
373
374 pa_assert(u);
375 pa_assert(u->use_tsched);
376
377 usec = pa_sink_get_requested_latency_within_thread(u->sink);
378
379 if (usec == (pa_usec_t) -1)
380 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
381
382 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
383
384 if (wm > usec)
385 wm = usec/2;
386
387 *sleep_usec = usec - wm;
388 *process_usec = wm;
389
390 #ifdef DEBUG_TIMING
391 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
392 (unsigned long) (usec / PA_USEC_PER_MSEC),
393 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
394 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
395 #endif
396 }
397
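/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * The sleep/process split above with worked numbers: for a requested latency
 * of 50ms and a 20ms watermark we sleep 30ms and keep 20ms of margin to
 * refill in time; a watermark larger than the latency would be halved to
 * latency/2 instead. */
static PA_GCC_UNUSED void example_sleep_split(void) {
    pa_usec_t usec = 50*PA_USEC_PER_MSEC; /* requested latency */
    pa_usec_t wm = 20*PA_USEC_PER_MSEC;   /* watermark */

    if (wm > usec)
        wm = usec/2;

    /* prints: sleep=30 ms, process=20 ms */
    pa_log_debug("sleep=%lu ms, process=%lu ms",
                 (unsigned long) ((usec - wm) / PA_USEC_PER_MSEC),
                 (unsigned long) (wm / PA_USEC_PER_MSEC));
}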
398 static int try_recover(struct userdata *u, const char *call, int err) {
399 pa_assert(u);
400 pa_assert(call);
401 pa_assert(err < 0);
402
403 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
404
405 pa_assert(err != -EAGAIN);
406
407 if (err == -EPIPE)
408 pa_log_debug("%s: Buffer underrun!", call);
409
410 if (err == -ESTRPIPE)
411 pa_log_debug("%s: System suspended!", call);
412
413 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
414 pa_log("%s: %s", call, pa_alsa_strerror(err));
415 return -1;
416 }
417
418 u->first = TRUE;
419 u->since_start = 0;
420 return 0;
421 }
422
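/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * The caller-side pattern that try_recover() implements, reduced to plain
 * alsa-lib calls; 'pcm' stands for any open playback handle. snd_pcm_recover()
 * handles the recoverable -EPIPE (underrun) and -ESTRPIPE (suspend) cases. */
static PA_GCC_UNUSED int example_write_with_recovery(snd_pcm_t *pcm, const void *buf, snd_pcm_uframes_t frames) {
    snd_pcm_sframes_t n;

    if ((n = snd_pcm_writei(pcm, buf, frames)) < 0) {
        /* the last argument silences alsa-lib's own error message */
        if ((n = snd_pcm_recover(pcm, (int) n, 1)) < 0)
            return (int) n; /* hard error */
        return 0; /* recovered; the caller should restart the stream */
    }

    return (int) n; /* frames actually written */
}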
423 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
424 size_t left_to_play;
425 pa_bool_t underrun = FALSE;
426
427 /* We use <= instead of < for this check here because an underrun
428 * only happens after the last sample was processed, not merely when
429 * it is removed from the buffer. This is particularly important
430 * when block transfer is used. */
431
432 if (n_bytes <= u->hwbuf_size)
433 left_to_play = u->hwbuf_size - n_bytes;
434 else {
435
436 /* We got a dropout. What a mess! */
437 left_to_play = 0;
438 underrun = TRUE;
439
440 #ifdef DEBUG_TIMING
441 PA_DEBUG_TRAP;
442 #endif
443
444 if (!u->first && !u->after_rewind)
445 if (pa_log_ratelimit(PA_LOG_INFO))
446 pa_log_info("Underrun!");
447 }
448
449 #ifdef DEBUG_TIMING
450 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
451 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
452 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
453 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
454 #endif
455
456 if (u->use_tsched) {
457 pa_bool_t reset_not_before = TRUE;
458
459 if (!u->first && !u->after_rewind) {
460 if (underrun || left_to_play < u->watermark_inc_threshold)
461 increase_watermark(u);
462 else if (left_to_play > u->watermark_dec_threshold) {
463 reset_not_before = FALSE;
464
465 /* We decrease the watermark only if we have
466 * actually been woken up by a timeout. If something
467 * else woke us up, it's too easy to fulfill the deadlines... */
468
469 if (on_timeout)
470 decrease_watermark(u);
471 }
472 }
473
474 if (reset_not_before)
475 u->watermark_dec_not_before = 0;
476 }
477
478 return left_to_play;
479 }
480
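/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * The left-to-play arithmetic with worked numbers: with the 352800-byte (2s)
 * buffer from the earlier example and avail reporting 340000 writable bytes,
 * 12800 bytes (about 72.6ms) are still queued; avail reporting more than the
 * buffer size signals an underrun. */
static PA_GCC_UNUSED void example_left_to_play(void) {
    size_t hwbuf_size = 352800; /* 2s @ 44.1kHz S16 stereo */
    size_t n_bytes = 340000;    /* writable space reported by avail */

    if (n_bytes <= hwbuf_size)
        pa_log_debug("%lu bytes still queued", (unsigned long) (hwbuf_size - n_bytes)); /* 12800 */
    else
        pa_log_debug("underrun: the device consumed everything");
}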
481 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
482 pa_bool_t work_done = FALSE;
483 pa_usec_t max_sleep_usec = 0, process_usec = 0;
484 size_t left_to_play;
485 unsigned j = 0;
486
487 pa_assert(u);
488 pa_sink_assert_ref(u->sink);
489
490 if (u->use_tsched)
491 hw_sleep_time(u, &max_sleep_usec, &process_usec);
492
493 for (;;) {
494 snd_pcm_sframes_t n;
495 size_t n_bytes;
496 int r;
497 pa_bool_t after_avail = TRUE;
498
499 /* First we determine how many samples are missing to fill the
500 * buffer up to 100% */
501
502 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
503
504 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
505 continue;
506
507 return r;
508 }
509
510 n_bytes = (size_t) n * u->frame_size;
511
512 #ifdef DEBUG_TIMING
513 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
514 #endif
515
516 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
517 on_timeout = FALSE;
518
519 if (u->use_tsched)
520
521 /* We won't fill up the playback buffer before at least
522 * half the sleep time is over because otherwise we might
523 * ask for more data from the clients than they expect. We
524 * need to guarantee that clients only have to keep around
525 * a single hw buffer length. */
526
527 if (!polled &&
528 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
529 #ifdef DEBUG_TIMING
530 pa_log_debug("Not filling up, because too early.");
531 #endif
532 break;
533 }
534
535 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
536
537 if (polled)
538 PA_ONCE_BEGIN {
539 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
540 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
541 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
542 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
543 pa_strnull(dn));
544 pa_xfree(dn);
545 } PA_ONCE_END;
546
547 #ifdef DEBUG_TIMING
548 pa_log_debug("Not filling up, because not necessary.");
549 #endif
550 break;
551 }
552
553
554 if (++j > 10) {
555 #ifdef DEBUG_TIMING
556 pa_log_debug("Not filling up, because already too many iterations.");
557 #endif
558
559 break;
560 }
561
562 n_bytes -= u->hwbuf_unused;
563 polled = FALSE;
564
565 #ifdef DEBUG_TIMING
566 pa_log_debug("Filling up");
567 #endif
568
569 for (;;) {
570 pa_memchunk chunk;
571 void *p;
572 int err;
573 const snd_pcm_channel_area_t *areas;
574 snd_pcm_uframes_t offset, frames;
575 snd_pcm_sframes_t sframes;
576
577 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
578 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
579
580 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
581
582 if (!after_avail && err == -EAGAIN)
583 break;
584
585 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
586 continue;
587
588 return r;
589 }
590
591 /* Make sure that if these memblocks need to be copied they will fit into one slot */
592 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
593 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
594
595 if (!after_avail && frames == 0)
596 break;
597
598 pa_assert(frames > 0);
599 after_avail = FALSE;
600
601 /* Check that these are multiples of 8 bits */
602 pa_assert((areas[0].first & 7) == 0);
603 pa_assert((areas[0].step & 7) == 0);
604
605 /* We assume a single interleaved memory buffer */
606 pa_assert((areas[0].first >> 3) == 0);
607 pa_assert((areas[0].step >> 3) == u->frame_size);
608
609 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
610
611 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
612 chunk.length = pa_memblock_get_length(chunk.memblock);
613 chunk.index = 0;
614
615 pa_sink_render_into_full(u->sink, &chunk);
616 pa_memblock_unref_fixed(chunk.memblock);
617
618 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
619
620 if (!after_avail && (int) sframes == -EAGAIN)
621 break;
622
623 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
624 continue;
625
626 return r;
627 }
628
629 work_done = TRUE;
630
631 u->write_count += frames * u->frame_size;
632 u->since_start += frames * u->frame_size;
633
634 #ifdef DEBUG_TIMING
635 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
636 #endif
637
638 if ((size_t) frames * u->frame_size >= n_bytes)
639 break;
640
641 n_bytes -= (size_t) frames * u->frame_size;
642 }
643 }
644
645 if (u->use_tsched) {
646 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
647 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
648
649 if (*sleep_usec > process_usec)
650 *sleep_usec -= process_usec;
651 else
652 *sleep_usec = 0;
653 } else
654 *sleep_usec = 0;
655
656 return work_done ? 1 : 0;
657 }
658
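/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * The begin/commit cycle used by mmap_write() above, reduced to its skeleton:
 * the standard alsa-lib mmap API for an interleaved setup with 'frame_size'
 * bytes per frame. Here we just render silence where mmap_write() calls
 * pa_sink_render_into_full(). */
static PA_GCC_UNUSED int example_mmap_cycle(snd_pcm_t *pcm, size_t frame_size) {
    const snd_pcm_channel_area_t *areas;
    snd_pcm_uframes_t offset, frames = 1024; /* in: how much we would like */
    snd_pcm_sframes_t committed;
    uint8_t *p;
    int err;

    /* begin may shrink 'frames' to what is contiguously mappable */
    if ((err = snd_pcm_mmap_begin(pcm, &areas, &offset, &frames)) < 0)
        return err;

    p = (uint8_t*) areas[0].addr + offset * frame_size;
    memset(p, 0, frames * frame_size); /* write directly into the DMA buffer */

    if ((committed = snd_pcm_mmap_commit(pcm, offset, frames)) < 0)
        return (int) committed;

    return 0;
}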
659 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
660 pa_bool_t work_done = FALSE;
661 pa_usec_t max_sleep_usec = 0, process_usec = 0;
662 size_t left_to_play;
663 unsigned j = 0;
664
665 pa_assert(u);
666 pa_sink_assert_ref(u->sink);
667
668 if (u->use_tsched)
669 hw_sleep_time(u, &max_sleep_usec, &process_usec);
670
671 for (;;) {
672 snd_pcm_sframes_t n;
673 size_t n_bytes;
674 int r;
675 pa_bool_t after_avail = TRUE;
676
677 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
678
679 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
680 continue;
681
682 return r;
683 }
684
685 n_bytes = (size_t) n * u->frame_size;
686 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
687 on_timeout = FALSE;
688
689 if (u->use_tsched)
690
691 /* We won't fill up the playback buffer before at least
692 * half the sleep time is over because otherwise we might
693 * ask for more data from the clients than they expect. We
694 * need to guarantee that clients only have to keep around
695 * a single hw buffer length. */
696
697 if (!polled &&
698 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
699 break;
700
701 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
702
703 if (polled)
704 PA_ONCE_BEGIN {
705 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
706 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
707 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
708 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
709 pa_strnull(dn));
710 pa_xfree(dn);
711 } PA_ONCE_END;
712
713 break;
714 }
715
716 if (++j > 10) {
717 #ifdef DEBUG_TIMING
718 pa_log_debug("Not filling up, because already too many iterations.");
719 #endif
720
721 break;
722 }
723
724 n_bytes -= u->hwbuf_unused;
725 polled = FALSE;
726
727 for (;;) {
728 snd_pcm_sframes_t frames;
729 void *p;
730
731 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
732
733 if (u->memchunk.length <= 0)
734 pa_sink_render(u->sink, n_bytes, &u->memchunk);
735
736 pa_assert(u->memchunk.length > 0);
737
738 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
739
740 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
741 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
742
743 p = pa_memblock_acquire(u->memchunk.memblock);
744 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
745 pa_memblock_release(u->memchunk.memblock);
746
747 if (PA_UNLIKELY(frames < 0)) {
748
749 if (!after_avail && (int) frames == -EAGAIN)
750 break;
751
752 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
753 continue;
754
755 return r;
756 }
757
758 if (!after_avail && frames == 0)
759 break;
760
761 pa_assert(frames > 0);
762 after_avail = FALSE;
763
764 u->memchunk.index += (size_t) frames * u->frame_size;
765 u->memchunk.length -= (size_t) frames * u->frame_size;
766
767 if (u->memchunk.length <= 0) {
768 pa_memblock_unref(u->memchunk.memblock);
769 pa_memchunk_reset(&u->memchunk);
770 }
771
772 work_done = TRUE;
773
774 u->write_count += frames * u->frame_size;
775 u->since_start += frames * u->frame_size;
776
777 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
778
779 if ((size_t) frames * u->frame_size >= n_bytes)
780 break;
781
782 n_bytes -= (size_t) frames * u->frame_size;
783 }
784 }
785
786 if (u->use_tsched) {
787 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
788 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
789
790 if (*sleep_usec > process_usec)
791 *sleep_usec -= process_usec;
792 else
793 *sleep_usec = 0;
794 } else
795 *sleep_usec = 0;
796
797 return work_done ? 1 : 0;
798 }
799
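/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * The partial-consumption bookkeeping unix_write() does on u->memchunk,
 * isolated into a hypothetical helper: advance index/length by what was
 * written and release the block once it is fully drained. */
static PA_GCC_UNUSED void example_memchunk_consume(pa_memchunk *c, size_t n_bytes) {
    pa_assert(n_bytes <= c->length);

    c->index += n_bytes;
    c->length -= n_bytes;

    /* once drained, release the block and reset to the empty state */
    if (c->length <= 0) {
        pa_memblock_unref(c->memblock);
        pa_memchunk_reset(c);
    }
}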
800 static void update_smoother(struct userdata *u) {
801 snd_pcm_sframes_t delay = 0;
802 int64_t position;
803 int err;
804 pa_usec_t now1 = 0, now2;
805 snd_pcm_status_t *status;
806
807 snd_pcm_status_alloca(&status);
808
809 pa_assert(u);
810 pa_assert(u->pcm_handle);
811
812 /* Let's update the time smoother */
813
814 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
815 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
816 return;
817 }
818
819 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
820 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
821 else {
822 snd_htimestamp_t htstamp = { 0, 0 };
823 snd_pcm_status_get_htstamp(status, &htstamp);
824 now1 = pa_timespec_load(&htstamp);
825 }
826
827 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
828 if (now1 <= 0)
829 now1 = pa_rtclock_now();
830
831 /* check if the time since the last update is bigger than the interval */
832 if (u->last_smoother_update > 0)
833 if (u->last_smoother_update + u->smoother_interval > now1)
834 return;
835
836 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
837
838 if (PA_UNLIKELY(position < 0))
839 position = 0;
840
841 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
842
843 pa_smoother_put(u->smoother, now1, now2);
844
845 u->last_smoother_update = now1;
846 /* exponentially increase the update interval up to the MAX limit */
847 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
848 }
849
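/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * How the smoother data is consumed: update_smoother() above feeds
 * (system time, sound-card time) pairs in via pa_smoother_put(), and readers
 * such as sink_get_latency() interpolate the card position for an arbitrary
 * system time with pa_smoother_get(). */
static PA_GCC_UNUSED void example_smoother_usage(pa_smoother *s) {
    pa_usec_t now_sys, now_card;

    now_sys = pa_rtclock_now();

    /* estimated sound-card position for the current system time */
    now_card = pa_smoother_get(s, now_sys);

    pa_log_debug("card clock estimate: %lu us", (unsigned long) now_card);
}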
850 static pa_usec_t sink_get_latency(struct userdata *u) {
851 pa_usec_t r;
852 int64_t delay;
853 pa_usec_t now1, now2;
854
855 pa_assert(u);
856
857 now1 = pa_rtclock_now();
858 now2 = pa_smoother_get(u->smoother, now1);
859
860 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
861
862 r = delay >= 0 ? (pa_usec_t) delay : 0;
863
864 if (u->memchunk.memblock)
865 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
866
867 return r;
868 }
869
870 static int build_pollfd(struct userdata *u) {
871 pa_assert(u);
872 pa_assert(u->pcm_handle);
873
874 if (u->alsa_rtpoll_item)
875 pa_rtpoll_item_free(u->alsa_rtpoll_item);
876
877 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
878 return -1;
879
880 return 0;
881 }
882
883 /* Called from IO context */
884 static int suspend(struct userdata *u) {
885 pa_assert(u);
886 pa_assert(u->pcm_handle);
887
888 pa_smoother_pause(u->smoother, pa_rtclock_now());
889
890 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
891 * take awfully long with our long buffer sizes today. */
892 snd_pcm_close(u->pcm_handle);
893 u->pcm_handle = NULL;
894
895 if (u->alsa_rtpoll_item) {
896 pa_rtpoll_item_free(u->alsa_rtpoll_item);
897 u->alsa_rtpoll_item = NULL;
898 }
899
900 /* We reset max_rewind/max_request here to make sure that, while we
901 * are suspended, the old max_request/max_rewind values set before
902 * the suspend don't influence the per-stream buffers of newly
903 * created streams, and that those streams' requirements in turn
904 * have no influence on us. */
905 pa_sink_set_max_rewind_within_thread(u->sink, 0);
906 pa_sink_set_max_request_within_thread(u->sink, 0);
907
908 pa_log_info("Device suspended...");
909
910 return 0;
911 }
912
913 /* Called from IO context */
914 static int update_sw_params(struct userdata *u) {
915 snd_pcm_uframes_t avail_min;
916 int err;
917
918 pa_assert(u);
919
920 /* Use the full buffer if no one asked us for anything specific */
921 u->hwbuf_unused = 0;
922
923 if (u->use_tsched) {
924 pa_usec_t latency;
925
926 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
927 size_t b;
928
929 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
930
931 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
932
933 /* We need at least one sample in our buffer */
934
935 if (PA_UNLIKELY(b < u->frame_size))
936 b = u->frame_size;
937
938 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
939 }
940
941 fix_min_sleep_wakeup(u);
942 fix_tsched_watermark(u);
943 }
944
945 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
946
947 /* We need at least one frame in the used part of the buffer */
948 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
949
950 if (u->use_tsched) {
951 pa_usec_t sleep_usec, process_usec;
952
953 hw_sleep_time(u, &sleep_usec, &process_usec);
954 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
955 }
956
957 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
958
959 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
960 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
961 return err;
962 }
963
964 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
965 if (pa_alsa_pcm_is_hw(u->pcm_handle))
966 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
967 else {
968 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
969 pa_sink_set_max_rewind_within_thread(u->sink, 0);
970 }
971
972 return 0;
973 }
974
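/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * The avail_min computation above with worked numbers: with hwbuf_unused = 0
 * it starts at a single frame, and in tsched mode a 30ms sleep at 44.1kHz
 * contributes another 1323 frames. */
static PA_GCC_UNUSED void example_avail_min(void) {
    snd_pcm_uframes_t avail_min = 0/4 + 1; /* hwbuf_unused=0, frame_size=4 */

    /* 30ms * 44100 frames/s = 1323 frames */
    avail_min += (30 * 44100) / 1000;

    pa_log_debug("avail_min=%lu frames", (unsigned long) avail_min); /* 1324 */
}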
975 /* Called from IO context */
976 static int unsuspend(struct userdata *u) {
977 pa_sample_spec ss;
978 int err;
979 pa_bool_t b, d;
980 snd_pcm_uframes_t period_size, buffer_size;
981
982 pa_assert(u);
983 pa_assert(!u->pcm_handle);
984
985 pa_log_info("Trying resume...");
986
987 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
988 SND_PCM_NONBLOCK|
989 SND_PCM_NO_AUTO_RESAMPLE|
990 SND_PCM_NO_AUTO_CHANNELS|
991 SND_PCM_NO_AUTO_FORMAT)) < 0) {
992 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
993 goto fail;
994 }
995
996 ss = u->sink->sample_spec;
997 period_size = u->fragment_size / u->frame_size;
998 buffer_size = u->hwbuf_size / u->frame_size;
999 b = u->use_mmap;
1000 d = u->use_tsched;
1001
1002 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1003 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1004 goto fail;
1005 }
1006
1007 if (b != u->use_mmap || d != u->use_tsched) {
1008 pa_log_warn("Resume failed, couldn't get original access mode.");
1009 goto fail;
1010 }
1011
1012 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1013 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1014 goto fail;
1015 }
1016
1017 if (period_size*u->frame_size != u->fragment_size ||
1018 buffer_size*u->frame_size != u->hwbuf_size) {
1019 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1020 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1021 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1022 goto fail;
1023 }
1024
1025 if (update_sw_params(u) < 0)
1026 goto fail;
1027
1028 if (build_pollfd(u) < 0)
1029 goto fail;
1030
1031 u->write_count = 0;
1032 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1033 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1034 u->last_smoother_update = 0;
1035
1036 u->first = TRUE;
1037 u->since_start = 0;
1038
1039 pa_log_info("Resumed successfully...");
1040
1041 return 0;
1042
1043 fail:
1044 if (u->pcm_handle) {
1045 snd_pcm_close(u->pcm_handle);
1046 u->pcm_handle = NULL;
1047 }
1048
1049 return -PA_ERR_IO;
1050 }
1051
1052 /* Called from IO context */
1053 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1054 struct userdata *u = PA_SINK(o)->userdata;
1055
1056 switch (code) {
1057
1058 case PA_SINK_MESSAGE_FINISH_MOVE:
1059 case PA_SINK_MESSAGE_ADD_INPUT: {
1060 pa_sink_input *i = PA_SINK_INPUT(data);
1061 int r = 0;
1062
1063 if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
1064 break;
1065
1066 u->old_rate = u->sink->sample_spec.rate;
1067
1068 /* Passthrough format, see if we need to reset sink sample rate */
1069 if (u->sink->sample_spec.rate == i->thread_info.sample_spec.rate)
1070 break;
1071
1072 /* .. we do */
1073 if ((r = suspend(u)) < 0)
1074 return r;
1075
1076 u->sink->sample_spec.rate = i->thread_info.sample_spec.rate;
1077
1078 if ((r = unsuspend(u)) < 0)
1079 return r;
1080
1081 break;
1082 }
1083
1084 case PA_SINK_MESSAGE_START_MOVE:
1085 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1086 pa_sink_input *i = PA_SINK_INPUT(data);
1087 int r = 0;
1088
1089 if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
1090 break;
1091
1092 /* Passthrough format, see if we need to reset sink sample rate */
1093 if (u->sink->sample_spec.rate == u->old_rate)
1094 break;
1095
1096 /* .. we do */
1097 if ((r = suspend(u)) < 0)
1098 return r;
1099
1100 u->sink->sample_spec.rate = u->old_rate;
1101
1102 if ((r = unsuspend(u)) < 0)
1103 return r;
1104
1105 break;
1106 }
1107
1108 case PA_SINK_MESSAGE_GET_LATENCY: {
1109 pa_usec_t r = 0;
1110
1111 if (u->pcm_handle)
1112 r = sink_get_latency(u);
1113
1114 *((pa_usec_t*) data) = r;
1115
1116 return 0;
1117 }
1118
1119 case PA_SINK_MESSAGE_SET_STATE:
1120
1121 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1122
1123 case PA_SINK_SUSPENDED: {
1124 int r;
1125
1126 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1127
1128 if ((r = suspend(u)) < 0)
1129 return r;
1130
1131 break;
1132 }
1133
1134 case PA_SINK_IDLE:
1135 case PA_SINK_RUNNING: {
1136 int r;
1137
1138 if (u->sink->thread_info.state == PA_SINK_INIT) {
1139 if (build_pollfd(u) < 0)
1140 return -PA_ERR_IO;
1141 }
1142
1143 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1144 if ((r = unsuspend(u)) < 0)
1145 return r;
1146 }
1147
1148 break;
1149 }
1150
1151 case PA_SINK_UNLINKED:
1152 case PA_SINK_INIT:
1153 case PA_SINK_INVALID_STATE:
1154 ;
1155 }
1156
1157 break;
1158 }
1159
1160 return pa_sink_process_msg(o, code, data, offset, chunk);
1161 }
1162
1163 /* Called from main context */
1164 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1165 pa_sink_state_t old_state;
1166 struct userdata *u;
1167
1168 pa_sink_assert_ref(s);
1169 pa_assert_se(u = s->userdata);
1170
1171 old_state = pa_sink_get_state(u->sink);
1172
1173 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1174 reserve_done(u);
1175 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1176 if (reserve_init(u, u->device_name) < 0)
1177 return -PA_ERR_BUSY;
1178
1179 return 0;
1180 }
1181
1182 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1183 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1184
1185 pa_assert(u);
1186 pa_assert(u->mixer_handle);
1187
1188 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1189 return 0;
1190
1191 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1192 return 0;
1193
1194 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1195 pa_sink_get_volume(u->sink, TRUE);
1196 pa_sink_get_mute(u->sink, TRUE);
1197 }
1198
1199 return 0;
1200 }
1201
1202 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1203 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1204
1205 pa_assert(u);
1206 pa_assert(u->mixer_handle);
1207
1208 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1209 return 0;
1210
1211 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1212 return 0;
1213
1214 if (mask & SND_CTL_EVENT_MASK_VALUE)
1215 pa_sink_update_volume_and_mute(u->sink);
1216
1217 return 0;
1218 }
1219
1220 static void sink_get_volume_cb(pa_sink *s) {
1221 struct userdata *u = s->userdata;
1222 pa_cvolume r;
1223 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1224
1225 pa_assert(u);
1226 pa_assert(u->mixer_path);
1227 pa_assert(u->mixer_handle);
1228
1229 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1230 return;
1231
1232 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1233 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1234
1235 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1236
1237 if (u->mixer_path->has_dB) {
1238 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1239
1240 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1241 }
1242
1243 if (pa_cvolume_equal(&u->hardware_volume, &r))
1244 return;
1245
1246 s->real_volume = u->hardware_volume = r;
1247
1248 /* Hmm, so the hardware volume changed, let's reset our software volume */
1249 if (u->mixer_path->has_dB)
1250 pa_sink_set_soft_volume(s, NULL);
1251 }
1252
1253 static void sink_set_volume_cb(pa_sink *s) {
1254 struct userdata *u = s->userdata;
1255 pa_cvolume r;
1256 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1257 pa_bool_t sync_volume = !!(s->flags & PA_SINK_SYNC_VOLUME);
1258
1259 pa_assert(u);
1260 pa_assert(u->mixer_path);
1261 pa_assert(u->mixer_handle);
1262
1263 /* Shift up by the base volume */
1264 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1265
1266 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
1267 return;
1268
1269 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1270 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1271
1272 u->hardware_volume = r;
1273
1274 if (u->mixer_path->has_dB) {
1275 pa_cvolume new_soft_volume;
1276 pa_bool_t accurate_enough;
1277 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1278
1279 /* Match exactly what the user requested by software */
1280 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1281
1282 /* If the adjustment to do in software is only minimal we
1283 * can skip it. That saves us CPU at the expense of a bit of
1284 * accuracy */
1285 accurate_enough =
1286 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1287 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1288
1289 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1290 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1291 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1292 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1293 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1294 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1295 pa_yes_no(accurate_enough));
1296 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1297
1298 if (!accurate_enough)
1299 s->soft_volume = new_soft_volume;
1300
1301 } else {
1302 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1303
1304 /* We can't match exactly what the user requested, hence let's
1305 * at least tell the user about it */
1306
1307 s->real_volume = r;
1308 }
1309 }
1310
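/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * The accuracy test from sink_set_volume_cb() on a single channel. With
 * VOLUME_ACCURACY = PA_VOLUME_NORM/100, a software factor within roughly 1%
 * of unity is treated as close enough and the software scaling is skipped. */
static PA_GCC_UNUSED pa_bool_t example_volume_accurate_enough(pa_volume_t requested, pa_volume_t hw) {
    /* the software factor that would make hw*soft match the request */
    pa_volume_t soft = pa_sw_volume_divide(requested, hw);

    return
        soft >= (PA_VOLUME_NORM - VOLUME_ACCURACY) &&
        soft <= (PA_VOLUME_NORM + VOLUME_ACCURACY);
}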
1311 static void sink_write_volume_cb(pa_sink *s) {
1312 struct userdata *u = s->userdata;
1313 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1314
1315 pa_assert(u);
1316 pa_assert(u->mixer_path);
1317 pa_assert(u->mixer_handle);
1318 pa_assert(s->flags & PA_SINK_SYNC_VOLUME);
1319
1320 /* Shift up by the base volume */
1321 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1322
1323 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1324 pa_log_error("Writing HW volume failed");
1325 else {
1326 pa_cvolume tmp_vol;
1327 pa_bool_t accurate_enough;
1328
1329 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1330 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1331
1332 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1333 accurate_enough =
1334 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1335 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1336
1337 if (!accurate_enough) {
1338 union {
1339 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1340 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1341 } vol;
1342
1343 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1344 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1345 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1346 pa_log_debug(" in dB: %s (request) != %s",
1347 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1348 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1349 }
1350 }
1351 }
1352
1353 static void sink_get_mute_cb(pa_sink *s) {
1354 struct userdata *u = s->userdata;
1355 pa_bool_t b;
1356
1357 pa_assert(u);
1358 pa_assert(u->mixer_path);
1359 pa_assert(u->mixer_handle);
1360
1361 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1362 return;
1363
1364 s->muted = b;
1365 }
1366
1367 static void sink_set_mute_cb(pa_sink *s) {
1368 struct userdata *u = s->userdata;
1369
1370 pa_assert(u);
1371 pa_assert(u->mixer_path);
1372 pa_assert(u->mixer_handle);
1373
1374 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1375 }
1376
1377 static void mixer_volume_init(struct userdata *u) {
1378 pa_assert(u);
1379
1380 if (!u->mixer_path->has_volume) {
1381 pa_sink_set_write_volume_callback(u->sink, NULL);
1382 pa_sink_set_get_volume_callback(u->sink, NULL);
1383 pa_sink_set_set_volume_callback(u->sink, NULL);
1384
1385 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1386 } else {
1387 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1388 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1389
1390 if (u->mixer_path->has_dB && u->sync_volume) {
1391 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1392 pa_log_info("Successfully enabled synchronous volume.");
1393 } else
1394 pa_sink_set_write_volume_callback(u->sink, NULL);
1395
1396 if (u->mixer_path->has_dB) {
1397 pa_sink_enable_decibel_volume(u->sink, TRUE);
1398 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1399
1400 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1401 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1402
1403 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1404 } else {
1405 pa_sink_enable_decibel_volume(u->sink, FALSE);
1406 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1407
1408 u->sink->base_volume = PA_VOLUME_NORM;
1409 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1410 }
1411
1412 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1413 }
1414
1415 if (!u->mixer_path->has_mute) {
1416 pa_sink_set_get_mute_callback(u->sink, NULL);
1417 pa_sink_set_set_mute_callback(u->sink, NULL);
1418 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1419 } else {
1420 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1421 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1422 pa_log_info("Using hardware mute control.");
1423 }
1424 }
1425
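/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * A worked instance of the base-volume computation above, with a hypothetical
 * mixer path that tops out at +20 dB: base_volume becomes
 * pa_sw_volume_from_dB(-20.0), so PA_VOLUME_NORM maps to the path's 0 dB
 * point rather than its (amplified) maximum. */
static PA_GCC_UNUSED void example_base_volume(void) {
    double max_dB = 20.0; /* hypothetical mixer path maximum */
    pa_volume_t base = pa_sw_volume_from_dB(-max_dB);

    /* pa_sw_volume_to_dB(base) yields -20.00 again */
    pa_log_debug("base volume: %u (%0.2f dB)", base, pa_sw_volume_to_dB(base));
}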
1426 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1427 struct userdata *u = s->userdata;
1428 pa_alsa_port_data *data;
1429
1430 pa_assert(u);
1431 pa_assert(p);
1432 pa_assert(u->mixer_handle);
1433
1434 data = PA_DEVICE_PORT_DATA(p);
1435
1436 pa_assert_se(u->mixer_path = data->path);
1437 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1438
1439 mixer_volume_init(u);
1440
1441 if (data->setting)
1442 pa_alsa_setting_select(data->setting, u->mixer_handle);
1443
1444 if (s->set_mute)
1445 s->set_mute(s);
1446 if (s->set_volume)
1447 s->set_volume(s);
1448
1449 return 0;
1450 }
1451
1452 static void sink_update_requested_latency_cb(pa_sink *s) {
1453 struct userdata *u = s->userdata;
1454 size_t before;
1455 pa_assert(u);
1456 pa_assert(u->use_tsched); /* only when timer scheduling
1457 * is used can we dynamically
1458 * adjust the latency */
1459
1460 if (!u->pcm_handle)
1461 return;
1462
1463 before = u->hwbuf_unused;
1464 update_sw_params(u);
1465
1466 /* Let's check whether we now use only a smaller part of the
1467 buffer than before. If so, we need to make sure that subsequent
1468 rewinds are relative to the new maximum fill level and not to the
1469 current fill level. Thus, let's do a full rewind once, to clear
1470 things up. */
1471
1472 if (u->hwbuf_unused > before) {
1473 pa_log_debug("Requesting rewind due to latency change.");
1474 pa_sink_request_rewind(s, (size_t) -1);
1475 }
1476 }
1477
1478 static int process_rewind(struct userdata *u) {
1479 snd_pcm_sframes_t unused;
1480 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1481 pa_assert(u);
1482
1483 /* Figure out how much we shall rewind and reset the counter */
1484 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1485
1486 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1487
1488 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1489 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1490 return -1;
1491 }
1492
1493 unused_nbytes = (size_t) unused * u->frame_size;
1494
1495 /* Make sure the rewind doesn't go too far; rewinding too much can cause issues with DMAs */
1496 unused_nbytes += u->rewind_safeguard;
1497
1498 if (u->hwbuf_size > unused_nbytes)
1499 limit_nbytes = u->hwbuf_size - unused_nbytes;
1500 else
1501 limit_nbytes = 0;
1502
1503 if (rewind_nbytes > limit_nbytes)
1504 rewind_nbytes = limit_nbytes;
1505
1506 if (rewind_nbytes > 0) {
1507 snd_pcm_sframes_t in_frames, out_frames;
1508
1509 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1510
1511 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1512 pa_log_debug("before: %lu", (unsigned long) in_frames);
1513 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1514 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1515 if (try_recover(u, "process_rewind", out_frames) < 0)
1516 return -1;
1517 out_frames = 0;
1518 }
1519
1520 pa_log_debug("after: %lu", (unsigned long) out_frames);
1521
1522 rewind_nbytes = (size_t) out_frames * u->frame_size;
1523
1524 if (rewind_nbytes <= 0)
1525 pa_log_info("Tried rewind, but was apparently not possible.");
1526 else {
1527 u->write_count -= rewind_nbytes;
1528 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1529 pa_sink_process_rewind(u->sink, rewind_nbytes);
1530
1531 u->after_rewind = TRUE;
1532 return 0;
1533 }
1534 } else
1535 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1536
1537 pa_sink_process_rewind(u->sink, 0);
1538 return 0;
1539 }
1540
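/* --- Illustrative sketch (editor's addition, not part of the original file) ---
 * The rewind-limit arithmetic from process_rewind() with worked numbers:
 * with a 352800-byte buffer, 100000 writable bytes reported by avail and the
 * 256-byte safeguard, at most 252544 bytes may be rewound, so a 300000-byte
 * request is clamped. */
static PA_GCC_UNUSED void example_rewind_limit(void) {
    size_t hwbuf_size = 352800, unused_nbytes = 100000 + 256; /* avail + safeguard */
    size_t request = 300000, limit;

    limit = hwbuf_size > unused_nbytes ? hwbuf_size - unused_nbytes : 0;

    /* the 300000-byte request is clamped to 252544 bytes */
    pa_log_debug("rewinding %lu bytes", (unsigned long) (request > limit ? limit : request));
}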
1541 static void thread_func(void *userdata) {
1542 struct userdata *u = userdata;
1543 unsigned short revents = 0;
1544
1545 pa_assert(u);
1546
1547 pa_log_debug("Thread starting up");
1548
1549 if (u->core->realtime_scheduling)
1550 pa_make_realtime(u->core->realtime_priority);
1551
1552 pa_thread_mq_install(&u->thread_mq);
1553
1554 for (;;) {
1555 int ret;
1556 pa_usec_t rtpoll_sleep = 0;
1557
1558 #ifdef DEBUG_TIMING
1559 pa_log_debug("Loop");
1560 #endif
1561
1562 /* Render some data and write it to the dsp */
1563 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1564 int work_done;
1565 pa_usec_t sleep_usec = 0;
1566 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1567
1568 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1569 if (process_rewind(u) < 0)
1570 goto fail;
1571
1572 if (u->use_mmap)
1573 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1574 else
1575 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1576
1577 if (work_done < 0)
1578 goto fail;
1579
1580 /* pa_log_debug("work_done = %i", work_done); */
1581
1582 if (work_done) {
1583
1584 if (u->first) {
1585 pa_log_info("Starting playback.");
1586 snd_pcm_start(u->pcm_handle);
1587
1588 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1589
1590 u->first = FALSE;
1591 }
1592
1593 update_smoother(u);
1594 }
1595
1596 if (u->use_tsched) {
1597 pa_usec_t cusec;
1598
1599 if (u->since_start <= u->hwbuf_size) {
1600
1601 /* USB devices on ALSA seem to hit a buffer
1602 * underrun during the first iterations much
1603 * quicker than we calculate here, probably due to
1604 * the transport latency. To compensate for that
1605 * we artificially decrease the sleep time until
1606 * we have filled the buffer at least once
1607 * completely. */
1608
1609 if (pa_log_ratelimit(PA_LOG_DEBUG))
1610 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1611 sleep_usec /= 2;
1612 }
1613
1614 /* OK, the playback buffer is now full, let's
1615 * calculate when to wake up next */
1616 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1617
1618 /* Convert from the sound card time domain to the
1619 * system time domain */
1620 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1621
1622 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1623
1624 /* We don't trust the conversion, so we wake up on whichever comes first */
1625 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1626 }
1627
1628 u->after_rewind = FALSE;
1629
1630 }
1631
1632 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1633 pa_usec_t volume_sleep;
1634 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1635 if (volume_sleep > 0)
1636 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1637 }
1638
1639 if (rtpoll_sleep > 0)
1640 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1641 else
1642 pa_rtpoll_set_timer_disabled(u->rtpoll);
1643
1644 /* Hmm, nothing to do. Let's sleep */
1645 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1646 goto fail;
1647
1648 if (u->sink->flags & PA_SINK_SYNC_VOLUME)
1649 pa_sink_volume_change_apply(u->sink, NULL);
1650
1651 if (ret == 0)
1652 goto finish;
1653
1654 /* Tell ALSA about this and process its response */
1655 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1656 struct pollfd *pollfd;
1657 int err;
1658 unsigned n;
1659
1660 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1661
1662 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1663 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1664 goto fail;
1665 }
1666
1667 if (revents & ~POLLOUT) {
1668 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1669 goto fail;
1670
1671 u->first = TRUE;
1672 u->since_start = 0;
1673 revents = 0;
1674 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1675 pa_log_debug("Wakeup from ALSA!");
1676
1677 } else
1678 revents = 0;
1679 }
1680
1681 fail:
1682 /* If this was not a regular exit from the loop, we have to continue
1683 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1684 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1685 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1686
1687 finish:
1688 pa_log_debug("Thread shutting down");
1689 }
1690
1691 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1692 const char *n;
1693 char *t;
1694
1695 pa_assert(data);
1696 pa_assert(ma);
1697 pa_assert(device_name);
1698
1699 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1700 pa_sink_new_data_set_name(data, n);
1701 data->namereg_fail = TRUE;
1702 return;
1703 }
1704
1705 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1706 data->namereg_fail = TRUE;
1707 else {
1708 n = device_id ? device_id : device_name;
1709 data->namereg_fail = FALSE;
1710 }
1711
1712 if (mapping)
1713 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1714 else
1715 t = pa_sprintf_malloc("alsa_output.%s", n);
1716
1717 pa_sink_new_data_set_name(data, t);
1718 pa_xfree(t);
1719 }
1720
1721 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1722
1723 if (!mapping && !element)
1724 return;
1725
1726 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1727 pa_log_info("Failed to find a working mixer device.");
1728 return;
1729 }
1730
1731 if (element) {
1732
1733 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1734 goto fail;
1735
1736 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1737 goto fail;
1738
1739 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1740 pa_alsa_path_dump(u->mixer_path);
1741 } else {
1742
1743 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1744 goto fail;
1745
1746 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1747 }
1748
1749 return;
1750
1751 fail:
1752
1753 if (u->mixer_path_set) {
1754 pa_alsa_path_set_free(u->mixer_path_set);
1755 u->mixer_path_set = NULL;
1756 } else if (u->mixer_path) {
1757 pa_alsa_path_free(u->mixer_path);
1758 u->mixer_path = NULL;
1759 }
1760
1761 if (u->mixer_handle) {
1762 snd_mixer_close(u->mixer_handle);
1763 u->mixer_handle = NULL;
1764 }
1765 }
1766
1767
1768 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1769 pa_bool_t need_mixer_callback = FALSE;
1770
1771 pa_assert(u);
1772
1773 if (!u->mixer_handle)
1774 return 0;
1775
1776 if (u->sink->active_port) {
1777 pa_alsa_port_data *data;
1778
1779 /* We have a list of supported paths, so let's activate the
1780 * one that has been chosen as active */
1781
1782 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1783 u->mixer_path = data->path;
1784
1785 pa_alsa_path_select(data->path, u->mixer_handle);
1786
1787 if (data->setting)
1788 pa_alsa_setting_select(data->setting, u->mixer_handle);
1789
1790 } else {
1791
1792 if (!u->mixer_path && u->mixer_path_set)
1793 u->mixer_path = u->mixer_path_set->paths;
1794
1795 if (u->mixer_path) {
1796 /* Hmm, we have only a single path, then let's activate it */
1797
1798 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1799
1800 if (u->mixer_path->settings)
1801 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1802 } else
1803 return 0;
1804 }
1805
1806 mixer_volume_init(u);
1807
1808 /* Will we need to register callbacks? */
1809 if (u->mixer_path_set && u->mixer_path_set->paths) {
1810 pa_alsa_path *p;
1811
1812 PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1813 if (p->has_volume || p->has_mute)
1814 need_mixer_callback = TRUE;
1815 }
1816 }
1817 else if (u->mixer_path)
1818 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1819
1820 if (need_mixer_callback) {
1821 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1822 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1823 u->mixer_pd = pa_alsa_mixer_pdata_new();
1824 mixer_callback = io_mixer_callback;
1825
1826 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1827 pa_log("Failed to initialize file descriptor monitoring");
1828 return -1;
1829 }
1830 } else {
1831 u->mixer_fdl = pa_alsa_fdlist_new();
1832 mixer_callback = ctl_mixer_callback;
1833
1834 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1835 pa_log("Failed to initialize file descriptor monitoring");
1836 return -1;
1837 }
1838 }
1839
1840 if (u->mixer_path_set)
1841 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1842 else
1843 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1844 }
1845
1846 return 0;
1847 }
1848
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

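    /* In ALSA terms a "fragment" is a period: the hardware buffer holds
     * nfrags periods, and the byte<->frame conversions below all use
     * the frame size of the (possibly adjusted) sample spec. */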
    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument.");
        goto fail;
    }

    sync_volume = m->core->sync_volume;
    if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
        pa_log("Failed to parse sync_volume argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->sync_volume = sync_volume;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

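    /* The smoother interpolates the sound card's playback position
     * between updates so latency queries stay smooth and monotonic;
     * the window/adjust parameters are the SMOOTHER_* constants
     * defined at the top of this file. */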
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

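    /* Three ways to open the PCM: an explicit mapping handed in by
     * module-alsa-card, a device_id for which we probe a suitable
     * mapping ourselves, or a raw ALSA device string. On return b/d
     * tell us whether mmap and tsched could actually be honoured. */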
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

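    /* Fill in the pa_sink_new_data descriptor (name, sample spec,
     * channel map, device properties and ports) before actually
     * creating the sink object. */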
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse boolean argument namereg_fail.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse sync_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse sync_volume_extra_delay parameter");
        goto fail;
    }

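    /* Wire up the sink's callbacks; update_requested_latency is only
     * meaningful when we drive the device with timer-based scheduling
     * and can therefore resize the effective buffer at runtime. */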
    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

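    /* With timer-based scheduling the watermark decides how early we
     * wake up before the buffer drains. It is converted from the
     * originally requested sample spec to the one the device accepted,
     * and later grows/shrinks dynamically using the TSCHED_WATERMARK_*
     * steps and thresholds defined at the top of this file. */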
    if (u->use_tsched) {
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings: push a user-supplied volume/mute to
     * the hardware, otherwise read the current hardware state back */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

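/* Tear-down shared by pa_alsa_sink_free() and the failure path of
 * pa_alsa_sink_new(): unlink the sink first, shut down the IO thread,
 * then release the PCM, mixer and device-reservation resources. */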
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}