alsa-sink: take base volume into account when applying hw volume
/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/i18n.h>
#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/util.h>
#include <pulse/xmalloc.h>

#include <pulsecore/core.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/core-error.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)              /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)         /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)        /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)         /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)     /* 20s -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)    /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC)  /* 100ms -- If the buffer level didn't drop below this threshold during the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                 /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                 /* 4ms -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                   /* 10s -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                    /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                  /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)                /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
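
/* E.g. at 48 kHz with two channels of 16 bit samples (4 bytes per frame),
 * 1330 usec correspond to ~64 frames = ~255 bytes, which is where the 256
 * byte default above comes from. */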

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};

static void userdata_free(struct userdata *u);

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_bool_t b;

    pa_assert(w);
    pa_assert(u);

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

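/* Called from IO context: split the currently requested latency into the
 * time we may sleep before the next wakeup and the processing margin we
 * reserve for refilling the buffer, based on the tsched watermark. */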
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}

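/* Called from IO context: log the ALSA error and try snd_pcm_recover();
 * on success the stream is marked as freshly started so that playback
 * gets restarted cleanly. */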
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

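/* Called from IO context: given how many bytes ALSA reports as writable,
 * compute how much audio is still queued in the hardware buffer, detect
 * underruns and adjust the tsched watermark accordingly. */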
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not when it
     * is merely removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit())
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}

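/* Called from IO context: fill the hardware buffer via the mmap transfer
 * path. Returns 1 if any data was written, 0 if not, and a negative value
 * on unrecoverable errors. */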
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

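/* Called from IO context: same as mmap_write(), but using plain
 * snd_pcm_writei() for devices that cannot do mmap transfers. */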
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

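/* Called from IO context: feed a (system time, playback time) sample pair
 * into the time smoother, rate-limited by the exponentially growing
 * smoother_interval. */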
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}

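/* Called from IO context: estimate the sink latency as the difference
 * between what we have written and what the smoother says has been played
 * back, plus whatever is still queued in our partial memchunk. */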
static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that the old
     * max_request/max_rewind values set before the suspend cannot
     * influence the per-stream buffers of streams created while we
     * are suspended. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

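/* The mixer volume and the sink volume differ by the constant factor
 * s->base_volume, which sink_set_port_cb()/setup_mixer() set to
 * pa_sw_volume_from_dB(-max_dB): reading from the hardware multiplies by
 * it (so that the path's maximum gain maps to PA_VOLUME_NORM), writing
 * divides by it again. The volume callbacks below apply this shift
 * symmetrically. */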
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t write_to_hw = (s->flags & PA_SINK_SYNC_VOLUME) ? FALSE : TRUE;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, write_to_hw) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

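/* Called from IO context (PA_SINK_SYNC_VOLUME): write the deferred
 * hardware volume from thread_info.current_hw_volume to the mixer,
 * applying the same base volume shift as above, and complain if the
 * result deviates from the request by more than VOLUME_ACCURACY. */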
static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_SYNC_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
            char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

            pa_log_debug("Written HW volume did not match the request: %s (request) != %s",
                         pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
    } else {
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

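/* Called from IO context: rewind the hardware buffer as far as safely
 * possible (always keeping rewind_safeguard bytes untouched) and tell
 * the sink how much was actually rewound. */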
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure the rewind doesn't go too far, since that can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}

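/* The IO thread: renders audio and writes it to the PCM device, services
 * rewind requests, applies deferred volume changes and computes when to
 * wake up next. */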
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate that we
                     * artificially decrease the sleep time until we
                     * have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;
        }

        if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->sink->flags & PA_SINK_SYNC_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = TRUE;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = FALSE;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}

static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:

    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}

static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB, pa_bool_t sync_volume) {
    int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);

    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    /* FIXME: need automatic detection rather than hard-coded path */
    if (!strcmp(u->mixer_path->name, "iec958-passthrough-output")) {
        u->sink->flags |= PA_SINK_PASSTHROUGH;
    } else {
        u->sink->flags &= ~PA_SINK_PASSTHROUGH;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;
        u->sink->write_volume = sink_write_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL;
        if (u->mixer_path->has_dB) {
            u->sink->flags |= PA_SINK_DECIBEL_VOLUME;
            if (sync_volume) {
                u->sink->flags |= PA_SINK_SYNC_VOLUME;
                pa_log_info("Successfully enabled synchronous volume.");
            }
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    if (sync_volume) {
        u->mixer_pd = pa_alsa_mixer_pdata_new();
        mixer_callback = io_mixer_callback;

        if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
            pa_log("Failed to initialize file descriptor monitoring");
            return -1;
        }

    } else {
        u->mixer_fdl = pa_alsa_fdlist_new();
        mixer_callback = ctl_mixer_callback;

        if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
            pa_log("Failed to initialize file descriptor monitoring");
            return -1;
        }
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}

1790 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1791
1792 struct userdata *u = NULL;
1793 const char *dev_id = NULL;
1794 pa_sample_spec ss, requested_ss;
1795 pa_channel_map map;
1796 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1797 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1798 size_t frame_size;
1799 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
1800 pa_sink_new_data data;
1801 pa_alsa_profile_set *profile_set = NULL;
1802
1803 pa_assert(m);
1804 pa_assert(ma);
1805
1806 ss = m->core->default_sample_spec;
1807 map = m->core->default_channel_map;
1808 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1809 pa_log("Failed to parse sample specification and channel map");
1810 goto fail;
1811 }
1812
1813 requested_ss = ss;
1814 frame_size = pa_frame_size(&ss);
1815
1816 nfrags = m->core->default_n_fragments;
1817 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1818 if (frag_size <= 0)
1819 frag_size = (uint32_t) frame_size;
1820 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1821 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1822
1823 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1824 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1825 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1826 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1827 pa_log("Failed to parse buffer metrics");
1828 goto fail;
1829 }
1830
1831 buffer_size = nfrags * frag_size;
1832
1833 period_frames = frag_size/frame_size;
1834 buffer_frames = buffer_size/frame_size;
1835 tsched_frames = tsched_size/frame_size;
1836
1837 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1838 pa_log("Failed to parse mmap argument.");
1839 goto fail;
1840 }
1841
1842 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1843 pa_log("Failed to parse tsched argument.");
1844 goto fail;
1845 }
1846
1847 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1848 pa_log("Failed to parse ignore_dB argument.");
1849 goto fail;
1850 }
1851
1852 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
1853 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
1854 pa_log("Failed to parse rewind_safeguard argument");
1855 goto fail;
1856 }
1857
1858 sync_volume = m->core->sync_volume;
1859 if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
1860 pa_log("Failed to parse sync_volume argument.");
1861 goto fail;
1862 }
1863
1864 use_tsched = pa_alsa_may_tsched(use_tsched);
1865
1866 u = pa_xnew0(struct userdata, 1);
1867 u->core = m->core;
1868 u->module = m;
1869 u->use_mmap = use_mmap;
1870 u->use_tsched = use_tsched;
1871 u->first = TRUE;
1872 u->rewind_safeguard = rewind_safeguard;
1873 u->rtpoll = pa_rtpoll_new();
1874 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1875
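     /* The smoother turns noisy device-timing samples (fed to it from the
      * IO thread) into a monotonic estimate of playback time; per the
      * SMOOTHER_* constants above it keeps a bounded history window and
      * eases new readings in over the adjust time. */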
1876 u->smoother = pa_smoother_new(
1877 SMOOTHER_ADJUST_USEC,
1878 SMOOTHER_WINDOW_USEC,
1879 TRUE,
1880 TRUE,
1881 5,
1882 pa_rtclock_now(),
1883 TRUE);
1884 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1885
1886 dev_id = pa_modargs_get_value(
1887 ma, "device_id",
1888 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1889
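     /* Try to take the session-wide reservation for this audio device (via
      * the shared reserve-wrap helpers, which speak the D-Bus device
      * reservation protocol where available) and watch for contenders, so
      * the device can be handed over cleanly to other users. */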
1890 if (reserve_init(u, dev_id) < 0)
1891 goto fail;
1892
1893 if (reserve_monitor_init(u, dev_id) < 0)
1894 goto fail;
1895
1896 b = use_mmap;
1897 d = use_tsched;
1898
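     /* Three ways to open the PCM, in order of how much we already know:
      * an explicit mapping plus device_id; a bare device_id (probe a
      * profile set and auto-pick a mapping); or a plain device string.
      * b and d come back indicating whether mmap and tsched, respectively,
      * could actually be honoured by the device. */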
1899 if (mapping) {
1900
1901 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1902 pa_log("device_id= not set");
1903 goto fail;
1904 }
1905
1906 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1907 dev_id,
1908 &u->device_name,
1909 &ss, &map,
1910 SND_PCM_STREAM_PLAYBACK,
1911 &period_frames, &buffer_frames, tsched_frames,
1912 &b, &d, mapping)))
1913
1914 goto fail;
1915
1916 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1917
1918 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1919 goto fail;
1920
1921 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1922 dev_id,
1923 &u->device_name,
1924 &ss, &map,
1925 SND_PCM_STREAM_PLAYBACK,
1926 &period_frames, &buffer_frames, tsched_frames,
1927 &b, &d, profile_set, &mapping)))
1928
1929 goto fail;
1930
1931 } else {
1932
1933 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1934 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1935 &u->device_name,
1936 &ss, &map,
1937 SND_PCM_STREAM_PLAYBACK,
1938 &period_frames, &buffer_frames, tsched_frames,
1939 &b, &d, FALSE)))
1940 goto fail;
1941 }
1942
1943 pa_assert(u->device_name);
1944 pa_log_info("Successfully opened device %s.", u->device_name);
1945
1946 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1947 pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1948 goto fail;
1949 }
1950
1951 if (mapping)
1952 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1953
1954 if (use_mmap && !b) {
1955 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1956 u->use_mmap = use_mmap = FALSE;
1957 }
1958
1959 if (use_tsched && (!b || !d)) {
1960 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1961 u->use_tsched = use_tsched = FALSE;
1962 }
1963
1964 if (u->use_mmap)
1965 pa_log_info("Successfully enabled mmap() mode.");
1966
1967 if (u->use_tsched)
1968 pa_log_info("Successfully enabled timer-based scheduling mode.");
1969
1970 /* ALSA might tweak the sample spec, so recalculate the frame size */
1971 frame_size = pa_frame_size(&ss);
1972
1973 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1974
1975 pa_sink_new_data_init(&data);
1976 data.driver = driver;
1977 data.module = m;
1978 data.card = card;
1979 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1980
1981 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1982 * variable instead of using &data.namereg_fail directly, because
1983 * data.namereg_fail is a bitfield and taking the address of a bitfield
1984 * variable is impossible. */
1985 namereg_fail = data.namereg_fail;
1986 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1987 pa_log("Failed to parse boolean argument namereg_fail.");
1988 pa_sink_new_data_done(&data);
1989 goto fail;
1990 }
1991 data.namereg_fail = namereg_fail;
1992
1993 pa_sink_new_data_set_sample_spec(&data, &ss);
1994 pa_sink_new_data_set_channel_map(&data, &map);
1995
1996 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1997 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1998 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1999 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2000 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2001
2002 if (mapping) {
2003 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2004 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2005 }
2006
2007 pa_alsa_init_description(data.proplist);
2008
2009 if (u->control_device)
2010 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2011
2012 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2013 pa_log("Invalid properties");
2014 pa_sink_new_data_done(&data);
2015 goto fail;
2016 }
2017
2018 if (u->mixer_path_set)
2019 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
2020
2021 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
2022 pa_sink_new_data_done(&data);
2023
2024 if (!u->sink) {
2025 pa_log("Failed to create sink object");
2026 goto fail;
2027 }
2028
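     /* Tunables for deferred hw volume: the safety margin and extra delay
      * shift when, relative to the audio they belong to, hardware volume
      * writes are issued from the IO thread. */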
2029 if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
2030 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2031 pa_log("Failed to parse sync_volume_safety_margin parameter");
2032 goto fail;
2033 }
2034
2035 if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
2036 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2037 pa_log("Failed to parse sync_volume_extra_delay parameter");
2038 goto fail;
2039 }
2040
2041 u->sink->parent.process_msg = sink_process_msg;
2042 if (u->use_tsched)
2043 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2044 u->sink->set_state = sink_set_state_cb;
2045 u->sink->set_port = sink_set_port_cb;
2046 u->sink->userdata = u;
2047
2048 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2049 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2050
2051 u->frame_size = frame_size;
2052 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2053 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2054 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2055
2056 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2057 (double) u->hwbuf_size / (double) u->fragment_size,
2058 (long unsigned) u->fragment_size,
2059 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2060 (long unsigned) u->hwbuf_size,
2061 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2062
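     /* Rewinding rewrites already-queued samples in the ring buffer; that
      * is only reliable on real hw devices, so for plugin-backed PCMs the
      * rewind ceiling is set to zero. */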
2063 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2064 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2065 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2066 else {
2067 pa_log_info("Disabling rewind for device %s", u->device_name);
2068 pa_sink_set_max_rewind(u->sink, 0);
2069 }
2070
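     /* With timer-based scheduling the sink offers a dynamic latency range
      * (anything up to the full hardware buffer) and the watermark
      * constants are converted into bytes of the final sample spec; without
      * it, the latency is pinned to the hardware buffer size. */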
2071 if (u->use_tsched) {
2072 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
2073
2074 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
2075 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
2076
2077 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
2078 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
2079
2080 fix_min_sleep_wakeup(u);
2081 fix_tsched_watermark(u);
2082
2083 pa_sink_set_latency_range(u->sink,
2084 0,
2085 pa_bytes_to_usec(u->hwbuf_size, &ss));
2086
2087 pa_log_info("Time scheduling watermark is %0.2fms",
2088 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
2089 } else
2090 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2091
2092 reserve_update(u);
2093
2094 if (update_sw_params(u) < 0)
2095 goto fail;
2096
2097 if (setup_mixer(u, ignore_dB, sync_volume) < 0)
2098 goto fail;
2099
2100 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2101
2102 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2103 pa_log("Failed to create thread.");
2104 goto fail;
2105 }
2106
2107 /* Get initial mixer settings */
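     /* If the user supplied an initial volume/mute we push it out to the
      * hardware; otherwise we pull the device's current state into the
      * sink. */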
2108 if (data.volume_is_set) {
2109 if (u->sink->set_volume)
2110 u->sink->set_volume(u->sink);
2111 } else {
2112 if (u->sink->get_volume)
2113 u->sink->get_volume(u->sink);
2114 }
2115
2116 if (data.muted_is_set) {
2117 if (u->sink->set_mute)
2118 u->sink->set_mute(u->sink);
2119 } else {
2120 if (u->sink->get_mute)
2121 u->sink->get_mute(u->sink);
2122 }
2123
2124 pa_sink_put(u->sink);
2125
2126 if (profile_set)
2127 pa_alsa_profile_set_free(profile_set);
2128
2129 return u->sink;
2130
2131 fail:
2132
2133 if (u)
2134 userdata_free(u);
2135
2136 if (profile_set)
2137 pa_alsa_profile_set_free(profile_set);
2138
2139 return NULL;
2140 }
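 /* A minimal sketch of a typical caller (hypothetical, modelled on what a
  * loadable module such as module-alsa-sink would do; valid_modargs is an
  * assumed name, not defined in this file):
  *
  *     pa_modargs *ma = pa_modargs_new(m->argument, valid_modargs);
  *     pa_sink *s;
  *     if (!ma || !(s = pa_alsa_sink_new(m, ma, __FILE__, NULL, NULL)))
  *         goto fail;
  *     ...
  *     pa_alsa_sink_free(s);   (on module unload)
  */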
2141
2142 static void userdata_free(struct userdata *u) {
2143 pa_assert(u);
2144
2145 if (u->sink)
2146 pa_sink_unlink(u->sink);
2147
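     /* Teardown order matters: the sink was already unlinked above, so no
      * new data is routed to it; now stop the IO thread before freeing
      * anything it might still be touching. */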
2148 if (u->thread) {
2149 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2150 pa_thread_free(u->thread);
2151 }
2152
2153 pa_thread_mq_done(&u->thread_mq);
2154
2155 if (u->sink)
2156 pa_sink_unref(u->sink);
2157
2158 if (u->memchunk.memblock)
2159 pa_memblock_unref(u->memchunk.memblock);
2160
2161 if (u->mixer_pd)
2162 pa_alsa_mixer_pdata_free(u->mixer_pd);
2163
2164 if (u->alsa_rtpoll_item)
2165 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2166
2167 if (u->rtpoll)
2168 pa_rtpoll_free(u->rtpoll);
2169
2170 if (u->pcm_handle) {
2171 snd_pcm_drop(u->pcm_handle);
2172 snd_pcm_close(u->pcm_handle);
2173 }
2174
2175 if (u->mixer_fdl)
2176 pa_alsa_fdlist_free(u->mixer_fdl);
2177
2178 if (u->mixer_path_set)
2179 pa_alsa_path_set_free(u->mixer_path_set);
2180 else if (u->mixer_path)
2181 pa_alsa_path_free(u->mixer_path);
2182
2183 if (u->mixer_handle)
2184 snd_mixer_close(u->mixer_handle);
2185
2186 if (u->smoother)
2187 pa_smoother_free(u->smoother);
2188
2189 reserve_done(u);
2190 monitor_done(u);
2191
2192 pa_xfree(u->device_name);
2193 pa_xfree(u->control_device);
2194 pa_xfree(u);
2195 }
2196
2197 void pa_alsa_sink_free(pa_sink *s) {
2198 struct userdata *u;
2199
2200 pa_sink_assert_ref(s);
2201 pa_assert_se(u = s->userdata);
2202
2203 userdata_free(u);
2204 }