alsa-sink: Don't assume we were able to enable hw-volume or sync-volume (v1.1)
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
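/* Rough sketch of the resulting control loop, assuming the default values
 * above: on an underrun the watermark grows by min(2x, +10ms); if the
 * buffer level then stays above 100ms for a full 20s verification window,
 * the watermark shrinks again, by at most 5ms at a time and never by more
 * than half. See increase_watermark() and decrease_watermark() below. */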
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty */
80
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
86
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't extend granularity in software unless the difference exceeds this level */
88
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample format we may rewind more than the 256 bytes above */
91
92 struct userdata {
93 pa_core *core;
94 pa_module *module;
95 pa_sink *sink;
96
97 pa_thread *thread;
98 pa_thread_mq thread_mq;
99 pa_rtpoll *rtpoll;
100
101 snd_pcm_t *pcm_handle;
102
103 pa_alsa_fdlist *mixer_fdl;
104 pa_alsa_mixer_pdata *mixer_pd;
105 snd_mixer_t *mixer_handle;
106 pa_alsa_path_set *mixer_path_set;
107 pa_alsa_path *mixer_path;
108
109 pa_cvolume hardware_volume;
110
111 size_t
112 frame_size,
113 fragment_size,
114 hwbuf_size,
115 tsched_watermark,
116 hwbuf_unused,
117 min_sleep,
118 min_wakeup,
119 watermark_inc_step,
120 watermark_dec_step,
121 watermark_inc_threshold,
122 watermark_dec_threshold,
123 rewind_safeguard;
124
125 pa_usec_t watermark_dec_not_before;
126
127 pa_memchunk memchunk;
128
129 char *device_name; /* name of the PCM device */
130 char *control_device; /* name of the control device */
131
132 pa_bool_t use_mmap:1, use_tsched:1;
133
134 pa_bool_t first, after_rewind;
135
136 pa_rtpoll_item *alsa_rtpoll_item;
137
138 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
139
140 pa_smoother *smoother;
141 uint64_t write_count;
142 uint64_t since_start;
143 pa_usec_t smoother_interval;
144 pa_usec_t last_smoother_update;
145
146 pa_reserve_wrapper *reserve;
147 pa_hook_slot *reserve_slot;
148 pa_reserve_monitor_wrapper *monitor;
149 pa_hook_slot *monitor_slot;
150 };
151
152 static void userdata_free(struct userdata *u);
153
154 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
155 pa_assert(r);
156 pa_assert(u);
157
158 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
159 return PA_HOOK_CANCEL;
160
161 return PA_HOOK_OK;
162 }
163
164 static void reserve_done(struct userdata *u) {
165 pa_assert(u);
166
167 if (u->reserve_slot) {
168 pa_hook_slot_free(u->reserve_slot);
169 u->reserve_slot = NULL;
170 }
171
172 if (u->reserve) {
173 pa_reserve_wrapper_unref(u->reserve);
174 u->reserve = NULL;
175 }
176 }
177
178 static void reserve_update(struct userdata *u) {
179 const char *description;
180 pa_assert(u);
181
182 if (!u->sink || !u->reserve)
183 return;
184
185 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
186 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
187 }
188
189 static int reserve_init(struct userdata *u, const char *dname) {
190 char *rname;
191
192 pa_assert(u);
193 pa_assert(dname);
194
195 if (u->reserve)
196 return 0;
197
198 if (pa_in_system_mode())
199 return 0;
200
201 if (!(rname = pa_alsa_get_reserve_name(dname)))
202 return 0;
203
204 /* Try to lock the device (we may be initializing or resuming) */
205 u->reserve = pa_reserve_wrapper_get(u->core, rname);
206 pa_xfree(rname);
207
208 if (!(u->reserve))
209 return -1;
210
211 reserve_update(u);
212
213 pa_assert(!u->reserve_slot);
214 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
215
216 return 0;
217 }
218
219 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
220 pa_bool_t b;
221
222 pa_assert(w);
223 pa_assert(u);
224
225 b = PA_PTR_TO_UINT(busy) && !u->reserve;
226
227 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
228 return PA_HOOK_OK;
229 }
230
231 static void monitor_done(struct userdata *u) {
232 pa_assert(u);
233
234 if (u->monitor_slot) {
235 pa_hook_slot_free(u->monitor_slot);
236 u->monitor_slot = NULL;
237 }
238
239 if (u->monitor) {
240 pa_reserve_monitor_wrapper_unref(u->monitor);
241 u->monitor = NULL;
242 }
243 }
244
245 static int reserve_monitor_init(struct userdata *u, const char *dname) {
246 char *rname;
247
248 pa_assert(u);
249 pa_assert(dname);
250
251 if (pa_in_system_mode())
252 return 0;
253
254 if (!(rname = pa_alsa_get_reserve_name(dname)))
255 return 0;
256
257 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
258 pa_xfree(rname);
259
260 if (!(u->monitor))
261 return -1;
262
263 pa_assert(!u->monitor_slot);
264 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265
266 return 0;
267 }
268
269 static void fix_min_sleep_wakeup(struct userdata *u) {
270 size_t max_use, max_use_2;
271
272 pa_assert(u);
273 pa_assert(u->use_tsched);
274
275 max_use = u->hwbuf_size - u->hwbuf_unused;
276 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
277
278 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
279 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
280
281 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
282 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
283 }
284
285 static void fix_tsched_watermark(struct userdata *u) {
286 size_t max_use;
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 max_use = u->hwbuf_size - u->hwbuf_unused;
291
292 if (u->tsched_watermark > max_use - u->min_sleep)
293 u->tsched_watermark = max_use - u->min_sleep;
294
295 if (u->tsched_watermark < u->min_wakeup)
296 u->tsched_watermark = u->min_wakeup;
297 }
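/* After these clamps the invariant is
 *    min_wakeup <= tsched_watermark <= max_use - min_sleep,
 * i.e. we always sleep at least min_sleep and still wake up with at least
 * min_wakeup worth of audio left before the buffer runs dry. */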
298
299 static void increase_watermark(struct userdata *u) {
300 size_t old_watermark;
301 pa_usec_t old_min_latency, new_min_latency;
302
303 pa_assert(u);
304 pa_assert(u->use_tsched);
305
306 /* First, just try to increase the watermark */
307 old_watermark = u->tsched_watermark;
308 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
309 fix_tsched_watermark(u);
310
311 if (old_watermark != u->tsched_watermark) {
312 pa_log_info("Increasing wakeup watermark to %0.2f ms",
313 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
314 return;
315 }
316
317 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
318 old_min_latency = u->sink->thread_info.min_latency;
319 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
320 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
321
322 if (old_min_latency != new_min_latency) {
323 pa_log_info("Increasing minimal latency to %0.2f ms",
324 (double) new_min_latency / PA_USEC_PER_MSEC);
325
326 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
327 }
328
329 /* When we reach this we're officially fucked! */
330 }
331
332 static void decrease_watermark(struct userdata *u) {
333 size_t old_watermark;
334 pa_usec_t now;
335
336 pa_assert(u);
337 pa_assert(u->use_tsched);
338
339 now = pa_rtclock_now();
340
341 if (u->watermark_dec_not_before <= 0)
342 goto restart;
343
344 if (u->watermark_dec_not_before > now)
345 return;
346
347 old_watermark = u->tsched_watermark;
348
349 if (u->tsched_watermark < u->watermark_dec_step)
350 u->tsched_watermark = u->tsched_watermark / 2;
351 else
352 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
353
354 fix_tsched_watermark(u);
355
356 if (old_watermark != u->tsched_watermark)
357 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
358 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
359
360 /* We don't change the latency range */
361
362 restart:
363 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
364 }
365
366 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
367 pa_usec_t usec, wm;
368
369 pa_assert(sleep_usec);
370 pa_assert(process_usec);
371
372 pa_assert(u);
373 pa_assert(u->use_tsched);
374
375 usec = pa_sink_get_requested_latency_within_thread(u->sink);
376
377 if (usec == (pa_usec_t) -1)
378 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
379
380 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
381
382 if (wm > usec)
383 wm = usec/2;
384
385 *sleep_usec = usec - wm;
386 *process_usec = wm;
387
388 #ifdef DEBUG_TIMING
389 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
390 (unsigned long) (usec / PA_USEC_PER_MSEC),
391 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
392 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
393 #endif
394 }
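/* Worked example, assuming the defaults above and no client latency
 * request: usec = 2s (the full buffer), wm = 20ms, so we sleep for ~1.98s
 * and keep ~20ms of headroom to refill before the buffer runs dry. */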
395
396 static int try_recover(struct userdata *u, const char *call, int err) {
397 pa_assert(u);
398 pa_assert(call);
399 pa_assert(err < 0);
400
401 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
402
403 pa_assert(err != -EAGAIN);
404
405 if (err == -EPIPE)
406 pa_log_debug("%s: Buffer underrun!", call);
407
408 if (err == -ESTRPIPE)
409 pa_log_debug("%s: System suspended!", call);
410
411 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
412 pa_log("%s: %s", call, pa_alsa_strerror(err));
413 return -1;
414 }
415
416 u->first = TRUE;
417 u->since_start = 0;
418 return 0;
419 }
420
421 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
422 size_t left_to_play;
423 pa_bool_t underrun = FALSE;
424
425 /* We use <= instead of < for this check here because an underrun
426 * only happens after the last sample was processed, not when it is
427 * merely removed from the buffer. This is particularly important
428 * when block transfer is used. */
429
430 if (n_bytes <= u->hwbuf_size)
431 left_to_play = u->hwbuf_size - n_bytes;
432 else {
433
434 /* We got a dropout. What a mess! */
435 left_to_play = 0;
436 underrun = TRUE;
437
438 #ifdef DEBUG_TIMING
439 PA_DEBUG_TRAP;
440 #endif
441
442 if (!u->first && !u->after_rewind)
443 if (pa_log_ratelimit())
444 pa_log_info("Underrun!");
445 }
446
447 #ifdef DEBUG_TIMING
448 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
449 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
450 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
451 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
452 #endif
453
454 if (u->use_tsched) {
455 pa_bool_t reset_not_before = TRUE;
456
457 if (!u->first && !u->after_rewind) {
458 if (underrun || left_to_play < u->watermark_inc_threshold)
459 increase_watermark(u);
460 else if (left_to_play > u->watermark_dec_threshold) {
461 reset_not_before = FALSE;
462
463 /* We decrease the watermark only if we have actually
464 * been woken up by a timeout. If something else woke
465 * us up it's too easy to fulfill the deadlines... */
466
467 if (on_timeout)
468 decrease_watermark(u);
469 }
470 }
471
472 if (reset_not_before)
473 u->watermark_dec_not_before = 0;
474 }
475
476 return left_to_play;
477 }
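/* In short: left_to_play is the amount of queued audio still sitting in
 * the hw buffer, in bytes. n_bytes (the avail count) can exceed
 * hwbuf_size only after an xrun, which is how the underrun case is
 * detected above. */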
478
479 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
480 pa_bool_t work_done = TRUE;
481 pa_usec_t max_sleep_usec = 0, process_usec = 0;
482 size_t left_to_play;
483 unsigned j = 0;
484
485 pa_assert(u);
486 pa_sink_assert_ref(u->sink);
487
488 if (u->use_tsched)
489 hw_sleep_time(u, &max_sleep_usec, &process_usec);
490
491 for (;;) {
492 snd_pcm_sframes_t n;
493 size_t n_bytes;
494 int r;
495 pa_bool_t after_avail = TRUE;
496
497 /* First we determine how many samples are missing to fill the
498 * buffer up to 100% */
499
500 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
501
502 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
503 continue;
504
505 return r;
506 }
507
508 n_bytes = (size_t) n * u->frame_size;
509
510 #ifdef DEBUG_TIMING
511 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
512 #endif
513
514 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
515 on_timeout = FALSE;
516
517 if (u->use_tsched)
518
519 /* We won't fill up the playback buffer before at least
520 * half the sleep time is over because otherwise we might
521 * ask for more data from the clients than they expect. We
522 * need to guarantee that clients only have to keep around
523 * a single hw buffer length. */
524
525 if (!polled &&
526 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
527 #ifdef DEBUG_TIMING
528 pa_log_debug("Not filling up, because too early.");
529 #endif
530 break;
531 }
532
533 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
534
535 if (polled)
536 PA_ONCE_BEGIN {
537 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
538 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
539 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
540 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
541 pa_strnull(dn));
542 pa_xfree(dn);
543 } PA_ONCE_END;
544
545 #ifdef DEBUG_TIMING
546 pa_log_debug("Not filling up, because not necessary.");
547 #endif
548 break;
549 }
550
551
552 if (++j > 10) {
553 #ifdef DEBUG_TIMING
554 pa_log_debug("Not filling up, because already too many iterations.");
555 #endif
556
557 break;
558 }
559
560 n_bytes -= u->hwbuf_unused;
561 polled = FALSE;
562
563 #ifdef DEBUG_TIMING
564 pa_log_debug("Filling up");
565 #endif
566
567 for (;;) {
568 pa_memchunk chunk;
569 void *p;
570 int err;
571 const snd_pcm_channel_area_t *areas;
572 snd_pcm_uframes_t offset, frames;
573 snd_pcm_sframes_t sframes;
574
575 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
576 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
577
578 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
579
580 if (!after_avail && err == -EAGAIN)
581 break;
582
583 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
584 continue;
585
586 return r;
587 }
588
589 /* Make sure that if these memblocks need to be copied they will fit into one slot */
590 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
591 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
592
593 if (!after_avail && frames == 0)
594 break;
595
596 pa_assert(frames > 0);
597 after_avail = FALSE;
598
599 /* Check these are multiples of 8 bit */
600 pa_assert((areas[0].first & 7) == 0);
601 pa_assert((areas[0].step & 7) == 0);
602
603 /* We assume a single interleaved memory buffer */
604 pa_assert((areas[0].first >> 3) == 0);
605 pa_assert((areas[0].step >> 3) == u->frame_size);
606
607 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
608
609 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
610 chunk.length = pa_memblock_get_length(chunk.memblock);
611 chunk.index = 0;
612
613 pa_sink_render_into_full(u->sink, &chunk);
614 pa_memblock_unref_fixed(chunk.memblock);
615
616 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
617
618 if (!after_avail && (int) sframes == -EAGAIN)
619 break;
620
621 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
622 continue;
623
624 return r;
625 }
626
627 work_done = TRUE;
628
629 u->write_count += frames * u->frame_size;
630 u->since_start += frames * u->frame_size;
631
632 #ifdef DEBUG_TIMING
633 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
634 #endif
635
636 if ((size_t) frames * u->frame_size >= n_bytes)
637 break;
638
639 n_bytes -= (size_t) frames * u->frame_size;
640 }
641 }
642
643 if (u->use_tsched) {
644 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
645
646 if (*sleep_usec > process_usec)
647 *sleep_usec -= process_usec;
648 else
649 *sleep_usec = 0;
650 } else
651 *sleep_usec = 0;
652
653 return work_done ? 1 : 0;
654 }
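/* unix_write() below is the non-mmap fallback: it follows the same
 * control flow as mmap_write(), but renders into an intermediate
 * memchunk and pushes it out with snd_pcm_writei() instead of writing
 * into the DMA buffer directly. */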
655
656 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
657 pa_bool_t work_done = FALSE;
658 pa_usec_t max_sleep_usec = 0, process_usec = 0;
659 size_t left_to_play;
660 unsigned j = 0;
661
662 pa_assert(u);
663 pa_sink_assert_ref(u->sink);
664
665 if (u->use_tsched)
666 hw_sleep_time(u, &max_sleep_usec, &process_usec);
667
668 for (;;) {
669 snd_pcm_sframes_t n;
670 size_t n_bytes;
671 int r;
672 pa_bool_t after_avail = TRUE;
673
674 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
675
676 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
677 continue;
678
679 return r;
680 }
681
682 n_bytes = (size_t) n * u->frame_size;
683 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
684 on_timeout = FALSE;
685
686 if (u->use_tsched)
687
688 /* We won't fill up the playback buffer before at least
689 * half the sleep time is over because otherwise we might
690 * ask for more data from the clients than they expect. We
691 * need to guarantee that clients only have to keep around
692 * a single hw buffer length. */
693
694 if (!polled &&
695 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
696 break;
697
698 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
699
700 if (polled)
701 PA_ONCE_BEGIN {
702 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
703 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
704 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
705 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
706 pa_strnull(dn));
707 pa_xfree(dn);
708 } PA_ONCE_END;
709
710 break;
711 }
712
713 if (++j > 10) {
714 #ifdef DEBUG_TIMING
715 pa_log_debug("Not filling up, because already too many iterations.");
716 #endif
717
718 break;
719 }
720
721 n_bytes -= u->hwbuf_unused;
722 polled = FALSE;
723
724 for (;;) {
725 snd_pcm_sframes_t frames;
726 void *p;
727
728 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
729
730 if (u->memchunk.length <= 0)
731 pa_sink_render(u->sink, n_bytes, &u->memchunk);
732
733 pa_assert(u->memchunk.length > 0);
734
735 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
736
737 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
738 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
739
740 p = pa_memblock_acquire(u->memchunk.memblock);
741 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
742 pa_memblock_release(u->memchunk.memblock);
743
744 if (PA_UNLIKELY(frames < 0)) {
745
746 if (!after_avail && (int) frames == -EAGAIN)
747 break;
748
749 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
750 continue;
751
752 return r;
753 }
754
755 if (!after_avail && frames == 0)
756 break;
757
758 pa_assert(frames > 0);
759 after_avail = FALSE;
760
761 u->memchunk.index += (size_t) frames * u->frame_size;
762 u->memchunk.length -= (size_t) frames * u->frame_size;
763
764 if (u->memchunk.length <= 0) {
765 pa_memblock_unref(u->memchunk.memblock);
766 pa_memchunk_reset(&u->memchunk);
767 }
768
769 work_done = TRUE;
770
771 u->write_count += frames * u->frame_size;
772 u->since_start += frames * u->frame_size;
773
774 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
775
776 if ((size_t) frames * u->frame_size >= n_bytes)
777 break;
778
779 n_bytes -= (size_t) frames * u->frame_size;
780 }
781 }
782
783 if (u->use_tsched) {
784 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
785
786 if (*sleep_usec > process_usec)
787 *sleep_usec -= process_usec;
788 else
789 *sleep_usec = 0;
790 } else
791 *sleep_usec = 0;
792
793 return work_done ? 1 : 0;
794 }
795
796 static void update_smoother(struct userdata *u) {
797 snd_pcm_sframes_t delay = 0;
798 int64_t position;
799 int err;
800 pa_usec_t now1 = 0, now2;
801 snd_pcm_status_t *status;
802
803 snd_pcm_status_alloca(&status);
804
805 pa_assert(u);
806 pa_assert(u->pcm_handle);
807
808 /* Let's update the time smoother */
809
810 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
811 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
812 return;
813 }
814
815 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
816 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
817 else {
818 snd_htimestamp_t htstamp = { 0, 0 };
819 snd_pcm_status_get_htstamp(status, &htstamp);
820 now1 = pa_timespec_load(&htstamp);
821 }
822
823 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
824 if (now1 <= 0)
825 now1 = pa_rtclock_now();
826
827 /* check if the time since the last update is bigger than the interval */
828 if (u->last_smoother_update > 0)
829 if (u->last_smoother_update + u->smoother_interval > now1)
830 return;
831
832 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
833
834 if (PA_UNLIKELY(position < 0))
835 position = 0;
836
837 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
838
839 pa_smoother_put(u->smoother, now1, now2);
840
841 u->last_smoother_update = now1;
842 /* exponentially increase the update interval up to the MAX limit */
843 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
844 }
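/* The smoother is fed pairs of system time (now1) and playback time
 * (now2), where the playback position is write_count minus the driver
 * delay, converted to usec. Interpolating between such pairs is what
 * makes sink_get_latency() below cheap to call at any time. */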
845
846 static pa_usec_t sink_get_latency(struct userdata *u) {
847 pa_usec_t r;
848 int64_t delay;
849 pa_usec_t now1, now2;
850
851 pa_assert(u);
852
853 now1 = pa_rtclock_now();
854 now2 = pa_smoother_get(u->smoother, now1);
855
856 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
857
858 r = delay >= 0 ? (pa_usec_t) delay : 0;
859
860 if (u->memchunk.memblock)
861 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
862
863 return r;
864 }
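/* I.e. latency = what we have written minus what the smoother estimates
 * has been played, plus whatever still sits in the partial memchunk. */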
865
866 static int build_pollfd(struct userdata *u) {
867 pa_assert(u);
868 pa_assert(u->pcm_handle);
869
870 if (u->alsa_rtpoll_item)
871 pa_rtpoll_item_free(u->alsa_rtpoll_item);
872
873 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
874 return -1;
875
876 return 0;
877 }
878
879 /* Called from IO context */
880 static int suspend(struct userdata *u) {
881 pa_assert(u);
882 pa_assert(u->pcm_handle);
883
884 pa_smoother_pause(u->smoother, pa_rtclock_now());
885
886 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
887 * take awfully long with our long buffer sizes today. */
888 snd_pcm_close(u->pcm_handle);
889 u->pcm_handle = NULL;
890
891 if (u->alsa_rtpoll_item) {
892 pa_rtpoll_item_free(u->alsa_rtpoll_item);
893 u->alsa_rtpoll_item = NULL;
894 }
895
896 /* We reset max_rewind/max_request here to make sure that while we
897 * are suspended the old max_request/max_rewind values set before
898 * the suspend cannot influence the per-stream buffer of newly
899 * created streams, without their requirements having any
900 * influence on them. */
901 pa_sink_set_max_rewind_within_thread(u->sink, 0);
902 pa_sink_set_max_request_within_thread(u->sink, 0);
903
904 pa_log_info("Device suspended...");
905
906 return 0;
907 }
908
909 /* Called from IO context */
910 static int update_sw_params(struct userdata *u) {
911 snd_pcm_uframes_t avail_min;
912 int err;
913
914 pa_assert(u);
915
916 /* Use the full buffer if no one asked us for anything specific */
917 u->hwbuf_unused = 0;
918
919 if (u->use_tsched) {
920 pa_usec_t latency;
921
922 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
923 size_t b;
924
925 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
926
927 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
928
929 /* We need at least one sample in our buffer */
930
931 if (PA_UNLIKELY(b < u->frame_size))
932 b = u->frame_size;
933
934 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
935 }
936
937 fix_min_sleep_wakeup(u);
938 fix_tsched_watermark(u);
939 }
940
941 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
942
943 /* We need at least one frame in the used part of the buffer */
944 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
945
946 if (u->use_tsched) {
947 pa_usec_t sleep_usec, process_usec;
948
949 hw_sleep_time(u, &sleep_usec, &process_usec);
950 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
951 }
952
953 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
954
955 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
956 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
957 return err;
958 }
959
960 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
961 if (pa_alsa_pcm_is_hw(u->pcm_handle))
962 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
963 else {
964 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
965 pa_sink_set_max_rewind_within_thread(u->sink, 0);
966 }
967
968 return 0;
969 }
970
971 /* Called from IO context */
972 static int unsuspend(struct userdata *u) {
973 pa_sample_spec ss;
974 int err;
975 pa_bool_t b, d;
976 snd_pcm_uframes_t period_size, buffer_size;
977
978 pa_assert(u);
979 pa_assert(!u->pcm_handle);
980
981 pa_log_info("Trying resume...");
982
983 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
984 SND_PCM_NONBLOCK|
985 SND_PCM_NO_AUTO_RESAMPLE|
986 SND_PCM_NO_AUTO_CHANNELS|
987 SND_PCM_NO_AUTO_FORMAT)) < 0) {
988 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
989 goto fail;
990 }
991
992 ss = u->sink->sample_spec;
993 period_size = u->fragment_size / u->frame_size;
994 buffer_size = u->hwbuf_size / u->frame_size;
995 b = u->use_mmap;
996 d = u->use_tsched;
997
998 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
999 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1000 goto fail;
1001 }
1002
1003 if (b != u->use_mmap || d != u->use_tsched) {
1004 pa_log_warn("Resume failed, couldn't get original access mode.");
1005 goto fail;
1006 }
1007
1008 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1009 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1010 goto fail;
1011 }
1012
1013 if (period_size*u->frame_size != u->fragment_size ||
1014 buffer_size*u->frame_size != u->hwbuf_size) {
1015 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1016 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1017 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1018 goto fail;
1019 }
1020
1021 if (update_sw_params(u) < 0)
1022 goto fail;
1023
1024 if (build_pollfd(u) < 0)
1025 goto fail;
1026
1027 u->write_count = 0;
1028 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1029 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1030 u->last_smoother_update = 0;
1031
1032 u->first = TRUE;
1033 u->since_start = 0;
1034
1035 pa_log_info("Resumed successfully...");
1036
1037 return 0;
1038
1039 fail:
1040 if (u->pcm_handle) {
1041 snd_pcm_close(u->pcm_handle);
1042 u->pcm_handle = NULL;
1043 }
1044
1045 return -PA_ERR_IO;
1046 }
1047
1048 /* Called from IO context */
1049 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1050 struct userdata *u = PA_SINK(o)->userdata;
1051
1052 switch (code) {
1053
1054 case PA_SINK_MESSAGE_GET_LATENCY: {
1055 pa_usec_t r = 0;
1056
1057 if (u->pcm_handle)
1058 r = sink_get_latency(u);
1059
1060 *((pa_usec_t*) data) = r;
1061
1062 return 0;
1063 }
1064
1065 case PA_SINK_MESSAGE_SET_STATE:
1066
1067 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1068
1069 case PA_SINK_SUSPENDED: {
1070 int r;
1071
1072 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1073
1074 if ((r = suspend(u)) < 0)
1075 return r;
1076
1077 break;
1078 }
1079
1080 case PA_SINK_IDLE:
1081 case PA_SINK_RUNNING: {
1082 int r;
1083
1084 if (u->sink->thread_info.state == PA_SINK_INIT) {
1085 if (build_pollfd(u) < 0)
1086 return -PA_ERR_IO;
1087 }
1088
1089 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1090 if ((r = unsuspend(u)) < 0)
1091 return r;
1092 }
1093
1094 break;
1095 }
1096
1097 case PA_SINK_UNLINKED:
1098 case PA_SINK_INIT:
1099 case PA_SINK_INVALID_STATE:
1100 ;
1101 }
1102
1103 break;
1104 }
1105
1106 return pa_sink_process_msg(o, code, data, offset, chunk);
1107 }
1108
1109 /* Called from main context */
1110 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1111 pa_sink_state_t old_state;
1112 struct userdata *u;
1113
1114 pa_sink_assert_ref(s);
1115 pa_assert_se(u = s->userdata);
1116
1117 old_state = pa_sink_get_state(u->sink);
1118
1119 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1120 reserve_done(u);
1121 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1122 if (reserve_init(u, u->device_name) < 0)
1123 return -PA_ERR_BUSY;
1124
1125 return 0;
1126 }
1127
1128 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1129 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1130
1131 pa_assert(u);
1132 pa_assert(u->mixer_handle);
1133
1134 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1135 return 0;
1136
1137 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1138 return 0;
1139
1140 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1141 pa_sink_get_volume(u->sink, TRUE);
1142 pa_sink_get_mute(u->sink, TRUE);
1143 }
1144
1145 return 0;
1146 }
1147
1148 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1149 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1150
1151 pa_assert(u);
1152 pa_assert(u->mixer_handle);
1153
1154 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1155 return 0;
1156
1157 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1158 return 0;
1159
1160 if (mask & SND_CTL_EVENT_MASK_VALUE)
1161 pa_sink_update_volume_and_mute(u->sink);
1162
1163 return 0;
1164 }
1165
1166 static void sink_get_volume_cb(pa_sink *s) {
1167 struct userdata *u = s->userdata;
1168 pa_cvolume r;
1169 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1170
1171 pa_assert(u);
1172 pa_assert(u->mixer_path);
1173 pa_assert(u->mixer_handle);
1174
1175 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1176 return;
1177
1178 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1179 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1180
1181 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1182
1183 if (u->mixer_path->has_dB) {
1184 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1185
1186 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1187 }
1188
1189 if (pa_cvolume_equal(&u->hardware_volume, &r))
1190 return;
1191
1192 s->real_volume = u->hardware_volume = r;
1193
1194 /* Hmm, so the hardware volume changed, let's reset our software volume */
1195 if (u->mixer_path->has_dB)
1196 pa_sink_set_soft_volume(s, NULL);
1197 }
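/* On the base-volume shift: for dB-capable paths base_volume is set to
 * pa_sw_volume_from_dB(-max_dB) (see setup_mixer() below), so the
 * multiplication above maps the mixer's maximum onto PA_VOLUME_NORM and
 * leaves the ALSA 0dB point at base_volume. */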
1198
1199 static void sink_set_volume_cb(pa_sink *s) {
1200 struct userdata *u = s->userdata;
1201 pa_cvolume r;
1202 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1203 pa_bool_t write_to_hw = (s->flags & PA_SINK_SYNC_VOLUME) ? FALSE : TRUE;
1204
1205 pa_assert(u);
1206 pa_assert(u->mixer_path);
1207 pa_assert(u->mixer_handle);
1208
1209 /* Shift up by the base volume */
1210 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1211
1212 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, write_to_hw) < 0)
1213 return;
1214
1215 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1216 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1217
1218 u->hardware_volume = r;
1219
1220 if (u->mixer_path->has_dB) {
1221 pa_cvolume new_soft_volume;
1222 pa_bool_t accurate_enough;
1223 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1224
1225 /* Match exactly what the user requested by software */
1226 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1227
1228 /* If the adjustment to do in software is only minimal we
1229 * can skip it. That saves us CPU at the expense of a bit of
1230 * accuracy */
1231 accurate_enough =
1232 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1233 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1234
1235 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1236 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1237 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1238 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1239 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1240 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1241 pa_yes_no(accurate_enough));
1242 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1243
1244 if (!accurate_enough)
1245 s->soft_volume = new_soft_volume;
1246
1247 } else {
1248 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1249
1250 /* We can't match exactly what the user requested, hence let's
1251 * at least tell the user about it */
1252
1253 s->real_volume = r;
1254 }
1255 }
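/* VOLUME_ACCURACY is PA_VOLUME_NORM/100, so "accurate enough" means the
 * residual software correction stays within roughly +/-1% of unity gain
 * -- close enough to skip the extra per-sample scaling entirely. */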
1256
1257 static void sink_write_volume_cb(pa_sink *s) {
1258 struct userdata *u = s->userdata;
1259 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1260
1261 pa_assert(u);
1262 pa_assert(u->mixer_path);
1263 pa_assert(u->mixer_handle);
1264 pa_assert(s->flags & PA_SINK_SYNC_VOLUME);
1265
1266 /* Shift up by the base volume */
1267 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1268
1269 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE) < 0)
1270 pa_log_error("Writing HW volume failed");
1271 else {
1272 pa_cvolume tmp_vol;
1273 pa_bool_t accurate_enough;
1274
1275 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1276 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1277
1278 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1279 accurate_enough =
1280 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1281 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1282
1283 if (!accurate_enough) {
1284 union {
1285 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1286 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1287 } vol;
1288
1289 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1290 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1291 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1292 pa_log_debug(" in dB: %s (request) != %s",
1293 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1294 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1295 }
1296 }
1297 }
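/* sink_write_volume_cb() runs from the IO thread when PA_SINK_SYNC_VOLUME
 * is set: sink_set_volume_cb() above then only records the request
 * (write_to_hw == FALSE) and the actual mixer write is deferred to this
 * callback, via pa_sink_volume_change_apply() in thread_func(). */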
1298
1299 static void sink_get_mute_cb(pa_sink *s) {
1300 struct userdata *u = s->userdata;
1301 pa_bool_t b;
1302
1303 pa_assert(u);
1304 pa_assert(u->mixer_path);
1305 pa_assert(u->mixer_handle);
1306
1307 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1308 return;
1309
1310 s->muted = b;
1311 }
1312
1313 static void sink_set_mute_cb(pa_sink *s) {
1314 struct userdata *u = s->userdata;
1315
1316 pa_assert(u);
1317 pa_assert(u->mixer_path);
1318 pa_assert(u->mixer_handle);
1319
1320 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1321 }
1322
1323 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1324 struct userdata *u = s->userdata;
1325 pa_alsa_port_data *data;
1326
1327 pa_assert(u);
1328 pa_assert(p);
1329 pa_assert(u->mixer_handle);
1330
1331 data = PA_DEVICE_PORT_DATA(p);
1332
1333 pa_assert_se(u->mixer_path = data->path);
1334 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1335
1336 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1337 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1338 s->n_volume_steps = PA_VOLUME_NORM+1;
1339
1340 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1341 } else {
1342 s->base_volume = PA_VOLUME_NORM;
1343 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1344 }
1345
1346 if (data->setting)
1347 pa_alsa_setting_select(data->setting, u->mixer_handle);
1348
1349 if (s->set_mute)
1350 s->set_mute(s);
1351 if (s->set_volume)
1352 s->set_volume(s);
1353
1354 return 0;
1355 }
1356
1357 static void sink_update_requested_latency_cb(pa_sink *s) {
1358 struct userdata *u = s->userdata;
1359 size_t before;
1360 pa_assert(u);
1361 pa_assert(u->use_tsched); /* only when timer scheduling is used
1362 * we can dynamically adjust the
1363 * latency */
1364
1365 if (!u->pcm_handle)
1366 return;
1367
1368 before = u->hwbuf_unused;
1369 update_sw_params(u);
1370
1371 /* Let's check whether we now use only a smaller part of the
1372 buffer than before. If so, we need to make sure that subsequent
1373 rewinds are relative to the new maximum fill level and not to the
1374 current fill level. Thus, let's do a full rewind once, to clear
1375 things up. */
1376
1377 if (u->hwbuf_unused > before) {
1378 pa_log_debug("Requesting rewind due to latency change.");
1379 pa_sink_request_rewind(s, (size_t) -1);
1380 }
1381 }
1382
1383 static int process_rewind(struct userdata *u) {
1384 snd_pcm_sframes_t unused;
1385 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1386 pa_assert(u);
1387
1388 /* Figure out how much we shall rewind and reset the counter */
1389 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1390
1391 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1392
1393 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1394 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1395 return -1;
1396 }
1397
1398 unused_nbytes = (size_t) unused * u->frame_size;
1399
1400 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1401 unused_nbytes += u->rewind_safeguard;
1402
1403 if (u->hwbuf_size > unused_nbytes)
1404 limit_nbytes = u->hwbuf_size - unused_nbytes;
1405 else
1406 limit_nbytes = 0;
1407
1408 if (rewind_nbytes > limit_nbytes)
1409 rewind_nbytes = limit_nbytes;
1410
1411 if (rewind_nbytes > 0) {
1412 snd_pcm_sframes_t in_frames, out_frames;
1413
1414 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1415
1416 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1417 pa_log_debug("before: %lu", (unsigned long) in_frames);
1418 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1419 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1420 if (try_recover(u, "process_rewind", out_frames) < 0)
1421 return -1;
1422 out_frames = 0;
1423 }
1424
1425 pa_log_debug("after: %lu", (unsigned long) out_frames);
1426
1427 rewind_nbytes = (size_t) out_frames * u->frame_size;
1428
1429 if (rewind_nbytes <= 0)
1430 pa_log_info("Tried rewind, but was apparently not possible.");
1431 else {
1432 u->write_count -= rewind_nbytes;
1433 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1434 pa_sink_process_rewind(u->sink, rewind_nbytes);
1435
1436 u->after_rewind = TRUE;
1437 return 0;
1438 }
1439 } else
1440 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1441
1442 pa_sink_process_rewind(u->sink, 0);
1443 return 0;
1444 }
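/* Net effect: we may rewind at most the still-queued bytes minus
 * rewind_safeguard, so we never touch frames the DMA engine may already
 * be fetching. */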
1445
1446 static void thread_func(void *userdata) {
1447 struct userdata *u = userdata;
1448 unsigned short revents = 0;
1449
1450 pa_assert(u);
1451
1452 pa_log_debug("Thread starting up");
1453
1454 if (u->core->realtime_scheduling)
1455 pa_make_realtime(u->core->realtime_priority);
1456
1457 pa_thread_mq_install(&u->thread_mq);
1458
1459 for (;;) {
1460 int ret;
1461 pa_usec_t rtpoll_sleep = 0;
1462
1463 #ifdef DEBUG_TIMING
1464 pa_log_debug("Loop");
1465 #endif
1466
1467 /* Render some data and write it to the dsp */
1468 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1469 int work_done;
1470 pa_usec_t sleep_usec = 0;
1471 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1472
1473 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1474 if (process_rewind(u) < 0)
1475 goto fail;
1476
1477 if (u->use_mmap)
1478 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1479 else
1480 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1481
1482 if (work_done < 0)
1483 goto fail;
1484
1485 /* pa_log_debug("work_done = %i", work_done); */
1486
1487 if (work_done) {
1488
1489 if (u->first) {
1490 pa_log_info("Starting playback.");
1491 snd_pcm_start(u->pcm_handle);
1492
1493 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1494
1495 u->first = FALSE;
1496 }
1497
1498 update_smoother(u);
1499 }
1500
1501 if (u->use_tsched) {
1502 pa_usec_t cusec;
1503
1504 if (u->since_start <= u->hwbuf_size) {
1505
1506 /* USB devices on ALSA seem to hit a buffer
1507 * underrun during the first iterations much
1508 * quicker than we calculate here, probably due to
1509 * the transport latency. To accommodate that
1510 * we artificially decrease the sleep time until
1511 * we have filled the buffer at least once
1512 * completely. */
1513
1514 if (pa_log_ratelimit())
1515 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1516 sleep_usec /= 2;
1517 }
1518
1519 /* OK, the playback buffer is now full, let's
1520 * calculate when to wake up next */
1521 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1522
1523 /* Convert from the sound card time domain to the
1524 * system time domain */
1525 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1526
1527 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1528
1529 /* We don't trust the conversion, so we wake up at whichever time comes first */
1530 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1531 }
1532
1533 u->after_rewind = FALSE;
1534
1535 }
1536
1537 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1538 pa_usec_t volume_sleep;
1539 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1540 if (volume_sleep > 0)
1541 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1542 }
1543
1544 if (rtpoll_sleep > 0)
1545 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1546 else
1547 pa_rtpoll_set_timer_disabled(u->rtpoll);
1548
1549 /* Hmm, nothing to do. Let's sleep */
1550 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1551 goto fail;
1552
1553 if (u->sink->flags & PA_SINK_SYNC_VOLUME)
1554 pa_sink_volume_change_apply(u->sink, NULL);
1555
1556 if (ret == 0)
1557 goto finish;
1558
1559 /* Tell ALSA about this and process its response */
1560 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1561 struct pollfd *pollfd;
1562 int err;
1563 unsigned n;
1564
1565 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1566
1567 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1568 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1569 goto fail;
1570 }
1571
1572 if (revents & ~POLLOUT) {
1573 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1574 goto fail;
1575
1576 u->first = TRUE;
1577 u->since_start = 0;
1578 } else if (revents && u->use_tsched && pa_log_ratelimit())
1579 pa_log_debug("Wakeup from ALSA!");
1580
1581 } else
1582 revents = 0;
1583 }
1584
1585 fail:
1586 /* If this wasn't a regular exit from the loop we have to continue
1587 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1588 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1589 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1590
1591 finish:
1592 pa_log_debug("Thread shutting down");
1593 }
1594
1595 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1596 const char *n;
1597 char *t;
1598
1599 pa_assert(data);
1600 pa_assert(ma);
1601 pa_assert(device_name);
1602
1603 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1604 pa_sink_new_data_set_name(data, n);
1605 data->namereg_fail = TRUE;
1606 return;
1607 }
1608
1609 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1610 data->namereg_fail = TRUE;
1611 else {
1612 n = device_id ? device_id : device_name;
1613 data->namereg_fail = FALSE;
1614 }
1615
1616 if (mapping)
1617 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1618 else
1619 t = pa_sprintf_malloc("alsa_output.%s", n);
1620
1621 pa_sink_new_data_set_name(data, t);
1622 pa_xfree(t);
1623 }
1624
1625 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1626
1627 if (!mapping && !element)
1628 return;
1629
1630 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1631 pa_log_info("Failed to find a working mixer device.");
1632 return;
1633 }
1634
1635 if (element) {
1636
1637 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1638 goto fail;
1639
1640 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1641 goto fail;
1642
1643 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1644 pa_alsa_path_dump(u->mixer_path);
1645 } else {
1646
1647 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1648 goto fail;
1649
1650 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1651
1652 pa_log_debug("Probed mixer paths:");
1653 pa_alsa_path_set_dump(u->mixer_path_set);
1654 }
1655
1656 return;
1657
1658 fail:
1659
1660 if (u->mixer_path_set) {
1661 pa_alsa_path_set_free(u->mixer_path_set);
1662 u->mixer_path_set = NULL;
1663 } else if (u->mixer_path) {
1664 pa_alsa_path_free(u->mixer_path);
1665 u->mixer_path = NULL;
1666 }
1667
1668 if (u->mixer_handle) {
1669 snd_mixer_close(u->mixer_handle);
1670 u->mixer_handle = NULL;
1671 }
1672 }
1673
1674 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB, pa_bool_t sync_volume) {
1675 pa_assert(u);
1676
1677 if (!u->mixer_handle)
1678 return 0;
1679
1680 if (u->sink->active_port) {
1681 pa_alsa_port_data *data;
1682
1683 /* We have a list of supported paths, so let's activate the
1684 * one that has been chosen as active */
1685
1686 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1687 u->mixer_path = data->path;
1688
1689 pa_alsa_path_select(data->path, u->mixer_handle);
1690
1691 if (data->setting)
1692 pa_alsa_setting_select(data->setting, u->mixer_handle);
1693
1694 } else {
1695
1696 if (!u->mixer_path && u->mixer_path_set)
1697 u->mixer_path = u->mixer_path_set->paths;
1698
1699 if (u->mixer_path) {
1700 /* Hmm, we have only a single path, so let's activate it */
1701
1702 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1703
1704 if (u->mixer_path->settings)
1705 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1706 } else
1707 return 0;
1708 }
1709
1710 /* FIXME: need automatic detection rather than hard-coded path */
1711 if (!strcmp(u->mixer_path->name, "iec958-passthrough-output")) {
1712 u->sink->flags |= PA_SINK_PASSTHROUGH;
1713 } else {
1714 u->sink->flags &= ~PA_SINK_PASSTHROUGH;
1715 }
1716
1717 if (!u->mixer_path->has_volume)
1718 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1719 else {
1720
1721 if (u->mixer_path->has_dB) {
1722 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1723
1724 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1725 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1726
1727 if (u->mixer_path->max_dB > 0.0)
1728 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1729 else
1730 pa_log_info("No particular base volume set, fixing to 0 dB");
1731
1732 } else {
1733 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1734 u->sink->base_volume = PA_VOLUME_NORM;
1735 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1736 }
1737
1738 u->sink->get_volume = sink_get_volume_cb;
1739 u->sink->set_volume = sink_set_volume_cb;
1740 u->sink->write_volume = sink_write_volume_cb;
1741
1742 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL;
1743 if (u->mixer_path->has_dB) {
1744 u->sink->flags |= PA_SINK_DECIBEL_VOLUME;
1745 if (sync_volume) {
1746 u->sink->flags |= PA_SINK_SYNC_VOLUME;
1747 pa_log_info("Successfully enabled synchronous volume.");
1748 }
1749 }
1750
1751 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1752 }
1753
1754 if (!u->mixer_path->has_mute) {
1755 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1756 } else {
1757 u->sink->get_mute = sink_get_mute_cb;
1758 u->sink->set_mute = sink_set_mute_cb;
1759 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1760 pa_log_info("Using hardware mute control.");
1761 }
1762
1763 if (u->sink->flags & (PA_SINK_HW_VOLUME_CTRL|PA_SINK_HW_MUTE_CTRL)) {
1764 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1765 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1766 u->mixer_pd = pa_alsa_mixer_pdata_new();
1767 mixer_callback = io_mixer_callback;
1768
1769 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1770 pa_log("Failed to initialize file descriptor monitoring");
1771 return -1;
1772 }
1773 } else {
1774 u->mixer_fdl = pa_alsa_fdlist_new();
1775 mixer_callback = ctl_mixer_callback;
1776
1777 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1778 pa_log("Failed to initialize file descriptor monitoring");
1779 return -1;
1780 }
1781 }
1782
1783 if (u->mixer_path_set)
1784 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1785 else
1786 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1787 }
1788
1789 return 0;
1790 }
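/* Note (cf. the commit subject): setup_mixer() only *tries* to enable
 * hardware volume and synchronous volume. Callers should check the
 * resulting PA_SINK_HW_VOLUME_CTRL / PA_SINK_SYNC_VOLUME flags rather
 * than assume the probe succeeded. */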
1791
1792 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1793
1794 struct userdata *u = NULL;
1795 const char *dev_id = NULL;
1796 pa_sample_spec ss, requested_ss;
1797 pa_channel_map map;
1798 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1799 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1800 size_t frame_size;
1801 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
1802 pa_sink_new_data data;
1803 pa_alsa_profile_set *profile_set = NULL;
1804
1805 pa_assert(m);
1806 pa_assert(ma);
1807
1808 ss = m->core->default_sample_spec;
1809 map = m->core->default_channel_map;
1810 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1811 pa_log("Failed to parse sample specification and channel map");
1812 goto fail;
1813 }
1814
1815 requested_ss = ss;
1816 frame_size = pa_frame_size(&ss);
1817
1818 nfrags = m->core->default_n_fragments;
1819 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1820 if (frag_size <= 0)
1821 frag_size = (uint32_t) frame_size;
1822 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1823 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1824
1825 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1826 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1827 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1828 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1829 pa_log("Failed to parse buffer metrics");
1830 goto fail;
1831 }
1832
1833 buffer_size = nfrags * frag_size;
1834
1835 period_frames = frag_size/frame_size;
1836 buffer_frames = buffer_size/frame_size;
1837 tsched_frames = tsched_size/frame_size;
1838
1839 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1840 pa_log("Failed to parse mmap argument.");
1841 goto fail;
1842 }
1843
1844 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1845 pa_log("Failed to parse tsched argument.");
1846 goto fail;
1847 }
1848
1849 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1850 pa_log("Failed to parse ignore_dB argument.");
1851 goto fail;
1852 }
1853
1854 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
1855 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
1856 pa_log("Failed to parse rewind_safeguard argument");
1857 goto fail;
1858 }
1859
1860 sync_volume = m->core->sync_volume;
1861 if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
1862 pa_log("Failed to parse sync_volume argument.");
1863 goto fail;
1864 }
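/* For reference, the arguments parsed above arrive from a module load
 * line along these lines (illustrative values, not defaults):
 *
 *   load-module module-alsa-sink device_id=0 tsched=yes fragments=4 fragment_size=4410 ignore_dB=no sync_volume=yes
 */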
1865
1866 use_tsched = pa_alsa_may_tsched(use_tsched);
1867
1868 u = pa_xnew0(struct userdata, 1);
1869 u->core = m->core;
1870 u->module = m;
1871 u->use_mmap = use_mmap;
1872 u->use_tsched = use_tsched;
1873 u->first = TRUE;
1874 u->rewind_safeguard = rewind_safeguard;
1875 u->rtpoll = pa_rtpoll_new();
1876 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1877
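/* The smoother estimates playback time from device timing samples. Going
 * by the parameter order of pa_smoother_new() (adjust time, history
 * window, monotonic, smoothing enabled, minimum history points, time
 * offset, start paused), this creates a monotonic, smoothing estimator
 * over a 10 s window that needs 5 data points before it is trusted and
 * starts out paused at the current rtclock time. */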
1878 u->smoother = pa_smoother_new(
1879 SMOOTHER_ADJUST_USEC,
1880 SMOOTHER_WINDOW_USEC,
1881 TRUE,
1882 TRUE,
1883 5,
1884 pa_rtclock_now(),
1885 TRUE);
1886 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1887
1888 dev_id = pa_modargs_get_value(
1889 ma, "device_id",
1890 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1891
1892 if (reserve_init(u, dev_id) < 0)
1893 goto fail;
1894
1895 if (reserve_monitor_init(u, dev_id) < 0)
1896 goto fail;
1897
1898 b = use_mmap;
1899 d = use_tsched;
1900
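/* Three ways to open the PCM, tried in this order: an explicit mapping
 * handed in by module-alsa-card, a device_id= argument that is probed
 * against a freshly created profile set, or a plain device= string. The
 * booleans b and d are in/out parameters: they start as the requested
 * mmap/tsched settings and are cleared by the open helpers if the device
 * cannot deliver them (checked further below). */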
1901 if (mapping) {
1902
1903 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1904 pa_log("device_id= not set");
1905 goto fail;
1906 }
1907
1908 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1909 dev_id,
1910 &u->device_name,
1911 &ss, &map,
1912 SND_PCM_STREAM_PLAYBACK,
1913 &period_frames, &buffer_frames, tsched_frames,
1914 &b, &d, mapping)))
1915
1916 goto fail;
1917
1918 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1919
1920 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1921 goto fail;
1922
1923 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1924 dev_id,
1925 &u->device_name,
1926 &ss, &map,
1927 SND_PCM_STREAM_PLAYBACK,
1928 &period_frames, &buffer_frames, tsched_frames,
1929 &b, &d, profile_set, &mapping)))
1930
1931 goto fail;
1932
1933 } else {
1934
1935 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1936 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1937 &u->device_name,
1938 &ss, &map,
1939 SND_PCM_STREAM_PLAYBACK,
1940 &period_frames, &buffer_frames, tsched_frames,
1941 &b, &d, FALSE)))
1942 goto fail;
1943 }
1944
1945 pa_assert(u->device_name);
1946 pa_log_info("Successfully opened device %s.", u->device_name);
1947
1948 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1949         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1950 goto fail;
1951 }
1952
1953 if (mapping)
1954 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1955
1956 if (use_mmap && !b) {
1957 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1958 u->use_mmap = use_mmap = FALSE;
1959 }
1960
1961 if (use_tsched && (!b || !d)) {
1962 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1963 u->use_tsched = use_tsched = FALSE;
1964 }
1965
1966 if (u->use_mmap)
1967 pa_log_info("Successfully enabled mmap() mode.");
1968
1969 if (u->use_tsched)
1970 pa_log_info("Successfully enabled timer-based scheduling mode.");
1971
1972 /* ALSA might tweak the sample spec, so recalculate the frame size */
1973 frame_size = pa_frame_size(&ss);
1974
1975 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1976
1977 pa_sink_new_data_init(&data);
1978 data.driver = driver;
1979 data.module = m;
1980 data.card = card;
1981 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1982
1983 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1984 * variable instead of using &data.namereg_fail directly, because
1985 * data.namereg_fail is a bitfield and taking the address of a bitfield
1986 * variable is impossible. */
1987 namereg_fail = data.namereg_fail;
1988 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1989 pa_log("Failed to parse boolean argument namereg_fail.");
1990 pa_sink_new_data_done(&data);
1991 goto fail;
1992 }
1993 data.namereg_fail = namereg_fail;
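/* A hypothetical snippet showing the constraint described above:
 *
 *   struct { pa_bool_t fail:1; } d;
 *   pa_bool_t *p = &d.fail;   -- rejected: C forbids taking the address
 *                                of a bit-field
 */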
1994
1995 pa_sink_new_data_set_sample_spec(&data, &ss);
1996 pa_sink_new_data_set_channel_map(&data, &map);
1997
1998 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1999 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2000 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2001 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2002 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2003
2004 if (mapping) {
2005 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2006 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2007 }
2008
2009 pa_alsa_init_description(data.proplist);
2010
2011 if (u->control_device)
2012 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2013
2014 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2015 pa_log("Invalid properties");
2016 pa_sink_new_data_done(&data);
2017 goto fail;
2018 }
2019
2020 if (u->mixer_path_set)
2021 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
2022
2023 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
2024 pa_sink_new_data_done(&data);
2025
2026 if (!u->sink) {
2027 pa_log("Failed to create sink object");
2028 goto fail;
2029 }
2030
2031 if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
2032 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2033 pa_log("Failed to parse sync_volume_safety_margin parameter");
2034 goto fail;
2035 }
2036
2037 if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
2038 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2039 pa_log("Failed to parse sync_volume_extra_delay parameter");
2040 goto fail;
2041 }
2042
2043 u->sink->parent.process_msg = sink_process_msg;
2044 if (u->use_tsched)
2045 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2046 u->sink->set_state = sink_set_state_cb;
2047 u->sink->set_port = sink_set_port_cb;
2048 u->sink->userdata = u;
2049
2050 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2051 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2052
2053 u->frame_size = frame_size;
2054 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2055 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2056 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2057
2058 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2059 (double) u->hwbuf_size / (double) u->fragment_size,
2060 (long unsigned) u->fragment_size,
2061 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2062 (long unsigned) u->hwbuf_size,
2063 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2064
2065 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2066 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2067 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2068 else {
2069 pa_log_info("Disabling rewind for device %s", u->device_name);
2070 pa_sink_set_max_rewind(u->sink, 0);
2071 }
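/* Rewinding is only offered on direct hw devices, presumably because
 * snd_pcm_rewind() cannot be relied upon for plugin-routed PCMs (dmix,
 * softvol and friends) that may already have passed samples on; for those
 * max_rewind stays at 0. */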
2072
2073 if (u->use_tsched) {
2074 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
2075
2076 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
2077 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
2078
2079 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
2080 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
2081
2082 fix_min_sleep_wakeup(u);
2083 fix_tsched_watermark(u);
2084
2085 pa_sink_set_latency_range(u->sink,
2086 0,
2087 pa_bytes_to_usec(u->hwbuf_size, &ss));
2088
2089 pa_log_info("Time scheduling watermark is %0.2fms",
2090 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
2091 } else
2092 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
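/* Latency model: with timer-based scheduling the sink advertises a
 * dynamic latency range from 0 up to the full hardware buffer time, and
 * the watermark logic above decides how early to wake up; without tsched
 * the latency is pinned to the full hardware buffer. */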
2093
2094 reserve_update(u);
2095
2096 if (update_sw_params(u) < 0)
2097 goto fail;
2098
2099 if (setup_mixer(u, ignore_dB, sync_volume) < 0)
2100 goto fail;
2101
2102 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2103
2104 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2105 pa_log("Failed to create thread.");
2106 goto fail;
2107 }
2108
2109 /* Get initial mixer settings */
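/* If the user supplied an initial volume/mute at load time, push it to
 * the hardware via set_volume()/set_mute(); otherwise pull the current
 * hardware state into the sink via get_volume()/get_mute(). The callbacks
 * are only present when the corresponding hardware control was set up,
 * hence the NULL checks. */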
2110 if (data.volume_is_set) {
2111 if (u->sink->set_volume)
2112 u->sink->set_volume(u->sink);
2113 } else {
2114 if (u->sink->get_volume)
2115 u->sink->get_volume(u->sink);
2116 }
2117
2118 if (data.muted_is_set) {
2119 if (u->sink->set_mute)
2120 u->sink->set_mute(u->sink);
2121 } else {
2122 if (u->sink->get_mute)
2123 u->sink->get_mute(u->sink);
2124 }
2125
2126 pa_sink_put(u->sink);
2127
2128 if (profile_set)
2129 pa_alsa_profile_set_free(profile_set);
2130
2131 return u->sink;
2132
2133 fail:
2134
2135 if (u)
2136 userdata_free(u);
2137
2138 if (profile_set)
2139 pa_alsa_profile_set_free(profile_set);
2140
2141 return NULL;
2142 }
2143
2144 static void userdata_free(struct userdata *u) {
2145 pa_assert(u);
2146
2147 if (u->sink)
2148 pa_sink_unlink(u->sink);
2149
2150 if (u->thread) {
2151 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2152 pa_thread_free(u->thread);
2153 }
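/* pa_asyncmsgq_send() is synchronous, so the IO thread has fully
 * processed PA_MESSAGE_SHUTDOWN and left its loop before
 * pa_thread_free() joins it. */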
2154
2155 pa_thread_mq_done(&u->thread_mq);
2156
2157 if (u->sink)
2158 pa_sink_unref(u->sink);
2159
2160 if (u->memchunk.memblock)
2161 pa_memblock_unref(u->memchunk.memblock);
2162
2163 if (u->mixer_pd)
2164 pa_alsa_mixer_pdata_free(u->mixer_pd);
2165
2166 if (u->alsa_rtpoll_item)
2167 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2168
2169 if (u->rtpoll)
2170 pa_rtpoll_free(u->rtpoll);
2171
2172 if (u->pcm_handle) {
2173 snd_pcm_drop(u->pcm_handle);
2174 snd_pcm_close(u->pcm_handle);
2175 }
2176
2177 if (u->mixer_fdl)
2178 pa_alsa_fdlist_free(u->mixer_fdl);
2179
2180 if (u->mixer_path_set)
2181 pa_alsa_path_set_free(u->mixer_path_set);
2182 else if (u->mixer_path)
2183 pa_alsa_path_free(u->mixer_path);
2184
2185 if (u->mixer_handle)
2186 snd_mixer_close(u->mixer_handle);
2187
2188 if (u->smoother)
2189 pa_smoother_free(u->smoother);
2190
2191 reserve_done(u);
2192 monitor_done(u);
2193
2194 pa_xfree(u->device_name);
2195 pa_xfree(u->control_device);
2196 pa_xfree(u);
2197 }
2198
2199 void pa_alsa_sink_free(pa_sink *s) {
2200 struct userdata *u;
2201
2202 pa_sink_assert_ref(s);
2203 pa_assert_se(u = s->userdata);
2204
2205 userdata_free(u);
2206 }