1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to recheck whether things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold during the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty */
80
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
86
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct; don't extend granularity in software unless the difference exceeds this level */
88
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms; depending on channels/rate/sample format the safeguard may exceed the 256 bytes above */
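/* Worked example (illustrative): the effective safeguard is the larger of
 * the two values above. At 48kHz S16 stereo (4 bytes/frame), 256 bytes are
 * 64 frames, i.e. 64/48000 s ~= 1.33ms. For a wider spec such as 96kHz S32
 * 5.1 (24 bytes/frame), 1330us come out at 127 frames ~= 3048 bytes, so
 * the usec-based bound dominates and the safeguard exceeds 256 bytes. */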
91
92 struct userdata {
93 pa_core *core;
94 pa_module *module;
95 pa_sink *sink;
96
97 pa_thread *thread;
98 pa_thread_mq thread_mq;
99 pa_rtpoll *rtpoll;
100
101 snd_pcm_t *pcm_handle;
102
103 pa_alsa_fdlist *mixer_fdl;
104 pa_alsa_mixer_pdata *mixer_pd;
105 snd_mixer_t *mixer_handle;
106 pa_alsa_path_set *mixer_path_set;
107 pa_alsa_path *mixer_path;
108
109 pa_cvolume hardware_volume;
110
111 size_t
112 frame_size,
113 fragment_size,
114 hwbuf_size,
115 tsched_watermark,
116 hwbuf_unused,
117 min_sleep,
118 min_wakeup,
119 watermark_inc_step,
120 watermark_dec_step,
121 watermark_inc_threshold,
122 watermark_dec_threshold,
123 rewind_safeguard;
124
125 pa_usec_t watermark_dec_not_before;
126
127 pa_memchunk memchunk;
128
129 char *device_name; /* name of the PCM device */
130 char *control_device; /* name of the control device */
131
132 pa_bool_t use_mmap:1, use_tsched:1;
133
134 pa_bool_t first, after_rewind;
135
136 pa_rtpoll_item *alsa_rtpoll_item;
137
138 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
139
140 pa_smoother *smoother;
141 uint64_t write_count;
142 uint64_t since_start;
143 pa_usec_t smoother_interval;
144 pa_usec_t last_smoother_update;
145
146 pa_reserve_wrapper *reserve;
147 pa_hook_slot *reserve_slot;
148 pa_reserve_monitor_wrapper *monitor;
149 pa_hook_slot *monitor_slot;
150 };
151
152 static void userdata_free(struct userdata *u);
153
154 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
155 pa_assert(r);
156 pa_assert(u);
157
158 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
159 return PA_HOOK_CANCEL;
160
161 return PA_HOOK_OK;
162 }
163
164 static void reserve_done(struct userdata *u) {
165 pa_assert(u);
166
167 if (u->reserve_slot) {
168 pa_hook_slot_free(u->reserve_slot);
169 u->reserve_slot = NULL;
170 }
171
172 if (u->reserve) {
173 pa_reserve_wrapper_unref(u->reserve);
174 u->reserve = NULL;
175 }
176 }
177
178 static void reserve_update(struct userdata *u) {
179 const char *description;
180 pa_assert(u);
181
182 if (!u->sink || !u->reserve)
183 return;
184
185 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
186 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
187 }
188
189 static int reserve_init(struct userdata *u, const char *dname) {
190 char *rname;
191
192 pa_assert(u);
193 pa_assert(dname);
194
195 if (u->reserve)
196 return 0;
197
198 if (pa_in_system_mode())
199 return 0;
200
201 if (!(rname = pa_alsa_get_reserve_name(dname)))
202 return 0;
203
204 /* We are resuming, try to lock the device */
205 u->reserve = pa_reserve_wrapper_get(u->core, rname);
206 pa_xfree(rname);
207
208 if (!(u->reserve))
209 return -1;
210
211 reserve_update(u);
212
213 pa_assert(!u->reserve_slot);
214 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
215
216 return 0;
217 }
218
219 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
220 pa_bool_t b;
221
222 pa_assert(w);
223 pa_assert(u);
224
225 b = PA_PTR_TO_UINT(busy) && !u->reserve;
226
227 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
228 return PA_HOOK_OK;
229 }
230
231 static void monitor_done(struct userdata *u) {
232 pa_assert(u);
233
234 if (u->monitor_slot) {
235 pa_hook_slot_free(u->monitor_slot);
236 u->monitor_slot = NULL;
237 }
238
239 if (u->monitor) {
240 pa_reserve_monitor_wrapper_unref(u->monitor);
241 u->monitor = NULL;
242 }
243 }
244
245 static int reserve_monitor_init(struct userdata *u, const char *dname) {
246 char *rname;
247
248 pa_assert(u);
249 pa_assert(dname);
250
251 if (pa_in_system_mode())
252 return 0;
253
254 if (!(rname = pa_alsa_get_reserve_name(dname)))
255 return 0;
256
257 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
258 pa_xfree(rname);
259
260 if (!(u->monitor))
261 return -1;
262
263 pa_assert(!u->monitor_slot);
264 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265
266 return 0;
267 }
268
269 static void fix_min_sleep_wakeup(struct userdata *u) {
270 size_t max_use, max_use_2;
271
272 pa_assert(u);
273 pa_assert(u->use_tsched);
274
275 max_use = u->hwbuf_size - u->hwbuf_unused;
276 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
277
278 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
279 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
280
281 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
282 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
283 }
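/* Worked example (illustrative): with a 2s hw buffer at 44.1kHz S16 stereo
 * (4 bytes/frame) and hwbuf_unused = 0, max_use = 352800 bytes and
 * max_use_2 = 176400 bytes. TSCHED_MIN_SLEEP_USEC (10ms) converts to 1764
 * bytes and TSCHED_MIN_WAKEUP_USEC (4ms) to 704 bytes; both already lie
 * inside [frame_size, max_use_2], so the clamps are no-ops here. */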
284
285 static void fix_tsched_watermark(struct userdata *u) {
286 size_t max_use;
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 max_use = u->hwbuf_size - u->hwbuf_unused;
291
292 if (u->tsched_watermark > max_use - u->min_sleep)
293 u->tsched_watermark = max_use - u->min_sleep;
294
295 if (u->tsched_watermark < u->min_wakeup)
296 u->tsched_watermark = u->min_wakeup;
297 }
298
299 static void increase_watermark(struct userdata *u) {
300 size_t old_watermark;
301 pa_usec_t old_min_latency, new_min_latency;
302
303 pa_assert(u);
304 pa_assert(u->use_tsched);
305
306 /* First, just try to increase the watermark */
307 old_watermark = u->tsched_watermark;
308 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
309 fix_tsched_watermark(u);
310
311 if (old_watermark != u->tsched_watermark) {
312 pa_log_info("Increasing wakeup watermark to %0.2f ms",
313 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
314 return;
315 }
316
317 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
318 old_min_latency = u->sink->thread_info.min_latency;
319 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
320 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
321
322 if (old_min_latency != new_min_latency) {
323 pa_log_info("Increasing minimal latency to %0.2f ms",
324 (double) new_min_latency / PA_USEC_PER_MSEC);
325
326 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
327 }
328
329 /* When we reach this we're officially fucked! */
330 }
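/* Illustrative trace: with watermark_inc_step = 10ms, a 20ms watermark
 * grows as min(2*20, 20+10) = 30ms, then min(2*30, 30+10) = 40ms, and so
 * on in 10ms steps until fix_tsched_watermark() caps it at
 * max_use - min_sleep; only once that cap is hit do we start raising the
 * sink's min_latency instead. */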
331
332 static void decrease_watermark(struct userdata *u) {
333 size_t old_watermark;
334 pa_usec_t now;
335
336 pa_assert(u);
337 pa_assert(u->use_tsched);
338
339 now = pa_rtclock_now();
340
341 if (u->watermark_dec_not_before <= 0)
342 goto restart;
343
344 if (u->watermark_dec_not_before > now)
345 return;
346
347 old_watermark = u->tsched_watermark;
348
349 if (u->tsched_watermark < u->watermark_dec_step)
350 u->tsched_watermark = u->tsched_watermark / 2;
351 else
352 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
353
354 fix_tsched_watermark(u);
355
356 if (old_watermark != u->tsched_watermark)
357 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
358 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
359
360 /* We don't change the latency range */
361
362 restart:
363 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
364 }
365
366 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
367 pa_usec_t usec, wm;
368
369 pa_assert(sleep_usec);
370 pa_assert(process_usec);
371
372 pa_assert(u);
373 pa_assert(u->use_tsched);
374
375 usec = pa_sink_get_requested_latency_within_thread(u->sink);
376
377 if (usec == (pa_usec_t) -1)
378 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
379
380 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
381
382 if (wm > usec)
383 wm = usec/2;
384
385 *sleep_usec = usec - wm;
386 *process_usec = wm;
387
388 #ifdef DEBUG_TIMING
389 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
390 (unsigned long) (usec / PA_USEC_PER_MSEC),
391 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
392 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
393 #endif
394 }
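/* Worked example (illustrative): with a requested latency of 25ms and a
 * 20ms watermark we get a 5ms sleep time and 20ms of processing headroom.
 * If the requested latency drops to 10ms, the watermark is first halved
 * to 5ms, yielding a 5ms/5ms split; the watermark can never consume the
 * entire latency budget. */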
395
396 static int try_recover(struct userdata *u, const char *call, int err) {
397 pa_assert(u);
398 pa_assert(call);
399 pa_assert(err < 0);
400
401 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
402
403 pa_assert(err != -EAGAIN);
404
405 if (err == -EPIPE)
406 pa_log_debug("%s: Buffer underrun!", call);
407
408 if (err == -ESTRPIPE)
409 pa_log_debug("%s: System suspended!", call);
410
411 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
412 pa_log("%s: %s", call, pa_alsa_strerror(err));
413 return -1;
414 }
415
416 u->first = TRUE;
417 u->since_start = 0;
418 return 0;
419 }
420
421 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
422 size_t left_to_play;
423 pa_bool_t underrun = FALSE;
424
425 /* We use <= instead of < for this check here because an underrun
426 * only happens after the last sample was processed, not when it is
427 * merely removed from the buffer. This is particularly important
428 * when block transfer is used. */
429
430 if (n_bytes <= u->hwbuf_size)
431 left_to_play = u->hwbuf_size - n_bytes;
432 else {
433
434 /* We got a dropout. What a mess! */
435 left_to_play = 0;
436 underrun = TRUE;
437
438 #ifdef DEBUG_TIMING
439 PA_DEBUG_TRAP;
440 #endif
441
442 if (!u->first && !u->after_rewind)
443 if (pa_log_ratelimit())
444 pa_log_info("Underrun!");
445 }
446
447 #ifdef DEBUG_TIMING
448 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
449 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
450 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
451 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
452 #endif
453
454 if (u->use_tsched) {
455 pa_bool_t reset_not_before = TRUE;
456
457 if (!u->first && !u->after_rewind) {
458 if (underrun || left_to_play < u->watermark_inc_threshold)
459 increase_watermark(u);
460 else if (left_to_play > u->watermark_dec_threshold) {
461 reset_not_before = FALSE;
462
463 /* We decrease the watermark only if we have actually
464 * been woken up by a timeout. If something else woke
465 * us up it's too easy to fulfill the deadlines... */
466
467 if (on_timeout)
468 decrease_watermark(u);
469 }
470 }
471
472 if (reset_not_before)
473 u->watermark_dec_not_before = 0;
474 }
475
476 return left_to_play;
477 }
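/* Illustrative trace: with a 2s hw buffer (352800 bytes at 44.1kHz S16
 * stereo), an avail of n_bytes = 352000 leaves left_to_play = 800 bytes
 * ~= 4.5ms. With the default thresholds (inc = 0ms, dec = 100ms) this
 * neither raises the watermark (no underrun, and 4.5ms is not below 0ms)
 * nor lowers it (4.5ms does not exceed 100ms); only the not-before
 * timestamp is reset. */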
478
479 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
480 pa_bool_t work_done = FALSE;
481 pa_usec_t max_sleep_usec = 0, process_usec = 0;
482 size_t left_to_play;
483 unsigned j = 0;
484
485 pa_assert(u);
486 pa_sink_assert_ref(u->sink);
487
488 if (u->use_tsched)
489 hw_sleep_time(u, &max_sleep_usec, &process_usec);
490
491 for (;;) {
492 snd_pcm_sframes_t n;
493 size_t n_bytes;
494 int r;
495 pa_bool_t after_avail = TRUE;
496
497 /* First we determine how many samples are missing to fill the
498 * buffer up to 100% */
499
500 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
501
502 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
503 continue;
504
505 return r;
506 }
507
508 n_bytes = (size_t) n * u->frame_size;
509
510 #ifdef DEBUG_TIMING
511 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
512 #endif
513
514 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
515 on_timeout = FALSE;
516
517 if (u->use_tsched)
518
519 /* We won't fill up the playback buffer before at least
520 * half the sleep time is over because otherwise we might
521 * ask for more data from the clients than they expect. We
522 * need to guarantee that clients only have to keep around
523 * a single hw buffer length. */
524
525 if (!polled &&
526 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
527 #ifdef DEBUG_TIMING
528 pa_log_debug("Not filling up, because too early.");
529 #endif
530 break;
531 }
532
533 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
534
535 if (polled)
536 PA_ONCE_BEGIN {
537 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
538 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
539 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
540 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
541 pa_strnull(dn));
542 pa_xfree(dn);
543 } PA_ONCE_END;
544
545 #ifdef DEBUG_TIMING
546 pa_log_debug("Not filling up, because not necessary.");
547 #endif
548 break;
549 }
550
551
552 if (++j > 10) {
553 #ifdef DEBUG_TIMING
554 pa_log_debug("Not filling up, because already too many iterations.");
555 #endif
556
557 break;
558 }
559
560 n_bytes -= u->hwbuf_unused;
561 polled = FALSE;
562
563 #ifdef DEBUG_TIMING
564 pa_log_debug("Filling up");
565 #endif
566
567 for (;;) {
568 pa_memchunk chunk;
569 void *p;
570 int err;
571 const snd_pcm_channel_area_t *areas;
572 snd_pcm_uframes_t offset, frames;
573 snd_pcm_sframes_t sframes;
574
575 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
576 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
577
578 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
579
580 if (!after_avail && err == -EAGAIN)
581 break;
582
583 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
584 continue;
585
586 return r;
587 }
588
589 /* Make sure that if these memblocks need to be copied they will fit into one slot */
590 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
591 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
592
593 if (!after_avail && frames == 0)
594 break;
595
596 pa_assert(frames > 0);
597 after_avail = FALSE;
598
599 /* Check these are multiples of 8 bit */
600 pa_assert((areas[0].first & 7) == 0);
601 pa_assert((areas[0].step & 7) == 0);
602
603 /* We assume a single interleaved memory buffer */
604 pa_assert((areas[0].first >> 3) == 0);
605 pa_assert((areas[0].step >> 3) == u->frame_size);
606
607 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
608
609 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
610 chunk.length = pa_memblock_get_length(chunk.memblock);
611 chunk.index = 0;
612
613 pa_sink_render_into_full(u->sink, &chunk);
614 pa_memblock_unref_fixed(chunk.memblock);
615
616 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
617
618 if (!after_avail && (int) sframes == -EAGAIN)
619 break;
620
621 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
622 continue;
623
624 return r;
625 }
626
627 work_done = TRUE;
628
629 u->write_count += frames * u->frame_size;
630 u->since_start += frames * u->frame_size;
631
632 #ifdef DEBUG_TIMING
633 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
634 #endif
635
636 if ((size_t) frames * u->frame_size >= n_bytes)
637 break;
638
639 n_bytes -= (size_t) frames * u->frame_size;
640 }
641 }
642
643 if (u->use_tsched) {
644 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
645
646 if (*sleep_usec > process_usec)
647 *sleep_usec -= process_usec;
648 else
649 *sleep_usec = 0;
650 } else
651 *sleep_usec = 0;
652
653 return work_done ? 1 : 0;
654 }
655
656 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
657 pa_bool_t work_done = FALSE;
658 pa_usec_t max_sleep_usec = 0, process_usec = 0;
659 size_t left_to_play;
660 unsigned j = 0;
661
662 pa_assert(u);
663 pa_sink_assert_ref(u->sink);
664
665 if (u->use_tsched)
666 hw_sleep_time(u, &max_sleep_usec, &process_usec);
667
668 for (;;) {
669 snd_pcm_sframes_t n;
670 size_t n_bytes;
671 int r;
672 pa_bool_t after_avail = TRUE;
673
674 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
675
676 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
677 continue;
678
679 return r;
680 }
681
682 n_bytes = (size_t) n * u->frame_size;
683 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
684 on_timeout = FALSE;
685
686 if (u->use_tsched)
687
688 /* We won't fill up the playback buffer before at least
689 * half the sleep time is over because otherwise we might
690 * ask for more data from the clients than they expect. We
691 * need to guarantee that clients only have to keep around
692 * a single hw buffer length. */
693
694 if (!polled &&
695 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
696 break;
697
698 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
699
700 if (polled)
701 PA_ONCE_BEGIN {
702 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
703 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
704 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
705 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
706 pa_strnull(dn));
707 pa_xfree(dn);
708 } PA_ONCE_END;
709
710 break;
711 }
712
713 if (++j > 10) {
714 #ifdef DEBUG_TIMING
715 pa_log_debug("Not filling up, because already too many iterations.");
716 #endif
717
718 break;
719 }
720
721 n_bytes -= u->hwbuf_unused;
722 polled = FALSE;
723
724 for (;;) {
725 snd_pcm_sframes_t frames;
726 void *p;
727
728 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
729
730 if (u->memchunk.length <= 0)
731 pa_sink_render(u->sink, n_bytes, &u->memchunk);
732
733 pa_assert(u->memchunk.length > 0);
734
735 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
736
737 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
738 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
739
740 p = pa_memblock_acquire(u->memchunk.memblock);
741 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
742 pa_memblock_release(u->memchunk.memblock);
743
744 if (PA_UNLIKELY(frames < 0)) {
745
746 if (!after_avail && (int) frames == -EAGAIN)
747 break;
748
749 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
750 continue;
751
752 return r;
753 }
754
755 if (!after_avail && frames == 0)
756 break;
757
758 pa_assert(frames > 0);
759 after_avail = FALSE;
760
761 u->memchunk.index += (size_t) frames * u->frame_size;
762 u->memchunk.length -= (size_t) frames * u->frame_size;
763
764 if (u->memchunk.length <= 0) {
765 pa_memblock_unref(u->memchunk.memblock);
766 pa_memchunk_reset(&u->memchunk);
767 }
768
769 work_done = TRUE;
770
771 u->write_count += frames * u->frame_size;
772 u->since_start += frames * u->frame_size;
773
774 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
775
776 if ((size_t) frames * u->frame_size >= n_bytes)
777 break;
778
779 n_bytes -= (size_t) frames * u->frame_size;
780 }
781 }
782
783 if (u->use_tsched) {
784 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
785
786 if (*sleep_usec > process_usec)
787 *sleep_usec -= process_usec;
788 else
789 *sleep_usec = 0;
790 } else
791 *sleep_usec = 0;
792
793 return work_done ? 1 : 0;
794 }
795
796 static void update_smoother(struct userdata *u) {
797 snd_pcm_sframes_t delay = 0;
798 int64_t position;
799 int err;
800 pa_usec_t now1 = 0, now2;
801 snd_pcm_status_t *status;
802
803 snd_pcm_status_alloca(&status);
804
805 pa_assert(u);
806 pa_assert(u->pcm_handle);
807
808 /* Let's update the time smoother */
809
810 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
811 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
812 return;
813 }
814
815 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
816 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
817 else {
818 snd_htimestamp_t htstamp = { 0, 0 };
819 snd_pcm_status_get_htstamp(status, &htstamp);
820 now1 = pa_timespec_load(&htstamp);
821 }
822
823 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
824 if (now1 <= 0)
825 now1 = pa_rtclock_now();
826
827 /* check whether the time since the last update exceeds the update interval */
828 if (u->last_smoother_update > 0)
829 if (u->last_smoother_update + u->smoother_interval > now1)
830 return;
831
832 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
833
834 if (PA_UNLIKELY(position < 0))
835 position = 0;
836
837 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
838
839 pa_smoother_put(u->smoother, now1, now2);
840
841 u->last_smoother_update = now1;
842 /* exponentially increase the update interval up to the MAX limit */
843 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
844 }
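/* Illustrative trace: starting from SMOOTHER_MIN_INTERVAL = 2ms the
 * update interval doubles on every accepted sample (2, 4, 8, 16, 32, 64,
 * 128ms) and is then capped at SMOOTHER_MAX_INTERVAL = 200ms, so after
 * about eight updates the smoother settles at one update per 200ms. */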
845
846 static pa_usec_t sink_get_latency(struct userdata *u) {
847 pa_usec_t r;
848 int64_t delay;
849 pa_usec_t now1, now2;
850
851 pa_assert(u);
852
853 now1 = pa_rtclock_now();
854 now2 = pa_smoother_get(u->smoother, now1);
855
856 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
857
858 r = delay >= 0 ? (pa_usec_t) delay : 0;
859
860 if (u->memchunk.memblock)
861 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
862
863 return r;
864 }
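/* Worked example (illustrative): if write_count corresponds to 1s of
 * audio and the smoother estimates that 980ms of it have been played by
 * now, the reported latency is 20ms, plus whatever is still queued in
 * u->memchunk on the non-mmap path. */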
865
866 static int build_pollfd(struct userdata *u) {
867 pa_assert(u);
868 pa_assert(u->pcm_handle);
869
870 if (u->alsa_rtpoll_item)
871 pa_rtpoll_item_free(u->alsa_rtpoll_item);
872
873 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
874 return -1;
875
876 return 0;
877 }
878
879 /* Called from IO context */
880 static int suspend(struct userdata *u) {
881 pa_assert(u);
882 pa_assert(u->pcm_handle);
883
884 pa_smoother_pause(u->smoother, pa_rtclock_now());
885
886 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
887 * take awfully long with our long buffer sizes today. */
888 snd_pcm_close(u->pcm_handle);
889 u->pcm_handle = NULL;
890
891 if (u->alsa_rtpoll_item) {
892 pa_rtpoll_item_free(u->alsa_rtpoll_item);
893 u->alsa_rtpoll_item = NULL;
894 }
895
896 /* We reset max_rewind/max_request here to make sure that the old
897 * max_request/max_rewind values set before the suspend don't
898 * influence the per-stream buffers of streams created while we
899 * are suspended, and that those streams' requirements in turn
900 * don't influence us. */
901 pa_sink_set_max_rewind_within_thread(u->sink, 0);
902 pa_sink_set_max_request_within_thread(u->sink, 0);
903
904 pa_log_info("Device suspended...");
905
906 return 0;
907 }
908
909 /* Called from IO context */
910 static int update_sw_params(struct userdata *u) {
911 snd_pcm_uframes_t avail_min;
912 int err;
913
914 pa_assert(u);
915
916 /* Use the full buffer if no one asked us for anything specific */
917 u->hwbuf_unused = 0;
918
919 if (u->use_tsched) {
920 pa_usec_t latency;
921
922 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
923 size_t b;
924
925 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
926
927 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
928
929 /* We need at least one sample in our buffer */
930
931 if (PA_UNLIKELY(b < u->frame_size))
932 b = u->frame_size;
933
934 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
935 }
936
937 fix_min_sleep_wakeup(u);
938 fix_tsched_watermark(u);
939 }
940
941 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
942
943 /* We need at least one frame in the used part of the buffer */
944 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
945
946 if (u->use_tsched) {
947 pa_usec_t sleep_usec, process_usec;
948
949 hw_sleep_time(u, &sleep_usec, &process_usec);
950 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
951 }
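/* Worked example (illustrative): for a 20ms latency request at 44.1kHz
 * S16 stereo, b = 3528 bytes, so hwbuf_unused = hwbuf_size - 3528 and
 * avail_min = hwbuf_unused/4 + 1 frames; in tsched mode the computed
 * sleep time is added on top (converted back to frames), so poll() only
 * wakes us once we are inside our own wakeup window anyway. */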
952
953 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
954
955 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
956 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
957 return err;
958 }
959
960 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
961 if (pa_alsa_pcm_is_hw(u->pcm_handle))
962 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
963 else {
964 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
965 pa_sink_set_max_rewind_within_thread(u->sink, 0);
966 }
967
968 return 0;
969 }
970
971 /* Called from IO context */
972 static int unsuspend(struct userdata *u) {
973 pa_sample_spec ss;
974 int err;
975 pa_bool_t b, d;
976 snd_pcm_uframes_t period_size, buffer_size;
977
978 pa_assert(u);
979 pa_assert(!u->pcm_handle);
980
981 pa_log_info("Trying resume...");
982
983 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
984 SND_PCM_NONBLOCK|
985 SND_PCM_NO_AUTO_RESAMPLE|
986 SND_PCM_NO_AUTO_CHANNELS|
987 SND_PCM_NO_AUTO_FORMAT)) < 0) {
988 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
989 goto fail;
990 }
991
992 ss = u->sink->sample_spec;
993 period_size = u->fragment_size / u->frame_size;
994 buffer_size = u->hwbuf_size / u->frame_size;
995 b = u->use_mmap;
996 d = u->use_tsched;
997
998 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
999 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1000 goto fail;
1001 }
1002
1003 if (b != u->use_mmap || d != u->use_tsched) {
1004 pa_log_warn("Resume failed, couldn't get original access mode.");
1005 goto fail;
1006 }
1007
1008 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1009 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1010 goto fail;
1011 }
1012
1013 if (period_size*u->frame_size != u->fragment_size ||
1014 buffer_size*u->frame_size != u->hwbuf_size) {
1015 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1016 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1017 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1018 goto fail;
1019 }
1020
1021 if (update_sw_params(u) < 0)
1022 goto fail;
1023
1024 if (build_pollfd(u) < 0)
1025 goto fail;
1026
1027 u->write_count = 0;
1028 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1029 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1030 u->last_smoother_update = 0;
1031
1032 u->first = TRUE;
1033 u->since_start = 0;
1034
1035 pa_log_info("Resumed successfully...");
1036
1037 return 0;
1038
1039 fail:
1040 if (u->pcm_handle) {
1041 snd_pcm_close(u->pcm_handle);
1042 u->pcm_handle = NULL;
1043 }
1044
1045 return -PA_ERR_IO;
1046 }
1047
1048 /* Called from IO context */
1049 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1050 struct userdata *u = PA_SINK(o)->userdata;
1051
1052 switch (code) {
1053
1054 case PA_SINK_MESSAGE_GET_LATENCY: {
1055 pa_usec_t r = 0;
1056
1057 if (u->pcm_handle)
1058 r = sink_get_latency(u);
1059
1060 *((pa_usec_t*) data) = r;
1061
1062 return 0;
1063 }
1064
1065 case PA_SINK_MESSAGE_SET_STATE:
1066
1067 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1068
1069 case PA_SINK_SUSPENDED: {
1070 int r;
1071
1072 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1073
1074 if ((r = suspend(u)) < 0)
1075 return r;
1076
1077 break;
1078 }
1079
1080 case PA_SINK_IDLE:
1081 case PA_SINK_RUNNING: {
1082 int r;
1083
1084 if (u->sink->thread_info.state == PA_SINK_INIT) {
1085 if (build_pollfd(u) < 0)
1086 return -PA_ERR_IO;
1087 }
1088
1089 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1090 if ((r = unsuspend(u)) < 0)
1091 return r;
1092 }
1093
1094 break;
1095 }
1096
1097 case PA_SINK_UNLINKED:
1098 case PA_SINK_INIT:
1099 case PA_SINK_INVALID_STATE:
1100 ;
1101 }
1102
1103 break;
1104 }
1105
1106 return pa_sink_process_msg(o, code, data, offset, chunk);
1107 }
1108
1109 /* Called from main context */
1110 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1111 pa_sink_state_t old_state;
1112 struct userdata *u;
1113
1114 pa_sink_assert_ref(s);
1115 pa_assert_se(u = s->userdata);
1116
1117 old_state = pa_sink_get_state(u->sink);
1118
1119 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1120 reserve_done(u);
1121 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1122 if (reserve_init(u, u->device_name) < 0)
1123 return -PA_ERR_BUSY;
1124
1125 return 0;
1126 }
1127
1128 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1129 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1130
1131 pa_assert(u);
1132 pa_assert(u->mixer_handle);
1133
1134 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1135 return 0;
1136
1137 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1138 return 0;
1139
1140 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1141 pa_sink_get_volume(u->sink, TRUE);
1142 pa_sink_get_mute(u->sink, TRUE);
1143 }
1144
1145 return 0;
1146 }
1147
1148 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1149 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1150
1151 pa_assert(u);
1152 pa_assert(u->mixer_handle);
1153
1154 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1155 return 0;
1156
1157 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1158 return 0;
1159
1160 if (mask & SND_CTL_EVENT_MASK_VALUE)
1161 pa_sink_update_volume_and_mute(u->sink);
1162
1163 return 0;
1164 }
1165
1166 static void sink_get_volume_cb(pa_sink *s) {
1167 struct userdata *u = s->userdata;
1168 pa_cvolume r;
1169 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1170
1171 pa_assert(u);
1172 pa_assert(u->mixer_path);
1173 pa_assert(u->mixer_handle);
1174
1175 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1176 return;
1177
1178 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1179 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1180
1181 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1182
1183 if (u->mixer_path->has_dB) {
1184 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1185
1186 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1187 }
1188
1189 if (pa_cvolume_equal(&u->hardware_volume, &r))
1190 return;
1191
1192 s->real_volume = u->hardware_volume = r;
1193
1194 /* Hmm, so the hardware volume changed, let's reset our software volume */
1195 if (u->mixer_path->has_dB)
1196 pa_sink_set_soft_volume(s, NULL);
1197 }
1198
1199 static void sink_set_volume_cb(pa_sink *s) {
1200 struct userdata *u = s->userdata;
1201 pa_cvolume r;
1202 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1203 pa_bool_t write_to_hw = (s->flags & PA_SINK_SYNC_VOLUME) ? FALSE : TRUE;
1204
1205 pa_assert(u);
1206 pa_assert(u->mixer_path);
1207 pa_assert(u->mixer_handle);
1208
1209 /* Shift up by the base volume */
1210 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1211
1212 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, write_to_hw) < 0)
1213 return;
1214
1215 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1216 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1217
1218 u->hardware_volume = r;
1219
1220 if (u->mixer_path->has_dB) {
1221 pa_cvolume new_soft_volume;
1222 pa_bool_t accurate_enough;
1223 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1224
1225 /* Match exactly what the user requested by software */
1226 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1227
1228 /* If the adjustment to do in software is only minimal we
1229 * can skip it. That saves us CPU at the expense of a bit of
1230 * accuracy */
1231 accurate_enough =
1232 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1233 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1234
1235 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1236 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1237 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1238 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1239 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1240 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1241 pa_yes_no(accurate_enough));
1242 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1243
1244 if (!accurate_enough)
1245 s->soft_volume = new_soft_volume;
1246
1247 } else {
1248 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1249
1250 /* We can't match exactly what the user requested, hence let's
1251 * at least tell the user about it */
1252
1253 s->real_volume = r;
1254 }
1255 }
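/* Worked example (illustrative): if the user asks for -30.00dB but the
 * nearest hardware step is -29.25dB, the residual new_soft_volume is
 * -0.75dB. That is well outside VOLUME_ACCURACY (+/- 1% of
 * PA_VOLUME_NORM), so accurate_enough is false and the residual is
 * applied as soft_volume on top of the hardware volume. */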
1256
1257 static void sink_write_volume_cb(pa_sink *s) {
1258 struct userdata *u = s->userdata;
1259 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1260
1261 pa_assert(u);
1262 pa_assert(u->mixer_path);
1263 pa_assert(u->mixer_handle);
1264 pa_assert(s->flags & PA_SINK_SYNC_VOLUME);
1265
1266 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE) < 0)
1267 pa_log_error("Writing HW volume failed");
1268 else {
1269 pa_cvolume tmp_vol;
1270 pa_bool_t accurate_enough;
1271
1272 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1273 accurate_enough =
1274 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1275 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1276
1277 if (!accurate_enough) {
1278 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1279 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1280
1281 pa_log_debug("Written HW volume did not match with the request %s != %s",
1282 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->thread_info.current_hw_volume),
1283 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &hw_vol));
1284 pa_log_debug(" in dB: %s != %s",
1285 pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->thread_info.current_hw_volume),
1286 pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &hw_vol));
1287 }
1288 }
1289 }
1290
1291 static void sink_get_mute_cb(pa_sink *s) {
1292 struct userdata *u = s->userdata;
1293 pa_bool_t b;
1294
1295 pa_assert(u);
1296 pa_assert(u->mixer_path);
1297 pa_assert(u->mixer_handle);
1298
1299 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1300 return;
1301
1302 s->muted = b;
1303 }
1304
1305 static void sink_set_mute_cb(pa_sink *s) {
1306 struct userdata *u = s->userdata;
1307
1308 pa_assert(u);
1309 pa_assert(u->mixer_path);
1310 pa_assert(u->mixer_handle);
1311
1312 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1313 }
1314
1315 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1316 struct userdata *u = s->userdata;
1317 pa_alsa_port_data *data;
1318
1319 pa_assert(u);
1320 pa_assert(p);
1321 pa_assert(u->mixer_handle);
1322
1323 data = PA_DEVICE_PORT_DATA(p);
1324
1325 pa_assert_se(u->mixer_path = data->path);
1326 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1327
1328 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1329 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1330 s->n_volume_steps = PA_VOLUME_NORM+1;
1331
1332 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1333 } else {
1334 s->base_volume = PA_VOLUME_NORM;
1335 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1336 }
1337
1338 if (data->setting)
1339 pa_alsa_setting_select(data->setting, u->mixer_handle);
1340
1341 if (s->set_mute)
1342 s->set_mute(s);
1343 if (s->set_volume)
1344 s->set_volume(s);
1345
1346 return 0;
1347 }
1348
1349 static void sink_update_requested_latency_cb(pa_sink *s) {
1350 struct userdata *u = s->userdata;
1351 size_t before;
1352 pa_assert(u);
1353 pa_assert(u->use_tsched); /* only when timer scheduling is used
1354 * can we dynamically adjust the
1355 * latency */
1356
1357 if (!u->pcm_handle)
1358 return;
1359
1360 before = u->hwbuf_unused;
1361 update_sw_params(u);
1362
1363 /* Let's check whether we now use only a smaller part of the
1364 buffer than before. If so, we need to make sure that subsequent
1365 rewinds are relative to the new maximum fill level and not to the
1366 current fill level. Thus, let's do a full rewind once, to clear
1367 things up. */
1368
1369 if (u->hwbuf_unused > before) {
1370 pa_log_debug("Requesting rewind due to latency change.");
1371 pa_sink_request_rewind(s, (size_t) -1);
1372 }
1373 }
1374
1375 static int process_rewind(struct userdata *u) {
1376 snd_pcm_sframes_t unused;
1377 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1378 pa_assert(u);
1379
1380 /* Figure out how much we shall rewind and reset the counter */
1381 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1382
1383 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1384
1385 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1386 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1387 return -1;
1388 }
1389
1390 unused_nbytes = (size_t) unused * u->frame_size;
1391
1392 /* make sure the rewind doesn't go too far; overshooting can cause issues with DMAs */
1393 unused_nbytes += u->rewind_safeguard;
1394
1395 if (u->hwbuf_size > unused_nbytes)
1396 limit_nbytes = u->hwbuf_size - unused_nbytes;
1397 else
1398 limit_nbytes = 0;
1399
1400 if (rewind_nbytes > limit_nbytes)
1401 rewind_nbytes = limit_nbytes;
1402
1403 if (rewind_nbytes > 0) {
1404 snd_pcm_sframes_t in_frames, out_frames;
1405
1406 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1407
1408 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1409 pa_log_debug("before: %lu", (unsigned long) in_frames);
1410 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1411 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1412 if (try_recover(u, "process_rewind", out_frames) < 0)
1413 return -1;
1414 out_frames = 0;
1415 }
1416
1417 pa_log_debug("after: %lu", (unsigned long) out_frames);
1418
1419 rewind_nbytes = (size_t) out_frames * u->frame_size;
1420
1421 if (rewind_nbytes <= 0)
1422 pa_log_info("Tried rewind, but was apparently not possible.");
1423 else {
1424 u->write_count -= rewind_nbytes;
1425 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1426 pa_sink_process_rewind(u->sink, rewind_nbytes);
1427
1428 u->after_rewind = TRUE;
1429 return 0;
1430 }
1431 } else
1432 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1433
1434 pa_sink_process_rewind(u->sink, 0);
1435 return 0;
1436 }
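/* Worked example (illustrative): with a 2s hw buffer (352800 bytes at
 * 44.1kHz S16 stereo), snd_pcm_avail() reporting 0.5s of free space
 * (88200 bytes) and a 1764 byte safeguard, at most
 * 352800 - 88200 - 1764 = 262836 bytes (the filled part minus the
 * safeguard) can be rewound; larger requests are clamped to that. */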
1437
1438 static void thread_func(void *userdata) {
1439 struct userdata *u = userdata;
1440 unsigned short revents = 0;
1441
1442 pa_assert(u);
1443
1444 pa_log_debug("Thread starting up");
1445
1446 if (u->core->realtime_scheduling)
1447 pa_make_realtime(u->core->realtime_priority);
1448
1449 pa_thread_mq_install(&u->thread_mq);
1450
1451 for (;;) {
1452 int ret;
1453 pa_usec_t rtpoll_sleep = 0;
1454
1455 #ifdef DEBUG_TIMING
1456 pa_log_debug("Loop");
1457 #endif
1458
1459 /* Render some data and write it to the dsp */
1460 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1461 int work_done;
1462 pa_usec_t sleep_usec = 0;
1463 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1464
1465 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1466 if (process_rewind(u) < 0)
1467 goto fail;
1468
1469 if (u->use_mmap)
1470 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1471 else
1472 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1473
1474 if (work_done < 0)
1475 goto fail;
1476
1477 /* pa_log_debug("work_done = %i", work_done); */
1478
1479 if (work_done) {
1480
1481 if (u->first) {
1482 pa_log_info("Starting playback.");
1483 snd_pcm_start(u->pcm_handle);
1484
1485 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1486
1487 u->first = FALSE;
1488 }
1489
1490 update_smoother(u);
1491 }
1492
1493 if (u->use_tsched) {
1494 pa_usec_t cusec;
1495
1496 if (u->since_start <= u->hwbuf_size) {
1497
1498 /* USB devices on ALSA seem to hit a buffer
1499 * underrun during the first iterations much
1500 * quicker than we calculate here, probably due to
1501 * the transport latency. To compensate for that
1502 * we artificially decrease the sleep time until
1503 * we have filled the buffer at least once
1504 * completely. */
1505
1506 if (pa_log_ratelimit())
1507 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1508 sleep_usec /= 2;
1509 }
1510
1511 /* OK, the playback buffer is now full, let's
1512 * calculate when to wake up next */
1513 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1514
1515 /* Convert from the sound card time domain to the
1516 * system time domain */
1517 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1518
1519 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1520
1521 /* We don't trust the conversion, so we wake up at whichever comes first */
1522 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1523 }
1524
1525 u->after_rewind = FALSE;
1526
1527 }
1528
1529 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1530 pa_usec_t volume_sleep;
1531 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1532 if (volume_sleep > 0)
1533 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1534 }
1535
1536 if (rtpoll_sleep > 0)
1537 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1538 else
1539 pa_rtpoll_set_timer_disabled(u->rtpoll);
1540
1541 /* Hmm, nothing to do. Let's sleep */
1542 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1543 goto fail;
1544
1545 if (u->sink->flags & PA_SINK_SYNC_VOLUME)
1546 pa_sink_volume_change_apply(u->sink, NULL);
1547
1548 if (ret == 0)
1549 goto finish;
1550
1551 /* Tell ALSA about this and process its response */
1552 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1553 struct pollfd *pollfd;
1554 int err;
1555 unsigned n;
1556
1557 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1558
1559 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1560 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1561 goto fail;
1562 }
1563
1564 if (revents & ~POLLOUT) {
1565 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1566 goto fail;
1567
1568 u->first = TRUE;
1569 u->since_start = 0;
1570 } else if (revents && u->use_tsched && pa_log_ratelimit())
1571 pa_log_debug("Wakeup from ALSA!");
1572
1573 } else
1574 revents = 0;
1575 }
1576
1577 fail:
1578 /* If this was not a regular exit from the loop we have to continue
1579 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1580 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1581 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1582
1583 finish:
1584 pa_log_debug("Thread shutting down");
1585 }
1586
1587 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1588 const char *n;
1589 char *t;
1590
1591 pa_assert(data);
1592 pa_assert(ma);
1593 pa_assert(device_name);
1594
1595 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1596 pa_sink_new_data_set_name(data, n);
1597 data->namereg_fail = TRUE;
1598 return;
1599 }
1600
1601 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1602 data->namereg_fail = TRUE;
1603 else {
1604 n = device_id ? device_id : device_name;
1605 data->namereg_fail = FALSE;
1606 }
1607
1608 if (mapping)
1609 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1610 else
1611 t = pa_sprintf_malloc("alsa_output.%s", n);
1612
1613 pa_sink_new_data_set_name(data, t);
1614 pa_xfree(t);
1615 }
1616
1617 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1618
1619 if (!mapping && !element)
1620 return;
1621
1622 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1623 pa_log_info("Failed to find a working mixer device.");
1624 return;
1625 }
1626
1627 if (element) {
1628
1629 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1630 goto fail;
1631
1632 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1633 goto fail;
1634
1635 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1636 pa_alsa_path_dump(u->mixer_path);
1637 } else {
1638
1639 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1640 goto fail;
1641
1642 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1643
1644 pa_log_debug("Probed mixer paths:");
1645 pa_alsa_path_set_dump(u->mixer_path_set);
1646 }
1647
1648 return;
1649
1650 fail:
1651
1652 if (u->mixer_path_set) {
1653 pa_alsa_path_set_free(u->mixer_path_set);
1654 u->mixer_path_set = NULL;
1655 } else if (u->mixer_path) {
1656 pa_alsa_path_free(u->mixer_path);
1657 u->mixer_path = NULL;
1658 }
1659
1660 if (u->mixer_handle) {
1661 snd_mixer_close(u->mixer_handle);
1662 u->mixer_handle = NULL;
1663 }
1664 }
1665
1666 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB, pa_bool_t sync_volume) {
1667 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1668
1669 pa_assert(u);
1670
1671 if (!u->mixer_handle)
1672 return 0;
1673
1674 if (u->sink->active_port) {
1675 pa_alsa_port_data *data;
1676
1677 /* We have a list of supported paths, so let's activate the
1678 * one that has been chosen as active */
1679
1680 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1681 u->mixer_path = data->path;
1682
1683 pa_alsa_path_select(data->path, u->mixer_handle);
1684
1685 if (data->setting)
1686 pa_alsa_setting_select(data->setting, u->mixer_handle);
1687
1688 } else {
1689
1690 if (!u->mixer_path && u->mixer_path_set)
1691 u->mixer_path = u->mixer_path_set->paths;
1692
1693 if (u->mixer_path) {
1694 /* Hmm, we have only a single path, so let's activate it */
1695
1696 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1697
1698 if (u->mixer_path->settings)
1699 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1700 } else
1701 return 0;
1702 }
1703
1704 /* FIXME: need automatic detection rather than hard-coded path */
1705 if (!strcmp(u->mixer_path->name, "iec958-passthrough-output")) {
1706 u->sink->flags |= PA_SINK_PASSTHROUGH;
1707 } else {
1708 u->sink->flags &= ~PA_SINK_PASSTHROUGH;
1709 }
1710
1711 if (!u->mixer_path->has_volume)
1712 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1713 else {
1714
1715 if (u->mixer_path->has_dB) {
1716 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1717
1718 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1719 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1720
1721 if (u->mixer_path->max_dB > 0.0)
1722 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1723 else
1724 pa_log_info("No particular base volume set, fixing to 0 dB");
1725
1726 } else {
1727 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1728 u->sink->base_volume = PA_VOLUME_NORM;
1729 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1730 }
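/* Worked example (illustrative): if the mixer path spans -50dB..+12dB,
 * then base_volume = pa_sw_volume_from_dB(-12dB): PA_VOLUME_NORM maps to
 * the hardware maximum (+12dB) and the advertised base volume sits 12dB
 * below it, at the hardware's unamplified 0dB point. */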
1731
1732 u->sink->get_volume = sink_get_volume_cb;
1733 u->sink->set_volume = sink_set_volume_cb;
1734 u->sink->write_volume = sink_write_volume_cb;
1735
1736 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL;
1737 if (u->mixer_path->has_dB) {
1738 u->sink->flags |= PA_SINK_DECIBEL_VOLUME;
1739 if (sync_volume) {
1740 u->sink->flags |= PA_SINK_SYNC_VOLUME;
1741 pa_log_info("Successfully enabled synchronous volume.");
1742 }
1743 }
1744
1745 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1746 }
1747
1748 if (!u->mixer_path->has_mute) {
1749 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1750 } else {
1751 u->sink->get_mute = sink_get_mute_cb;
1752 u->sink->set_mute = sink_set_mute_cb;
1753 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1754 pa_log_info("Using hardware mute control.");
1755 }
1756
1757 if (sync_volume) {
1758 u->mixer_pd = pa_alsa_mixer_pdata_new();
1759 mixer_callback = io_mixer_callback;
1760
1761 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1762 pa_log("Failed to initialize file descriptor monitoring");
1763 return -1;
1764 }
1765
1766 } else {
1767 u->mixer_fdl = pa_alsa_fdlist_new();
1768 mixer_callback = ctl_mixer_callback;
1769
1770 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1771 pa_log("Failed to initialize file descriptor monitoring");
1772 return -1;
1773 }
1774 }
1775
1776 if (u->mixer_path_set)
1777 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1778 else
1779 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1780
1781 return 0;
1782 }
1783
1784 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1785
1786 struct userdata *u = NULL;
1787 const char *dev_id = NULL;
1788 pa_sample_spec ss, requested_ss;
1789 pa_channel_map map;
1790 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1791 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1792 size_t frame_size;
1793 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
1794 pa_sink_new_data data;
1795 pa_alsa_profile_set *profile_set = NULL;
1796
1797 pa_assert(m);
1798 pa_assert(ma);
1799
1800 ss = m->core->default_sample_spec;
1801 map = m->core->default_channel_map;
1802 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1803 pa_log("Failed to parse sample specification and channel map");
1804 goto fail;
1805 }
1806
1807 requested_ss = ss;
1808 frame_size = pa_frame_size(&ss);
1809
1810 nfrags = m->core->default_n_fragments;
1811 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1812 if (frag_size <= 0)
1813 frag_size = (uint32_t) frame_size;
1814 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1815 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1816
1817 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1818 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1819 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1820 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1821 pa_log("Failed to parse buffer metrics");
1822 goto fail;
1823 }
1824
    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    sync_volume = m->core->sync_volume;
    if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
        pa_log("Failed to parse sync_volume argument.");
        goto fail;
    }

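    /* All of the above arrive as module arguments, typically via
     * module-alsa-sink. A hypothetical invocation:
     *
     *   load-module module-alsa-sink device_id=0 fragments=4 \
     *       fragment_size=4096 tsched=yes sync_volume=yes
     */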
1857
1858 use_tsched = pa_alsa_may_tsched(use_tsched);
1859
1860 u = pa_xnew0(struct userdata, 1);
1861 u->core = m->core;
1862 u->module = m;
1863 u->use_mmap = use_mmap;
1864 u->use_tsched = use_tsched;
1865 u->first = TRUE;
1866 u->rewind_safeguard = rewind_safeguard;
1867 u->rtpoll = pa_rtpoll_new();
1868 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1869
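    /* The smoother converts noisy PCM timing reports into a steady
     * estimate of the playback time, which is used for latency
     * interpolation. It is created in the paused state and resumed once
     * the device actually starts running. */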
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

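    /* b and d are in/out flags: they request mmap and timer-based
     * scheduling support and are cleared by the open functions if the
     * device cannot provide them. The device itself is opened in one of
     * three ways, in order of precedence: an explicit mapping, a
     * device_id with automatic profile probing, or a plain device
     * string. */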
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse boolean argument namereg_fail.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse sync_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse sync_volume_extra_delay parameter");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

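    /* Cache the final buffer geometry and initialize the cached hardware
     * volume to muted; the real mixer state is read in further below. */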
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

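    /* For timer-based scheduling, translate the watermark and its
     * adjustment steps and thresholds from the originally requested
     * sample spec into the spec the device actually accepted. */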
    if (u->use_tsched) {
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB, sync_volume) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

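    /* Start the IO thread; from here on it runs the actual playback loop
     * and owns the PCM handle. */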
    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

static void userdata_free(struct userdata *u) {
    pa_assert(u);

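    /* Tear down in roughly reverse order of construction: unlink the
     * sink first so no new data is routed to it, shut down the IO
     * thread, then free the remaining resources. */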
    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}