1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to recheck whether things are good again */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
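/* Worked example (editor's illustration, assuming 44.1 kHz S16LE stereo,
 * i.e. 176400 bytes/s): the 2s tsched buffer is 352800 bytes and the 20ms
 * watermark 3528 bytes; each underrun then grows the watermark by the
 * 10ms step (1764 bytes), and it shrinks again in 5ms steps only after
 * the fill level has stayed above 100ms for a full 20s window. */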
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
80
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
86
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the differences exceed this level */
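/* Editor's note: with PA_VOLUME_NORM == 0x10000 this is 655 volume units,
 * i.e. residual software attenuation within about 1% of unity is skipped. */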
88
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
91
92 struct userdata {
93 pa_core *core;
94 pa_module *module;
95 pa_sink *sink;
96
97 pa_thread *thread;
98 pa_thread_mq thread_mq;
99 pa_rtpoll *rtpoll;
100
101 snd_pcm_t *pcm_handle;
102
103 pa_alsa_fdlist *mixer_fdl;
104 pa_alsa_mixer_pdata *mixer_pd;
105 snd_mixer_t *mixer_handle;
106 pa_alsa_path_set *mixer_path_set;
107 pa_alsa_path *mixer_path;
108
109 pa_cvolume hardware_volume;
110
111 uint32_t old_rate;
112
113 size_t
114 frame_size,
115 fragment_size,
116 hwbuf_size,
117 tsched_watermark,
118 hwbuf_unused,
119 min_sleep,
120 min_wakeup,
121 watermark_inc_step,
122 watermark_dec_step,
123 watermark_inc_threshold,
124 watermark_dec_threshold,
125 rewind_safeguard;
126
127 pa_usec_t watermark_dec_not_before;
128
129 pa_memchunk memchunk;
130
131 char *device_name; /* name of the PCM device */
132 char *control_device; /* name of the control device */
133
134 pa_bool_t use_mmap:1, use_tsched:1;
135
136 pa_bool_t first, after_rewind;
137
138 pa_rtpoll_item *alsa_rtpoll_item;
139
140 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
141
142 pa_smoother *smoother;
143 uint64_t write_count;
144 uint64_t since_start;
145 pa_usec_t smoother_interval;
146 pa_usec_t last_smoother_update;
147
148 pa_reserve_wrapper *reserve;
149 pa_hook_slot *reserve_slot;
150 pa_reserve_monitor_wrapper *monitor;
151 pa_hook_slot *monitor_slot;
152 };
153
154 static void userdata_free(struct userdata *u);
155
156 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
157 pa_assert(r);
158 pa_assert(u);
159
160 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
161 return PA_HOOK_CANCEL;
162
163 return PA_HOOK_OK;
164 }
165
166 static void reserve_done(struct userdata *u) {
167 pa_assert(u);
168
169 if (u->reserve_slot) {
170 pa_hook_slot_free(u->reserve_slot);
171 u->reserve_slot = NULL;
172 }
173
174 if (u->reserve) {
175 pa_reserve_wrapper_unref(u->reserve);
176 u->reserve = NULL;
177 }
178 }
179
180 static void reserve_update(struct userdata *u) {
181 const char *description;
182 pa_assert(u);
183
184 if (!u->sink || !u->reserve)
185 return;
186
187 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
188 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
189 }
190
191 static int reserve_init(struct userdata *u, const char *dname) {
192 char *rname;
193
194 pa_assert(u);
195 pa_assert(dname);
196
197 if (u->reserve)
198 return 0;
199
200 if (pa_in_system_mode())
201 return 0;
202
203 if (!(rname = pa_alsa_get_reserve_name(dname)))
204 return 0;
205
206 /* We are resuming, try to lock the device */
207 u->reserve = pa_reserve_wrapper_get(u->core, rname);
208 pa_xfree(rname);
209
210 if (!(u->reserve))
211 return -1;
212
213 reserve_update(u);
214
215 pa_assert(!u->reserve_slot);
216 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
217
218 return 0;
219 }
220
221 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
222 pa_bool_t b;
223
224 pa_assert(w);
225 pa_assert(u);
226
227 b = PA_PTR_TO_UINT(busy) && !u->reserve;
228
229 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
230 return PA_HOOK_OK;
231 }
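/* I.e. (editor's note) we only suspend for somebody else's lock on the
 * device if we don't hold a reservation of our own. */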
232
233 static void monitor_done(struct userdata *u) {
234 pa_assert(u);
235
236 if (u->monitor_slot) {
237 pa_hook_slot_free(u->monitor_slot);
238 u->monitor_slot = NULL;
239 }
240
241 if (u->monitor) {
242 pa_reserve_monitor_wrapper_unref(u->monitor);
243 u->monitor = NULL;
244 }
245 }
246
247 static int reserve_monitor_init(struct userdata *u, const char *dname) {
248 char *rname;
249
250 pa_assert(u);
251 pa_assert(dname);
252
253 if (pa_in_system_mode())
254 return 0;
255
256 if (!(rname = pa_alsa_get_reserve_name(dname)))
257 return 0;
258
259 /* Watch whether somebody else holds a lock on the device */
260 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
261 pa_xfree(rname);
262
263 if (!(u->monitor))
264 return -1;
265
266 pa_assert(!u->monitor_slot);
267 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
268
269 return 0;
270 }
271
272 static void fix_min_sleep_wakeup(struct userdata *u) {
273 size_t max_use, max_use_2;
274
275 pa_assert(u);
276 pa_assert(u->use_tsched);
277
278 max_use = u->hwbuf_size - u->hwbuf_unused;
279 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
280
281 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
282 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
283
284 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
285 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
286 }
287
288 static void fix_tsched_watermark(struct userdata *u) {
289 size_t max_use;
290 pa_assert(u);
291 pa_assert(u->use_tsched);
292
293 max_use = u->hwbuf_size - u->hwbuf_unused;
294
295 if (u->tsched_watermark > max_use - u->min_sleep)
296 u->tsched_watermark = max_use - u->min_sleep;
297
298 if (u->tsched_watermark < u->min_wakeup)
299 u->tsched_watermark = u->min_wakeup;
300 }
301
302 static void increase_watermark(struct userdata *u) {
303 size_t old_watermark;
304 pa_usec_t old_min_latency, new_min_latency;
305
306 pa_assert(u);
307 pa_assert(u->use_tsched);
308
309 /* First, just try to increase the watermark */
310 old_watermark = u->tsched_watermark;
311 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
312 fix_tsched_watermark(u);
313
314 if (old_watermark != u->tsched_watermark) {
315 pa_log_info("Increasing wakeup watermark to %0.2f ms",
316 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
317 return;
318 }
319
320 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
321 old_min_latency = u->sink->thread_info.min_latency;
322 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
323 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
324
325 if (old_min_latency != new_min_latency) {
326 pa_log_info("Increasing minimal latency to %0.2f ms",
327 (double) new_min_latency / PA_USEC_PER_MSEC);
328
329 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
330 }
331
332 /* When we reach this we're officially fucked! */
333 }
334
335 static void decrease_watermark(struct userdata *u) {
336 size_t old_watermark;
337 pa_usec_t now;
338
339 pa_assert(u);
340 pa_assert(u->use_tsched);
341
342 now = pa_rtclock_now();
343
344 if (u->watermark_dec_not_before <= 0)
345 goto restart;
346
347 if (u->watermark_dec_not_before > now)
348 return;
349
350 old_watermark = u->tsched_watermark;
351
352 if (u->tsched_watermark < u->watermark_dec_step)
353 u->tsched_watermark = u->tsched_watermark / 2;
354 else
355 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
356
357 fix_tsched_watermark(u);
358
359 if (old_watermark != u->tsched_watermark)
360 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
361 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
362
363 /* We don't change the latency range */
364
365 restart:
366 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
367 }
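/* Editor's summary of the adaptation policy above: on pressure the
 * watermark grows to MIN(2*wm, wm + inc_step), i.e. by at most a
 * doubling; when things look good it shrinks to MAX(wm/2, wm - dec_step),
 * so it rises quickly and falls cautiously. */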
368
369 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
370 pa_usec_t usec, wm;
371
372 pa_assert(sleep_usec);
373 pa_assert(process_usec);
374
375 pa_assert(u);
376 pa_assert(u->use_tsched);
377
378 usec = pa_sink_get_requested_latency_within_thread(u->sink);
379
380 if (usec == (pa_usec_t) -1)
381 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
382
383 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
384
385 if (wm > usec)
386 wm = usec/2;
387
388 *sleep_usec = usec - wm;
389 *process_usec = wm;
390
391 #ifdef DEBUG_TIMING
392 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
393 (unsigned long) (usec / PA_USEC_PER_MSEC),
394 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
395 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
396 #endif
397 }
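/* Illustration (editor's, not from the original source): with a requested
 * latency of 2s and a 20ms watermark this yields sleep_usec = 1.98s and
 * process_usec = 20ms; if the watermark ever exceeds the requested
 * latency it is clipped to half of it first. */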
398
399 static int try_recover(struct userdata *u, const char *call, int err) {
400 pa_assert(u);
401 pa_assert(call);
402 pa_assert(err < 0);
403
404 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
405
406 pa_assert(err != -EAGAIN);
407
408 if (err == -EPIPE)
409 pa_log_debug("%s: Buffer underrun!", call);
410
411 if (err == -ESTRPIPE)
412 pa_log_debug("%s: System suspended!", call);
413
414 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
415 pa_log("%s: %s", call, pa_alsa_strerror(err));
416 return -1;
417 }
418
419 u->first = TRUE;
420 u->since_start = 0;
421 return 0;
422 }
423
424 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
425 size_t left_to_play;
426 pa_bool_t underrun = FALSE;
427
428 /* We use <= instead of < for this check here because an underrun
429 * only happens after the last sample was processed, not when
430 * it is merely removed from the buffer. This is particularly important
431 * when block transfer is used. */
432
433 if (n_bytes <= u->hwbuf_size)
434 left_to_play = u->hwbuf_size - n_bytes;
435 else {
436
437 /* We got a dropout. What a mess! */
438 left_to_play = 0;
439 underrun = TRUE;
440
441 #ifdef DEBUG_TIMING
442 PA_DEBUG_TRAP;
443 #endif
444
445 if (!u->first && !u->after_rewind)
446 if (pa_log_ratelimit(PA_LOG_INFO))
447 pa_log_info("Underrun!");
448 }
449
450 #ifdef DEBUG_TIMING
451 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
452 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
453 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
454 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
455 #endif
456
457 if (u->use_tsched) {
458 pa_bool_t reset_not_before = TRUE;
459
460 if (!u->first && !u->after_rewind) {
461 if (underrun || left_to_play < u->watermark_inc_threshold)
462 increase_watermark(u);
463 else if (left_to_play > u->watermark_dec_threshold) {
464 reset_not_before = FALSE;
465
466 /* We decrease the watermark only if we have actually
467 * been woken up by a timeout. If something else woke
468 * us up it's too easy to fulfill the deadlines... */
469
470 if (on_timeout)
471 decrease_watermark(u);
472 }
473 }
474
475 if (reset_not_before)
476 u->watermark_dec_not_before = 0;
477 }
478
479 return left_to_play;
480 }
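/* Editor's note: n_bytes is what snd_pcm_avail() reported as writable, so
 * hwbuf_size - n_bytes is what is still queued for playback; n_bytes
 * exceeding hwbuf_size is only possible after an underrun. */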
481
482 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
483 pa_bool_t work_done = FALSE;
484 pa_usec_t max_sleep_usec = 0, process_usec = 0;
485 size_t left_to_play;
486 unsigned j = 0;
487
488 pa_assert(u);
489 pa_sink_assert_ref(u->sink);
490
491 if (u->use_tsched)
492 hw_sleep_time(u, &max_sleep_usec, &process_usec);
493
494 for (;;) {
495 snd_pcm_sframes_t n;
496 size_t n_bytes;
497 int r;
498 pa_bool_t after_avail = TRUE;
499
500 /* First we determine how many samples are missing to fill the
501 * buffer up to 100% */
502
503 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
504
505 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
506 continue;
507
508 return r;
509 }
510
511 n_bytes = (size_t) n * u->frame_size;
512
513 #ifdef DEBUG_TIMING
514 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
515 #endif
516
517 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
518 on_timeout = FALSE;
519
520 if (u->use_tsched)
521
522 /* We won't fill up the playback buffer before at least
523 * half the sleep time is over because otherwise we might
524 * ask for more data from the clients than they expect. We
525 * need to guarantee that clients only have to keep around
526 * a single hw buffer length. */
527
528 if (!polled &&
529 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
530 #ifdef DEBUG_TIMING
531 pa_log_debug("Not filling up, because too early.");
532 #endif
533 break;
534 }
535
536 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
537
538 if (polled)
539 PA_ONCE_BEGIN {
540 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
541 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
542 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
543 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
544 pa_strnull(dn));
545 pa_xfree(dn);
546 } PA_ONCE_END;
547
548 #ifdef DEBUG_TIMING
549 pa_log_debug("Not filling up, because not necessary.");
550 #endif
551 break;
552 }
553
554
555 if (++j > 10) {
556 #ifdef DEBUG_TIMING
557 pa_log_debug("Not filling up, because already too many iterations.");
558 #endif
559
560 break;
561 }
562
563 n_bytes -= u->hwbuf_unused;
564 polled = FALSE;
565
566 #ifdef DEBUG_TIMING
567 pa_log_debug("Filling up");
568 #endif
569
570 for (;;) {
571 pa_memchunk chunk;
572 void *p;
573 int err;
574 const snd_pcm_channel_area_t *areas;
575 snd_pcm_uframes_t offset, frames;
576 snd_pcm_sframes_t sframes;
577
578 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
579 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
580
581 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
582
583 if (!after_avail && err == -EAGAIN)
584 break;
585
586 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
587 continue;
588
589 return r;
590 }
591
592 /* Make sure that if these memblocks need to be copied they will fit into one slot */
593 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
594 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
595
596 if (!after_avail && frames == 0)
597 break;
598
599 pa_assert(frames > 0);
600 after_avail = FALSE;
601
602 /* Check these are multiples of 8 bit */
603 pa_assert((areas[0].first & 7) == 0);
604 pa_assert((areas[0].step & 7) == 0);
605
606 /* We assume a single interleaved memory buffer */
607 pa_assert((areas[0].first >> 3) == 0);
608 pa_assert((areas[0].step >> 3) == u->frame_size);
609
610 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
611
612 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
613 chunk.length = pa_memblock_get_length(chunk.memblock);
614 chunk.index = 0;
615
616 pa_sink_render_into_full(u->sink, &chunk);
617 pa_memblock_unref_fixed(chunk.memblock);
618
619 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
620
621 if (!after_avail && (int) sframes == -EAGAIN)
622 break;
623
624 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
625 continue;
626
627 return r;
628 }
629
630 work_done = TRUE;
631
632 u->write_count += frames * u->frame_size;
633 u->since_start += frames * u->frame_size;
634
635 #ifdef DEBUG_TIMING
636 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
637 #endif
638
639 if ((size_t) frames * u->frame_size >= n_bytes)
640 break;
641
642 n_bytes -= (size_t) frames * u->frame_size;
643 }
644 }
645
646 if (u->use_tsched) {
647 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
648
649 if (*sleep_usec > process_usec)
650 *sleep_usec -= process_usec;
651 else
652 *sleep_usec = 0;
653 } else
654 *sleep_usec = 0;
655
656 return work_done ? 1 : 0;
657 }
658
659 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
660 pa_bool_t work_done = FALSE;
661 pa_usec_t max_sleep_usec = 0, process_usec = 0;
662 size_t left_to_play;
663 unsigned j = 0;
664
665 pa_assert(u);
666 pa_sink_assert_ref(u->sink);
667
668 if (u->use_tsched)
669 hw_sleep_time(u, &max_sleep_usec, &process_usec);
670
671 for (;;) {
672 snd_pcm_sframes_t n;
673 size_t n_bytes;
674 int r;
675 pa_bool_t after_avail = TRUE;
676
677 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
678
679 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
680 continue;
681
682 return r;
683 }
684
685 n_bytes = (size_t) n * u->frame_size;
686 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
687 on_timeout = FALSE;
688
689 if (u->use_tsched)
690
691 /* We won't fill up the playback buffer before at least
692 * half the sleep time is over because otherwise we might
693 * ask for more data from the clients than they expect. We
694 * need to guarantee that clients only have to keep around
695 * a single hw buffer length. */
696
697 if (!polled &&
698 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
699 break;
700
701 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
702
703 if (polled)
704 PA_ONCE_BEGIN {
705 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
706 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
707 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
708 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
709 pa_strnull(dn));
710 pa_xfree(dn);
711 } PA_ONCE_END;
712
713 break;
714 }
715
716 if (++j > 10) {
717 #ifdef DEBUG_TIMING
718 pa_log_debug("Not filling up, because already too many iterations.");
719 #endif
720
721 break;
722 }
723
724 n_bytes -= u->hwbuf_unused;
725 polled = FALSE;
726
727 for (;;) {
728 snd_pcm_sframes_t frames;
729 void *p;
730
731 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
732
733 if (u->memchunk.length <= 0)
734 pa_sink_render(u->sink, n_bytes, &u->memchunk);
735
736 pa_assert(u->memchunk.length > 0);
737
738 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
739
740 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
741 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
742
743 p = pa_memblock_acquire(u->memchunk.memblock);
744 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
745 pa_memblock_release(u->memchunk.memblock);
746
747 if (PA_UNLIKELY(frames < 0)) {
748
749 if (!after_avail && (int) frames == -EAGAIN)
750 break;
751
752 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
753 continue;
754
755 return r;
756 }
757
758 if (!after_avail && frames == 0)
759 break;
760
761 pa_assert(frames > 0);
762 after_avail = FALSE;
763
764 u->memchunk.index += (size_t) frames * u->frame_size;
765 u->memchunk.length -= (size_t) frames * u->frame_size;
766
767 if (u->memchunk.length <= 0) {
768 pa_memblock_unref(u->memchunk.memblock);
769 pa_memchunk_reset(&u->memchunk);
770 }
771
772 work_done = TRUE;
773
774 u->write_count += frames * u->frame_size;
775 u->since_start += frames * u->frame_size;
776
777 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
778
779 if ((size_t) frames * u->frame_size >= n_bytes)
780 break;
781
782 n_bytes -= (size_t) frames * u->frame_size;
783 }
784 }
785
786 if (u->use_tsched) {
787 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
788
789 if (*sleep_usec > process_usec)
790 *sleep_usec -= process_usec;
791 else
792 *sleep_usec = 0;
793 } else
794 *sleep_usec = 0;
795
796 return work_done ? 1 : 0;
797 }
798
799 static void update_smoother(struct userdata *u) {
800 snd_pcm_sframes_t delay = 0;
801 int64_t position;
802 int err;
803 pa_usec_t now1 = 0, now2;
804 snd_pcm_status_t *status;
805
806 snd_pcm_status_alloca(&status);
807
808 pa_assert(u);
809 pa_assert(u->pcm_handle);
810
811 /* Let's update the time smoother */
812
813 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
814 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
815 return;
816 }
817
818 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
819 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
820 else {
821 snd_htimestamp_t htstamp = { 0, 0 };
822 snd_pcm_status_get_htstamp(status, &htstamp);
823 now1 = pa_timespec_load(&htstamp);
824 }
825
826 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
827 if (now1 <= 0)
828 now1 = pa_rtclock_now();
829
830 /* check if the time since the last update is bigger than the interval */
831 if (u->last_smoother_update > 0)
832 if (u->last_smoother_update + u->smoother_interval > now1)
833 return;
834
835 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
836
837 if (PA_UNLIKELY(position < 0))
838 position = 0;
839
840 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
841
842 pa_smoother_put(u->smoother, now1, now2);
843
844 u->last_smoother_update = now1;
845 /* exponentially increase the update interval up to the MAX limit */
846 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
847 }
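/* Editor's illustration: starting at SMOOTHER_MIN_INTERVAL (2ms) the
 * update interval doubles on every accepted update -- 2, 4, 8, ... ms --
 * and saturates at SMOOTHER_MAX_INTERVAL (200ms) after seven doublings. */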
848
849 static pa_usec_t sink_get_latency(struct userdata *u) {
850 pa_usec_t r;
851 int64_t delay;
852 pa_usec_t now1, now2;
853
854 pa_assert(u);
855
856 now1 = pa_rtclock_now();
857 now2 = pa_smoother_get(u->smoother, now1);
858
859 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
860
861 r = delay >= 0 ? (pa_usec_t) delay : 0;
862
863 if (u->memchunk.memblock)
864 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
865
866 return r;
867 }
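/* Editor's note: the reported latency is thus the time equivalent of all
 * bytes handed to ALSA minus the smoothed playback position, clamped to
 * zero, plus whatever still sits in the partially written memchunk. */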
868
869 static int build_pollfd(struct userdata *u) {
870 pa_assert(u);
871 pa_assert(u->pcm_handle);
872
873 if (u->alsa_rtpoll_item)
874 pa_rtpoll_item_free(u->alsa_rtpoll_item);
875
876 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
877 return -1;
878
879 return 0;
880 }
881
882 /* Called from IO context */
883 static int suspend(struct userdata *u) {
884 pa_assert(u);
885 pa_assert(u->pcm_handle);
886
887 pa_smoother_pause(u->smoother, pa_rtclock_now());
888
889 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
890 * take awfully long with our long buffer sizes today. */
891 snd_pcm_close(u->pcm_handle);
892 u->pcm_handle = NULL;
893
894 if (u->alsa_rtpoll_item) {
895 pa_rtpoll_item_free(u->alsa_rtpoll_item);
896 u->alsa_rtpoll_item = NULL;
897 }
898
899 /* We reset max_rewind/max_request here to make sure that while we
900 * are suspended the old max_request/max_rewind values set before
901 * the suspend cannot influence the per-stream buffer of newly
902 * created streams, without their requirements having any
903 * influence on them. */
904 pa_sink_set_max_rewind_within_thread(u->sink, 0);
905 pa_sink_set_max_request_within_thread(u->sink, 0);
906
907 pa_log_info("Device suspended...");
908
909 return 0;
910 }
911
912 /* Called from IO context */
913 static int update_sw_params(struct userdata *u) {
914 snd_pcm_uframes_t avail_min;
915 int err;
916
917 pa_assert(u);
918
919 /* Use the full buffer if no one asked us for anything specific */
920 u->hwbuf_unused = 0;
921
922 if (u->use_tsched) {
923 pa_usec_t latency;
924
925 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
926 size_t b;
927
928 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
929
930 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
931
932 /* We need at least one sample in our buffer */
933
934 if (PA_UNLIKELY(b < u->frame_size))
935 b = u->frame_size;
936
937 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
938 }
939
940 fix_min_sleep_wakeup(u);
941 fix_tsched_watermark(u);
942 }
943
944 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
945
946 /* We need at least one frame in the used part of the buffer */
947 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
948
949 if (u->use_tsched) {
950 pa_usec_t sleep_usec, process_usec;
951
952 hw_sleep_time(u, &sleep_usec, &process_usec);
953 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
954 }
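/* Editor's illustration of the arithmetic above (assuming 44.1 kHz S16LE
 * stereo and a 1.98s sleep time): 349272 bytes / 4 bytes per frame adds
 * 87318 frames on top of the one-frame minimum computed earlier. */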
955
956 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
957
958 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
959 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
960 return err;
961 }
962
963 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
964 if (pa_alsa_pcm_is_hw(u->pcm_handle))
965 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
966 else {
967 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
968 pa_sink_set_max_rewind_within_thread(u->sink, 0);
969 }
970
971 return 0;
972 }
973
974 /* Called from IO context */
975 static int unsuspend(struct userdata *u) {
976 pa_sample_spec ss;
977 int err;
978 pa_bool_t b, d;
979 snd_pcm_uframes_t period_size, buffer_size;
980
981 pa_assert(u);
982 pa_assert(!u->pcm_handle);
983
984 pa_log_info("Trying resume...");
985
986 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
987 SND_PCM_NONBLOCK|
988 SND_PCM_NO_AUTO_RESAMPLE|
989 SND_PCM_NO_AUTO_CHANNELS|
990 SND_PCM_NO_AUTO_FORMAT)) < 0) {
991 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
992 goto fail;
993 }
994
995 ss = u->sink->sample_spec;
996 period_size = u->fragment_size / u->frame_size;
997 buffer_size = u->hwbuf_size / u->frame_size;
998 b = u->use_mmap;
999 d = u->use_tsched;
1000
1001 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1002 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1003 goto fail;
1004 }
1005
1006 if (b != u->use_mmap || d != u->use_tsched) {
1007 pa_log_warn("Resume failed, couldn't get original access mode.");
1008 goto fail;
1009 }
1010
1011 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1012 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1013 goto fail;
1014 }
1015
1016 if (period_size*u->frame_size != u->fragment_size ||
1017 buffer_size*u->frame_size != u->hwbuf_size) {
1018 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New: %lu/%lu)",
1019 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1020 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1021 goto fail;
1022 }
1023
1024 if (update_sw_params(u) < 0)
1025 goto fail;
1026
1027 if (build_pollfd(u) < 0)
1028 goto fail;
1029
1030 u->write_count = 0;
1031 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1032 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1033 u->last_smoother_update = 0;
1034
1035 u->first = TRUE;
1036 u->since_start = 0;
1037
1038 pa_log_info("Resumed successfully...");
1039
1040 return 0;
1041
1042 fail:
1043 if (u->pcm_handle) {
1044 snd_pcm_close(u->pcm_handle);
1045 u->pcm_handle = NULL;
1046 }
1047
1048 return -PA_ERR_IO;
1049 }
1050
1051 /* Called from IO context */
1052 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1053 struct userdata *u = PA_SINK(o)->userdata;
1054
1055 switch (code) {
1056
1057 case PA_SINK_MESSAGE_FINISH_MOVE:
1058 case PA_SINK_MESSAGE_ADD_INPUT: {
1059 pa_sink_input *i = PA_SINK_INPUT(data);
1060 int r = 0;
1061
1062 if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
1063 break;
1064
1065 u->old_rate = u->sink->sample_spec.rate;
1066
1067 /* Passthrough format, see if we need to reset sink sample rate */
1068 if (u->sink->sample_spec.rate == i->thread_info.sample_spec.rate)
1069 break;
1070
1071 /* .. we do */
1072 if ((r = suspend(u)) < 0)
1073 return r;
1074
1075 u->sink->sample_spec.rate = i->thread_info.sample_spec.rate;
1076
1077 if ((r = unsuspend(u)) < 0)
1078 return r;
1079
1080 break;
1081 }
1082
1083 case PA_SINK_MESSAGE_START_MOVE:
1084 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1085 pa_sink_input *i = PA_SINK_INPUT(data);
1086 int r = 0;
1087
1088 if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
1089 break;
1090
1091 /* Passthrough format, see if we need to reset sink sample rate */
1092 if (u->sink->sample_spec.rate == u->old_rate)
1093 break;
1094
1095 /* .. we do */
1096 if ((r = suspend(u)) < 0)
1097 return r;
1098
1099 u->sink->sample_spec.rate = u->old_rate;
1100
1101 if ((r = unsuspend(u)) < 0)
1102 return r;
1103
1104 break;
1105 }
1106
1107 case PA_SINK_MESSAGE_GET_LATENCY: {
1108 pa_usec_t r = 0;
1109
1110 if (u->pcm_handle)
1111 r = sink_get_latency(u);
1112
1113 *((pa_usec_t*) data) = r;
1114
1115 return 0;
1116 }
1117
1118 case PA_SINK_MESSAGE_SET_STATE:
1119
1120 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1121
1122 case PA_SINK_SUSPENDED: {
1123 int r;
1124
1125 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1126
1127 if ((r = suspend(u)) < 0)
1128 return r;
1129
1130 break;
1131 }
1132
1133 case PA_SINK_IDLE:
1134 case PA_SINK_RUNNING: {
1135 int r;
1136
1137 if (u->sink->thread_info.state == PA_SINK_INIT) {
1138 if (build_pollfd(u) < 0)
1139 return -PA_ERR_IO;
1140 }
1141
1142 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1143 if ((r = unsuspend(u)) < 0)
1144 return r;
1145 }
1146
1147 break;
1148 }
1149
1150 case PA_SINK_UNLINKED:
1151 case PA_SINK_INIT:
1152 case PA_SINK_INVALID_STATE:
1153 ;
1154 }
1155
1156 break;
1157 }
1158
1159 return pa_sink_process_msg(o, code, data, offset, chunk);
1160 }
1161
1162 /* Called from main context */
1163 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1164 pa_sink_state_t old_state;
1165 struct userdata *u;
1166
1167 pa_sink_assert_ref(s);
1168 pa_assert_se(u = s->userdata);
1169
1170 old_state = pa_sink_get_state(u->sink);
1171
1172 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1173 reserve_done(u);
1174 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1175 if (reserve_init(u, u->device_name) < 0)
1176 return -PA_ERR_BUSY;
1177
1178 return 0;
1179 }
1180
1181 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1182 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1183
1184 pa_assert(u);
1185 pa_assert(u->mixer_handle);
1186
1187 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1188 return 0;
1189
1190 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1191 return 0;
1192
1193 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1194 pa_sink_get_volume(u->sink, TRUE);
1195 pa_sink_get_mute(u->sink, TRUE);
1196 }
1197
1198 return 0;
1199 }
1200
1201 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1202 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1203
1204 pa_assert(u);
1205 pa_assert(u->mixer_handle);
1206
1207 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1208 return 0;
1209
1210 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1211 return 0;
1212
1213 if (mask & SND_CTL_EVENT_MASK_VALUE)
1214 pa_sink_update_volume_and_mute(u->sink);
1215
1216 return 0;
1217 }
1218
1219 static void sink_get_volume_cb(pa_sink *s) {
1220 struct userdata *u = s->userdata;
1221 pa_cvolume r;
1222 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1223
1224 pa_assert(u);
1225 pa_assert(u->mixer_path);
1226 pa_assert(u->mixer_handle);
1227
1228 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1229 return;
1230
1231 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1232 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1233
1234 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1235
1236 if (u->mixer_path->has_dB) {
1237 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1238
1239 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1240 }
1241
1242 if (pa_cvolume_equal(&u->hardware_volume, &r))
1243 return;
1244
1245 s->real_volume = u->hardware_volume = r;
1246
1247 /* Hmm, so the hardware volume changed, let's reset our software volume */
1248 if (u->mixer_path->has_dB)
1249 pa_sink_set_soft_volume(s, NULL);
1250 }
1251
1252 static void sink_set_volume_cb(pa_sink *s) {
1253 struct userdata *u = s->userdata;
1254 pa_cvolume r;
1255 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1256 pa_bool_t write_to_hw = (s->flags & PA_SINK_SYNC_VOLUME) ? FALSE : TRUE;
1257
1258 pa_assert(u);
1259 pa_assert(u->mixer_path);
1260 pa_assert(u->mixer_handle);
1261
1262 /* Shift up by the base volume */
1263 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1264
1265 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, write_to_hw) < 0)
1266 return;
1267
1268 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1269 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1270
1271 u->hardware_volume = r;
1272
1273 if (u->mixer_path->has_dB) {
1274 pa_cvolume new_soft_volume;
1275 pa_bool_t accurate_enough;
1276 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1277
1278 /* Match exactly what the user requested by software */
1279 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1280
1281 /* If the adjustment to do in software is only minimal we
1282 * can skip it. That saves us CPU at the expense of a bit of
1283 * accuracy */
1284 accurate_enough =
1285 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1286 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1287
1288 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1289 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1290 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1291 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1292 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1293 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1294 pa_yes_no(accurate_enough));
1295 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1296
1297 if (!accurate_enough)
1298 s->soft_volume = new_soft_volume;
1299
1300 } else {
1301 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1302
1303 /* We can't match exactly what the user requested, hence let's
1304 * at least tell the user about it */
1305
1306 s->real_volume = r;
1307 }
1308 }
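/* Editor's example for the dB split above: if the user requests -20 dB
 * but the mixer element can only realize -18 dB, the divide leaves the
 * remaining -2 dB as new_soft_volume; had the residue been within
 * VOLUME_ACCURACY of unity, it would have been skipped entirely. */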
1309
1310 static void sink_write_volume_cb(pa_sink *s) {
1311 struct userdata *u = s->userdata;
1312 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1313
1314 pa_assert(u);
1315 pa_assert(u->mixer_path);
1316 pa_assert(u->mixer_handle);
1317 pa_assert(s->flags & PA_SINK_SYNC_VOLUME);
1318
1319 /* Shift up by the base volume */
1320 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1321
1322 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE) < 0)
1323 pa_log_error("Writing HW volume failed");
1324 else {
1325 pa_cvolume tmp_vol;
1326 pa_bool_t accurate_enough;
1327
1328 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1329 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1330
1331 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1332 accurate_enough =
1333 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1334 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1335
1336 if (!accurate_enough) {
1337 union {
1338 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1339 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1340 } vol;
1341
1342 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1343 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1344 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1345 pa_log_debug(" in dB: %s (request) != %s",
1346 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1347 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1348 }
1349 }
1350 }
1351
1352 static void sink_get_mute_cb(pa_sink *s) {
1353 struct userdata *u = s->userdata;
1354 pa_bool_t b;
1355
1356 pa_assert(u);
1357 pa_assert(u->mixer_path);
1358 pa_assert(u->mixer_handle);
1359
1360 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1361 return;
1362
1363 s->muted = b;
1364 }
1365
1366 static void sink_set_mute_cb(pa_sink *s) {
1367 struct userdata *u = s->userdata;
1368
1369 pa_assert(u);
1370 pa_assert(u->mixer_path);
1371 pa_assert(u->mixer_handle);
1372
1373 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1374 }
1375
1376 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1377 struct userdata *u = s->userdata;
1378 pa_alsa_port_data *data;
1379
1380 pa_assert(u);
1381 pa_assert(p);
1382 pa_assert(u->mixer_handle);
1383
1384 data = PA_DEVICE_PORT_DATA(p);
1385
1386 pa_assert_se(u->mixer_path = data->path);
1387 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1388
1389 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1390 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1391 s->n_volume_steps = PA_VOLUME_NORM+1;
1392
1393 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1394 } else {
1395 s->base_volume = PA_VOLUME_NORM;
1396 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1397 }
1398
1399 if (data->setting)
1400 pa_alsa_setting_select(data->setting, u->mixer_handle);
1401
1402 if (s->set_mute)
1403 s->set_mute(s);
1404 if (s->set_volume)
1405 s->set_volume(s);
1406
1407 return 0;
1408 }
1409
1410 static void sink_update_requested_latency_cb(pa_sink *s) {
1411 struct userdata *u = s->userdata;
1412 size_t before;
1413 pa_assert(u);
1414 pa_assert(u->use_tsched); /* only when timer scheduling is used
1415 * can we dynamically adjust the
1416 * latency */
1417
1418 if (!u->pcm_handle)
1419 return;
1420
1421 before = u->hwbuf_unused;
1422 update_sw_params(u);
1423
1424 /* Let's check whether we now use only a smaller part of the
1425 buffer than before. If so, we need to make sure that subsequent
1426 rewinds are relative to the new maximum fill level and not to the
1427 current fill level. Thus, let's do a full rewind once, to clear
1428 things up. */
1429
1430 if (u->hwbuf_unused > before) {
1431 pa_log_debug("Requesting rewind due to latency change.");
1432 pa_sink_request_rewind(s, (size_t) -1);
1433 }
1434 }
1435
1436 static int process_rewind(struct userdata *u) {
1437 snd_pcm_sframes_t unused;
1438 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1439 pa_assert(u);
1440
1441 /* Figure out how much we shall rewind and reset the counter */
1442 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1443
1444 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1445
1446 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1447 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1448 return -1;
1449 }
1450
1451 unused_nbytes = (size_t) unused * u->frame_size;
1452
1453 /* make sure the rewind doesn't go too far; rewinding too much can cause issues with DMAs */
1454 unused_nbytes += u->rewind_safeguard;
1455
1456 if (u->hwbuf_size > unused_nbytes)
1457 limit_nbytes = u->hwbuf_size - unused_nbytes;
1458 else
1459 limit_nbytes = 0;
1460
1461 if (rewind_nbytes > limit_nbytes)
1462 rewind_nbytes = limit_nbytes;
1463
1464 if (rewind_nbytes > 0) {
1465 snd_pcm_sframes_t in_frames, out_frames;
1466
1467 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1468
1469 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1470 pa_log_debug("before: %lu", (unsigned long) in_frames);
1471 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1472 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1473 if (try_recover(u, "process_rewind", out_frames) < 0)
1474 return -1;
1475 out_frames = 0;
1476 }
1477
1478 pa_log_debug("after: %lu", (unsigned long) out_frames);
1479
1480 rewind_nbytes = (size_t) out_frames * u->frame_size;
1481
1482 if (rewind_nbytes <= 0)
1483 pa_log_info("Tried rewind, but was apparently not possible.");
1484 else {
1485 u->write_count -= rewind_nbytes;
1486 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1487 pa_sink_process_rewind(u->sink, rewind_nbytes);
1488
1489 u->after_rewind = TRUE;
1490 return 0;
1491 }
1492 } else
1493 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1494
1495 pa_sink_process_rewind(u->sink, 0);
1496 return 0;
1497 }
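/* Editor's illustration of the limit above: with 1s of audio queued in a
 * 2s buffer, snd_pcm_avail() reports roughly 1s worth of bytes, so at
 * most hwbuf_size - avail - rewind_safeguard bytes (just under 1s) may
 * be taken back -- the rest is already played or protected. */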
1498
1499 static void thread_func(void *userdata) {
1500 struct userdata *u = userdata;
1501 unsigned short revents = 0;
1502
1503 pa_assert(u);
1504
1505 pa_log_debug("Thread starting up");
1506
1507 if (u->core->realtime_scheduling)
1508 pa_make_realtime(u->core->realtime_priority);
1509
1510 pa_thread_mq_install(&u->thread_mq);
1511
1512 for (;;) {
1513 int ret;
1514 pa_usec_t rtpoll_sleep = 0;
1515
1516 #ifdef DEBUG_TIMING
1517 pa_log_debug("Loop");
1518 #endif
1519
1520 /* Render some data and write it to the dsp */
1521 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1522 int work_done;
1523 pa_usec_t sleep_usec = 0;
1524 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1525
1526 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1527 if (process_rewind(u) < 0)
1528 goto fail;
1529
1530 if (u->use_mmap)
1531 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1532 else
1533 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1534
1535 if (work_done < 0)
1536 goto fail;
1537
1538 /* pa_log_debug("work_done = %i", work_done); */
1539
1540 if (work_done) {
1541
1542 if (u->first) {
1543 pa_log_info("Starting playback.");
1544 snd_pcm_start(u->pcm_handle);
1545
1546 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1547
1548 u->first = FALSE;
1549 }
1550
1551 update_smoother(u);
1552 }
1553
1554 if (u->use_tsched) {
1555 pa_usec_t cusec;
1556
1557 if (u->since_start <= u->hwbuf_size) {
1558
1559 /* USB devices on ALSA seem to hit a buffer
1560 * underrun during the first iterations much
1561 * quicker than we calculate here, probably due to
1562 * the transport latency. To accommodate that,
1563 * we artificially decrease the sleep time until
1564 * we have filled the buffer at least once
1565 * completely. */
1566
1567 if (pa_log_ratelimit(PA_LOG_DEBUG))
1568 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1569 sleep_usec /= 2;
1570 }
1571
1572 /* OK, the playback buffer is now full, let's
1573 * calculate when to wake up next */
1574 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1575
1576 /* Convert from the sound card time domain to the
1577 * system time domain */
1578 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1579
1580 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1581
1582 /* We don't trust the conversion, so we wake up whatever comes first */
1583 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1584 }
1585
1586 u->after_rewind = FALSE;
1587
1588 }
1589
1590 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1591 pa_usec_t volume_sleep;
1592 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1593 if (volume_sleep > 0)
1594 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1595 }
1596
1597 if (rtpoll_sleep > 0)
1598 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1599 else
1600 pa_rtpoll_set_timer_disabled(u->rtpoll);
1601
1602 /* Hmm, nothing to do. Let's sleep */
1603 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1604 goto fail;
1605
1606 if (u->sink->flags & PA_SINK_SYNC_VOLUME)
1607 pa_sink_volume_change_apply(u->sink, NULL);
1608
1609 if (ret == 0)
1610 goto finish;
1611
1612 /* Tell ALSA about this and process its response */
1613 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1614 struct pollfd *pollfd;
1615 int err;
1616 unsigned n;
1617
1618 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1619
1620 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1621 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1622 goto fail;
1623 }
1624
1625 if (revents & ~POLLOUT) {
1626 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1627 goto fail;
1628
1629 u->first = TRUE;
1630 u->since_start = 0;
1631 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1632 pa_log_debug("Wakeup from ALSA!");
1633
1634 } else
1635 revents = 0;
1636 }
1637
1638 fail:
1639 /* If this was no regular exit from the loop we have to continue
1640 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1641 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1642 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1643
1644 finish:
1645 pa_log_debug("Thread shutting down");
1646 }
1647
1648 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1649 const char *n;
1650 char *t;
1651
1652 pa_assert(data);
1653 pa_assert(ma);
1654 pa_assert(device_name);
1655
1656 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1657 pa_sink_new_data_set_name(data, n);
1658 data->namereg_fail = TRUE;
1659 return;
1660 }
1661
1662 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1663 data->namereg_fail = TRUE;
1664 else {
1665 n = device_id ? device_id : device_name;
1666 data->namereg_fail = FALSE;
1667 }
1668
1669 if (mapping)
1670 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1671 else
1672 t = pa_sprintf_malloc("alsa_output.%s", n);
1673
1674 pa_sink_new_data_set_name(data, t);
1675 pa_xfree(t);
1676 }
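/* Editor's illustration (hypothetical IDs; the format strings above are
 * authoritative): device_id "pci-0000_00_1b.0" with mapping
 * "analog-stereo" yields "alsa_output.pci-0000_00_1b.0.analog-stereo". */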
1677
1678 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1679
1680 if (!mapping && !element)
1681 return;
1682
1683 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1684 pa_log_info("Failed to find a working mixer device.");
1685 return;
1686 }
1687
1688 if (element) {
1689
1690 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1691 goto fail;
1692
1693 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1694 goto fail;
1695
1696 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1697 pa_alsa_path_dump(u->mixer_path);
1698 } else {
1699
1700 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1701 goto fail;
1702
1703 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1704
1705 pa_log_debug("Probed mixer paths:");
1706 pa_alsa_path_set_dump(u->mixer_path_set);
1707 }
1708
1709 return;
1710
1711 fail:
1712
1713 if (u->mixer_path_set) {
1714 pa_alsa_path_set_free(u->mixer_path_set);
1715 u->mixer_path_set = NULL;
1716 } else if (u->mixer_path) {
1717 pa_alsa_path_free(u->mixer_path);
1718 u->mixer_path = NULL;
1719 }
1720
1721 if (u->mixer_handle) {
1722 snd_mixer_close(u->mixer_handle);
1723 u->mixer_handle = NULL;
1724 }
1725 }
1726
1727 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB, pa_bool_t sync_volume) {
1728 pa_assert(u);
1729
1730 if (!u->mixer_handle)
1731 return 0;
1732
1733 if (u->sink->active_port) {
1734 pa_alsa_port_data *data;
1735
1736 /* We have a list of supported paths, so let's activate the
1737 * one that has been chosen as active */
1738
1739 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1740 u->mixer_path = data->path;
1741
1742 pa_alsa_path_select(data->path, u->mixer_handle);
1743
1744 if (data->setting)
1745 pa_alsa_setting_select(data->setting, u->mixer_handle);
1746
1747 } else {
1748
1749 if (!u->mixer_path && u->mixer_path_set)
1750 u->mixer_path = u->mixer_path_set->paths;
1751
1752 if (u->mixer_path) {
1753 /* Hmm, we have only a single path, then let's activate it */
1754
1755 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1756
1757 if (u->mixer_path->settings)
1758 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1759 } else
1760 return 0;
1761 }
1762
1763 if (!u->mixer_path->has_volume)
1764 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1765 else {
1766
1767 if (u->mixer_path->has_dB) {
1768 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1769
1770 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1771 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1772
1773 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1774
1775 } else {
1776 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1777 u->sink->base_volume = PA_VOLUME_NORM;
1778 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1779 }
1780
1781 u->sink->get_volume = sink_get_volume_cb;
1782 u->sink->set_volume = sink_set_volume_cb;
1783 u->sink->write_volume = sink_write_volume_cb;
1784
1785 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL;
1786 if (u->mixer_path->has_dB) {
1787 u->sink->flags |= PA_SINK_DECIBEL_VOLUME;
1788 if (sync_volume) {
1789 u->sink->flags |= PA_SINK_SYNC_VOLUME;
1790 pa_log_info("Successfully enabled synchronous volume.");
1791 }
1792 }
1793
1794 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1795 }
1796
1797 if (!u->mixer_path->has_mute) {
1798 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1799 } else {
1800 u->sink->get_mute = sink_get_mute_cb;
1801 u->sink->set_mute = sink_set_mute_cb;
1802 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1803 pa_log_info("Using hardware mute control.");
1804 }
1805
1806 if (u->sink->flags & (PA_SINK_HW_VOLUME_CTRL|PA_SINK_HW_MUTE_CTRL)) {
1807 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1808 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1809 u->mixer_pd = pa_alsa_mixer_pdata_new();
1810 mixer_callback = io_mixer_callback;
1811
1812 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1813 pa_log("Failed to initialize file descriptor monitoring");
1814 return -1;
1815 }
1816 } else {
1817 u->mixer_fdl = pa_alsa_fdlist_new();
1818 mixer_callback = ctl_mixer_callback;
1819
1820 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1821 pa_log("Failed to initialize file descriptor monitoring");
1822 return -1;
1823 }
1824 }
1825
1826 if (u->mixer_path_set)
1827 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1828 else
1829 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1830 }
1831
1832 return 0;
1833 }

pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

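    /* Derive the period/buffer sizes in frames from the byte values. As
     * a hypothetical example: with S16LE stereo (frame_size = 4), 4
     * fragments of 4408 bytes give buffer_size = 17632 bytes, i.e.
     * period_frames = 1102 and buffer_frames = 4408. */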
    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

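    /* The rewind safeguard is the stretch of buffered audio just ahead of
     * the hardware read pointer that we never rewind into; by default it
     * is the larger of a fixed byte floor and a time-based floor for the
     * current sample spec. */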
    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    sync_volume = m->core->sync_volume;
    if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
        pa_log("Failed to parse sync_volume argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

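    /* The smoother turns the device's somewhat jittery timing reports
     * into a smoothed estimate of playback time; the window and adjust
     * times are the SMOOTHER_* constants defined at the top of this
     * file. */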
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

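    /* Acquire the device reservation lock (see reserve-wrap), so that we
     * can hand the device over to other sound servers on request and be
     * notified when somebody else grabs it. */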
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

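    /* Three ways to open the PCM device: an explicit mapping, a
     * device_id with automatic profile probing, or a plain device
     * string. b and d are in/out parameters reporting whether mmap and
     * tsched turned out to be usable on the device we actually got. */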
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse boolean argument namereg_fail.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

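    /* Dynamic latency is only offered in timer-based scheduling mode;
     * with sound IRQ scheduling the latency is fixed by the hardware
     * buffer size (see the pa_sink_set_fixed_latency() call below). */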
    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

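    /* These two tunables shift the point in time at which a deferred
     * hardware volume change is applied, to compensate for latency the
     * timing code cannot see. */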
    if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse sync_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse sync_volume_extra_delay parameter");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

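    /* Rewinding is only trusted on real hardware devices; ALSA plugin
     * devices may not implement rewinding correctly, so it is disabled
     * for them. */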
    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

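    /* In tsched mode the watermark was computed in bytes for the
     * requested sample spec and must be rescaled to the spec the device
     * actually gave us. As a hypothetical example, the 20ms default at
     * 44.1kHz S16LE stereo is 0.020 * 176400 B/s = 3528 bytes. */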
    if (u->use_tsched) {
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB, sync_volume) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
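    /* If the user supplied an initial volume/mute, push it to the
     * hardware; otherwise read the current hardware state back into the
     * sink. */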
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

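/* Tear-down happens in roughly the reverse order of setup: unlink the
 * sink first so no new data arrives, shut down the IO thread, then
 * release the remaining resources. */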
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}