alsa: Take synchronized HW volume infra into use for alsa-sink
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty */
80
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
86
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't extend the granularity in software unless the difference exceeds this level */
88
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms; depending on channels/rate/sample format we may rewind more than the 256 bytes above */
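/* For reference: the two defaults above coincide assuming 4-byte frames
 * (e.g. S16LE stereo): 256 bytes = 64 frames, and 64/48000 s ≈ 1.33 ms. */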
91
92 struct userdata {
93 pa_core *core;
94 pa_module *module;
95 pa_sink *sink;
96
97 pa_thread *thread;
98 pa_thread_mq thread_mq;
99 pa_rtpoll *rtpoll;
100
101 snd_pcm_t *pcm_handle;
102
103 pa_alsa_fdlist *mixer_fdl;
104 pa_alsa_mixer_pdata *mixer_pd;
105 snd_mixer_t *mixer_handle;
106 pa_alsa_path_set *mixer_path_set;
107 pa_alsa_path *mixer_path;
108
109 pa_cvolume hardware_volume;
110
111 size_t
112 frame_size,
113 fragment_size,
114 hwbuf_size,
115 tsched_watermark,
116 hwbuf_unused,
117 min_sleep,
118 min_wakeup,
119 watermark_inc_step,
120 watermark_dec_step,
121 watermark_inc_threshold,
122 watermark_dec_threshold,
123 rewind_safeguard;
124
125 pa_usec_t watermark_dec_not_before;
126
127 pa_memchunk memchunk;
128
129 char *device_name; /* name of the PCM device */
130 char *control_device; /* name of the control device */
131
132 pa_bool_t use_mmap:1, use_tsched:1;
133
134 pa_bool_t first, after_rewind;
135
136 pa_rtpoll_item *alsa_rtpoll_item;
137
138 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
139
140 pa_smoother *smoother;
141 uint64_t write_count;
142 uint64_t since_start;
143 pa_usec_t smoother_interval;
144 pa_usec_t last_smoother_update;
145
146 pa_reserve_wrapper *reserve;
147 pa_hook_slot *reserve_slot;
148 pa_reserve_monitor_wrapper *monitor;
149 pa_hook_slot *monitor_slot;
150 };
151
152 static void userdata_free(struct userdata *u);
153
154 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
155 pa_assert(r);
156 pa_assert(u);
157
158 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
159 return PA_HOOK_CANCEL;
160
161 return PA_HOOK_OK;
162 }
163
164 static void reserve_done(struct userdata *u) {
165 pa_assert(u);
166
167 if (u->reserve_slot) {
168 pa_hook_slot_free(u->reserve_slot);
169 u->reserve_slot = NULL;
170 }
171
172 if (u->reserve) {
173 pa_reserve_wrapper_unref(u->reserve);
174 u->reserve = NULL;
175 }
176 }
177
178 static void reserve_update(struct userdata *u) {
179 const char *description;
180 pa_assert(u);
181
182 if (!u->sink || !u->reserve)
183 return;
184
185 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
186 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
187 }
188
189 static int reserve_init(struct userdata *u, const char *dname) {
190 char *rname;
191
192 pa_assert(u);
193 pa_assert(dname);
194
195 if (u->reserve)
196 return 0;
197
198 if (pa_in_system_mode())
199 return 0;
200
201 if (!(rname = pa_alsa_get_reserve_name(dname)))
202 return 0;
203
204 /* We are resuming, try to lock the device */
205 u->reserve = pa_reserve_wrapper_get(u->core, rname);
206 pa_xfree(rname);
207
208 if (!(u->reserve))
209 return -1;
210
211 reserve_update(u);
212
213 pa_assert(!u->reserve_slot);
214 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
215
216 return 0;
217 }
218
219 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
220 pa_bool_t b;
221
222 pa_assert(w);
223 pa_assert(u);
224
225 b = PA_PTR_TO_UINT(busy) && !u->reserve;
226
227 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
228 return PA_HOOK_OK;
229 }
230
231 static void monitor_done(struct userdata *u) {
232 pa_assert(u);
233
234 if (u->monitor_slot) {
235 pa_hook_slot_free(u->monitor_slot);
236 u->monitor_slot = NULL;
237 }
238
239 if (u->monitor) {
240 pa_reserve_monitor_wrapper_unref(u->monitor);
241 u->monitor = NULL;
242 }
243 }
244
245 static int reserve_monitor_init(struct userdata *u, const char *dname) {
246 char *rname;
247
248 pa_assert(u);
249 pa_assert(dname);
250
251 if (pa_in_system_mode())
252 return 0;
253
254 if (!(rname = pa_alsa_get_reserve_name(dname)))
255 return 0;
256
257 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
258 pa_xfree(rname);
259
260 if (!(u->monitor))
261 return -1;
262
263 pa_assert(!u->monitor_slot);
264 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265
266 return 0;
267 }
268
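/* Clamp the minimum sleep/wakeup times to sane values: at least one
 * frame, at most half of the part of the hw buffer we actually use. */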
269 static void fix_min_sleep_wakeup(struct userdata *u) {
270 size_t max_use, max_use_2;
271
272 pa_assert(u);
273 pa_assert(u->use_tsched);
274
275 max_use = u->hwbuf_size - u->hwbuf_unused;
276 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
277
278 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
279 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
280
281 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
282 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
283 }
284
285 static void fix_tsched_watermark(struct userdata *u) {
286 size_t max_use;
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 max_use = u->hwbuf_size - u->hwbuf_unused;
291
292 if (u->tsched_watermark > max_use - u->min_sleep)
293 u->tsched_watermark = max_use - u->min_sleep;
294
295 if (u->tsched_watermark < u->min_wakeup)
296 u->tsched_watermark = u->min_wakeup;
297 }
298
299 static void increase_watermark(struct userdata *u) {
300 size_t old_watermark;
301 pa_usec_t old_min_latency, new_min_latency;
302
303 pa_assert(u);
304 pa_assert(u->use_tsched);
305
306 /* First, just try to increase the watermark */
307 old_watermark = u->tsched_watermark;
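/* The PA_MIN() means: double the watermark while it is still smaller
 * than the configured step, then grow it linearly by the step. */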
308 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
309 fix_tsched_watermark(u);
310
311 if (old_watermark != u->tsched_watermark) {
312 pa_log_info("Increasing wakeup watermark to %0.2f ms",
313 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
314 return;
315 }
316
317 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
318 old_min_latency = u->sink->thread_info.min_latency;
319 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
320 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
321
322 if (old_min_latency != new_min_latency) {
323 pa_log_info("Increasing minimal latency to %0.2f ms",
324 (double) new_min_latency / PA_USEC_PER_MSEC);
325
326 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
327 }
328
329 /* When we reach this we're officially fucked! */
330 }
331
332 static void decrease_watermark(struct userdata *u) {
333 size_t old_watermark;
334 pa_usec_t now;
335
336 pa_assert(u);
337 pa_assert(u->use_tsched);
338
339 now = pa_rtclock_now();
340
341 if (u->watermark_dec_not_before <= 0)
342 goto restart;
343
344 if (u->watermark_dec_not_before > now)
345 return;
346
347 old_watermark = u->tsched_watermark;
348
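/* Mirror image of increase_watermark(): never shrink by more than
 * half, otherwise back off by the configured step. */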
349 if (u->tsched_watermark < u->watermark_dec_step)
350 u->tsched_watermark = u->tsched_watermark / 2;
351 else
352 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
353
354 fix_tsched_watermark(u);
355
356 if (old_watermark != u->tsched_watermark)
357 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
358 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
359
360 /* We don't change the latency range */
361
362 restart:
363 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
364 }
365
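/* Split the effective buffer latency into the time we may sleep and the
 * processing budget we reserve for refilling: we wake up one watermark's
 * worth of time before the buffer would run dry. */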
366 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
367 pa_usec_t usec, wm;
368
369 pa_assert(sleep_usec);
370 pa_assert(process_usec);
371
372 pa_assert(u);
373 pa_assert(u->use_tsched);
374
375 usec = pa_sink_get_requested_latency_within_thread(u->sink);
376
377 if (usec == (pa_usec_t) -1)
378 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
379
380 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
381
382 if (wm > usec)
383 wm = usec/2;
384
385 *sleep_usec = usec - wm;
386 *process_usec = wm;
387
388 #ifdef DEBUG_TIMING
389 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
390 (unsigned long) (usec / PA_USEC_PER_MSEC),
391 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
392 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
393 #endif
394 }
395
396 static int try_recover(struct userdata *u, const char *call, int err) {
397 pa_assert(u);
398 pa_assert(call);
399 pa_assert(err < 0);
400
401 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
402
403 pa_assert(err != -EAGAIN);
404
405 if (err == -EPIPE)
406 pa_log_debug("%s: Buffer underrun!", call);
407
408 if (err == -ESTRPIPE)
409 pa_log_debug("%s: System suspended!", call);
410
411 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
412 pa_log("%s: %s", call, pa_alsa_strerror(err));
413 return -1;
414 }
415
416 u->first = TRUE;
417 u->since_start = 0;
418 return 0;
419 }
420
421 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
422 size_t left_to_play;
423 pa_bool_t underrun = FALSE;
424
425 /* We use <= instead of < for this check here because an underrun
426 * only happens after the last sample has been processed, not as soon
427 * as it is removed from the buffer. This is particularly important
428 * when block transfer is used. */
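/* Example: n_bytes == u->hwbuf_size means the whole buffer is writable,
 * i.e. it just ran empty -- but the last sample may still be being
 * played, so this does not count as an underrun yet. */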
429
430 if (n_bytes <= u->hwbuf_size)
431 left_to_play = u->hwbuf_size - n_bytes;
432 else {
433
434 /* We got a dropout. What a mess! */
435 left_to_play = 0;
436 underrun = TRUE;
437
438 #ifdef DEBUG_TIMING
439 PA_DEBUG_TRAP;
440 #endif
441
442 if (!u->first && !u->after_rewind)
443 if (pa_log_ratelimit())
444 pa_log_info("Underrun!");
445 }
446
447 #ifdef DEBUG_TIMING
448 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
449 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
450 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
451 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
452 #endif
453
454 if (u->use_tsched) {
455 pa_bool_t reset_not_before = TRUE;
456
457 if (!u->first && !u->after_rewind) {
458 if (underrun || left_to_play < u->watermark_inc_threshold)
459 increase_watermark(u);
460 else if (left_to_play > u->watermark_dec_threshold) {
461 reset_not_before = FALSE;
462
463 /* We decrease the watermark only if we have actually
464 * been woken up by a timeout. If something else woke
465 * us up it's too easy to fulfill the deadlines... */
466
467 if (on_timeout)
468 decrease_watermark(u);
469 }
470 }
471
472 if (reset_not_before)
473 u->watermark_dec_not_before = 0;
474 }
475
476 return left_to_play;
477 }
478
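/* Fill the hw buffer via the mmap interface: render directly into the
 * DMA area between snd_pcm_mmap_begin() and snd_pcm_mmap_commit(), which
 * saves the extra copy that snd_pcm_writei() would incur. */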
479 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
480 pa_bool_t work_done = FALSE;
481 pa_usec_t max_sleep_usec = 0, process_usec = 0;
482 size_t left_to_play;
483 unsigned j = 0;
484
485 pa_assert(u);
486 pa_sink_assert_ref(u->sink);
487
488 if (u->use_tsched)
489 hw_sleep_time(u, &max_sleep_usec, &process_usec);
490
491 for (;;) {
492 snd_pcm_sframes_t n;
493 size_t n_bytes;
494 int r;
495 pa_bool_t after_avail = TRUE;
496
497 /* First we determine how many samples are missing to fill the
498 * buffer up to 100% */
499
500 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
501
502 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
503 continue;
504
505 return r;
506 }
507
508 n_bytes = (size_t) n * u->frame_size;
509
510 #ifdef DEBUG_TIMING
511 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
512 #endif
513
514 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
515 on_timeout = FALSE;
516
517 if (u->use_tsched)
518
519 /* We won't fill up the playback buffer before at least
520 * half the sleep time is over because otherwise we might
521 * ask for more data from the clients than they expect. We
522 * need to guarantee that clients only have to keep around
523 * a single hw buffer length. */
524
525 if (!polled &&
526 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
527 #ifdef DEBUG_TIMING
528 pa_log_debug("Not filling up, because too early.");
529 #endif
530 break;
531 }
532
533 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
534
535 if (polled)
536 PA_ONCE_BEGIN {
537 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
538 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
539 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
540 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
541 pa_strnull(dn));
542 pa_xfree(dn);
543 } PA_ONCE_END;
544
545 #ifdef DEBUG_TIMING
546 pa_log_debug("Not filling up, because not necessary.");
547 #endif
548 break;
549 }
550
551
552 if (++j > 10) {
553 #ifdef DEBUG_TIMING
554 pa_log_debug("Not filling up, because already too many iterations.");
555 #endif
556
557 break;
558 }
559
560 n_bytes -= u->hwbuf_unused;
561 polled = FALSE;
562
563 #ifdef DEBUG_TIMING
564 pa_log_debug("Filling up");
565 #endif
566
567 for (;;) {
568 pa_memchunk chunk;
569 void *p;
570 int err;
571 const snd_pcm_channel_area_t *areas;
572 snd_pcm_uframes_t offset, frames;
573 snd_pcm_sframes_t sframes;
574
575 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
576 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
577
578 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
579
580 if (!after_avail && err == -EAGAIN)
581 break;
582
583 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
584 continue;
585
586 return r;
587 }
588
589 /* Make sure that if these memblocks need to be copied they will fit into one slot */
590 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
591 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
592
593 if (!after_avail && frames == 0)
594 break;
595
596 pa_assert(frames > 0);
597 after_avail = FALSE;
598
599 /* Check that these are multiples of 8 bits */
600 pa_assert((areas[0].first & 7) == 0);
601 pa_assert((areas[0].step & 7) == 0);
602
603 /* We assume a single interleaved memory buffer */
604 pa_assert((areas[0].first >> 3) == 0);
605 pa_assert((areas[0].step >> 3) == u->frame_size);
606
607 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
608
609 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
610 chunk.length = pa_memblock_get_length(chunk.memblock);
611 chunk.index = 0;
612
613 pa_sink_render_into_full(u->sink, &chunk);
614 pa_memblock_unref_fixed(chunk.memblock);
615
616 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
617
618 if (!after_avail && (int) sframes == -EAGAIN)
619 break;
620
621 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
622 continue;
623
624 return r;
625 }
626
627 work_done = TRUE;
628
629 u->write_count += frames * u->frame_size;
630 u->since_start += frames * u->frame_size;
631
632 #ifdef DEBUG_TIMING
633 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
634 #endif
635
636 if ((size_t) frames * u->frame_size >= n_bytes)
637 break;
638
639 n_bytes -= (size_t) frames * u->frame_size;
640 }
641 }
642
643 if (u->use_tsched) {
644 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
645
646 if (*sleep_usec > process_usec)
647 *sleep_usec -= process_usec;
648 else
649 *sleep_usec = 0;
650 } else
651 *sleep_usec = 0;
652
653 return work_done ? 1 : 0;
654 }
655
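/* Same filling strategy as mmap_write(), but rendering goes through an
 * intermediate memchunk that is then pushed with snd_pcm_writei(). */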
656 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
657 pa_bool_t work_done = FALSE;
658 pa_usec_t max_sleep_usec = 0, process_usec = 0;
659 size_t left_to_play;
660 unsigned j = 0;
661
662 pa_assert(u);
663 pa_sink_assert_ref(u->sink);
664
665 if (u->use_tsched)
666 hw_sleep_time(u, &max_sleep_usec, &process_usec);
667
668 for (;;) {
669 snd_pcm_sframes_t n;
670 size_t n_bytes;
671 int r;
672 pa_bool_t after_avail = TRUE;
673
674 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
675
676 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
677 continue;
678
679 return r;
680 }
681
682 n_bytes = (size_t) n * u->frame_size;
683 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
684 on_timeout = FALSE;
685
686 if (u->use_tsched)
687
688 /* We won't fill up the playback buffer before at least
689 * half the sleep time is over because otherwise we might
690 * ask for more data from the clients than they expect. We
691 * need to guarantee that clients only have to keep around
692 * a single hw buffer length. */
693
694 if (!polled &&
695 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
696 break;
697
698 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
699
700 if (polled)
701 PA_ONCE_BEGIN {
702 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
703 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
704 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
705 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
706 pa_strnull(dn));
707 pa_xfree(dn);
708 } PA_ONCE_END;
709
710 break;
711 }
712
713 if (++j > 10) {
714 #ifdef DEBUG_TIMING
715 pa_log_debug("Not filling up, because already too many iterations.");
716 #endif
717
718 break;
719 }
720
721 n_bytes -= u->hwbuf_unused;
722 polled = FALSE;
723
724 for (;;) {
725 snd_pcm_sframes_t frames;
726 void *p;
727
728 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
729
730 if (u->memchunk.length <= 0)
731 pa_sink_render(u->sink, n_bytes, &u->memchunk);
732
733 pa_assert(u->memchunk.length > 0);
734
735 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
736
737 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
738 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
739
740 p = pa_memblock_acquire(u->memchunk.memblock);
741 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
742 pa_memblock_release(u->memchunk.memblock);
743
744 if (PA_UNLIKELY(frames < 0)) {
745
746 if (!after_avail && (int) frames == -EAGAIN)
747 break;
748
749 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
750 continue;
751
752 return r;
753 }
754
755 if (!after_avail && frames == 0)
756 break;
757
758 pa_assert(frames > 0);
759 after_avail = FALSE;
760
761 u->memchunk.index += (size_t) frames * u->frame_size;
762 u->memchunk.length -= (size_t) frames * u->frame_size;
763
764 if (u->memchunk.length <= 0) {
765 pa_memblock_unref(u->memchunk.memblock);
766 pa_memchunk_reset(&u->memchunk);
767 }
768
769 work_done = TRUE;
770
771 u->write_count += frames * u->frame_size;
772 u->since_start += frames * u->frame_size;
773
774 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
775
776 if ((size_t) frames * u->frame_size >= n_bytes)
777 break;
778
779 n_bytes -= (size_t) frames * u->frame_size;
780 }
781 }
782
783 if (u->use_tsched) {
784 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
785
786 if (*sleep_usec > process_usec)
787 *sleep_usec -= process_usec;
788 else
789 *sleep_usec = 0;
790 } else
791 *sleep_usec = 0;
792
793 return work_done ? 1 : 0;
794 }
795
796 static void update_smoother(struct userdata *u) {
797 snd_pcm_sframes_t delay = 0;
798 int64_t position;
799 int err;
800 pa_usec_t now1 = 0, now2;
801 snd_pcm_status_t *status;
802
803 snd_pcm_status_alloca(&status);
804
805 pa_assert(u);
806 pa_assert(u->pcm_handle);
807
808 /* Let's update the time smoother */
809
810 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
811 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
812 return;
813 }
814
815 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
816 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
817 else {
818 snd_htimestamp_t htstamp = { 0, 0 };
819 snd_pcm_status_get_htstamp(status, &htstamp);
820 now1 = pa_timespec_load(&htstamp);
821 }
822
823 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
824 if (now1 <= 0)
825 now1 = pa_rtclock_now();
826
827 /* check if the time since the last update is bigger than the interval */
828 if (u->last_smoother_update > 0)
829 if (u->last_smoother_update + u->smoother_interval > now1)
830 return;
831
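/* Playback position = everything we ever wrote minus what is still
 * sitting in the hw buffer (the delay), converted to bytes. */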
832 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
833
834 if (PA_UNLIKELY(position < 0))
835 position = 0;
836
837 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
838
839 pa_smoother_put(u->smoother, now1, now2);
840
841 u->last_smoother_update = now1;
842 /* exponentially increase the update interval up to the MAX limit */
843 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
844 }
845
846 static pa_usec_t sink_get_latency(struct userdata *u) {
847 pa_usec_t r;
848 int64_t delay;
849 pa_usec_t now1, now2;
850
851 pa_assert(u);
852
853 now1 = pa_rtclock_now();
854 now2 = pa_smoother_get(u->smoother, now1);
855
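/* Latency = time's worth of data handed to ALSA minus the smoothed
 * estimate of how much has actually been played back by now. */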
856 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
857
858 r = delay >= 0 ? (pa_usec_t) delay : 0;
859
860 if (u->memchunk.memblock)
861 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
862
863 return r;
864 }
865
866 static int build_pollfd(struct userdata *u) {
867 pa_assert(u);
868 pa_assert(u->pcm_handle);
869
870 if (u->alsa_rtpoll_item)
871 pa_rtpoll_item_free(u->alsa_rtpoll_item);
872
873 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
874 return -1;
875
876 return 0;
877 }
878
879 /* Called from IO context */
880 static int suspend(struct userdata *u) {
881 pa_assert(u);
882 pa_assert(u->pcm_handle);
883
884 pa_smoother_pause(u->smoother, pa_rtclock_now());
885
886 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
887 * take awfully long with our long buffer sizes today. */
888 snd_pcm_close(u->pcm_handle);
889 u->pcm_handle = NULL;
890
891 if (u->alsa_rtpoll_item) {
892 pa_rtpoll_item_free(u->alsa_rtpoll_item);
893 u->alsa_rtpoll_item = NULL;
894 }
895
896 /* We reset max_rewind/max_request here to make sure that, while we
897 * are suspended, the old max_request/max_rewind values set before
898 * the suspend cannot influence the per-stream buffers of newly
899 * created streams, and the new streams' requirements have no
900 * influence on the suspended device either. */
901 pa_sink_set_max_rewind_within_thread(u->sink, 0);
902 pa_sink_set_max_request_within_thread(u->sink, 0);
903
904 pa_log_info("Device suspended...");
905
906 return 0;
907 }
908
909 /* Called from IO context */
910 static int update_sw_params(struct userdata *u) {
911 snd_pcm_uframes_t avail_min;
912 int err;
913
914 pa_assert(u);
915
916 /* Use the full buffer if no one asked us for anything specific */
917 u->hwbuf_unused = 0;
918
919 if (u->use_tsched) {
920 pa_usec_t latency;
921
922 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
923 size_t b;
924
925 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
926
927 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
928
929 /* We need at least one sample in our buffer */
930
931 if (PA_UNLIKELY(b < u->frame_size))
932 b = u->frame_size;
933
934 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
935 }
936
937 fix_min_sleep_wakeup(u);
938 fix_tsched_watermark(u);
939 }
940
941 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
942
943 /* We need at least one frame in the used part of the buffer */
944 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
945
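/* With timer-based scheduling we additionally raise avail_min by the
 * expected sleep time, so that the poll() wakeup normally never fires
 * before our timer does and only acts as a fallback if we oversleep. */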
946 if (u->use_tsched) {
947 pa_usec_t sleep_usec, process_usec;
948
949 hw_sleep_time(u, &sleep_usec, &process_usec);
950 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
951 }
952
953 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
954
955 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
956 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
957 return err;
958 }
959
960 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
961 if (pa_alsa_pcm_is_hw(u->pcm_handle))
962 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
963 else {
964 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
965 pa_sink_set_max_rewind_within_thread(u->sink, 0);
966 }
967
968 return 0;
969 }
970
971 /* Called from IO context */
972 static int unsuspend(struct userdata *u) {
973 pa_sample_spec ss;
974 int err;
975 pa_bool_t b, d;
976 snd_pcm_uframes_t period_size, buffer_size;
977
978 pa_assert(u);
979 pa_assert(!u->pcm_handle);
980
981 pa_log_info("Trying resume...");
982
983 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
984 SND_PCM_NONBLOCK|
985 SND_PCM_NO_AUTO_RESAMPLE|
986 SND_PCM_NO_AUTO_CHANNELS|
987 SND_PCM_NO_AUTO_FORMAT)) < 0) {
988 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
989 goto fail;
990 }
991
992 ss = u->sink->sample_spec;
993 period_size = u->fragment_size / u->frame_size;
994 buffer_size = u->hwbuf_size / u->frame_size;
995 b = u->use_mmap;
996 d = u->use_tsched;
997
998 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
999 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1000 goto fail;
1001 }
1002
1003 if (b != u->use_mmap || d != u->use_tsched) {
1004 pa_log_warn("Resume failed, couldn't get original access mode.");
1005 goto fail;
1006 }
1007
1008 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1009 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1010 goto fail;
1011 }
1012
1013 if (period_size*u->frame_size != u->fragment_size ||
1014 buffer_size*u->frame_size != u->hwbuf_size) {
1015 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1016 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1017 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1018 goto fail;
1019 }
1020
1021 if (update_sw_params(u) < 0)
1022 goto fail;
1023
1024 if (build_pollfd(u) < 0)
1025 goto fail;
1026
1027 u->write_count = 0;
1028 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1029 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1030 u->last_smoother_update = 0;
1031
1032 u->first = TRUE;
1033 u->since_start = 0;
1034
1035 pa_log_info("Resumed successfully...");
1036
1037 return 0;
1038
1039 fail:
1040 if (u->pcm_handle) {
1041 snd_pcm_close(u->pcm_handle);
1042 u->pcm_handle = NULL;
1043 }
1044
1045 return -PA_ERR_IO;
1046 }
1047
1048 /* Called from IO context */
1049 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1050 struct userdata *u = PA_SINK(o)->userdata;
1051
1052 switch (code) {
1053
1054 case PA_SINK_MESSAGE_GET_LATENCY: {
1055 pa_usec_t r = 0;
1056
1057 if (u->pcm_handle)
1058 r = sink_get_latency(u);
1059
1060 *((pa_usec_t*) data) = r;
1061
1062 return 0;
1063 }
1064
1065 case PA_SINK_MESSAGE_SET_STATE:
1066
1067 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1068
1069 case PA_SINK_SUSPENDED: {
1070 int r;
1071
1072 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1073
1074 if ((r = suspend(u)) < 0)
1075 return r;
1076
1077 break;
1078 }
1079
1080 case PA_SINK_IDLE:
1081 case PA_SINK_RUNNING: {
1082 int r;
1083
1084 if (u->sink->thread_info.state == PA_SINK_INIT) {
1085 if (build_pollfd(u) < 0)
1086 return -PA_ERR_IO;
1087 }
1088
1089 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1090 if ((r = unsuspend(u)) < 0)
1091 return r;
1092 }
1093
1094 break;
1095 }
1096
1097 case PA_SINK_UNLINKED:
1098 case PA_SINK_INIT:
1099 case PA_SINK_INVALID_STATE:
1100 ;
1101 }
1102
1103 break;
1104 }
1105
1106 return pa_sink_process_msg(o, code, data, offset, chunk);
1107 }
1108
1109 /* Called from main context */
1110 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1111 pa_sink_state_t old_state;
1112 struct userdata *u;
1113
1114 pa_sink_assert_ref(s);
1115 pa_assert_se(u = s->userdata);
1116
1117 old_state = pa_sink_get_state(u->sink);
1118
1119 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1120 reserve_done(u);
1121 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1122 if (reserve_init(u, u->device_name) < 0)
1123 return -PA_ERR_BUSY;
1124
1125 return 0;
1126 }
1127
1128 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1129 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1130
1131 pa_assert(u);
1132 pa_assert(u->mixer_handle);
1133
1134 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1135 return 0;
1136
1137 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1138 return 0;
1139
1140 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1141 pa_sink_get_volume(u->sink, TRUE);
1142 pa_sink_get_mute(u->sink, TRUE);
1143 }
1144
1145 return 0;
1146 }
1147
1148 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1149 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1150
1151 pa_assert(u);
1152 pa_assert(u->mixer_handle);
1153
1154 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1155 return 0;
1156
1157 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1158 return 0;
1159
1160 if (mask & SND_CTL_EVENT_MASK_VALUE)
1161 pa_sink_update_volume_and_mute(u->sink);
1162
1163 return 0;
1164 }
1165
1166 static void sink_get_volume_cb(pa_sink *s) {
1167 struct userdata *u = s->userdata;
1168 pa_cvolume r;
1169 char t[PA_CVOLUME_SNPRINT_MAX];
1170
1171 pa_assert(u);
1172 pa_assert(u->mixer_path);
1173 pa_assert(u->mixer_handle);
1174
1175 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1176 return;
1177
1178 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1179 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1180
1181 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1182
1183 if (pa_cvolume_equal(&u->hardware_volume, &r))
1184 return;
1185
1186 s->real_volume = u->hardware_volume = r;
1187
1188 /* Hmm, so the hardware volume changed, let's reset our software volume */
1189 if (u->mixer_path->has_dB)
1190 pa_sink_set_soft_volume(s, NULL);
1191 }
1192
1193 static void sink_set_volume_cb(pa_sink *s) {
1194 struct userdata *u = s->userdata;
1195 pa_cvolume r;
1196 char t[PA_CVOLUME_SNPRINT_MAX];
1197 pa_bool_t write_to_hw = (s->flags & PA_SINK_SYNC_VOLUME) ? FALSE : TRUE;
1198
1199 pa_assert(u);
1200 pa_assert(u->mixer_path);
1201 pa_assert(u->mixer_handle);
1202
1203 /* Shift up by the base volume */
1204 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1205
1206 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, write_to_hw) < 0)
1207 return;
1208
1209 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1210 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1211
1212 u->hardware_volume = r;
1213
1214 if (u->mixer_path->has_dB) {
1215 pa_cvolume new_soft_volume;
1216 pa_bool_t accurate_enough;
1217
1218 /* Match exactly what the user requested by software */
1219 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1220
1221 /* If the adjustment to do in software is only minimal we
1222 * can skip it. That saves us CPU at the expense of a bit of
1223 * accuracy */
1224 accurate_enough =
1225 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1226 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1227
1228 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
1229 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1230 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1231 pa_yes_no(accurate_enough));
1232
1233 if (!accurate_enough)
1234 s->soft_volume = new_soft_volume;
1235
1236 } else {
1237 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1238
1239 /* We can't match exactly what the user requested, hence let's
1240 * at least tell the user about it */
1241
1242 s->real_volume = r;
1243 }
1244 }
1245
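/* Called from IO context: with PA_SINK_SYNC_VOLUME set, the actual mixer
 * write is deferred to the IO thread; the core has already stored the
 * volume to apply in s->thread_info.current_hw_volume. */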
1246 static void sink_write_volume_cb(pa_sink *s) {
1247 struct userdata *u = s->userdata;
1248 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1249
1250 pa_assert(u);
1251 pa_assert(u->mixer_path);
1252 pa_assert(u->mixer_handle);
1253 pa_assert(s->flags & PA_SINK_SYNC_VOLUME);
1254
1255 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE) < 0)
1256 pa_log_error("Writing HW volume failed");
1257 else {
1258 pa_cvolume tmp_vol;
1259 pa_bool_t accurate_enough;
1260 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1261 accurate_enough =
1262 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1263 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1264 if (!accurate_enough) {
1265 char t[PA_CVOLUME_SNPRINT_MAX];
1266 pa_log_debug("Written HW volume did not match with the request %s != %s",
1267 pa_cvolume_snprint(t, sizeof(t), &s->thread_info.current_hw_volume),
1268 pa_cvolume_snprint(t, sizeof(t), &hw_vol));
1269 }
1270 }
1271 }
1272
1273 static void sink_get_mute_cb(pa_sink *s) {
1274 struct userdata *u = s->userdata;
1275 pa_bool_t b;
1276
1277 pa_assert(u);
1278 pa_assert(u->mixer_path);
1279 pa_assert(u->mixer_handle);
1280
1281 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1282 return;
1283
1284 s->muted = b;
1285 }
1286
1287 static void sink_set_mute_cb(pa_sink *s) {
1288 struct userdata *u = s->userdata;
1289
1290 pa_assert(u);
1291 pa_assert(u->mixer_path);
1292 pa_assert(u->mixer_handle);
1293
1294 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1295 }
1296
1297 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1298 struct userdata *u = s->userdata;
1299 pa_alsa_port_data *data;
1300
1301 pa_assert(u);
1302 pa_assert(p);
1303 pa_assert(u->mixer_handle);
1304
1305 data = PA_DEVICE_PORT_DATA(p);
1306
1307 pa_assert_se(u->mixer_path = data->path);
1308 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1309
1310 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1311 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1312 s->n_volume_steps = PA_VOLUME_NORM+1;
1313
1314 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1315 } else {
1316 s->base_volume = PA_VOLUME_NORM;
1317 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1318 }
1319
1320 if (data->setting)
1321 pa_alsa_setting_select(data->setting, u->mixer_handle);
1322
1323 if (s->set_mute)
1324 s->set_mute(s);
1325 if (s->set_volume)
1326 s->set_volume(s);
1327
1328 return 0;
1329 }
1330
1331 static void sink_update_requested_latency_cb(pa_sink *s) {
1332 struct userdata *u = s->userdata;
1333 size_t before;
1334 pa_assert(u);
1335 pa_assert(u->use_tsched); /* only when timer scheduling is used
1336 * can we dynamically adjust the
1337 * latency */
1338
1339 if (!u->pcm_handle)
1340 return;
1341
1342 before = u->hwbuf_unused;
1343 update_sw_params(u);
1344
1345 /* Let's check whether we now use only a smaller part of the
1346 buffer than before. If so, we need to make sure that subsequent
1347 rewinds are relative to the new maximum fill level and not to the
1348 current fill level. Thus, let's do a full rewind once, to clear
1349 things up. */
1350
1351 if (u->hwbuf_unused > before) {
1352 pa_log_debug("Requesting rewind due to latency change.");
1353 pa_sink_request_rewind(s, (size_t) -1);
1354 }
1355 }
1356
1357 static int process_rewind(struct userdata *u) {
1358 snd_pcm_sframes_t unused;
1359 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1360 pa_assert(u);
1361
1362 /* Figure out how much we shall rewind and reset the counter */
1363 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1364
1365 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1366
1367 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1368 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1369 return -1;
1370 }
1371
1372 unused_nbytes = (size_t) unused * u->frame_size;
1373
1374 /* make sure the rewind doesn't go too far, as that can cause issues with DMAs */
1375 unused_nbytes += u->rewind_safeguard;
1376
1377 if (u->hwbuf_size > unused_nbytes)
1378 limit_nbytes = u->hwbuf_size - unused_nbytes;
1379 else
1380 limit_nbytes = 0;
1381
1382 if (rewind_nbytes > limit_nbytes)
1383 rewind_nbytes = limit_nbytes;
1384
1385 if (rewind_nbytes > 0) {
1386 snd_pcm_sframes_t in_frames, out_frames;
1387
1388 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1389
1390 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1391 pa_log_debug("before: %lu", (unsigned long) in_frames);
1392 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1393 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1394 if (try_recover(u, "process_rewind", out_frames) < 0)
1395 return -1;
1396 out_frames = 0;
1397 }
1398
1399 pa_log_debug("after: %lu", (unsigned long) out_frames);
1400
1401 rewind_nbytes = (size_t) out_frames * u->frame_size;
1402
1403 if (rewind_nbytes <= 0)
1404 pa_log_info("Tried rewind, but was apparently not possible.");
1405 else {
1406 u->write_count -= rewind_nbytes;
1407 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1408 pa_sink_process_rewind(u->sink, rewind_nbytes);
1409
1410 u->after_rewind = TRUE;
1411 return 0;
1412 }
1413 } else
1414 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1415
1416 pa_sink_process_rewind(u->sink, 0);
1417 return 0;
1418 }
1419
1420 static void thread_func(void *userdata) {
1421 struct userdata *u = userdata;
1422 unsigned short revents = 0;
1423
1424 pa_assert(u);
1425
1426 pa_log_debug("Thread starting up");
1427
1428 if (u->core->realtime_scheduling)
1429 pa_make_realtime(u->core->realtime_priority);
1430
1431 pa_thread_mq_install(&u->thread_mq);
1432
1433 for (;;) {
1434 int ret;
1435 pa_usec_t rtpoll_sleep = 0;
1436
1437 #ifdef DEBUG_TIMING
1438 pa_log_debug("Loop");
1439 #endif
1440
1441 /* Render some data and write it to the dsp */
1442 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1443 int work_done;
1444 pa_usec_t sleep_usec = 0;
1445 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1446
1447 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1448 if (process_rewind(u) < 0)
1449 goto fail;
1450
1451 if (u->use_mmap)
1452 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1453 else
1454 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1455
1456 if (work_done < 0)
1457 goto fail;
1458
1459 /* pa_log_debug("work_done = %i", work_done); */
1460
1461 if (work_done) {
1462
1463 if (u->first) {
1464 pa_log_info("Starting playback.");
1465 snd_pcm_start(u->pcm_handle);
1466
1467 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1468
1469 u->first = FALSE;
1470 }
1471
1472 update_smoother(u);
1473 }
1474
1475 if (u->use_tsched) {
1476 pa_usec_t cusec;
1477
1478 if (u->since_start <= u->hwbuf_size) {
1479
1480 /* USB devices on ALSA seem to hit a buffer
1481 * underrun during the first iterations much
1482 * quicker than we calculate here, probably due to
1483 * the transport latency. To compensate for that
1484 * we artificially decrease the sleep time until
1485 * we have filled the buffer at least once
1486 * completely. */
1487
1488 if (pa_log_ratelimit())
1489 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1490 sleep_usec /= 2;
1491 }
1492
1493 /* OK, the playback buffer is now full, let's
1494 * calculate when to wake up next */
1495 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1496
1497 /* Convert from the sound card time domain to the
1498 * system time domain */
1499 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1500
1501 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1502
1503 /* We don't trust the conversion, so we wake up at whichever comes first */
1504 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1505 }
1506
1507 u->after_rewind = FALSE;
1508
1509 }
1510
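/* With synchronized volume, pending hw volume changes carry timestamps;
 * make sure we wake up in time to apply the next one on schedule. */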
1511 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1512 pa_usec_t volume_sleep;
1513 pa_sink_volume_change_apply(u->sink, &volume_sleep);
/* Guard against an unset (0) rtpoll_sleep, which would otherwise disable the timer below */
1514 if (volume_sleep > 0)
1515 rtpoll_sleep = rtpoll_sleep > 0 ? PA_MIN(volume_sleep, rtpoll_sleep) : volume_sleep;
1516 }
1517
1518 if (rtpoll_sleep > 0)
1519 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1520 else
1521 pa_rtpoll_set_timer_disabled(u->rtpoll);
1522
1523 /* Hmm, nothing to do. Let's sleep */
1524 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1525 goto fail;
1526
1527 if (u->sink->flags & PA_SINK_SYNC_VOLUME)
1528 pa_sink_volume_change_apply(u->sink, NULL);
1529
1530 if (ret == 0)
1531 goto finish;
1532
1533 /* Tell ALSA about this and process its response */
1534 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1535 struct pollfd *pollfd;
1536 int err;
1537 unsigned n;
1538
1539 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1540
1541 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1542 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1543 goto fail;
1544 }
1545
1546 if (revents & ~POLLOUT) {
1547 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1548 goto fail;
1549
1550 u->first = TRUE;
1551 u->since_start = 0;
1552 } else if (revents && u->use_tsched && pa_log_ratelimit())
1553 pa_log_debug("Wakeup from ALSA!");
1554
1555 } else
1556 revents = 0;
1557 }
1558
1559 fail:
1560 /* If this was not a regular exit from the loop we have to continue
1561 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1562 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1563 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1564
1565 finish:
1566 pa_log_debug("Thread shutting down");
1567 }
1568
1569 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1570 const char *n;
1571 char *t;
1572
1573 pa_assert(data);
1574 pa_assert(ma);
1575 pa_assert(device_name);
1576
1577 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1578 pa_sink_new_data_set_name(data, n);
1579 data->namereg_fail = TRUE;
1580 return;
1581 }
1582
1583 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1584 data->namereg_fail = TRUE;
1585 else {
1586 n = device_id ? device_id : device_name;
1587 data->namereg_fail = FALSE;
1588 }
1589
1590 if (mapping)
1591 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1592 else
1593 t = pa_sprintf_malloc("alsa_output.%s", n);
1594
1595 pa_sink_new_data_set_name(data, t);
1596 pa_xfree(t);
1597 }
1598
1599 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1600
1601 if (!mapping && !element)
1602 return;
1603
1604 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1605 pa_log_info("Failed to find a working mixer device.");
1606 return;
1607 }
1608
1609 if (element) {
1610
1611 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1612 goto fail;
1613
1614 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1615 goto fail;
1616
1617 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1618 pa_alsa_path_dump(u->mixer_path);
1619 } else {
1620
1621 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1622 goto fail;
1623
1624 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1625
1626 pa_log_debug("Probed mixer paths:");
1627 pa_alsa_path_set_dump(u->mixer_path_set);
1628 }
1629
1630 return;
1631
1632 fail:
1633
1634 if (u->mixer_path_set) {
1635 pa_alsa_path_set_free(u->mixer_path_set);
1636 u->mixer_path_set = NULL;
1637 } else if (u->mixer_path) {
1638 pa_alsa_path_free(u->mixer_path);
1639 u->mixer_path = NULL;
1640 }
1641
1642 if (u->mixer_handle) {
1643 snd_mixer_close(u->mixer_handle);
1644 u->mixer_handle = NULL;
1645 }
1646 }
1647
1648 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB, pa_bool_t sync_volume) {
1649 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1650
1651 pa_assert(u);
1652
1653 if (!u->mixer_handle)
1654 return 0;
1655
1656 if (u->sink->active_port) {
1657 pa_alsa_port_data *data;
1658
1659 /* We have a list of supported paths, so let's activate the
1660 * one that has been chosen as active */
1661
1662 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1663 u->mixer_path = data->path;
1664
1665 pa_alsa_path_select(data->path, u->mixer_handle);
1666
1667 if (data->setting)
1668 pa_alsa_setting_select(data->setting, u->mixer_handle);
1669
1670 } else {
1671
1672 if (!u->mixer_path && u->mixer_path_set)
1673 u->mixer_path = u->mixer_path_set->paths;
1674
1675 if (u->mixer_path) {
1676 /* Hmm, we have only a single path, then let's activate it */
1677
1678 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1679
1680 if (u->mixer_path->settings)
1681 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1682 } else
1683 return 0;
1684 }
1685
1686 /* FIXME: need automatic detection rather than hard-coded path */
1687 if (!strcmp(u->mixer_path->name, "iec958-passthrough-output")) {
1688 u->sink->flags |= PA_SINK_PASSTHROUGH;
1689 } else {
1690 u->sink->flags &= ~PA_SINK_PASSTHROUGH;
1691 }
1692
1693 if (!u->mixer_path->has_volume)
1694 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1695 else {
1696
1697 if (u->mixer_path->has_dB) {
1698 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1699
1700 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1701 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1702
1703 if (u->mixer_path->max_dB > 0.0)
1704 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1705 else
1706 pa_log_info("No particular base volume set, fixing to 0 dB");
1707
1708 } else {
1709 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1710 u->sink->base_volume = PA_VOLUME_NORM;
1711 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1712 }
1713
1714 u->sink->get_volume = sink_get_volume_cb;
1715 u->sink->set_volume = sink_set_volume_cb;
1716 u->sink->write_volume = sink_write_volume_cb;
1717
1718 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL;
1719 if (u->mixer_path->has_dB) {
1720 u->sink->flags |= PA_SINK_DECIBEL_VOLUME;
1721 if (sync_volume) {
1722 u->sink->flags |= PA_SINK_SYNC_VOLUME;
1723 pa_log_info("Successfully enabled synchronous volume.");
1724 }
1725 }
1726
1727 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1728 }
1729
1730 if (!u->mixer_path->has_mute) {
1731 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1732 } else {
1733 u->sink->get_mute = sink_get_mute_cb;
1734 u->sink->set_mute = sink_set_mute_cb;
1735 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1736 pa_log_info("Using hardware mute control.");
1737 }
1738
1739 if (sync_volume) {
1740 u->mixer_pd = pa_alsa_mixer_pdata_new();
1741 mixer_callback = io_mixer_callback;
1742
1743 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1744 pa_log("Failed to initialize file descriptor monitoring");
1745 return -1;
1746 }
1747
1748 } else {
1749 u->mixer_fdl = pa_alsa_fdlist_new();
1750 mixer_callback = ctl_mixer_callback;
1751
1752 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1753 pa_log("Failed to initialize file descriptor monitoring");
1754 return -1;
1755 }
1756 }
1757
1758 if (u->mixer_path_set)
1759 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1760 else
1761 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1762
1763 return 0;
1764 }
1765
1766 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1767
1768 struct userdata *u = NULL;
1769 const char *dev_id = NULL;
1770 pa_sample_spec ss, requested_ss;
1771 pa_channel_map map;
1772 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1773 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1774 size_t frame_size;
1775 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
1776 pa_sink_new_data data;
1777 pa_alsa_profile_set *profile_set = NULL;
1778
1779 pa_assert(m);
1780 pa_assert(ma);
1781
1782 ss = m->core->default_sample_spec;
1783 map = m->core->default_channel_map;
1784 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1785 pa_log("Failed to parse sample specification and channel map");
1786 goto fail;
1787 }
1788
1789 requested_ss = ss;
1790 frame_size = pa_frame_size(&ss);
1791
1792 nfrags = m->core->default_n_fragments;
1793 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1794 if (frag_size <= 0)
1795 frag_size = (uint32_t) frame_size;
1796 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1797 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1798
1799 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1800 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1801 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1802 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1803 pa_log("Failed to parse buffer metrics");
1804 goto fail;
1805 }
1806
1807 buffer_size = nfrags * frag_size;
1808
1809 period_frames = frag_size/frame_size;
1810 buffer_frames = buffer_size/frame_size;
1811 tsched_frames = tsched_size/frame_size;
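/* Rough example, assuming the stock daemon defaults of 4 fragments of
 * 25 ms each: at 44.1 kHz S16LE stereo that is 4410 bytes per fragment
 * and a 17640 byte (~100 ms) buffer. */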
1812
1813 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1814 pa_log("Failed to parse mmap argument.");
1815 goto fail;
1816 }
1817
1818 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1819 pa_log("Failed to parse tsched argument.");
1820 goto fail;
1821 }
1822
1823 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1824 pa_log("Failed to parse ignore_dB argument.");
1825 goto fail;
1826 }
1827
1828 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
1829 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
1830 pa_log("Failed to parse rewind_safeguard argument");
1831 goto fail;
1832 }
1833
1834 if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
1835 pa_log("Failed to parse sync_volume argument.");
1836 goto fail;
1837 }
1838
1839 use_tsched = pa_alsa_may_tsched(use_tsched);
1840
1841 u = pa_xnew0(struct userdata, 1);
1842 u->core = m->core;
1843 u->module = m;
1844 u->use_mmap = use_mmap;
1845 u->use_tsched = use_tsched;
1846 u->first = TRUE;
1847 u->rewind_safeguard = rewind_safeguard;
1848 u->rtpoll = pa_rtpoll_new();
1849 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1850
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

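    /* Set up device reservation and a reservation monitor for the chosen
     * device. */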
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

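    /* Open the PCM device. Three cases: an explicit mapping was passed
     * in, a device_id= was given and the mapping is auto-detected, or a
     * plain device string is used. On return, b and d tell us whether
     * mmap and timer-based scheduling were actually accepted. */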
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

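    /* Fill in the sink creation data: driver, card, name, sample spec
     * and the properties exported to clients. */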
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse boolean argument namereg_fail.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse sync_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse sync_volume_extra_delay parameter");
        goto fail;
    }

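    /* Wire up the sink callbacks; the requested-latency callback is only
     * meaningful with timer-based scheduling. */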
    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

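    /* Record the buffer geometry that ALSA actually granted us. */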
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

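    /* In timer-based scheduling mode, translate the watermark (specified
     * relative to the originally requested sample spec) into the spec we
     * actually got, and derive the adaptive watermark steps and
     * thresholds from the constants at the top of this file. */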
    if (u->use_tsched) {
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

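    /* Update the reservation, apply the software parameters and attach
     * the mixer before the IO thread is started. */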
    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB, sync_volume) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
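    /* If an initial volume/mute was configured, push it to the hardware;
     * otherwise read the current hardware state into the sink. */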
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

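    /* All set up; publish the sink to clients. */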
    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

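/* Tear down everything allocated in pa_alsa_sink_new(): shut down the IO
 * thread first so nothing touches the resources while they are freed,
 * then release the ALSA and mixer handles, the smoother and the device
 * reservation. */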
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}