]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
alsa: Set the rewind safeguard proportionally to sample spec
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this theshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this theshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76  * will increase the watermark only if we hit a real underrun. */
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
80
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother windows size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
86
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
88
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
91
/* Per-sink instance state, shared between the main thread and the IO
 * thread (fields below are generally touched from the IO thread once
 * the sink is running). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread, its message queue and its poll loop */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle; /* NULL while the device is suspended */

    /* Mixer (hardware volume/mute) plumbing */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    /* All of the following sizes are in bytes */
    size_t
        frame_size,               /* bytes per frame of the configured sample spec */
        fragment_size,            /* ALSA period size */
        hwbuf_size,               /* total hardware buffer size */
        tsched_watermark,         /* refill when this much (or less) is left */
        hwbuf_unused,             /* part of the hw buffer we deliberately leave empty */
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;         /* never rewind closer than this to the read pointer */

    /* Earliest time at which the watermark may be decreased again; 0 means
     * no verification window is armed. */
    pa_usec_t watermark_dec_not_before;

    pa_memchunk memchunk;         /* partially-written chunk (non-mmap write path) */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    /* first: no data written yet since start/resume/recover;
     * after_rewind: we just processed a rewind, buffer level is expected low */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Playback-position smoother state */
    pa_smoother *smoother;
    uint64_t write_count;         /* total bytes written since last (un)suspend */
    uint64_t since_start;
    pa_usec_t smoother_interval;  /* grows exponentially up to SMOOTHER_MAX_INTERVAL */
    pa_usec_t last_smoother_update;

    /* Device reservation (reserve-device D-Bus protocol) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
150
151 static void userdata_free(struct userdata *u);
152
153 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
154 pa_assert(r);
155 pa_assert(u);
156
157 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
158 return PA_HOOK_CANCEL;
159
160 return PA_HOOK_OK;
161 }
162
163 static void reserve_done(struct userdata *u) {
164 pa_assert(u);
165
166 if (u->reserve_slot) {
167 pa_hook_slot_free(u->reserve_slot);
168 u->reserve_slot = NULL;
169 }
170
171 if (u->reserve) {
172 pa_reserve_wrapper_unref(u->reserve);
173 u->reserve = NULL;
174 }
175 }
176
177 static void reserve_update(struct userdata *u) {
178 const char *description;
179 pa_assert(u);
180
181 if (!u->sink || !u->reserve)
182 return;
183
184 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
185 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
186 }
187
188 static int reserve_init(struct userdata *u, const char *dname) {
189 char *rname;
190
191 pa_assert(u);
192 pa_assert(dname);
193
194 if (u->reserve)
195 return 0;
196
197 if (pa_in_system_mode())
198 return 0;
199
200 if (!(rname = pa_alsa_get_reserve_name(dname)))
201 return 0;
202
203 /* We are resuming, try to lock the device */
204 u->reserve = pa_reserve_wrapper_get(u->core, rname);
205 pa_xfree(rname);
206
207 if (!(u->reserve))
208 return -1;
209
210 reserve_update(u);
211
212 pa_assert(!u->reserve_slot);
213 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
214
215 return 0;
216 }
217
218 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
219 pa_bool_t b;
220
221 pa_assert(w);
222 pa_assert(u);
223
224 b = PA_PTR_TO_UINT(busy) && !u->reserve;
225
226 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
227 return PA_HOOK_OK;
228 }
229
230 static void monitor_done(struct userdata *u) {
231 pa_assert(u);
232
233 if (u->monitor_slot) {
234 pa_hook_slot_free(u->monitor_slot);
235 u->monitor_slot = NULL;
236 }
237
238 if (u->monitor) {
239 pa_reserve_monitor_wrapper_unref(u->monitor);
240 u->monitor = NULL;
241 }
242 }
243
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
245 char *rname;
246
247 pa_assert(u);
248 pa_assert(dname);
249
250 if (pa_in_system_mode())
251 return 0;
252
253 if (!(rname = pa_alsa_get_reserve_name(dname)))
254 return 0;
255
256 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
257 pa_xfree(rname);
258
259 if (!(u->monitor))
260 return -1;
261
262 pa_assert(!u->monitor_slot);
263 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
264
265 return 0;
266 }
267
268 static void fix_min_sleep_wakeup(struct userdata *u) {
269 size_t max_use, max_use_2;
270
271 pa_assert(u);
272 pa_assert(u->use_tsched);
273
274 max_use = u->hwbuf_size - u->hwbuf_unused;
275 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
276
277 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
278 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
279
280 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
281 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
282 }
283
284 static void fix_tsched_watermark(struct userdata *u) {
285 size_t max_use;
286 pa_assert(u);
287 pa_assert(u->use_tsched);
288
289 max_use = u->hwbuf_size - u->hwbuf_unused;
290
291 if (u->tsched_watermark > max_use - u->min_sleep)
292 u->tsched_watermark = max_use - u->min_sleep;
293
294 if (u->tsched_watermark < u->min_wakeup)
295 u->tsched_watermark = u->min_wakeup;
296 }
297
298 static void increase_watermark(struct userdata *u) {
299 size_t old_watermark;
300 pa_usec_t old_min_latency, new_min_latency;
301
302 pa_assert(u);
303 pa_assert(u->use_tsched);
304
305 /* First, just try to increase the watermark */
306 old_watermark = u->tsched_watermark;
307 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
308 fix_tsched_watermark(u);
309
310 if (old_watermark != u->tsched_watermark) {
311 pa_log_info("Increasing wakeup watermark to %0.2f ms",
312 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
313 return;
314 }
315
316 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
317 old_min_latency = u->sink->thread_info.min_latency;
318 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
319 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
320
321 if (old_min_latency != new_min_latency) {
322 pa_log_info("Increasing minimal latency to %0.2f ms",
323 (double) new_min_latency / PA_USEC_PER_MSEC);
324
325 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
326 }
327
328 /* When we reach this we're officialy fucked! */
329 }
330
331 static void decrease_watermark(struct userdata *u) {
332 size_t old_watermark;
333 pa_usec_t now;
334
335 pa_assert(u);
336 pa_assert(u->use_tsched);
337
338 now = pa_rtclock_now();
339
340 if (u->watermark_dec_not_before <= 0)
341 goto restart;
342
343 if (u->watermark_dec_not_before > now)
344 return;
345
346 old_watermark = u->tsched_watermark;
347
348 if (u->tsched_watermark < u->watermark_dec_step)
349 u->tsched_watermark = u->tsched_watermark / 2;
350 else
351 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
352
353 fix_tsched_watermark(u);
354
355 if (old_watermark != u->tsched_watermark)
356 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
357 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
358
359 /* We don't change the latency range*/
360
361 restart:
362 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
363 }
364
365 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
366 pa_usec_t usec, wm;
367
368 pa_assert(sleep_usec);
369 pa_assert(process_usec);
370
371 pa_assert(u);
372 pa_assert(u->use_tsched);
373
374 usec = pa_sink_get_requested_latency_within_thread(u->sink);
375
376 if (usec == (pa_usec_t) -1)
377 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
378
379 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
380
381 if (wm > usec)
382 wm = usec/2;
383
384 *sleep_usec = usec - wm;
385 *process_usec = wm;
386
387 #ifdef DEBUG_TIMING
388 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
389 (unsigned long) (usec / PA_USEC_PER_MSEC),
390 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
391 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
392 #endif
393 }
394
395 static int try_recover(struct userdata *u, const char *call, int err) {
396 pa_assert(u);
397 pa_assert(call);
398 pa_assert(err < 0);
399
400 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
401
402 pa_assert(err != -EAGAIN);
403
404 if (err == -EPIPE)
405 pa_log_debug("%s: Buffer underrun!", call);
406
407 if (err == -ESTRPIPE)
408 pa_log_debug("%s: System suspended!", call);
409
410 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
411 pa_log("%s: %s", call, pa_alsa_strerror(err));
412 return -1;
413 }
414
415 u->first = TRUE;
416 u->since_start = 0;
417 return 0;
418 }
419
/* Given how many bytes the device reports as writable (n_bytes), return
 * how many bytes are still queued for playback, and adapt the tsched
 * watermark: raise it on (near-)underruns, lower it when we stayed
 * comfortably above the decrease threshold. on_timeout says whether this
 * wakeup came from the timer -- only then may we decrease. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after startup or a rewind an underrun is expected, so
         * only log it outside of those phases (rate-limited). */
        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit())
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        /* Any iteration that did not qualify for a decrease re-arms the
         * verification window from scratch. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
477
/* Fill the hardware buffer via the mmap interface. Renders sink data
 * directly into the DMA area. Returns 1 if any work was done (or on the
 * paths that never wrote -- note work_done starts TRUE here, unlike
 * unix_write()), 0 otherwise, negative on unrecoverable error. In tsched
 * mode *sleep_usec is set to how long we may sleep before the next
 * refill. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = TRUE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
            * half the sleep time is over because otherwise we might
            * ask for more data from the clients then they expect. We
            * need to guarantee that clients only have to keep around
            * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Poll said writable but avail disagrees: driver bug, warn once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        /* Safety valve: never loop more than 10 times per call. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                /* -EAGAIN after we already wrote something just means
                 * "done for now", not an error. */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the DMA region in a fixed memblock and render straight into it. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
654
/* Fill the hardware buffer via snd_pcm_writei() (non-mmap path).
 * Partially-written render chunks are kept in u->memchunk across calls.
 * Returns 1 if any data was written, 0 otherwise, negative on
 * unrecoverable error; *sleep_usec as in mmap_write(). */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
            * half the sleep time is over because otherwise we might
            * ask for more data from the clients then they expect. We
            * need to guarantee that clients only have to keep around
            * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Poll said writable but avail disagrees: driver bug, warn once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve: never loop more than 10 times per call. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one was consumed. */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                /* -EAGAIN after we already wrote something just means
                 * "done for now", not an error. */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Advance the chunk past what the device accepted. */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
794
/* Feed the playback-position smoother with a fresh (timestamp, position)
 * sample, rate-limited by the exponentially growing smoother_interval. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver's own timestamp for the delay value, if it has one. */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Position actually played = bytes written minus what is still queued. */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
844
845 static pa_usec_t sink_get_latency(struct userdata *u) {
846 pa_usec_t r;
847 int64_t delay;
848 pa_usec_t now1, now2;
849
850 pa_assert(u);
851
852 now1 = pa_rtclock_now();
853 now2 = pa_smoother_get(u->smoother, now1);
854
855 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
856
857 r = delay >= 0 ? (pa_usec_t) delay : 0;
858
859 if (u->memchunk.memblock)
860 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
861
862 return r;
863 }
864
865 static int build_pollfd(struct userdata *u) {
866 pa_assert(u);
867 pa_assert(u->pcm_handle);
868
869 if (u->alsa_rtpoll_item)
870 pa_rtpoll_item_free(u->alsa_rtpoll_item);
871
872 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
873 return -1;
874
875 return 0;
876 }
877
/* Called from IO context. Close the PCM device, detach it from the poll
 * loop and pause the smoother. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}
907
/* Called from IO context. Recompute hwbuf_unused and avail_min from the
 * currently requested latency and push the resulting software parameters
 * to ALSA; also re-advertise max_request/max_rewind to the sink.
 * Returns 0 on success, a negative ALSA error otherwise. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Leave everything beyond the requested latency unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode we don't want poll() to wake us before the
         * planned sleep time is over, so widen avail_min accordingly. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    /* Rewinding is only reliable on real hardware devices. */
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
969
/* Called from IO context. Reopen the PCM device after a suspend and try
 * to restore the exact previous configuration (access mode, sample spec,
 * period/buffer sizes). Fails (-PA_ERR_IO) if anything cannot be
 * restored, closing the half-opened handle again. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before the suspend. */
    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Start timing from scratch. */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1046
/* Called from IO context. Handle sink messages: report latency and drive
 * suspend/resume on state changes; everything else is forwarded to the
 * generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended (no pcm handle) the latency is reported as 0. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    /* First transition out of INIT: just hook up the pollfds. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Coming back from suspend: reopen the device. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* nothing to do for these states */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
1107
1108 /* Called from main context */
1109 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1110 pa_sink_state_t old_state;
1111 struct userdata *u;
1112
1113 pa_sink_assert_ref(s);
1114 pa_assert_se(u = s->userdata);
1115
1116 old_state = pa_sink_get_state(u->sink);
1117
1118 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1119 reserve_done(u);
1120 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1121 if (reserve_init(u, u->device_name) < 0)
1122 return -PA_ERR_BUSY;
1123
1124 return 0;
1125 }
1126
1127 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1128 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1129
1130 pa_assert(u);
1131 pa_assert(u->mixer_handle);
1132
1133 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1134 return 0;
1135
1136 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1137 return 0;
1138
1139 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1140 pa_sink_get_volume(u->sink, TRUE);
1141 pa_sink_get_mute(u->sink, TRUE);
1142 }
1143
1144 return 0;
1145 }
1146
1147 static void sink_get_volume_cb(pa_sink *s) {
1148 struct userdata *u = s->userdata;
1149 pa_cvolume r;
1150 char t[PA_CVOLUME_SNPRINT_MAX];
1151
1152 pa_assert(u);
1153 pa_assert(u->mixer_path);
1154 pa_assert(u->mixer_handle);
1155
1156 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1157 return;
1158
1159 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1160 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1161
1162 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1163
1164 if (pa_cvolume_equal(&u->hardware_volume, &r))
1165 return;
1166
1167 s->real_volume = u->hardware_volume = r;
1168
1169 /* Hmm, so the hardware volume changed, let's reset our software volume */
1170 if (u->mixer_path->has_dB)
1171 pa_sink_set_soft_volume(s, NULL);
1172 }
1173
/* Write the sink's requested volume to the hardware. With a dB scale we
 * additionally compute a software volume that makes up for the coarse
 * hardware granularity; without one we just report back what the
 * hardware actually accepted. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* r now holds what the hardware actually accepted */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1225
1226 static void sink_get_mute_cb(pa_sink *s) {
1227 struct userdata *u = s->userdata;
1228 pa_bool_t b;
1229
1230 pa_assert(u);
1231 pa_assert(u->mixer_path);
1232 pa_assert(u->mixer_handle);
1233
1234 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1235 return;
1236
1237 s->muted = b;
1238 }
1239
1240 static void sink_set_mute_cb(pa_sink *s) {
1241 struct userdata *u = s->userdata;
1242
1243 pa_assert(u);
1244 pa_assert(u->mixer_path);
1245 pa_assert(u->mixer_handle);
1246
1247 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1248 }
1249
/* Switch the sink to a new port: activate the mixer path behind it,
 * recompute the base volume for the new path's dB range, and re-apply
 * the current mute and volume settings on the fresh path. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    /* Every port carries its mixer path; make it the active one */
    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* Treat the path's max_dB as the 0dB reference point */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
    } else {
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply the current mute/volume on the newly selected path */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1283
1284 static void sink_update_requested_latency_cb(pa_sink *s) {
1285 struct userdata *u = s->userdata;
1286 size_t before;
1287 pa_assert(u);
1288 pa_assert(u->use_tsched); /* only when timer scheduling is used
1289 * we can dynamically adjust the
1290 * latency */
1291
1292 if (!u->pcm_handle)
1293 return;
1294
1295 before = u->hwbuf_unused;
1296 update_sw_params(u);
1297
1298 /* Let's check whether we now use only a smaller part of the
1299 buffer then before. If so, we need to make sure that subsequent
1300 rewinds are relative to the new maximum fill level and not to the
1301 current fill level. Thus, let's do a full rewind once, to clear
1302 things up. */
1303
1304 if (u->hwbuf_unused > before) {
1305 pa_log_debug("Requesting rewind due to latency change.");
1306 pa_sink_request_rewind(s, (size_t) -1);
1307 }
1308 }
1309
1310 static int process_rewind(struct userdata *u) {
1311 snd_pcm_sframes_t unused;
1312 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1313 pa_assert(u);
1314
1315 /* Figure out how much we shall rewind and reset the counter */
1316 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1317
1318 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1319
1320 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1321 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1322 return -1;
1323 }
1324
1325 unused_nbytes = (size_t) unused * u->frame_size;
1326
1327 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1328 unused_nbytes += u->rewind_safeguard;
1329
1330 if (u->hwbuf_size > unused_nbytes)
1331 limit_nbytes = u->hwbuf_size - unused_nbytes;
1332 else
1333 limit_nbytes = 0;
1334
1335 if (rewind_nbytes > limit_nbytes)
1336 rewind_nbytes = limit_nbytes;
1337
1338 if (rewind_nbytes > 0) {
1339 snd_pcm_sframes_t in_frames, out_frames;
1340
1341 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1342
1343 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1344 pa_log_debug("before: %lu", (unsigned long) in_frames);
1345 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1346 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1347 if (try_recover(u, "process_rewind", out_frames) < 0)
1348 return -1;
1349 out_frames = 0;
1350 }
1351
1352 pa_log_debug("after: %lu", (unsigned long) out_frames);
1353
1354 rewind_nbytes = (size_t) out_frames * u->frame_size;
1355
1356 if (rewind_nbytes <= 0)
1357 pa_log_info("Tried rewind, but was apparently not possible.");
1358 else {
1359 u->write_count -= rewind_nbytes;
1360 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1361 pa_sink_process_rewind(u->sink, rewind_nbytes);
1362
1363 u->after_rewind = TRUE;
1364 return 0;
1365 }
1366 } else
1367 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1368
1369 pa_sink_process_rewind(u->sink, 0);
1370 return 0;
1371 }
1372
/* IO thread main loop: render audio into the ALSA buffer, schedule the
 * next wakeup (timer-based or poll-based), and react to ALSA events and
 * errors until asked to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* Handle any pending rewind request before writing more data */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First data written: actually start the PCM stream */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* pa_rtpoll_run() returning 0 means we were asked to quit */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything but POLLOUT signals an error condition; try to
             * recover and restart playback from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1507
1508 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1509 const char *n;
1510 char *t;
1511
1512 pa_assert(data);
1513 pa_assert(ma);
1514 pa_assert(device_name);
1515
1516 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1517 pa_sink_new_data_set_name(data, n);
1518 data->namereg_fail = TRUE;
1519 return;
1520 }
1521
1522 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1523 data->namereg_fail = TRUE;
1524 else {
1525 n = device_id ? device_id : device_name;
1526 data->namereg_fail = FALSE;
1527 }
1528
1529 if (mapping)
1530 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1531 else
1532 t = pa_sprintf_malloc("alsa_output.%s", n);
1533
1534 pa_sink_new_data_set_name(data, t);
1535 pa_xfree(t);
1536 }
1537
/* Locate and probe a mixer for the opened PCM. On success this fills in
 * u->mixer_handle plus either u->mixer_path (explicit control= element)
 * or u->mixer_path_set (paths derived from the mapping). On failure all
 * partially set up mixer state is torn down again and the sink falls
 * back to software volume/mute. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit control element was requested: synthesize a
         * single mixer path for it */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Otherwise build the candidate path set from the mapping */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:

    /* Only one of path_set/path can be set at this point */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1586
/* Activate the chosen mixer path and wire the sink up to hardware
 * volume/mute control where the path supports it; otherwise stay with
 * software control. Also installs the mixer event callback. Returns 0
 * on success (including the no-mixer case), -1 on setup failure. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer found earlier: software volume/mute only */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    /* FIXME: need automatic detection rather than hard-coded path */
    if (!strcmp(u->mixer_path->name, "iec958-passthrough-output")) {
        u->sink->flags |= PA_SINK_PASSTHROUGH;
    } else {
        u->sink->flags &= ~PA_SINK_PASSTHROUGH;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Use max_dB as the 0dB reference point for the base volume */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds from the main loop so we notice external
     * volume/mute changes */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1681
/* Create a new ALSA sink: parse module arguments, open and configure
 * the PCM device (by mapping, device id, or device string), set up the
 * pa_sink object, mixer, latency parameters and the IO thread. Returns
 * the new sink, or NULL on failure (all partial state is freed). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the server defaults, then let module arguments override */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was asked for; ALSA may tweak ss below */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Scale the rewind safeguard with the sample spec, but never below
     * the absolute byte minimum */
    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Grab the device via the reservation protocol before opening it */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    /* Three ways to open the PCM, in order of specificity: explicit
     * mapping, device id with auto-probing, or a plain device string */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* b/d report what the device actually granted us */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse boolean argument namereg_fail.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    /* Fill in the device property list for clients to inspect */
    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    /* Rewinding only works reliably on real hardware devices */
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

    if (u->use_tsched) {
        /* Convert the watermark through the originally requested spec so
         * it stays proportional if ALSA changed the sample spec */
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
2015
/* Free the userdata and everything it owns, in reverse order of
 * construction. Safe to call on partially initialized userdata, as
 * every member is checked before use. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Unlink first, so no new data gets routed to us during teardown */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the IO thread to shut down and wait for it */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Drop any queued samples before closing the PCM */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Only one of path_set/path is owned by us */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
2067
2068 void pa_alsa_sink_free(pa_sink *s) {
2069 struct userdata *u;
2070
2071 pa_sink_assert_ref(s);
2072 pa_assert_se(u = s->userdata);
2073
2074 userdata_free(u);
2075 }