pulseaudio / src / modules / module-alsa-sink.c
commit: add some code to make invalid valgrind warnings go away
/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/util.h>
#include <pulse/timeval.h>

#include <pulsecore/core.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/core-error.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/rtclock.h>
#include <pulsecore/time-smoother.h>

#include "alsa-util.h"
#include "module-alsa-sink-symdef.h"

PA_MODULE_AUTHOR("Lennart Poettering");
PA_MODULE_DESCRIPTION("ALSA Sink");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE);
PA_MODULE_USAGE(
        "sink_name=<name for the sink> "
        "device=<ALSA device> "
        "device_id=<ALSA card index> "
        "format=<sample format> "
        "rate=<sample rate> "
        "channels=<number of channels> "
        "channel_map=<channel map> "
        "fragments=<number of fragments> "
        "fragment_size=<fragment size> "
        "mmap=<enable memory mapping?> "
        "tsched=<enable system timer based scheduling mode?> "
        "tsched_buffer_size=<buffer size when using timer based scheduling> "
        "tsched_buffer_watermark=<lower fill watermark>");

static const char* const valid_modargs[] = {
    "sink_name",
    "device",
    "device_id",
    "format",
    "rate",
    "channels",
    "channel_map",
    "fragments",
    "fragment_size",
    "mmap",
    "tsched",
    "tsched_buffer_size",
    "tsched_buffer_watermark",
    NULL
};

#define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)       /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)  /* 20ms */
#define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC)           /* 3ms */
#define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC)          /* 3ms */

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;
    long hw_dB_max, hw_dB_min;
    pa_bool_t hw_dB_supported;
    pa_bool_t mixer_separate_channels;
    pa_cvolume hardware_volume;

    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
    unsigned nfragments;
    pa_memchunk memchunk;

    char *device_name;

    pa_bool_t use_mmap, use_tsched;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    int64_t frame_index;
    uint64_t since_start;

    snd_pcm_sframes_t hwbuf_unused_frames;
};

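/* Clamp the timer-based scheduling watermark so that it always lies
 * inside the usable part of the hardware buffer: at least min_wakeup
 * and at most max_use - min_sleep. */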
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    size_t min_sleep, min_wakeup;
    pa_assert(u);

    max_use = u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size;

    min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);

    if (min_sleep > max_use/2)
        min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
    if (min_sleep < u->frame_size)
        min_sleep = u->frame_size;

    if (min_wakeup > max_use/2)
        min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
    if (min_wakeup < u->frame_size)
        min_wakeup = u->frame_size;

    if (u->tsched_watermark > max_use-min_sleep)
        u->tsched_watermark = max_use-min_sleep;

    if (u->tsched_watermark < min_wakeup)
        u->tsched_watermark = min_wakeup;
}

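/* Split the currently requested sink latency into the time we may
 * sleep (sleep_usec) and the time we must reserve for processing
 * before the watermark is hit (process_usec). */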
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (usec >= wm) {
        *sleep_usec = usec - wm;
        *process_usec = wm;
    } else
        *process_usec = *sleep_usec = usec / 2;

    /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
}

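/* Try to recover the PCM from an error state (e.g. an underrun,
 * -EPIPE) via snd_pcm_recover(). Returns 0 on success, -1 if
 * recovery failed. */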
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, snd_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
        u->first = TRUE;
        u->since_start = 0;
        return 0;
    }

    pa_log("%s: %s", call, snd_strerror(err));
    return -1;
}

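/* Given the number of available frames n, compute how many bytes are
 * still queued for playback. If nothing is left we hit an underrun;
 * in timer-based scheduling mode double the wakeup watermark in
 * response. */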
static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
    size_t left_to_play;

    if (n*u->frame_size < u->hwbuf_size)
        left_to_play = u->hwbuf_size - (n*u->frame_size);
    else
        left_to_play = 0;

    if (left_to_play > 0) {
        /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
    } else if (!u->first && !u->after_rewind) {
        pa_log_info("Underrun!");

        if (u->use_tsched) {
            size_t old_watermark = u->tsched_watermark;

            u->tsched_watermark *= 2;
            fix_tsched_watermark(u);

            if (old_watermark != u->tsched_watermark)
                pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                              (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        }
    }

    return left_to_play;
}

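/* Fill the hardware buffer using ALSA's mmap interface: render sink
 * data directly into the DMA area via snd_pcm_mmap_begin()/commit().
 * Returns 1 if any data was written, 0 if not, negative on error. */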
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
            break;

        n -= u->hwbuf_unused_frames;

        /* pa_log_debug("Filling up"); */

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);

            /* FIXME: Maybe we can do something to keep this memory block
             * a little bit longer around? */
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", err)) == 0)
                    continue;

                return r;
            }

            work_done = 1;

            u->frame_index += frames;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= (snd_pcm_uframes_t) n)
                break;

            n -= frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}

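/* Same as mmap_write(), but for the plain read/write transfer mode:
 * render into an intermediate memchunk and hand it to
 * snd_pcm_writei(). Returns 1 if any data was written, 0 if not,
 * negative on error. */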
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
            break;

        n -= u->hwbuf_unused_frames;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n * u->frame_size, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = u->memchunk.length / u->frame_size;

            if (frames > n)
                frames = n;

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                /* On failure the error code is in frames, not in n */
                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            u->memchunk.index += frames * u->frame_size;
            u->memchunk.length -= frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = 1;

            u->frame_index += frames;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= n)
                break;

            n -= frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}

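/* Feed the current playback position (frames written minus the frames
 * still queued in the hardware) into the time smoother so that
 * latency queries can interpolate between actual measurements. */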
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /*     pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /*     return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}

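/* Estimate the current sink latency from the smoother: the difference
 * between the frames we have rendered and the smoothed playback
 * position, plus whatever is still sitting in our staging memchunk. */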
static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r = 0;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_usec();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;

    if (delay > 0)
        r = (pa_usec_t) delay;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend */
    snd_pcm_drain(u->pcm_handle);
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}

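/* Recompute how much of the hardware buffer we leave unused in order
 * to honour the currently requested latency, and push the matching
 * avail_min value down into ALSA's software parameters. */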
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused_frames = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused_frames =
                PA_LIKELY(b < u->hwbuf_size) ?
                ((u->hwbuf_size - b) / u->frame_size) : 0;

            fix_tsched_watermark(u);
        }
    }

    pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);

    /* We need at least one frame in the used part of the buffer */
    avail_min = u->hwbuf_unused_frames + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request(u->sink, u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size);

    return 0;
}

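/* Reopen and reconfigure the PCM device after a suspend. Resume only
 * succeeds if the original access mode, sample spec and fragment
 * settings can be restored exactly. */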
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}

static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_separate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                /* Mark the value written by alsa-lib as defined, to
                 * make invalid valgrind warnings go away */
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
            }
        }

    } else {
        long alsa_vol;

        pa_assert(u->hw_dB_supported);

        if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
            goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
        VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

        pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        u->hardware_volume = s->volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */

            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}

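/* Write the requested volume to the hardware mixer, then read back
 * what the hardware actually granted. With a dB scale the remaining
 * difference is applied as software volume on top. */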
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_separate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->volume.values[i];

            if (u->hw_dB_supported) {

                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
            } else {

                alsa_vol = (long) round(((double) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        pa_assert(u->hw_dB_supported);

        vol = pa_cvolume_max(&s->volume);

        alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

        if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
            goto fail;

        if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
            goto fail;

        pa_cvolume_set(&r, s->volume.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */

        pa_alsa_volume_divide(&r, &s->volume);
        pa_sink_set_soft_volume(s, &r);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->volume = r;

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}

static int sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err, sw;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
        pa_log_error("Unable to get switch: %s", snd_strerror(err));
        return -1;
    }

    s->muted = !sw;

    return 0;
}

static int sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
        pa_log_error("Unable to set switch: %s", snd_strerror(err));
        return -1;
    }

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    snd_pcm_sframes_t before;
    pa_assert(u);

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused_frames;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to
       the current fill level. Thus, let's do a full rewind once, to
       clear things up. */

    if (u->hwbuf_unused_frames > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

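/* Execute a rewind requested by the sink: take back as much of the
 * already-written audio as the hardware allows (keeping the watermark
 * untouched) via snd_pcm_rewind(), then tell the core how much we
 * actually managed to rewind. */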
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    if (rewind_nbytes <= 0)
        goto finish;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused));
        return -1;
    }

    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) rewind_nbytes / u->frame_size;
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->frame_index -= out_frames;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

finish:

    pa_sink_process_rewind(u->sink, 0);

    return 0;
}

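/* The real-time I/O thread: renders data into the hardware buffer,
 * processes rewind requests, programs the wakeup timer in tsched mode
 * and dispatches messages from the main thread. */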
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec);
            else
                work_done = unix_write(u, &sleep_usec);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    /* pa_log_debug("Cutting sleep time for the initial iterations by half."); */
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

int pa__init(pa_module*m) {

    pa_modargs *ma = NULL;
    struct userdata *u = NULL;
    const char *dev_id;
    pa_sample_spec ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    snd_pcm_info_t *pcm_info = NULL;
    int err;
    const char *name;
    char *name_buf = NULL;
    pa_bool_t namereg_fail;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d;
    pa_usec_t usec;
    pa_sink_new_data data;

    snd_pcm_info_alloca(&pcm_info);

    pa_assert(m);

    pa_alsa_redirect_errors_inc();

    if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
        pa_log("Failed to parse module arguments");
        goto fail;
    }

    ss = m->core->default_sample_spec;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = frame_size;
    tsched_size = pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    m->userdata = u;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->since_start = 0;
    u->after_rewind = FALSE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
    u->alsa_rtpoll_item = NULL;

    u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
    usec = pa_rtclock_usec();
    pa_smoother_set_time_offset(u->smoother, usec);
    pa_smoother_pause(u->smoother, usec);

    snd_config_update_free_global();

    b = use_mmap;
    d = use_tsched;

    if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(u->pcm_handle = pa_alsa_open_by_device_id(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d)))
            goto fail;

    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
        pa_log("Error fetching PCM info: %s", snd_strerror(err));
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
        pa_log_warn("Error opening mixer: %s", snd_strerror(err));
    else {
        pa_bool_t found = FALSE;

        if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
            found = TRUE;
        else {
            snd_pcm_info_t *info;

            snd_pcm_info_alloca(&info);

            if (snd_pcm_info(u->pcm_handle, info) >= 0) {
                char *md;
                int card;

                if ((card = snd_pcm_info_get_card(info)) >= 0) {

                    md = pa_sprintf_malloc("hw:%i", card);

                    if (strcmp(u->device_name, md))
                        if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
                            found = TRUE;
                    pa_xfree(md);
                }
            }
        }

        if (found)
            if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
                found = FALSE;

        if (!found) {
            snd_mixer_close(u->mixer_handle);
            u->mixer_handle = NULL;
        }
    }

    if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
        namereg_fail = TRUE;
    else {
        name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
        namereg_fail = FALSE;
    }

    pa_sink_new_data_init(&data);
    data.driver = __FILE__;
    data.module = m;
    pa_sink_new_data_set_name(&data, name);
    data.namereg_fail = namereg_fail;
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist(data.proplist, pcm_info);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
    pa_sink_new_data_done(&data);
    pa_xfree(name_buf);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = period_frames * frame_size;
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    u->hwbuf_unused_frames = 0;
    u->tsched_watermark = tsched_watermark;
    u->frame_index = 0;
    u->hw_dB_supported = FALSE;
    u->hw_dB_min = u->hw_dB_max = 0;
    u->hw_volume_min = u->hw_volume_max = 0;
    u->mixer_separate_channels = FALSE;
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    if (use_tsched)
        fix_tsched_watermark(u);

    u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
    u->sink->thread_info.max_request = u->hwbuf_size;

    pa_sink_set_latency_range(u->sink,
                              !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
                              pa_bytes_to_usec(u->hwbuf_size, &ss));

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (use_tsched)
        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);

    if (update_sw_params(u) < 0)
        goto fail;

    pa_memchunk_reset(&u->memchunk);

    if (u->mixer_handle) {
        pa_assert(u->mixer_elem);

        if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
            pa_bool_t suitable = TRUE;

            if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0) {
                pa_log_info("Failed to get volume range. Falling back to software volume control.");
                suitable = FALSE;
            } else {
                pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
                pa_assert(u->hw_volume_min < u->hw_volume_max);
            }

            if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                /* Mark the values written by alsa-lib as defined, to
                 * make invalid valgrind warnings go away */
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u->hw_dB_min/100.0, u->hw_dB_max/100.0);
                pa_assert(u->hw_dB_min < u->hw_dB_max);
                u->hw_dB_supported = TRUE;
            }

            if (suitable &&
                !u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }

            if (suitable) {
                u->mixer_separate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0;

                u->sink->get_volume = sink_get_volume_cb;
                u->sink->set_volume = sink_set_volume_cb;
                u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
                pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            } else
                pa_log_info("Using software volume control.");
        }

        if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
            u->sink->get_mute = sink_get_mute_cb;
            u->sink->set_mute = sink_set_mute_cb;
            u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        } else
            pa_log_info("Using software mute control.");

        u->mixer_fdl = pa_alsa_fdlist_new();

        if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
            pa_log("Failed to initialize file descriptor monitoring");
            goto fail;
        }

        snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
        snd_mixer_elem_set_callback_private(u->mixer_elem, u);
    } else
        u->mixer_fdl = NULL;

    pa_alsa_dump(u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    pa_modargs_free(ma);

    return 0;

fail:

    if (ma)
        pa_modargs_free(ma);

    pa__done(m);

    return -1;
}

void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    if (!(u = m->userdata)) {
        pa_alsa_redirect_errors_dec();
        return;
    }

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);

    snd_config_update_free_global();

    pa_alsa_redirect_errors_dec();
}