pulseaudio / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
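/* MAX_MIX_CHANNELS caps how many sink inputs are mixed in a single
 * render pass, and MIX_BUFFER_LENGTH is the default render size when
 * the caller passes no explicit length. The latency constants are in
 * microseconds: 500 us absolute minimum, 10 s absolute maximum, and a
 * 250 ms default for sinks without dynamic latency. */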
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_done(pa_sink_new_data *data) {
104 pa_assert(data);
105
106 pa_xfree(data->name);
107 pa_proplist_free(data->proplist);
108 }
109
110 /* Called from main context */
111 static void reset_callbacks(pa_sink *s) {
112 pa_assert(s);
113
114 s->set_state = NULL;
115 s->get_volume = NULL;
116 s->set_volume = NULL;
117 s->get_mute = NULL;
118 s->set_mute = NULL;
119 s->request_rewind = NULL;
120 s->update_requested_latency = NULL;
121 }
122
123 /* Called from main context */
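/* Allocates and initializes a new sink: registers the name, fires the
 * SINK_NEW and SINK_FIXATE hooks (either of which may veto creation),
 * fills in defaults for channel map, volume and mute, and finally
 * creates the ".monitor" source that mirrors what the sink plays back.
 * The sink is returned in PA_SINK_INIT state; the implementor still has
 * to set asyncmsgq/rtpoll and call pa_sink_put(). */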
124 pa_sink* pa_sink_new(
125 pa_core *core,
126 pa_sink_new_data *data,
127 pa_sink_flags_t flags) {
128
129 pa_sink *s;
130 const char *name;
131 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
132 pa_source_new_data source_data;
133 const char *dn;
134 char *pt;
135
136 pa_assert(core);
137 pa_assert(data);
138 pa_assert(data->name);
139
140 s = pa_msgobject_new(pa_sink);
141
142 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
143 pa_xfree(s);
144 return NULL;
145 }
146
147 pa_sink_new_data_set_name(data, name);
148
149 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
150 pa_xfree(s);
151 pa_namereg_unregister(core, name);
152 return NULL;
153 }
154
155 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
156 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
157
158 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
159
160 if (!data->channel_map_is_set)
161 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
162
163 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
164 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
165
166 if (!data->volume_is_set)
167 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
168
169 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
170 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
171
172 if (!data->muted_is_set)
173 data->muted = FALSE;
174
175 if (data->card)
176 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
177
178 pa_device_init_description(data->proplist);
179 pa_device_init_icon(data->proplist, TRUE);
180
181 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
182 pa_xfree(s);
183 pa_namereg_unregister(core, name);
184 return NULL;
185 }
186
187 s->parent.parent.free = sink_free;
188 s->parent.process_msg = pa_sink_process_msg;
189
190 s->core = core;
191 s->state = PA_SINK_INIT;
192 s->flags = flags;
193 s->name = pa_xstrdup(name);
194 s->proplist = pa_proplist_copy(data->proplist);
195 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
196 s->module = data->module;
197 s->card = data->card;
198
199 s->sample_spec = data->sample_spec;
200 s->channel_map = data->channel_map;
201
202 s->inputs = pa_idxset_new(NULL, NULL);
203 s->n_corked = 0;
204
205 s->reference_volume = s->virtual_volume = data->volume;
206 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
207 s->base_volume = PA_VOLUME_NORM;
208 s->n_volume_steps = PA_VOLUME_NORM+1;
209 s->muted = data->muted;
210 s->refresh_volume = s->refresh_muted = FALSE;
211
212 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
213
214 reset_callbacks(s);
215 s->userdata = NULL;
216
217 s->asyncmsgq = NULL;
218 s->rtpoll = NULL;
219
220 pa_silence_memchunk_get(
221 &core->silence_cache,
222 core->mempool,
223 &s->silence,
224 &s->sample_spec,
225 0);
226
227 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
228 s->thread_info.soft_volume = s->soft_volume;
229 s->thread_info.soft_muted = s->muted;
230 s->thread_info.state = s->state;
231 s->thread_info.rewind_nbytes = 0;
232 s->thread_info.rewind_requested = FALSE;
233 s->thread_info.max_rewind = 0;
234 s->thread_info.max_request = 0;
235 s->thread_info.requested_latency_valid = FALSE;
236 s->thread_info.requested_latency = 0;
237 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
238 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
239
240 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
241
242 if (s->card)
243 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
244
245 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
246 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
247 s->index,
248 s->name,
249 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
250 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
251 pt);
252 pa_xfree(pt);
253
254 pa_source_new_data_init(&source_data);
255 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
256 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
257 source_data.name = pa_sprintf_malloc("%s.monitor", name);
258 source_data.driver = data->driver;
259 source_data.module = data->module;
260 source_data.card = data->card;
261
262 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
263 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
264 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
265
266 s->monitor_source = pa_source_new(core, &source_data,
267 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
268 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
269
270 pa_source_new_data_done(&source_data);
271
272 if (!s->monitor_source) {
273 pa_sink_unlink(s);
274 pa_sink_unref(s);
275 return NULL;
276 }
277
278 s->monitor_source->monitor_of = s;
279
280 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
281 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
282
283 return s;
284 }
285
286 /* Called from main context */
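/* Changes the sink state: the implementor's set_state() callback is
 * invoked first, then the new state is sent synchronously to the IO
 * thread; if that message fails the callback is rolled back to the
 * original state. On success the STATE_CHANGED hook and a change event
 * are fired (unless we are entering UNLINKED), and on suspend/resume
 * transitions every sink input is notified, or killed if it set
 * PA_SINK_INPUT_FAIL_ON_SUSPEND. */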
287 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
288 int ret;
289 pa_bool_t suspend_change;
290 pa_sink_state_t original_state;
291
292 pa_assert(s);
293
294 if (s->state == state)
295 return 0;
296
297 original_state = s->state;
298
299 suspend_change =
300 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
301 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
302
303 if (s->set_state)
304 if ((ret = s->set_state(s, state)) < 0)
305 return ret;
306
307 if (s->asyncmsgq)
308 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
309
310 if (s->set_state)
311 s->set_state(s, original_state);
312
313 return ret;
314 }
315
316 s->state = state;
317
318 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
319 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
320 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
321 }
322
323 if (suspend_change) {
324 pa_sink_input *i;
325 uint32_t idx;
326
327 /* We're suspending or resuming, tell everyone about it */
328
329 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
330 if (s->state == PA_SINK_SUSPENDED &&
331 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
332 pa_sink_input_kill(i);
333 else if (i->suspend)
334 i->suspend(i, state == PA_SINK_SUSPENDED);
335
336 if (s->monitor_source)
337 pa_source_sync_suspend(s->monitor_source);
338 }
339
340 return 0;
341 }
342
343 /* Called from main context */
344 void pa_sink_put(pa_sink* s) {
345 pa_sink_assert_ref(s);
346
347 pa_assert(s->state == PA_SINK_INIT);
348
349 /* The following fields must be initialized properly when calling _put() */
350 pa_assert(s->asyncmsgq);
351 pa_assert(s->rtpoll);
352 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
353
354 /* Generally, flags should be initialized via pa_sink_new(). As a
355 * special exception we allow volume related flags to be set
356 * between _new() and _put(). */
357
358 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
359 s->flags |= PA_SINK_DECIBEL_VOLUME;
360
361 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
362 s->flags |= PA_SINK_FLAT_VOLUME;
363
364 s->thread_info.soft_volume = s->soft_volume;
365 s->thread_info.soft_muted = s->muted;
366
367 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
368 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
369 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
370 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
371 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
372
373 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
374 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
375 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
376
377 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
378
379 pa_source_put(s->monitor_source);
380
381 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
382 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
383 }
384
385 /* Called from main context */
386 void pa_sink_unlink(pa_sink* s) {
387 pa_bool_t linked;
388 pa_sink_input *i, *j = NULL;
389
390 pa_assert(s);
391
392 /* Please note that pa_sink_unlink() does more than simply
393 * reversing pa_sink_put(). It also undoes the registrations
394 * already done in pa_sink_new()! */
395
396 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
397 * may be called multiple times on the same sink without bad
398 * effects. */
399
400 linked = PA_SINK_IS_LINKED(s->state);
401
402 if (linked)
403 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
404
405 if (s->state != PA_SINK_UNLINKED)
406 pa_namereg_unregister(s->core, s->name);
407 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
408
409 if (s->card)
410 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
411
412 while ((i = pa_idxset_first(s->inputs, NULL))) {
413 pa_assert(i != j);
414 pa_sink_input_kill(i);
415 j = i;
416 }
417
418 if (linked)
419 sink_set_state(s, PA_SINK_UNLINKED);
420 else
421 s->state = PA_SINK_UNLINKED;
422
423 reset_callbacks(s);
424
425 if (s->monitor_source)
426 pa_source_unlink(s->monitor_source);
427
428 if (linked) {
429 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
430 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
431 }
432 }
433
434 /* Called from main context */
435 static void sink_free(pa_object *o) {
436 pa_sink *s = PA_SINK(o);
437 pa_sink_input *i;
438
439 pa_assert(s);
440 pa_assert(pa_sink_refcnt(s) == 0);
441
442 if (PA_SINK_IS_LINKED(s->state))
443 pa_sink_unlink(s);
444
445 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
446
447 if (s->monitor_source) {
448 pa_source_unref(s->monitor_source);
449 s->monitor_source = NULL;
450 }
451
452 pa_idxset_free(s->inputs, NULL, NULL);
453
454 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
455 pa_sink_input_unref(i);
456
457 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
458
459 if (s->silence.memblock)
460 pa_memblock_unref(s->silence.memblock);
461
462 pa_xfree(s->name);
463 pa_xfree(s->driver);
464
465 if (s->proplist)
466 pa_proplist_free(s->proplist);
467
468 pa_xfree(s);
469 }
470
471 /* Called from main context */
472 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
473 pa_sink_assert_ref(s);
474
475 s->asyncmsgq = q;
476
477 if (s->monitor_source)
478 pa_source_set_asyncmsgq(s->monitor_source, q);
479 }
480
481 /* Called from main context */
482 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
483 pa_sink_assert_ref(s);
484
485 s->rtpoll = p;
486 if (s->monitor_source)
487 pa_source_set_rtpoll(s->monitor_source, p);
488 }
489
490 /* Called from main context */
491 int pa_sink_update_status(pa_sink*s) {
492 pa_sink_assert_ref(s);
493 pa_assert(PA_SINK_IS_LINKED(s->state));
494
495 if (s->state == PA_SINK_SUSPENDED)
496 return 0;
497
498 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
499 }
500
501 /* Called from main context */
502 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend) {
503 pa_sink_assert_ref(s);
504 pa_assert(PA_SINK_IS_LINKED(s->state));
505
506 if (suspend)
507 return sink_set_state(s, PA_SINK_SUSPENDED);
508 else
509 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
510 }
511
512 /* Called from main context */
513 pa_queue *pa_sink_move_all_start(pa_sink *s) {
514 pa_queue *q;
515 pa_sink_input *i, *n;
516 uint32_t idx;
517
518 pa_sink_assert_ref(s);
519 pa_assert(PA_SINK_IS_LINKED(s->state));
520
521 q = pa_queue_new();
522
523 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
524 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
525
526 pa_sink_input_ref(i);
527
528 if (pa_sink_input_start_move(i) >= 0)
529 pa_queue_push(q, i);
530 else
531 pa_sink_input_unref(i);
532 }
533
534 return q;
535 }
536
537 /* Called from main context */
538 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
539 pa_sink_input *i;
540
541 pa_sink_assert_ref(s);
542 pa_assert(PA_SINK_IS_LINKED(s->state));
543 pa_assert(q);
544
545 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
546 if (pa_sink_input_finish_move(i, s, save) < 0)
547 pa_sink_input_kill(i);
548
549 pa_sink_input_unref(i);
550 }
551
552 pa_queue_free(q, NULL, NULL);
553 }
554
555 /* Called from main context */
556 void pa_sink_move_all_fail(pa_queue *q) {
557 pa_sink_input *i;
558 pa_assert(q);
559
560 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
561 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
562 pa_sink_input_kill(i);
563 pa_sink_input_unref(i);
564 }
565 }
566
567 pa_queue_free(q, NULL, NULL);
568 }
569
570 /* Called from IO thread context */
571 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
572 pa_sink_input *i;
573 void *state = NULL;
574 pa_sink_assert_ref(s);
575 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
576
577 /* If nobody requested this and this is actually not a real rewind,
578 * then we can short-cut it */
579 if (!s->thread_info.rewind_requested && nbytes <= 0)
580 return;
581
582 s->thread_info.rewind_nbytes = 0;
583 s->thread_info.rewind_requested = FALSE;
584
585 if (s->thread_info.state == PA_SINK_SUSPENDED)
586 return;
587
588 if (nbytes > 0)
589 pa_log_debug("Processing rewind...");
590
591 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
592 pa_sink_input_assert_ref(i);
593 pa_sink_input_process_rewind(i, nbytes);
594 }
595
596 if (nbytes > 0)
597 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
598 pa_source_process_rewind(s->monitor_source, nbytes);
599 }
600
601 /* Called from IO thread context */
602 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
603 pa_sink_input *i;
604 unsigned n = 0;
605 void *state = NULL;
606 size_t mixlength = *length;
607
608 pa_sink_assert_ref(s);
609 pa_assert(info);
610
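    /* Peek one chunk from every input (up to maxinfo), skip blocks that
     * are pure silence, and shrink *length to the shortest chunk seen so
     * that all entries can be mixed over the same number of bytes.
     * Returns the number of pa_mix_info entries filled in. */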
611 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
612 pa_sink_input_assert_ref(i);
613
614 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
615
616 if (mixlength == 0 || info->chunk.length < mixlength)
617 mixlength = info->chunk.length;
618
619 if (pa_memblock_is_silence(info->chunk.memblock)) {
620 pa_memblock_unref(info->chunk.memblock);
621 continue;
622 }
623
624 info->userdata = pa_sink_input_ref(i);
625
626 pa_assert(info->chunk.memblock);
627 pa_assert(info->chunk.length > 0);
628
629 info++;
630 n++;
631 maxinfo--;
632 }
633
634 if (mixlength > 0)
635 *length = mixlength;
636
637 return n;
638 }
639
640 /* Called from IO thread context */
641 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
642 pa_sink_input *i;
643 void *state = NULL;
644 unsigned p = 0;
645 unsigned n_unreffed = 0;
646
647 pa_sink_assert_ref(s);
648 pa_assert(result);
649 pa_assert(result->memblock);
650 pa_assert(result->length > 0);
651
652 /* We optimize for the case where the order of the inputs has not changed */
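    /* To exploit that, the search below starts at the position where the
     * previous match was found (p) and wraps around, so an unchanged
     * ordering is resolved in one step per input instead of a full scan. */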
653
654 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
655 unsigned j;
656 pa_mix_info* m = NULL;
657
658 pa_sink_input_assert_ref(i);
659
660 /* Let's try to find the matching entry in the pa_mix_info array */
661 for (j = 0; j < n; j ++) {
662
663 if (info[p].userdata == i) {
664 m = info + p;
665 break;
666 }
667
668 p++;
669 if (p >= n)
670 p = 0;
671 }
672
673 /* Drop read data */
674 pa_sink_input_drop(i, result->length);
675
676 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
677
678 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
679 void *ostate = NULL;
680 pa_source_output *o;
681 pa_memchunk c;
682
683 if (m && m->chunk.memblock) {
684 c = m->chunk;
685 pa_memblock_ref(c.memblock);
686 pa_assert(result->length <= c.length);
687 c.length = result->length;
688
689 pa_memchunk_make_writable(&c, 0);
690 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
691 } else {
692 c = s->silence;
693 pa_memblock_ref(c.memblock);
694 pa_assert(result->length <= c.length);
695 c.length = result->length;
696 }
697
698 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
699 pa_source_output_assert_ref(o);
700 pa_assert(o->direct_on_input == i);
701 pa_source_post_direct(s->monitor_source, o, &c);
702 }
703
704 pa_memblock_unref(c.memblock);
705 }
706 }
707
708 if (m) {
709 if (m->chunk.memblock)
710 pa_memblock_unref(m->chunk.memblock);
711 pa_memchunk_reset(&m->chunk);
712
713 pa_sink_input_unref(m->userdata);
714 m->userdata = NULL;
715
716 n_unreffed += 1;
717 }
718 }
719
720 /* Now drop references to entries that are included in the
721 * pa_mix_info array but don't exist anymore */
722
723 if (n_unreffed < n) {
724 for (; n > 0; info++, n--) {
725 if (info->userdata)
726 pa_sink_input_unref(info->userdata);
727 if (info->chunk.memblock)
728 pa_memblock_unref(info->chunk.memblock);
729 }
730 }
731
732 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
733 pa_source_post(s->monitor_source, result);
734 }
735
736 /* Called from IO thread context */
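/* Renders up to `length` bytes of audio into *result. Depending on how
 * many inputs currently have data: with none we hand out the cached
 * silence block, with exactly one we reuse its chunk directly (applying
 * soft volume/mute in place if needed), and with several we allocate a
 * fresh memblock and mix them with pa_mix(). inputs_drop() then advances
 * all inputs and feeds the monitor source. */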
737 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
738 pa_mix_info info[MAX_MIX_CHANNELS];
739 unsigned n;
740 size_t block_size_max;
741
742 pa_sink_assert_ref(s);
743 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
744 pa_assert(pa_frame_aligned(length, &s->sample_spec));
745 pa_assert(result);
746
747 pa_sink_ref(s);
748
749 pa_assert(!s->thread_info.rewind_requested);
750 pa_assert(s->thread_info.rewind_nbytes == 0);
751
752 if (s->thread_info.state == PA_SINK_SUSPENDED) {
753 result->memblock = pa_memblock_ref(s->silence.memblock);
754 result->index = s->silence.index;
755 result->length = PA_MIN(s->silence.length, length);
pa_sink_unref(s); /* balance the pa_sink_ref() above on this early return */
756 return;
757 }
758
759 if (length <= 0)
760 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
761
762 block_size_max = pa_mempool_block_size_max(s->core->mempool);
763 if (length > block_size_max)
764 length = pa_frame_align(block_size_max, &s->sample_spec);
765
766 pa_assert(length > 0);
767
768 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
769
770 if (n == 0) {
771
772 *result = s->silence;
773 pa_memblock_ref(result->memblock);
774
775 if (result->length > length)
776 result->length = length;
777
778 } else if (n == 1) {
779 pa_cvolume volume;
780
781 *result = info[0].chunk;
782 pa_memblock_ref(result->memblock);
783
784 if (result->length > length)
785 result->length = length;
786
787 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
788
789 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
790 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
791 pa_memblock_unref(result->memblock);
792 pa_silence_memchunk_get(&s->core->silence_cache,
793 s->core->mempool,
794 result,
795 &s->sample_spec,
796 result->length);
797 } else {
798 pa_memchunk_make_writable(result, 0);
799 pa_volume_memchunk(result, &s->sample_spec, &volume);
800 }
801 }
802 } else {
803 void *ptr;
804 result->memblock = pa_memblock_new(s->core->mempool, length);
805
806 ptr = pa_memblock_acquire(result->memblock);
807 result->length = pa_mix(info, n,
808 ptr, length,
809 &s->sample_spec,
810 &s->thread_info.soft_volume,
811 s->thread_info.soft_muted);
812 pa_memblock_release(result->memblock);
813
814 result->index = 0;
815 }
816
817 inputs_drop(s, info, n, result);
818
819 pa_sink_unref(s);
820 }
821
822 /* Called from IO thread context */
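/* Like pa_sink_render(), but mixes into the caller-provided chunk instead
 * of allocating one; target->length may be shortened if less data is
 * available in a single pass. */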
823 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
824 pa_mix_info info[MAX_MIX_CHANNELS];
825 unsigned n;
826 size_t length, block_size_max;
827
828 pa_sink_assert_ref(s);
829 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
830 pa_assert(target);
831 pa_assert(target->memblock);
832 pa_assert(target->length > 0);
833 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
834
835 pa_sink_ref(s);
836
837 pa_assert(!s->thread_info.rewind_requested);
838 pa_assert(s->thread_info.rewind_nbytes == 0);
839
840 if (s->thread_info.state == PA_SINK_SUSPENDED) {
841 pa_silence_memchunk(target, &s->sample_spec);
pa_sink_unref(s); /* balance the pa_sink_ref() above on this early return */
842 return;
843 }
844
845 length = target->length;
846 block_size_max = pa_mempool_block_size_max(s->core->mempool);
847 if (length > block_size_max)
848 length = pa_frame_align(block_size_max, &s->sample_spec);
849
850 pa_assert(length > 0);
851
852 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
853
854 if (n == 0) {
855 if (target->length > length)
856 target->length = length;
857
858 pa_silence_memchunk(target, &s->sample_spec);
859 } else if (n == 1) {
860 pa_cvolume volume;
861
862 if (target->length > length)
863 target->length = length;
864
865 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
866
867 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
868 pa_silence_memchunk(target, &s->sample_spec);
869 else {
870 pa_memchunk vchunk;
871
872 vchunk = info[0].chunk;
873 pa_memblock_ref(vchunk.memblock);
874
875 if (vchunk.length > length)
876 vchunk.length = length;
877
878 if (!pa_cvolume_is_norm(&volume)) {
879 pa_memchunk_make_writable(&vchunk, 0);
880 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
881 }
882
883 pa_memchunk_memcpy(target, &vchunk);
884 pa_memblock_unref(vchunk.memblock);
885 }
886
887 } else {
888 void *ptr;
889
890 ptr = pa_memblock_acquire(target->memblock);
891
892 target->length = pa_mix(info, n,
893 (uint8_t*) ptr + target->index, length,
894 &s->sample_spec,
895 &s->thread_info.soft_volume,
896 s->thread_info.soft_muted);
897
898 pa_memblock_release(target->memblock);
899 }
900
901 inputs_drop(s, info, n, target);
902
903 pa_sink_unref(s);
904 }
905
906 /* Called from IO thread context */
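/* Fills the caller-provided chunk completely by calling
 * pa_sink_render_into() repeatedly until target->length bytes have been
 * produced. */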
907 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
908 pa_memchunk chunk;
909 size_t l, d;
910
911 pa_sink_assert_ref(s);
912 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
913 pa_assert(target);
914 pa_assert(target->memblock);
915 pa_assert(target->length > 0);
916 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
917
918 pa_sink_ref(s);
919
920 pa_assert(!s->thread_info.rewind_requested);
921 pa_assert(s->thread_info.rewind_nbytes == 0);
922
923 l = target->length;
924 d = 0;
925 while (l > 0) {
926 chunk = *target;
927 chunk.index += d;
928 chunk.length -= d;
929
930 pa_sink_render_into(s, &chunk);
931
932 d += chunk.length;
933 l -= chunk.length;
934 }
935
936 pa_sink_unref(s);
937 }
938
939 /* Called from IO thread context */
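/* Renders exactly `length` bytes into a newly allocated (or reused) chunk,
 * topping up with additional pa_sink_render_into() passes if the first
 * mixing pass produced less than requested. */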
940 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
941 pa_mix_info info[MAX_MIX_CHANNELS];
942 size_t length1st = length;
943 unsigned n;
944
945 pa_sink_assert_ref(s);
946 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
947 pa_assert(length > 0);
948 pa_assert(pa_frame_aligned(length, &s->sample_spec));
949 pa_assert(result);
950
951 pa_sink_ref(s);
952
953 pa_assert(!s->thread_info.rewind_requested);
954 pa_assert(s->thread_info.rewind_nbytes == 0);
955
956 pa_assert(length > 0);
957
958 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
959
960 if (n == 0) {
961 pa_silence_memchunk_get(&s->core->silence_cache,
962 s->core->mempool,
963 result,
964 &s->sample_spec,
965 length1st);
966 } else if (n == 1) {
967 pa_cvolume volume;
968
969 *result = info[0].chunk;
970 pa_memblock_ref(result->memblock);
971
972 if (result->length > length)
973 result->length = length;
974
975 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
976
977 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
978 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
979 pa_memblock_unref(result->memblock);
980 pa_silence_memchunk_get(&s->core->silence_cache,
981 s->core->mempool,
982 result,
983 &s->sample_spec,
984 result->length);
985 } else {
986 pa_memchunk_make_writable(result, length);
987 pa_volume_memchunk(result, &s->sample_spec, &volume);
988 }
989 }
990 } else {
991 void *ptr;
992
993 result->index = 0;
994 result->memblock = pa_memblock_new(s->core->mempool, length);
995
996 ptr = pa_memblock_acquire(result->memblock);
997
998 result->length = pa_mix(info, n,
999 (uint8_t*) ptr + result->index, length1st,
1000 &s->sample_spec,
1001 &s->thread_info.soft_volume,
1002 s->thread_info.soft_muted);
1003
1004 pa_memblock_release(result->memblock);
1005 }
1006
1007 inputs_drop(s, info, n, result);
1008
1009 if (result->length < length) {
1010 pa_memchunk chunk;
1011 size_t l, d;
1012 pa_memchunk_make_writable(result, length);
1013 result->length = length;
1014
1015 l = length - result->length;
1016 d = result->index + result->length;
1017 while (l > 0) {
1018 chunk = *result;
1019 chunk.index += d;
1020 chunk.length -= d - result->index;
1021
1022 pa_sink_render_into(s, &chunk);
1023
1024 d += chunk.length;
1025 l -= chunk.length;
1026 }
1027 result->length = length;
1028 }
1029
1030 pa_sink_unref(s);
1031 }
1032
1033 /* Called from main thread */
1034 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1035 pa_usec_t usec = 0;
1036
1037 pa_sink_assert_ref(s);
1038 pa_assert(PA_SINK_IS_LINKED(s->state));
1039
1040 /* The returned value is supposed to be in the time domain of the sound card! */
1041
1042 if (s->state == PA_SINK_SUSPENDED)
1043 return 0;
1044
1045 if (!(s->flags & PA_SINK_LATENCY))
1046 return 0;
1047
1048 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1049
1050 return usec;
1051 }
1052
1053 /* Called from IO thread */
1054 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1055 pa_usec_t usec = 0;
1056 pa_msgobject *o;
1057
1058 pa_sink_assert_ref(s);
1059 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1060
1061 /* The returned value is supposed to be in the time domain of the sound card! */
1062
1063 if (s->thread_info.state == PA_SINK_SUSPENDED)
1064 return 0;
1065
1066 if (!(s->flags & PA_SINK_LATENCY))
1067 return 0;
1068
1069 o = PA_MSGOBJECT(s);
1070
1071 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1072
1073 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1074 return -1;
1075
1076 return usec;
1077 }
1078
1079 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1080 unsigned c;
1081
1082 pa_sink_input_assert_ref(i);
1083 pa_assert(new_volume->channels == i->sample_spec.channels);
1084
1085 /*
1086 * This basically calculates:
1087 *
1088 * i->relative_volume := i->virtual_volume / new_volume
1089 * i->soft_volume := i->relative_volume * i->volume_factor
1090 */
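/* Illustrative example (made-up numbers): if the input's virtual volume
 * corresponds to 0.5 in linear terms, the new sink volume to 1.0 and
 * volume_factor to 1.0, then relative_volume becomes 0.5 and the soft
 * volume is the PA_VOLUME value for 0.5 linear. */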
1091
1092 /* The new sink volume passed in here must already be remapped to
1093 * the sink input's channel map! */
1094
1095 i->soft_volume.channels = i->sample_spec.channels;
1096
1097 for (c = 0; c < i->sample_spec.channels; c++)
1098
1099 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1100 /* We leave i->relative_volume untouched */
1101 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1102 else {
1103 i->relative_volume[c] =
1104 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1105 pa_sw_volume_to_linear(new_volume->values[c]);
1106
1107 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1108 i->relative_volume[c] *
1109 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1110 }
1111
1112 /* Hooks have the ability to play games with i->soft_volume */
1113 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1114
1115 /* We don't copy the soft_volume to the thread_info data
1116 * here. That must be done by the caller */
1117 }
1118
1119 /* Called from main thread */
1120 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1121 pa_sink_input *i;
1122 uint32_t idx;
1123
1124 pa_sink_assert_ref(s);
1125 pa_assert(new_volume);
1126 pa_assert(PA_SINK_IS_LINKED(s->state));
1127 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1128
1129 /* This is called whenever a sink input volume changes or a sink
1130 * input is added/removed and we might need to fix up the sink
1131 * volume accordingly. Please note that we don't actually update
1132 * the sinks volume here, we only return how it needs to be
1133 * updated. The caller should then call pa_sink_set_volume().*/
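/* Two passes over the inputs follow: the first takes, per channel, the
 * maximum of all input volumes remapped to the sink's channel map; the
 * second recomputes every input's soft volume against that new sink
 * volume. */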
1134
1135 if (pa_idxset_isempty(s->inputs)) {
1136 /* In the special case that we have no sink input we leave the
1137 * volume unmodified. */
1138 *new_volume = s->reference_volume;
1139 return;
1140 }
1141
1142 pa_cvolume_mute(new_volume, s->channel_map.channels);
1143
1144 /* First let's determine the new maximum volume of all inputs
1145 * connected to this sink */
1146 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1147 unsigned c;
1148 pa_cvolume remapped_volume;
1149
1150 remapped_volume = i->virtual_volume;
1151 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1152
1153 for (c = 0; c < new_volume->channels; c++)
1154 if (remapped_volume.values[c] > new_volume->values[c])
1155 new_volume->values[c] = remapped_volume.values[c];
1156 }
1157
1158 /* Then, let's update the soft volumes of all inputs connected
1159 * to this sink */
1160 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1161 pa_cvolume remapped_new_volume;
1162
1163 remapped_new_volume = *new_volume;
1164 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1165 compute_new_soft_volume(i, &remapped_new_volume);
1166
1167 /* We don't copy soft_volume to the thread_info data here
1168 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1169 * want the update to happen atomically with the sink volume
1170 * update, hence we do it within the pa_sink_set_volume() call
1171 * below */
1172 }
1173 }
1174
1175 /* Called from main thread */
1176 void pa_sink_propagate_flat_volume(pa_sink *s) {
1177 pa_sink_input *i;
1178 uint32_t idx;
1179
1180 pa_sink_assert_ref(s);
1181 pa_assert(PA_SINK_IS_LINKED(s->state));
1182 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1183
1184 /* This is called whenever the sink volume changes that is not
1185 * caused by a sink input volume change. We need to fix up the
1186 * sink input volumes accordingly */
1187
1188 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1189 pa_cvolume sink_volume, new_virtual_volume;
1190 unsigned c;
1191
1192 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1193
1194 sink_volume = s->virtual_volume;
1195 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1196
1197 for (c = 0; c < i->sample_spec.channels; c++)
1198 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1199 i->relative_volume[c] *
1200 pa_sw_volume_to_linear(sink_volume.values[c]));
1201
1202 new_virtual_volume.channels = i->sample_spec.channels;
1203
1204 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1205 i->virtual_volume = new_virtual_volume;
1206
1207 /* Hmm, the soft volume might no longer actually match
1208 * what has been chosen as new virtual volume here,
1209 * especially when the old volume was
1210 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1211 * volumes here. */
1212 compute_new_soft_volume(i, &sink_volume);
1213
1214 /* The virtual volume changed, let's tell people so */
1215 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1216 }
1217 }
1218
1219 /* If the soft_volume of any of the sink inputs got changed, let's
1220 * make sure the thread copies are synced up. */
1221 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1222 }
1223
1224 /* Called from main thread */
1225 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference) {
1226 pa_bool_t virtual_volume_changed;
1227
1228 pa_sink_assert_ref(s);
1229 pa_assert(PA_SINK_IS_LINKED(s->state));
1230 pa_assert(volume);
1231 pa_assert(pa_cvolume_valid(volume));
1232 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1233
1234 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1235 s->virtual_volume = *volume;
1236
1237 if (become_reference)
1238 s->reference_volume = s->virtual_volume;
1239
1240 /* Propagate this volume change back to the inputs */
1241 if (virtual_volume_changed)
1242 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1243 pa_sink_propagate_flat_volume(s);
1244
1245 if (s->set_volume) {
1246 /* If we have a function set_volume(), then we do not apply a
1247 * soft volume by default. However, set_volume() is free to
1248 * apply one to s->soft_volume */
1249
1250 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1251 s->set_volume(s);
1252
1253 } else
1254 /* If we have no function set_volume(), then the soft volume
1255 * becomes the virtual volume */
1256 s->soft_volume = s->virtual_volume;
1257
1258 /* This tells the sink that soft and/or virtual volume changed */
1259 if (sendmsg)
1260 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1261
1262 if (virtual_volume_changed)
1263 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1264 }
1265
1266 /* Called from main thread. Only to be called by sink implementor */
1267 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1268 pa_sink_assert_ref(s);
1269 pa_assert(volume);
1270
1271 s->soft_volume = *volume;
1272
1273 if (PA_SINK_IS_LINKED(s->state))
1274 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1275 else
1276 s->thread_info.soft_volume = *volume;
1277 }
1278
1279 /* Called from main thread */
1280 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1281 pa_sink_assert_ref(s);
1282
1283 if (s->refresh_volume || force_refresh) {
1284 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1285
1286 if (s->get_volume)
1287 s->get_volume(s);
1288
1289 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1290
1291 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1292
1293 s->reference_volume = s->virtual_volume;
1294
1295 if (s->flags & PA_SINK_FLAT_VOLUME)
1296 pa_sink_propagate_flat_volume(s);
1297
1298 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1299 }
1300 }
1301
1302 return reference ? &s->reference_volume : &s->virtual_volume;
1303 }
1304
1305 /* Called from main thread */
1306 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1307 pa_sink_assert_ref(s);
1308
1309 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1310
1311 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1312 return;
1313
1314 s->reference_volume = s->virtual_volume = *new_volume;
1315
1316 if (s->flags & PA_SINK_FLAT_VOLUME)
1317 pa_sink_propagate_flat_volume(s);
1318
1319 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1320 }
1321
1322 /* Called from main thread */
1323 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute) {
1324 pa_bool_t old_muted;
1325
1326 pa_sink_assert_ref(s);
1327 pa_assert(PA_SINK_IS_LINKED(s->state));
1328
1329 old_muted = s->muted;
1330 s->muted = mute;
1331
1332 if (s->set_mute)
1333 s->set_mute(s);
1334
1335 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1336
1337 if (old_muted != s->muted)
1338 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1339 }
1340
1341 /* Called from main thread */
1342 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1343
1344 pa_sink_assert_ref(s);
1345
1346 if (s->refresh_muted || force_refresh) {
1347 pa_bool_t old_muted = s->muted;
1348
1349 if (s->get_mute)
1350 s->get_mute(s);
1351
1352 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1353
1354 if (old_muted != s->muted)
1355 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1356 }
1357
1358 return s->muted;
1359 }
1360
1361 /* Called from main thread */
1362 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1363 pa_sink_assert_ref(s);
1364
1365 /* The sink implementor may call this if the mute state changed to make sure everyone is notified */
1366
1367 if (s->muted == new_muted)
1368 return;
1369
1370 s->muted = new_muted;
1371 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1372 }
1373
1374 /* Called from main thread */
1375 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1376 pa_sink_assert_ref(s);
1377
1378 if (p)
1379 pa_proplist_update(s->proplist, mode, p);
1380
1381 if (PA_SINK_IS_LINKED(s->state)) {
1382 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1383 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1384 }
1385
1386 return TRUE;
1387 }
1388
1389 /* Called from main thread */
1390 void pa_sink_set_description(pa_sink *s, const char *description) {
1391 const char *old;
1392 pa_sink_assert_ref(s);
1393
1394 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1395 return;
1396
1397 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1398
1399 if (old && description && !strcmp(old, description))
1400 return;
1401
1402 if (description)
1403 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1404 else
1405 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1406
1407 if (s->monitor_source) {
1408 char *n;
1409
1410 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1411 pa_source_set_description(s->monitor_source, n);
1412 pa_xfree(n);
1413 }
1414
1415 if (PA_SINK_IS_LINKED(s->state)) {
1416 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1417 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1418 }
1419 }
1420
1421 /* Called from main thread */
1422 unsigned pa_sink_linked_by(pa_sink *s) {
1423 unsigned ret;
1424
1425 pa_sink_assert_ref(s);
1426 pa_assert(PA_SINK_IS_LINKED(s->state));
1427
1428 ret = pa_idxset_size(s->inputs);
1429
1430 /* We add in the number of streams connected to us here. Please
1431 * note the asymmetry to pa_sink_used_by()! */
1432
1433 if (s->monitor_source)
1434 ret += pa_source_linked_by(s->monitor_source);
1435
1436 return ret;
1437 }
1438
1439 /* Called from main thread */
1440 unsigned pa_sink_used_by(pa_sink *s) {
1441 unsigned ret;
1442
1443 pa_sink_assert_ref(s);
1444 pa_assert(PA_SINK_IS_LINKED(s->state));
1445
1446 ret = pa_idxset_size(s->inputs);
1447 pa_assert(ret >= s->n_corked);
1448
1449 /* Streams connected to our monitor source do not matter for
1450 * pa_sink_used_by()! */
1451
1452 return ret - s->n_corked;
1453 }
1454
1455 /* Called from main thread */
1456 unsigned pa_sink_check_suspend(pa_sink *s) {
1457 unsigned ret;
1458 pa_sink_input *i;
1459 uint32_t idx;
1460
1461 pa_sink_assert_ref(s);
1462
1463 if (!PA_SINK_IS_LINKED(s->state))
1464 return 0;
1465
1466 ret = 0;
1467
1468 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1469 pa_sink_input_state_t st;
1470
1471 st = pa_sink_input_get_state(i);
1472 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1473
1474 if (st == PA_SINK_INPUT_CORKED)
1475 continue;
1476
1477 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1478 continue;
1479
1480 ret ++;
1481 }
1482
1483 if (s->monitor_source)
1484 ret += pa_source_check_suspend(s->monitor_source);
1485
1486 return ret;
1487 }
1488
1489 /* Called from the IO thread */
1490 static void sync_input_volumes_within_thread(pa_sink *s) {
1491 pa_sink_input *i;
1492 void *state = NULL;
1493
1494 pa_sink_assert_ref(s);
1495
1496 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1497 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1498 continue;
1499
1500 i->thread_info.soft_volume = i->soft_volume;
1501 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1502 }
1503 }
1504
1505 /* Called from IO thread, except when it is not */
1506 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1507 pa_sink *s = PA_SINK(o);
1508 pa_sink_assert_ref(s);
1509
1510 switch ((pa_sink_message_t) code) {
1511
1512 case PA_SINK_MESSAGE_ADD_INPUT: {
1513 pa_sink_input *i = PA_SINK_INPUT(userdata);
1514
1515 /* If you change anything here, make sure to change the
1516 * sink input handling a few lines down at
1517 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1518
1519 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1520
1521 /* Since the caller sleeps in pa_sink_input_put(), we can
1522 * safely access data outside of thread_info even though
1523 * it is mutable */
1524
1525 if ((i->thread_info.sync_prev = i->sync_prev)) {
1526 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1527 pa_assert(i->sync_prev->sync_next == i);
1528 i->thread_info.sync_prev->thread_info.sync_next = i;
1529 }
1530
1531 if ((i->thread_info.sync_next = i->sync_next)) {
1532 pa_assert(i->sink == i->thread_info.sync_next->sink);
1533 pa_assert(i->sync_next->sync_prev == i);
1534 i->thread_info.sync_next->thread_info.sync_prev = i;
1535 }
1536
1537 pa_assert(!i->thread_info.attached);
1538 i->thread_info.attached = TRUE;
1539
1540 if (i->attach)
1541 i->attach(i);
1542
1543 pa_sink_input_set_state_within_thread(i, i->state);
1544
1545 /* The requested latency of the sink input needs to be
1546 * fixed up and then configured on the sink */
1547
1548 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1549 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1550
1551 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1552 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1553
1554 /* We don't rewind here automatically. This is left to the
1555 * sink input implementor because some sink inputs need a
1556 * slow start, i.e. need some time to buffer client
1557 * samples before beginning streaming. */
1558
1559 /* In flat volume mode we need to update the volume as
1560 * well */
1561 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1562 }
1563
1564 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1565 pa_sink_input *i = PA_SINK_INPUT(userdata);
1566
1567 /* If you change anything here, make sure to change the
1568 * sink input handling a few lines down at
1569 * PA_SINK_MESSAGE_START_MOVE, too. */
1570
1571 if (i->detach)
1572 i->detach(i);
1573
1574 pa_sink_input_set_state_within_thread(i, i->state);
1575
1576 pa_assert(i->thread_info.attached);
1577 i->thread_info.attached = FALSE;
1578
1579 /* Since the caller sleeps in pa_sink_input_unlink(),
1580 * we can safely access data outside of thread_info even
1581 * though it is mutable */
1582
1583 pa_assert(!i->sync_prev);
1584 pa_assert(!i->sync_next);
1585
1586 if (i->thread_info.sync_prev) {
1587 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1588 i->thread_info.sync_prev = NULL;
1589 }
1590
1591 if (i->thread_info.sync_next) {
1592 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1593 i->thread_info.sync_next = NULL;
1594 }
1595
1596 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1597 pa_sink_input_unref(i);
1598
1599 pa_sink_invalidate_requested_latency(s);
1600 pa_sink_request_rewind(s, (size_t) -1);
1601
1602 /* In flat volume mode we need to update the volume as
1603 * well */
1604 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1605 }
1606
1607 case PA_SINK_MESSAGE_START_MOVE: {
1608 pa_sink_input *i = PA_SINK_INPUT(userdata);
1609
1610 /* We don't support moving synchronized streams. */
1611 pa_assert(!i->sync_prev);
1612 pa_assert(!i->sync_next);
1613 pa_assert(!i->thread_info.sync_next);
1614 pa_assert(!i->thread_info.sync_prev);
1615
1616 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1617 pa_usec_t usec = 0;
1618 size_t sink_nbytes, total_nbytes;
1619
1620 /* Get the latency of the sink */
1621 if (!(s->flags & PA_SINK_LATENCY) ||
1622 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1623 usec = 0;
1624
1625 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1626 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1627
1628 if (total_nbytes > 0) {
1629 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1630 i->thread_info.rewrite_flush = TRUE;
1631 pa_sink_input_process_rewind(i, sink_nbytes);
1632 }
1633 }
1634
1635 if (i->detach)
1636 i->detach(i);
1637
1638 pa_assert(i->thread_info.attached);
1639 i->thread_info.attached = FALSE;
1640
1641 /* Let's remove the sink input ...*/
1642 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1643 pa_sink_input_unref(i);
1644
1645 pa_sink_invalidate_requested_latency(s);
1646
1647 pa_log_debug("Requesting rewind due to started move");
1648 pa_sink_request_rewind(s, (size_t) -1);
1649
1650 /* In flat volume mode we need to update the volume as
1651 * well */
1652 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1653 }
1654
1655 case PA_SINK_MESSAGE_FINISH_MOVE: {
1656 pa_sink_input *i = PA_SINK_INPUT(userdata);
1657
1658 /* We don't support moving synchronized streams. */
1659 pa_assert(!i->sync_prev);
1660 pa_assert(!i->sync_next);
1661 pa_assert(!i->thread_info.sync_next);
1662 pa_assert(!i->thread_info.sync_prev);
1663
1664 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1665
1666 pa_assert(!i->thread_info.attached);
1667 i->thread_info.attached = TRUE;
1668
1669 if (i->attach)
1670 i->attach(i);
1671
1672 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1673 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1674
1675 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1676 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1677
1678 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1679 pa_usec_t usec = 0;
1680 size_t nbytes;
1681
1682 /* Get the latency of the sink */
1683 if (!(s->flags & PA_SINK_LATENCY) ||
1684 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1685 usec = 0;
1686
1687 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1688
1689 if (nbytes > 0)
1690 pa_sink_input_drop(i, nbytes);
1691
1692 pa_log_debug("Requesting rewind due to finished move");
1693 pa_sink_request_rewind(s, nbytes);
1694 }
1695
1696 /* In flat volume mode we need to update the volume as
1697 * well */
1698 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1699 }
1700
1701 case PA_SINK_MESSAGE_SET_VOLUME:
1702
1703 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1704 s->thread_info.soft_volume = s->soft_volume;
1705 pa_sink_request_rewind(s, (size_t) -1);
1706 }
1707
1708 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1709 return 0;
1710
1711 /* Fall through ... */
1712
1713 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1714 sync_input_volumes_within_thread(s);
1715 return 0;
1716
1717 case PA_SINK_MESSAGE_GET_VOLUME:
1718 return 0;
1719
1720 case PA_SINK_MESSAGE_SET_MUTE:
1721
1722 if (s->thread_info.soft_muted != s->muted) {
1723 s->thread_info.soft_muted = s->muted;
1724 pa_sink_request_rewind(s, (size_t) -1);
1725 }
1726
1727 return 0;
1728
1729 case PA_SINK_MESSAGE_GET_MUTE:
1730 return 0;
1731
1732 case PA_SINK_MESSAGE_SET_STATE: {
1733
1734 pa_bool_t suspend_change =
1735 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1736 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1737
1738 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1739
1740 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1741 s->thread_info.rewind_nbytes = 0;
1742 s->thread_info.rewind_requested = FALSE;
1743 }
1744
1745 if (suspend_change) {
1746 pa_sink_input *i;
1747 void *state = NULL;
1748
1749 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1750 if (i->suspend_within_thread)
1751 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1752 }
1753
1754 return 0;
1755 }
1756
1757 case PA_SINK_MESSAGE_DETACH:
1758
1759 /* Detach all streams */
1760 pa_sink_detach_within_thread(s);
1761 return 0;
1762
1763 case PA_SINK_MESSAGE_ATTACH:
1764
1765 /* Reattach all streams */
1766 pa_sink_attach_within_thread(s);
1767 return 0;
1768
1769 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1770
1771 pa_usec_t *usec = userdata;
1772 *usec = pa_sink_get_requested_latency_within_thread(s);
1773
1774 if (*usec == (pa_usec_t) -1)
1775 *usec = s->thread_info.max_latency;
1776
1777 return 0;
1778 }
1779
1780 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1781 pa_usec_t *r = userdata;
1782
1783 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1784
1785 return 0;
1786 }
1787
1788 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1789 pa_usec_t *r = userdata;
1790
1791 r[0] = s->thread_info.min_latency;
1792 r[1] = s->thread_info.max_latency;
1793
1794 return 0;
1795 }
1796
1797 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1798
1799 *((size_t*) userdata) = s->thread_info.max_rewind;
1800 return 0;
1801
1802 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1803
1804 *((size_t*) userdata) = s->thread_info.max_request;
1805 return 0;
1806
1807 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1808
1809 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1810 return 0;
1811
1812 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1813
1814 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1815 return 0;
1816
1817 case PA_SINK_MESSAGE_GET_LATENCY:
1818 case PA_SINK_MESSAGE_MAX:
1819 ;
1820 }
1821
1822 return -1;
1823 }
1824
1825 /* Called from main thread */
1826 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend) {
1827 pa_sink *sink;
1828 uint32_t idx;
1829 int ret = 0;
1830
1831 pa_core_assert_ref(c);
1832
1833 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1834 int r;
1835
1836 if ((r = pa_sink_suspend(sink, suspend)) < 0)
1837 ret = r;
1838 }
1839
1840 return ret;
1841 }
1842
1843 /* Called from main thread */
1844 void pa_sink_detach(pa_sink *s) {
1845 pa_sink_assert_ref(s);
1846 pa_assert(PA_SINK_IS_LINKED(s->state));
1847
1848 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1849 }
1850
1851 /* Called from main thread */
1852 void pa_sink_attach(pa_sink *s) {
1853 pa_sink_assert_ref(s);
1854 pa_assert(PA_SINK_IS_LINKED(s->state));
1855
1856 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1857 }
1858
1859 /* Called from IO thread */
1860 void pa_sink_detach_within_thread(pa_sink *s) {
1861 pa_sink_input *i;
1862 void *state = NULL;
1863
1864 pa_sink_assert_ref(s);
1865 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1866
1867 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1868 if (i->detach)
1869 i->detach(i);
1870
1871 if (s->monitor_source)
1872 pa_source_detach_within_thread(s->monitor_source);
1873 }
1874
1875 /* Called from IO thread */
1876 void pa_sink_attach_within_thread(pa_sink *s) {
1877 pa_sink_input *i;
1878 void *state = NULL;
1879
1880 pa_sink_assert_ref(s);
1881 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1882
1883 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1884 if (i->attach)
1885 i->attach(i);
1886
1887 if (s->monitor_source)
1888 pa_source_attach_within_thread(s->monitor_source);
1889 }
1890
1891 /* Called from IO thread */
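/* Ask the sink to rewrite up to nbytes of audio it has already rendered;
 * nbytes == (size_t) -1 requests the largest rewind possible. The request
 * is ignored while the sink is suspended. */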
1892 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
1893 pa_sink_assert_ref(s);
1894 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1895
1896 if (s->thread_info.state == PA_SINK_SUSPENDED)
1897 return;
1898
1899 if (nbytes == (size_t) -1)
1900 nbytes = s->thread_info.max_rewind;
1901
1902 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
1903
1904 if (s->thread_info.rewind_requested &&
1905 nbytes <= s->thread_info.rewind_nbytes)
1906 return;
1907
1908 s->thread_info.rewind_nbytes = nbytes;
1909 s->thread_info.rewind_requested = TRUE;
1910
1911 if (s->request_rewind)
1912 s->request_rewind(s);
1913 }
1914
1915 /* Called from IO thread */
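/* Determine the latency requested of this sink: for sinks without
 * PA_SINK_DYNAMIC_LATENCY this is the clamped fixed latency, otherwise the
 * smallest latency requested by any input or the monitor source, clamped
 * to the configured range. (pa_usec_t) -1 means nobody requested anything. */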
1916 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
1917 pa_usec_t result = (pa_usec_t) -1;
1918 pa_sink_input *i;
1919 void *state = NULL;
1920 pa_usec_t monitor_latency;
1921
1922 pa_sink_assert_ref(s);
1923
1924 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
1925 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
1926
1927 if (s->thread_info.requested_latency_valid)
1928 return s->thread_info.requested_latency;
1929
1930 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1931
1932 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
1933 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
1934 result = i->thread_info.requested_sink_latency;
1935
1936 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
1937
1938 if (monitor_latency != (pa_usec_t) -1 &&
1939 (result == (pa_usec_t) -1 || result > monitor_latency))
1940 result = monitor_latency;
1941
1942 if (result != (pa_usec_t) -1)
1943 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
1944
1945 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1946 /* Only cache if properly initialized */
1947 s->thread_info.requested_latency = result;
1948 s->thread_info.requested_latency_valid = TRUE;
1949 }
1950
1951 return result;
1952 }
1953
1954 /* Called from main thread */
1955 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
1956 pa_usec_t usec = 0;
1957
1958 pa_sink_assert_ref(s);
1959 pa_assert(PA_SINK_IS_LINKED(s->state));
1960
1961 if (s->state == PA_SINK_SUSPENDED)
1962 return 0;
1963
1964 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
1965 return usec;
1966 }
1967
1968 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
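/* Update the maximum rewind size and propagate it to all inputs and the
 * monitor source */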
1969 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
1970 pa_sink_input *i;
1971 void *state = NULL;
1972
1973 pa_sink_assert_ref(s);
1974
1975 if (max_rewind == s->thread_info.max_rewind)
1976 return;
1977
1978 s->thread_info.max_rewind = max_rewind;
1979
1980 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1981 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1982 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1983 }
1984
1985 if (s->monitor_source)
1986 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
1987 }
1988
1989 /* Called from main thread */
1990 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
1991 pa_sink_assert_ref(s);
1992
1993 if (PA_SINK_IS_LINKED(s->state))
1994 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
1995 else
1996 pa_sink_set_max_rewind_within_thread(s, max_rewind);
1997 }
1998
1999 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
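/* Update the maximum request size and propagate it to all inputs */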
2000 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2001 void *state = NULL;
2002
2003 pa_sink_assert_ref(s);
2004
2005 if (max_request == s->thread_info.max_request)
2006 return;
2007
2008 s->thread_info.max_request = max_request;
2009
2010 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2011 pa_sink_input *i;
2012
2013 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2014 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2015 }
2016 }
2017
2018 /* Called from main thread */
2019 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2020 pa_sink_assert_ref(s);
2021
2022 if (PA_SINK_IS_LINKED(s->state))
2023 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2024 else
2025 pa_sink_set_max_request_within_thread(s, max_request);
2026 }
2027
2028 /* Called from IO thread */
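/* Drop the cached requested latency so that it is recalculated on the next
 * query, and notify the implementation and all inputs about the change */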
2029 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2030 pa_sink_input *i;
2031 void *state = NULL;
2032
2033 pa_sink_assert_ref(s);
2034
2035 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2036 return;
2037
2038 s->thread_info.requested_latency_valid = FALSE;
2039
2040 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2041
2042 if (s->update_requested_latency)
2043 s->update_requested_latency(s);
2044
2045 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2046 if (i->update_sink_requested_latency)
2047 i->update_sink_requested_latency(i);
2048 }
2049 }
2050
2051 /* Called from main thread */
2052 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2053 pa_sink_assert_ref(s);
2054
2055     /* min_latency == 0: no lower limit
2056      * min_latency != 0: use the specified limit
2057      *
2058      * Similarly for max_latency */
2059
2060 if (min_latency < ABSOLUTE_MIN_LATENCY)
2061 min_latency = ABSOLUTE_MIN_LATENCY;
2062
2063     if (max_latency == 0 ||
2064 max_latency > ABSOLUTE_MAX_LATENCY)
2065 max_latency = ABSOLUTE_MAX_LATENCY;
2066
2067 pa_assert(min_latency <= max_latency);
2068
2069     /* A latency range other than the absolute limits only makes sense if PA_SINK_DYNAMIC_LATENCY is set */
2070 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2071 max_latency == ABSOLUTE_MAX_LATENCY) ||
2072 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2073
2074 if (PA_SINK_IS_LINKED(s->state)) {
2075 pa_usec_t r[2];
2076
2077 r[0] = min_latency;
2078 r[1] = max_latency;
2079
2080 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2081 } else
2082 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2083 }
2084
2085 /* Called from main thread */
2086 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2087 pa_sink_assert_ref(s);
2088 pa_assert(min_latency);
2089 pa_assert(max_latency);
2090
2091 if (PA_SINK_IS_LINKED(s->state)) {
2092 pa_usec_t r[2] = { 0, 0 };
2093
2094 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2095
2096 *min_latency = r[0];
2097 *max_latency = r[1];
2098 } else {
2099 *min_latency = s->thread_info.min_latency;
2100 *max_latency = s->thread_info.max_latency;
2101 }
2102 }
2103
2104 /* Called from IO thread */
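/* Store the new latency range, notify all inputs, invalidate the cached
 * requested latency and mirror the range to the monitor source */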
2105 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2106 void *state = NULL;
2107
2108 pa_sink_assert_ref(s);
2109
2110 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2111 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2112 pa_assert(min_latency <= max_latency);
2113
2114     /* A latency range other than the absolute limits only makes sense if PA_SINK_DYNAMIC_LATENCY is set */
2115 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2116 max_latency == ABSOLUTE_MAX_LATENCY) ||
2117 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2118
2119 s->thread_info.min_latency = min_latency;
2120 s->thread_info.max_latency = max_latency;
2121
2122 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2123 pa_sink_input *i;
2124
2125 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2126 if (i->update_sink_latency_range)
2127 i->update_sink_latency_range(i);
2128 }
2129
2130 pa_sink_invalidate_requested_latency(s);
2131
2132 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2133 }
2134
2135 /* Called from main thread, before the sink is put */
2136 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2137 pa_sink_assert_ref(s);
2138
2139 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2140
2141 if (latency < ABSOLUTE_MIN_LATENCY)
2142 latency = ABSOLUTE_MIN_LATENCY;
2143
2144 if (latency > ABSOLUTE_MAX_LATENCY)
2145 latency = ABSOLUTE_MAX_LATENCY;
2146
2147 s->fixed_latency = latency;
2148 pa_source_set_fixed_latency(s->monitor_source, latency);
2149 }
2150
2151 /* Called from main context */
2152 size_t pa_sink_get_max_rewind(pa_sink *s) {
2153 size_t r;
2154 pa_sink_assert_ref(s);
2155
2156 if (!PA_SINK_IS_LINKED(s->state))
2157 return s->thread_info.max_rewind;
2158
2159 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2160
2161 return r;
2162 }
2163
2164 /* Called from main context */
2165 size_t pa_sink_get_max_request(pa_sink *s) {
2166 size_t r;
2167 pa_sink_assert_ref(s);
2168
2169 if (!PA_SINK_IS_LINKED(s->state))
2170 return s->thread_info.max_request;
2171
2172 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2173
2174 return r;
2175 }
2176
2177 /* Called from main context */
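/* Pick a reasonable XDG icon name for the device from its form factor,
 * device class, profile name and bus properties, unless
 * PA_PROP_DEVICE_ICON_NAME is already set */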
2178 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2179 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2180
2181 pa_assert(p);
2182
2183 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2184 return TRUE;
2185
2186 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2187
2188 if (pa_streq(ff, "microphone"))
2189 t = "audio-input-microphone";
2190 else if (pa_streq(ff, "webcam"))
2191 t = "camera-web";
2192 else if (pa_streq(ff, "computer"))
2193 t = "computer";
2194 else if (pa_streq(ff, "handset"))
2195 t = "phone";
2196 else if (pa_streq(ff, "portable"))
2197 t = "multimedia-player";
2198 else if (pa_streq(ff, "tv"))
2199 t = "video-display";
2200
2201         /*
2202          * The following icon names are not part of the XDG icon naming
2203          * spec; see the discussion at
2204          *
2205          * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2206          */
2207 else if (pa_streq(ff, "headset"))
2208 t = "audio-headset";
2209 else if (pa_streq(ff, "headphone"))
2210 t = "audio-headphones";
2211 else if (pa_streq(ff, "speaker"))
2212 t = "audio-speakers";
2213 else if (pa_streq(ff, "hands-free"))
2214 t = "audio-handsfree";
2215 }
2216
2217 if (!t)
2218 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2219 if (pa_streq(c, "modem"))
2220 t = "modem";
2221
2222 if (!t) {
2223 if (is_sink)
2224 t = "audio-card";
2225 else
2226 t = "audio-input-microphone";
2227 }
2228
2229 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2230 if (strstr(profile, "analog"))
2231 s = "-analog";
2232 else if (strstr(profile, "iec958"))
2233 s = "-iec958";
2234 else if (strstr(profile, "hdmi"))
2235 s = "-hdmi";
2236 }
2237
2238 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2239
2240 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2241
2242 return TRUE;
2243 }
2244
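/* Derive a human readable description for the device from its form factor,
 * device class or product name, unless PA_PROP_DEVICE_DESCRIPTION is already
 * set. Returns TRUE if a description is available afterwards. */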
2245 pa_bool_t pa_device_init_description(pa_proplist *p) {
2246 const char *s;
2247 pa_assert(p);
2248
2249 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2250 return TRUE;
2251
2252 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2253 if (pa_streq(s, "internal")) {
2254 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Internal Audio"));
2255 return TRUE;
2256 }
2257
2258 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2259 if (pa_streq(s, "modem")) {
2260 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Modem"));
2261 return TRUE;
2262 }
2263
2264 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME))) {
2265 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, s);
2266 return TRUE;
2267 }
2268
2269 return FALSE;
2270 }