]> code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
alsa: Reinitialise the mixer on port change.
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
39
40 #include <pulsecore/sink-input.h>
41 #include <pulsecore/namereg.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/core-subscribe.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/play-memblockq.h>
48 #include <pulsecore/flist.h>
49
50 #include "sink.h"
51
52 #define MAX_MIX_CHANNELS 32
53 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
54 #define ABSOLUTE_MIN_LATENCY (500)
55 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
56 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
57
58 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
59
/* One queued hardware volume change, to be applied at a given time.
 * Used by the deferred ("sync") volume machinery below
 * (pa_sink_volume_change_push/flush/rewind); entries live on the
 * thread_info.volume_changes linked list. */
struct pa_sink_volume_change {
    pa_usec_t at;           /* when to apply the change */
    pa_cvolume hw_volume;   /* hardware volume to apply at that time */

    PA_LLIST_FIELDS(pa_sink_volume_change);
};
66
/* Message payload for asking the IO thread to switch the active port:
 * carries the port to activate and a slot for the handler's return
 * code (presumably used by the SET_PORT sink message — not visible in
 * this chunk). */
struct sink_message_set_port {
    pa_device_port *port;
    int ret;
};
71
72 static void sink_free(pa_object *s);
73
74 static void pa_sink_volume_change_push(pa_sink *s);
75 static void pa_sink_volume_change_flush(pa_sink *s);
76 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
77
78 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
79 pa_assert(data);
80
81 pa_zero(*data);
82 data->proplist = pa_proplist_new();
83
84 return data;
85 }
86
87 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
88 pa_assert(data);
89
90 pa_xfree(data->name);
91 data->name = pa_xstrdup(name);
92 }
93
94 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
95 pa_assert(data);
96
97 if ((data->sample_spec_is_set = !!spec))
98 data->sample_spec = *spec;
99 }
100
101 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
102 pa_assert(data);
103
104 if ((data->channel_map_is_set = !!map))
105 data->channel_map = *map;
106 }
107
108 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
109 pa_assert(data);
110
111 if ((data->volume_is_set = !!volume))
112 data->volume = *volume;
113 }
114
115 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
116 pa_assert(data);
117
118 data->muted_is_set = TRUE;
119 data->muted = !!mute;
120 }
121
122 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
123 pa_assert(data);
124
125 pa_xfree(data->active_port);
126 data->active_port = pa_xstrdup(port);
127 }
128
129 void pa_sink_new_data_done(pa_sink_new_data *data) {
130 pa_assert(data);
131
132 pa_proplist_free(data->proplist);
133
134 if (data->ports) {
135 pa_device_port *p;
136
137 while ((p = pa_hashmap_steal_first(data->ports)))
138 pa_device_port_free(p);
139
140 pa_hashmap_free(data->ports, NULL, NULL);
141 }
142
143 pa_xfree(data->name);
144 pa_xfree(data->active_port);
145 }
146
147 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
148 pa_device_port *p;
149
150 pa_assert(name);
151
152 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
153 p->name = pa_xstrdup(name);
154 p->description = pa_xstrdup(description);
155
156 p->priority = 0;
157
158 return p;
159 }
160
161 void pa_device_port_free(pa_device_port *p) {
162 pa_assert(p);
163
164 pa_xfree(p->name);
165 pa_xfree(p->description);
166 pa_xfree(p);
167 }
168
/* Called from main context.
 * Clear every implementor-provided callback on the sink. Used during
 * initialization (pa_sink_new) and teardown (pa_sink_unlink) so stale
 * driver callbacks can never be invoked. */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
    s->get_formats = NULL;
}
183
/* Called from main context.
 * Allocate and initialize a sink from the parameters in 'data':
 * registers the name, fires the NEW and FIXATE hooks, validates and
 * fixates sample spec / channel map / volume, steals the port list,
 * and creates the accompanying ".monitor" source. Returns NULL on
 * failure. The sink is left in PA_SINK_INIT state; the caller must
 * finish setup and then call pa_sink_put(). */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Grab the (possibly modified) registered name before anything else;
     * on failure only the bare object has to be freed. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Give modules a chance to veto or adjust the new sink. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* No channel map given: derive the default one for this channel count. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the sink volume is allowed to be set, like there is for sink inputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit card properties, then fill in standard device metadata. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust the now-fixated parameters. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->input_to_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Honor an explicitly requested port if it actually exists... */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise fall back to the highest-priority port. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Initialize the IO-thread-side shadow state. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    /* Dynamic-latency sinks get no fixed latency; others start with the default. */
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Create the accompanying monitor source, mirroring this sink's
     * sample spec, channel map and latency-related flags. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
393
/* Called from main context.
 * Transition the sink to 'state'. First asks the implementor via
 * s->set_state(), then tells the IO thread via a SET_STATE message;
 * if the message fails, the implementor callback is rolled back to
 * the original state. On success, fires the STATE_CHANGED hook (and
 * a change event), and on suspend/resume transitions notifies all
 * sink inputs and the monitor source. Returns 0 on success or a
 * negative error code. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* A "suspend change" is any transition into or out of SUSPENDED
     * from/to an opened state. */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* IO thread refused the new state: undo the implementor-side
             * change so both sides stay consistent. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
451
452 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
453 pa_assert(s);
454
455 s->get_volume = cb;
456 }
457
458 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
459 pa_sink_flags_t flags;
460
461 pa_assert(s);
462 pa_assert(!s->write_volume || cb);
463
464 s->set_volume = cb;
465
466 /* Save the current flags so we can tell if they've changed */
467 flags = s->flags;
468
469 if (cb) {
470 /* The sink implementor is responsible for setting decibel volume support */
471 s->flags |= PA_SINK_HW_VOLUME_CTRL;
472 } else {
473 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
474 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
475 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
476 }
477
478 /* If the flags have changed after init, let any clients know via a change event */
479 if (s->state != PA_SINK_INIT && flags != s->flags)
480 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
481 }
482
483 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
484 pa_sink_flags_t flags;
485
486 pa_assert(s);
487 pa_assert(!cb || s->set_volume);
488
489 s->write_volume = cb;
490
491 /* Save the current flags so we can tell if they've changed */
492 flags = s->flags;
493
494 if (cb)
495 s->flags |= PA_SINK_SYNC_VOLUME;
496 else
497 s->flags &= ~PA_SINK_SYNC_VOLUME;
498
499 /* If the flags have changed after init, let any clients know via a change event */
500 if (s->state != PA_SINK_INIT && flags != s->flags)
501 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
502 }
503
504 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
505 pa_assert(s);
506
507 s->get_mute = cb;
508 }
509
510 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
511 pa_sink_flags_t flags;
512
513 pa_assert(s);
514
515 s->set_mute = cb;
516
517 /* Save the current flags so we can tell if they've changed */
518 flags = s->flags;
519
520 if (cb)
521 s->flags |= PA_SINK_HW_MUTE_CTRL;
522 else
523 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
524
525 /* If the flags have changed after init, let any clients know via a change event */
526 if (s->state != PA_SINK_INIT && flags != s->flags)
527 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
528 }
529
530 static void enable_flat_volume(pa_sink *s, pa_bool_t enable) {
531 pa_sink_flags_t flags;
532
533 pa_assert(s);
534
535 /* Always follow the overall user preference here */
536 enable = enable && s->core->flat_volumes;
537
538 /* Save the current flags so we can tell if they've changed */
539 flags = s->flags;
540
541 if (enable)
542 s->flags |= PA_SINK_FLAT_VOLUME;
543 else
544 s->flags &= ~PA_SINK_FLAT_VOLUME;
545
546 /* If the flags have changed after init, let any clients know via a change event */
547 if (s->state != PA_SINK_INIT && flags != s->flags)
548 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
549 }
550
551 void pa_sink_enable_decibel_volume(pa_sink *s, pa_bool_t enable) {
552 pa_sink_flags_t flags;
553
554 pa_assert(s);
555
556 /* Save the current flags so we can tell if they've changed */
557 flags = s->flags;
558
559 if (enable) {
560 s->flags |= PA_SINK_DECIBEL_VOLUME;
561 enable_flat_volume(s, TRUE);
562 } else {
563 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
564 enable_flat_volume(s, FALSE);
565 }
566
567 /* If the flags have changed after init, let any clients know via a change event */
568 if (s->state != PA_SINK_INIT && flags != s->flags)
569 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
570 }
571
/* Called from main context.
 * Finish initialization of a sink created with pa_sink_new(): verify
 * the implementor left the object in a consistent state (asserts),
 * finalize the volume-related flags, inherit volumes from the root
 * sink when volume sharing is used, move the sink to IDLE, put the
 * monitor source, and announce the new sink to clients and modules. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the sink. */
    pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sinks that use volume
     * sharing. When the master sink supports decibel volume, it would be good
     * to have the flag also in the filter sink, but currently we don't do that
     * so that the flags of the filter sink never change when it's moved from
     * a master sink to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sinks when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the sink implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the sink. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        pa_sink_enable_decibel_volume(s, TRUE);

    /* If the sink implementor support DB volumes by itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SINK_DECIBEL_VOLUME))
        enable_flat_volume(s, TRUE);

    if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
        /* Walk the filter chain up to the real (non-sharing) root sink
         * and copy its volumes, remapped to our channel map. */
        pa_sink *root_sink = s->input_to_master->sink;

        while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
            root_sink = root_sink->input_to_master->sink;

        s->reference_volume = root_sink->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);

        s->real_volume = root_sink->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    /* Mirror the main-thread volume/mute state into the IO-thread shadow. */
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    /* Consistency checks between flags, volumes, latencies and the
     * monitor source — all must agree before the sink goes live. */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
657
/* Called from main context.
 * Disconnect the sink from the core: fire the UNLINK hook, drop the
 * name registration and idxset memberships, kill all remaining sink
 * inputs, move to the UNLINKED state, clear implementor callbacks and
 * unlink the monitor source. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;   /* j tracks the previously killed input (loop-progress check) */

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every remaining input; pa_sink_input_kill() is expected to
     * remove the input from s->inputs, otherwise the assert fires. */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
707
/* Called from main context.
 * Final destructor, invoked when the last reference is dropped
 * (installed as parent.parent.free in pa_sink_new()). Unlinks the
 * sink if needed, then releases all owned resources. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* Drop the references the IO-thread hashmap still holds. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
754
755 /* Called from main context, and not while the IO thread is active, please */
756 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
757 pa_sink_assert_ref(s);
758 pa_assert_ctl_context();
759
760 s->asyncmsgq = q;
761
762 if (s->monitor_source)
763 pa_source_set_asyncmsgq(s->monitor_source, q);
764 }
765
766 /* Called from main context, and not while the IO thread is active, please */
767 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
768 pa_sink_assert_ref(s);
769 pa_assert_ctl_context();
770
771 if (mask == 0)
772 return;
773
774 /* For now, allow only a minimal set of flags to be changed. */
775 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
776
777 s->flags = (s->flags & ~mask) | (value & mask);
778
779 pa_source_update_flags(s->monitor_source,
780 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
781 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
782 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
783 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
784 }
785
786 /* Called from IO context, or before _put() from main context */
787 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
788 pa_sink_assert_ref(s);
789 pa_sink_assert_io_context(s);
790
791 s->thread_info.rtpoll = p;
792
793 if (s->monitor_source)
794 pa_source_set_rtpoll(s->monitor_source, p);
795 }
796
797 /* Called from main context */
798 int pa_sink_update_status(pa_sink*s) {
799 pa_sink_assert_ref(s);
800 pa_assert_ctl_context();
801 pa_assert(PA_SINK_IS_LINKED(s->state));
802
803 if (s->state == PA_SINK_SUSPENDED)
804 return 0;
805
806 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
807 }
808
809 /* Called from main context */
810 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
811 pa_sink_assert_ref(s);
812 pa_assert_ctl_context();
813 pa_assert(PA_SINK_IS_LINKED(s->state));
814 pa_assert(cause != 0);
815
816 if (suspend) {
817 s->suspend_cause |= cause;
818 s->monitor_source->suspend_cause |= cause;
819 } else {
820 s->suspend_cause &= ~cause;
821 s->monitor_source->suspend_cause &= ~cause;
822 }
823
824 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
825 return 0;
826
827 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
828
829 if (s->suspend_cause)
830 return sink_set_state(s, PA_SINK_SUSPENDED);
831 else
832 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
833 }
834
/* Called from main context.
 * Begin moving all inputs away from this sink: each input that can be
 * moved is started with pa_sink_input_start_move() and pushed (with a
 * reference held) onto queue 'q' (allocated here if NULL). The caller
 * later completes with pa_sink_move_all_finish() or aborts with
 * pa_sink_move_all_fail(). Returns the queue. */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next entry before touching the current one, since
     * start_move may remove 'i' from s->inputs. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);    /* queue keeps the reference */
        else
            pa_sink_input_unref(i);
    }

    return q;
}
860
861 /* Called from main context */
862 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
863 pa_sink_input *i;
864
865 pa_sink_assert_ref(s);
866 pa_assert_ctl_context();
867 pa_assert(PA_SINK_IS_LINKED(s->state));
868 pa_assert(q);
869
870 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
871 if (pa_sink_input_finish_move(i, s, save) < 0)
872 pa_sink_input_fail_move(i);
873
874 pa_sink_input_unref(i);
875 }
876
877 pa_queue_free(q, NULL, NULL);
878 }
879
880 /* Called from main context */
881 void pa_sink_move_all_fail(pa_queue *q) {
882 pa_sink_input *i;
883
884 pa_assert_ctl_context();
885 pa_assert(q);
886
887 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
888 pa_sink_input_fail_move(i);
889 pa_sink_input_unref(i);
890 }
891
892 pa_queue_free(q, NULL, NULL);
893 }
894
/* Called from IO thread context.
 * Execute a rewind of 'nbytes' bytes: rewinds the queued volume
 * changes (for sync-volume sinks), every attached sink input, and the
 * monitor source. Clears the rewind request state. A zero-byte call
 * with no pending request is a no-op; a suspended sink ignores the
 * rewind entirely. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes > 0) {
        pa_log_debug("Processing rewind...");
        /* Deferred hardware volume changes must be rolled back too. */
        if (s->flags & PA_SINK_SYNC_VOLUME)
            pa_sink_volume_change_rewind(s, nbytes);
    }

    /* Note: inputs are told about the rewind even when nbytes == 0
     * (a requested-but-empty rewind). */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0) {
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
    }
}
933
/* Called from IO thread context.
 * Peek up to 'maxinfo' non-silent chunks from the attached sink inputs
 * into the 'info' array, taking a reference on each contributing input
 * (stored in info->userdata). *length is shrunk to the shortest chunk
 * seen so all entries cover the same span. Returns the number of
 * entries filled. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Track the shortest chunk; silent chunks count toward it too. */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Pure silence does not need to be mixed in — drop it and reuse
         * this info slot for the next input. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
973
/* Called from IO thread context.
 * After mixing: advance every sink input by result->length bytes,
 * forward per-input audio to any direct outputs on the monitor source,
 * release the references and memblocks held in the 'info' array
 * (filled by fill_mix_info(), 'n' entries), and post the mixed result
 * to the monitor source. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;            /* round-robin cursor into info[] */
    unsigned n_unreffed = 0;   /* how many info entries we matched and released */

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry for i in the pa_mix_info
         * array. 'p' keeps its position across iterations, so when input
         * order is unchanged this hits on the first probe. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    /* Use this input's own (volume-adjusted) data for
                     * its direct outputs, truncated to the mixed span. */
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    /* Input contributed no mix entry (e.g. silence):
                     * feed the direct outputs silence instead. */
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        if (m) {
            /* Release this entry's chunk and input reference; clearing
             * userdata marks it as handled for the sweep below. */
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
1070
1071 /* Called from IO thread context */
1072 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1073 pa_mix_info info[MAX_MIX_CHANNELS];
1074 unsigned n;
1075 size_t block_size_max;
1076
1077 pa_sink_assert_ref(s);
1078 pa_sink_assert_io_context(s);
1079 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1080 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1081 pa_assert(result);
1082
1083 pa_assert(!s->thread_info.rewind_requested);
1084 pa_assert(s->thread_info.rewind_nbytes == 0);
1085
1086 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1087 result->memblock = pa_memblock_ref(s->silence.memblock);
1088 result->index = s->silence.index;
1089 result->length = PA_MIN(s->silence.length, length);
1090 return;
1091 }
1092
1093 pa_sink_ref(s);
1094
1095 if (length <= 0)
1096 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1097
1098 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1099 if (length > block_size_max)
1100 length = pa_frame_align(block_size_max, &s->sample_spec);
1101
1102 pa_assert(length > 0);
1103
1104 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1105
1106 if (n == 0) {
1107
1108 *result = s->silence;
1109 pa_memblock_ref(result->memblock);
1110
1111 if (result->length > length)
1112 result->length = length;
1113
1114 } else if (n == 1) {
1115 pa_cvolume volume;
1116
1117 *result = info[0].chunk;
1118 pa_memblock_ref(result->memblock);
1119
1120 if (result->length > length)
1121 result->length = length;
1122
1123 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1124
1125 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1126 pa_memblock_unref(result->memblock);
1127 pa_silence_memchunk_get(&s->core->silence_cache,
1128 s->core->mempool,
1129 result,
1130 &s->sample_spec,
1131 result->length);
1132 } else if (!pa_cvolume_is_norm(&volume)) {
1133 pa_memchunk_make_writable(result, 0);
1134 pa_volume_memchunk(result, &s->sample_spec, &volume);
1135 }
1136 } else {
1137 void *ptr;
1138 result->memblock = pa_memblock_new(s->core->mempool, length);
1139
1140 ptr = pa_memblock_acquire(result->memblock);
1141 result->length = pa_mix(info, n,
1142 ptr, length,
1143 &s->sample_spec,
1144 &s->thread_info.soft_volume,
1145 s->thread_info.soft_muted);
1146 pa_memblock_release(result->memblock);
1147
1148 result->index = 0;
1149 }
1150
1151 inputs_drop(s, info, n, result);
1152
1153 pa_sink_unref(s);
1154 }
1155
/* Called from IO thread context */
/* Like pa_sink_render(), but renders directly into the caller-supplied
 * writable chunk 'target'. target->length may be reduced if less data
 * could be produced in one go; callers that need the chunk completely
 * filled should use pa_sink_render_into_full() instead. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        /* Suspended: fill the target with silence */
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    /* Never render more than fits into a single memblock */
    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No inputs contributed data: write silence */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        /* Exactly one input: copy its data (with volume applied) instead
         * of running the mixer */
        pa_cvolume volume;

        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        /* Multiple inputs: mix straight into the target block */
        void *ptr;

        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    /* Consume the rendered data from all inputs and release the
     * references fill_mix_info() took */
    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1240
1241 /* Called from IO thread context */
1242 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1243 pa_memchunk chunk;
1244 size_t l, d;
1245
1246 pa_sink_assert_ref(s);
1247 pa_sink_assert_io_context(s);
1248 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1249 pa_assert(target);
1250 pa_assert(target->memblock);
1251 pa_assert(target->length > 0);
1252 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1253
1254 pa_assert(!s->thread_info.rewind_requested);
1255 pa_assert(s->thread_info.rewind_nbytes == 0);
1256
1257 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1258 pa_silence_memchunk(target, &s->sample_spec);
1259 return;
1260 }
1261
1262 pa_sink_ref(s);
1263
1264 l = target->length;
1265 d = 0;
1266 while (l > 0) {
1267 chunk = *target;
1268 chunk.index += d;
1269 chunk.length -= d;
1270
1271 pa_sink_render_into(s, &chunk);
1272
1273 d += chunk.length;
1274 l -= chunk.length;
1275 }
1276
1277 pa_sink_unref(s);
1278 }
1279
/* Called from IO thread context */
/* Render exactly 'length' bytes into *result: first try a single
 * pa_sink_render() call and, if that produced less, make the chunk
 * writable and fill the remainder via pa_sink_render_into_full(). */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_sink_ref(s);

    pa_sink_render(s, length, result);

    if (result->length < length) {
        pa_memchunk chunk;

        /* Grow the chunk to the full requested size (this may copy) and
         * render the missing tail in place. */
        pa_memchunk_make_writable(result, length);

        chunk.memblock = result->memblock;
        chunk.index = result->index + result->length;
        chunk.length = length - result->length;

        pa_sink_render_into_full(s, &chunk);

        result->length = length;
    }

    pa_sink_unref(s);
}
1312
1313 /* Called from main thread */
1314 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1315 pa_usec_t usec = 0;
1316
1317 pa_sink_assert_ref(s);
1318 pa_assert_ctl_context();
1319 pa_assert(PA_SINK_IS_LINKED(s->state));
1320
1321 /* The returned value is supposed to be in the time domain of the sound card! */
1322
1323 if (s->state == PA_SINK_SUSPENDED)
1324 return 0;
1325
1326 if (!(s->flags & PA_SINK_LATENCY))
1327 return 0;
1328
1329 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1330
1331 return usec;
1332 }
1333
/* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): invokes process_msg()
 * directly instead of round-tripping through the asyncmsgq. Returns 0
 * when suspended or when latency is not reported. NOTE(review): on a
 * process_msg() failure this returns -1, which wraps to a huge value
 * since pa_usec_t is unsigned — callers are expected to treat
 * (pa_usec_t) -1 as the error sentinel. */
pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    return usec;
}
1360
1361 /* Called from the main thread (and also from the IO thread while the main
1362 * thread is waiting).
1363 *
1364 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1365 * set. Instead, flat volume mode is detected by checking whether the root sink
1366 * has the flag set. */
1367 pa_bool_t pa_sink_flat_volume_enabled(pa_sink *s) {
1368 pa_sink_assert_ref(s);
1369
1370 while (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1371 s = s->input_to_master->sink;
1372
1373 return (s->flags & PA_SINK_FLAT_VOLUME);
1374 }
1375
1376 /* Called from main context */
1377 pa_bool_t pa_sink_is_passthrough(pa_sink *s) {
1378 pa_sink_input *alt_i;
1379 uint32_t idx;
1380
1381 pa_sink_assert_ref(s);
1382
1383 /* one and only one PASSTHROUGH input can possibly be connected */
1384 if (pa_idxset_size(s->inputs) == 1) {
1385 alt_i = pa_idxset_first(s->inputs, &idx);
1386
1387 if (pa_sink_input_is_passthrough(alt_i))
1388 return TRUE;
1389 }
1390
1391 return FALSE;
1392 }
1393
/* Called from main context. */
/* Recompute i->reference_ratio, the per-channel ratio between the
 * stream's volume and its sink's reference volume. Only meaningful
 * (and only called) when flat volume mode is in effect. */
static void compute_reference_ratio(pa_sink_input *i) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(i);
    pa_assert(pa_sink_flat_volume_enabled(i->sink));

    /*
     * Calculates the reference ratio from the sink's reference
     * volume. This basically calculates:
     *
     * i->reference_ratio = i->volume / i->sink->reference_volume
     */

    /* Remap the sink volume into the stream's channel map first */
    remapped = i->sink->reference_volume;
    pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);

    i->reference_ratio.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++) {

        /* We don't update when the sink volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary
         * (avoids accumulating rounding error from divide/multiply) */
        if (pa_sw_volume_multiply(
                    i->reference_ratio.values[c],
                    remapped.values[c]) == i->volume.values[c])
            continue;

        i->reference_ratio.values[c] = pa_sw_volume_divide(
                i->volume.values[c],
                remapped.values[c]);
    }
}
1431
1432 /* Called from main context. Only called for the root sink in volume sharing
1433 * cases, except for internal recursive calls. */
1434 static void compute_reference_ratios(pa_sink *s) {
1435 uint32_t idx;
1436 pa_sink_input *i;
1437
1438 pa_sink_assert_ref(s);
1439 pa_assert_ctl_context();
1440 pa_assert(PA_SINK_IS_LINKED(s->state));
1441 pa_assert(pa_sink_flat_volume_enabled(s));
1442
1443 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1444 compute_reference_ratio(i);
1445
1446 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1447 compute_reference_ratios(i->origin_sink);
1448 }
1449 }
1450
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Recompute i->real_ratio and i->soft_volume for every input of this
 * sink, descending into volume-sharing filter sinks. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin sink uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
            i->soft_volume = i->volume_factor;

            compute_real_ratios(i->origin_sink);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Remap the sink's real volume into the stream's channel map */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1517
/* Remap *v from channel map 'from' to 'to', choosing the mapping with
 * the least cross-stream impact (see block comment below). 'template'
 * is an existing volume in the 'to' map (typically the current maximum)
 * used to detect a lossless remapping. Returns v, modified in place. */
static pa_cvolume *cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from sink input to sink volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the sink to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the sink. */

    if (pa_channel_map_equal(from, to))
        return v;

    /* If remapping template back to 'from' reproduces v, then template
     * is an exact image of v in the 'to' map -- use it directly. */
    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Otherwise fall back to a flat all-channel volume */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
1556
1557 /* Called from main thread. Only called for the root sink in volume sharing
1558 * cases, except for internal recursive calls. */
1559 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1560 pa_sink_input *i;
1561 uint32_t idx;
1562
1563 pa_sink_assert_ref(s);
1564 pa_assert(max_volume);
1565 pa_assert(channel_map);
1566 pa_assert(pa_sink_flat_volume_enabled(s));
1567
1568 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1569 pa_cvolume remapped;
1570
1571 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1572 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1573
1574 /* Ignore this input. The origin sink uses volume sharing, so this
1575 * input's volume will be set to be equal to the root sink's real
1576 * volume. Obviously this input's current volume must not then
1577 * affect what the root sink's real volume will be. */
1578 continue;
1579 }
1580
1581 remapped = i->volume;
1582 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1583 pa_cvolume_merge(max_volume, max_volume, &remapped);
1584 }
1585 }
1586
1587 /* Called from main thread. Only called for the root sink in volume sharing
1588 * cases, except for internal recursive calls. */
1589 static pa_bool_t has_inputs(pa_sink *s) {
1590 pa_sink_input *i;
1591 uint32_t idx;
1592
1593 pa_sink_assert_ref(s);
1594
1595 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1596 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1597 return TRUE;
1598 }
1599
1600 return FALSE;
1601 }
1602
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Set s->real_volume to *new_volume (remapped from channel_map into the
 * sink's own map) and propagate it down to all volume-sharing filter
 * sinks. In flat volume mode the filter sinks' connecting inputs are
 * made to follow the root's real volume, with change notifications. */
static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_sink_flat_volume_enabled(s)) {
                pa_cvolume old_volume = i->volume;

                /* Follow the root sink's real volume. */
                i->volume = *new_volume;
                pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
                compute_reference_ratio(i);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &i->volume)) {
                    if (i->volume_changed)
                        i->volume_changed(i);

                    pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
                }
            }

            /* Recurse into the filter sink */
            update_real_volume(i->origin_sink, new_volume, channel_map);
        }
    }
}
1639
/* Called from main thread. Only called for the root sink in shared volume
 * cases. */
/* Derive s->real_volume from the volumes of all connected streams and
 * update each stream's real ratio / soft volume accordingly. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_inputs(s)) {
        /* In the special case that we have no sink inputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1670
/* Called from main thread. Only called for the root sink in shared volume
 * cases, except for internal recursive calls. */
/* Push a sink-level reference volume change back down to the volumes
 * of all attached streams, using each stream's reference ratio. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(i->origin_sink);

            /* Since the origin sink uses volume sharing, this input's volume
             * needs to be updated to match the root sink's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        i->volume = s->reference_volume;
        pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1718
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
/* Set s->reference_volume to *v (remapped from channel_map), post a
 * change event if it actually changed, and propagate the new value to
 * all volume-sharing filter sinks below. */
static pa_bool_t update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
    pa_cvolume volume;
    pa_bool_t reference_volume_changed;
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* Keep an existing save request alive if nothing changed */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        /* If the root sink's volume doesn't change, then there can't be any
         * changes in the other sinks in the sink tree either.
         *
         * It's probably theoretically possible that even if the root sink's
         * volume changes slightly, some filter sink doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root sink volume to the sinks connected to the
         * intermediate sink that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here FALSE always if
         * reference_volume_changed is FALSE. */
        return FALSE;

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(i->origin_sink, v, channel_map, FALSE);
    }

    return TRUE;
}
1766
/* Called from main thread */
/* Set the sink volume (or, if 'volume' is NULL, re-synchronize the
 * sink's reference/real volumes from the stream volumes in flat volume
 * mode).
 *
 * volume   - new reference volume; mono volumes are accepted for any
 *            channel map; NULL triggers flat-volume resync.
 * send_msg - whether to notify the IO thread of the new soft/real volume.
 * save     - whether the new volume should be stored persistently. */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume new_reference_volume;
    pa_sink *root_sink = s;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_sink_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH input is connected */
    if (pa_sink_is_passthrough(s)) {
        /* FIXME: Need to notify client that volume control is disabled */
        pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
        return;
    }

    /* In case of volume sharing, the volume is set for the root sink first,
     * from which it's then propagated to the sharing sinks. */
    while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
        root_sink = root_sink->input_to_master->sink;

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current per-channel volume to it */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
    }

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    if (volume) {
        if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
            if (pa_sink_flat_volume_enabled(root_sink)) {
                /* OK, propagate this volume change back to the inputs */
                propagate_reference_volume(root_sink);

                /* And now recalculate the real volume */
                compute_real_volume(root_sink);
            } else
                update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
        }

    } else {
        pa_assert(pa_sink_flat_volume_enabled(root_sink));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_sink);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
        update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_sink);
    }

    if (root_sink->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_sink->soft_volume */

        pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
        if (!(root_sink->flags & PA_SINK_SYNC_VOLUME))
            root_sink->set_volume(root_sink);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_sink->soft_volume = root_sink->real_volume;

    /* This tells the sink that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
1859
1860 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1861 * Only to be called by sink implementor */
1862 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1863
1864 pa_sink_assert_ref(s);
1865 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1866
1867 if (s->flags & PA_SINK_SYNC_VOLUME)
1868 pa_sink_assert_io_context(s);
1869 else
1870 pa_assert_ctl_context();
1871
1872 if (!volume)
1873 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1874 else
1875 s->soft_volume = *volume;
1876
1877 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_SYNC_VOLUME))
1878 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1879 else
1880 s->thread_info.soft_volume = s->soft_volume;
1881 }
1882
/* Called from the main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* React to an externally caused hardware volume change: adopt the new
 * real volume as the reference volume and rebuild all stream volumes
 * from their (unchanged) real ratios. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
    }

    if (pa_sink_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            i->volume = s->reference_volume;
            pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }

            if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(i->origin_sink, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = TRUE;
}
1948
1949 /* Called from io thread */
1950 void pa_sink_update_volume_and_mute(pa_sink *s) {
1951 pa_assert(s);
1952 pa_sink_assert_io_context(s);
1953
1954 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1955 }
1956
/* Called from main thread */
/* Return the sink's reference volume, optionally re-reading the real
 * volume from the hardware first (when force_refresh is set or the
 * sink asks for refreshes) and propagating any externally caused
 * change to the streams. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Without sync volume the driver callback runs here in the main
         * thread; with sync volume the IO thread handles it via the
         * GET_VOLUME message below. */
        if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume)
            s->get_volume(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
1981
1982 /* Called from main thread. In volume sharing cases, only the root sink may
1983 * call this. */
1984 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1985 pa_cvolume old_real_volume;
1986
1987 pa_sink_assert_ref(s);
1988 pa_assert_ctl_context();
1989 pa_assert(PA_SINK_IS_LINKED(s->state));
1990 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1991
1992 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1993
1994 old_real_volume = s->real_volume;
1995 update_real_volume(s, new_real_volume, &s->channel_map);
1996 propagate_real_volume(s, &old_real_volume);
1997 }
1998
/* Called from main thread */
/* Sets the sink's mute state. `save` marks the setting as user-chosen so
 * restore modules persist it. The driver's set_mute() callback runs here
 * in the main thread unless the sink uses deferred (SYNC) volume, in which
 * case the IO thread applies it while handling PA_SINK_MESSAGE_SET_MUTE
 * (which is sent in both cases, to keep soft mute in sync). Subscribers
 * are only notified if the effective state actually changed. */
void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
    pa_bool_t old_muted;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    old_muted = s->muted;
    s->muted = mute;
    /* Keep an existing save flag if nothing changed; otherwise take the
     * caller's wish. */
    s->save_muted = (old_muted == s->muted && s->save_muted) || save;

    if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->set_mute)
        s->set_mute(s);

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);

    if (old_muted != s->muted)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
2019
/* Called from main thread */
/* Returns the sink's mute state, optionally re-querying the hardware first
 * (mirrors pa_sink_get_volume()). A hardware-initiated change is treated
 * as a user decision (save_muted = TRUE) and announced to subscribers. */
pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        /* Non-deferred volume: query the driver directly here ... */
        if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_mute)
            s->get_mute(s);

        /* ... and let the IO thread query/sync its side. */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
2047
/* Called from main thread */
/* Notification hook for sink implementations: the hardware mute state
 * changed behind our back; adopt it, mark it user-chosen and notify
 * subscribers. No-op when the state is unchanged. */
void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The sink implementor may call this if the mute state changed to make sure everyone is notified */

    if (s->muted == new_muted)
        return;

    s->muted = new_muted;
    s->save_muted = TRUE;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
2064
2065 /* Called from main thread */
2066 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2067 pa_sink_assert_ref(s);
2068 pa_assert_ctl_context();
2069
2070 if (p)
2071 pa_proplist_update(s->proplist, mode, p);
2072
2073 if (PA_SINK_IS_LINKED(s->state)) {
2074 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2075 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2076 }
2077
2078 return TRUE;
2079 }
2080
/* Called from main thread */
/* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Sets (or, with description == NULL, clears) the human-readable device
 * description property, keeps the monitor source's description in step,
 * and notifies hooks/subscribers if the sink is linked. No-ops when the
 * description is already the requested one. */
void pa_sink_set_description(pa_sink *s, const char *description) {
    const char *old;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Clearing an already-absent property: nothing to do */
    if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
        return;

    old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    if (old && description && pa_streq(old, description))
        return;

    if (description)
        pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
    else
        pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    if (s->monitor_source) {
        char *n;

        /* Derive the monitor's description from ours (fall back to the
         * sink name when the description was cleared) */
        n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
        pa_source_set_description(s->monitor_source, n);
        pa_xfree(n);
    }

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
    }
}
2114
2115 /* Called from main thread */
2116 unsigned pa_sink_linked_by(pa_sink *s) {
2117 unsigned ret;
2118
2119 pa_sink_assert_ref(s);
2120 pa_assert_ctl_context();
2121 pa_assert(PA_SINK_IS_LINKED(s->state));
2122
2123 ret = pa_idxset_size(s->inputs);
2124
2125 /* We add in the number of streams connected to us here. Please
2126 * note the asymmmetry to pa_sink_used_by()! */
2127
2128 if (s->monitor_source)
2129 ret += pa_source_linked_by(s->monitor_source);
2130
2131 return ret;
2132 }
2133
2134 /* Called from main thread */
2135 unsigned pa_sink_used_by(pa_sink *s) {
2136 unsigned ret;
2137
2138 pa_sink_assert_ref(s);
2139 pa_assert_ctl_context();
2140 pa_assert(PA_SINK_IS_LINKED(s->state));
2141
2142 ret = pa_idxset_size(s->inputs);
2143 pa_assert(ret >= s->n_corked);
2144
2145 /* Streams connected to our monitor source do not matter for
2146 * pa_sink_used_by()!.*/
2147
2148 return ret - s->n_corked;
2149 }
2150
2151 /* Called from main thread */
2152 unsigned pa_sink_check_suspend(pa_sink *s) {
2153 unsigned ret;
2154 pa_sink_input *i;
2155 uint32_t idx;
2156
2157 pa_sink_assert_ref(s);
2158 pa_assert_ctl_context();
2159
2160 if (!PA_SINK_IS_LINKED(s->state))
2161 return 0;
2162
2163 ret = 0;
2164
2165 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2166 pa_sink_input_state_t st;
2167
2168 st = pa_sink_input_get_state(i);
2169
2170 /* We do not assert here. It is perfectly valid for a sink input to
2171 * be in the INIT state (i.e. created, marked done but not yet put)
2172 * and we should not care if it's unlinked as it won't contribute
2173 * towarards our busy status.
2174 */
2175 if (!PA_SINK_INPUT_IS_LINKED(st))
2176 continue;
2177
2178 if (st == PA_SINK_INPUT_CORKED)
2179 continue;
2180
2181 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2182 continue;
2183
2184 ret ++;
2185 }
2186
2187 if (s->monitor_source)
2188 ret += pa_source_check_suspend(s->monitor_source);
2189
2190 return ret;
2191 }
2192
/* Called from the IO thread */
/* Copies each input's main-thread soft volume into its thread_info copy
 * and requests a rewind so the new volume takes effect on already-rendered
 * audio. Inputs whose volume is already in sync are skipped. */
static void sync_input_volumes_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
            continue;

        i->thread_info.soft_volume = i->soft_volume;
        pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
    }
}
2209
/* Called from the IO thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Applies the shared volume on this sink (via the synchronous
 * SET_VOLUME_SYNCED message handled in-place) and then recurses into all
 * filter sinks that share their volume with us, so the whole volume-
 * sharing tree ends up consistent. */
static void set_shared_volume_within_thread(pa_sink *s) {
    pa_sink_input *i = NULL;
    void *state = NULL;

    pa_sink_assert_ref(s);

    /* Direct call into the message handler -- we are already in the right
     * thread context, no queueing needed. */
    PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            set_shared_volume_within_thread(i->origin_sink);
    }
}
2225
/* Called from IO thread, except when it is not */
/* Central message dispatcher for the sink. Most messages arrive on the IO
 * thread via the asyncmsgq; PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE is the
 * exception and is handled in the main thread (it asserts so). Returns 0
 * on success, -1 for unknown message codes. Note the deliberate
 * fall-throughs in the volume message cases. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Mirror the main-thread sync chain pointers into
             * thread_info and hook ourselves into our neighbours */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unhook ourselves from the thread-side sync chain */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    /* Rewind everything we have already handed to the
                     * sink plus what's buffered in the render queue, so
                     * the client data can be replayed on the new sink */
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Drop what the new sink already has buffered so the
                 * stream doesn't play it twice */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            pa_sink *root_sink = s;

            /* Walk up the filter chain to the real device sink */
            while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
                root_sink = root_sink->input_to_master->sink;

            set_shared_volume_within_thread(root_sink);
            return 0;
        }

        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            /* Deferred-volume sinks: apply HW volume from the IO thread
             * and schedule the actual write */
            if (s->flags & PA_SINK_SYNC_VOLUME) {
                s->set_volume(s);
                pa_sink_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (s->flags & PA_SINK_SYNC_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:

            if (s->flags & PA_SINK_SYNC_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Did this transition cross the suspended/opened boundary in
             * either direction? */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A suspended sink never rewinds; drop any pending request */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct sink_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            pa_sink_get_volume(s, TRUE);
            pa_sink_get_mute(s, TRUE);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            /* Implemented by the sink's own process_msg, not here */
            ;
    }

    return -1;
}
2602
2603 /* Called from main thread */
2604 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2605 pa_sink *sink;
2606 uint32_t idx;
2607 int ret = 0;
2608
2609 pa_core_assert_ref(c);
2610 pa_assert_ctl_context();
2611 pa_assert(cause != 0);
2612
2613 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2614 int r;
2615
2616 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2617 ret = r;
2618 }
2619
2620 return ret;
2621 }
2622
/* Called from main thread */
/* Synchronously asks the IO thread to detach all streams (see
 * pa_sink_detach_within_thread()); blocks until done. */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2631
/* Called from main thread */
/* Synchronously asks the IO thread to reattach all streams (see
 * pa_sink_attach_within_thread()); blocks until done. */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2640
/* Called from IO thread */
/* Runs each attached input's detach() callback and detaches the monitor
 * source's streams as well. Counterpart of
 * pa_sink_attach_within_thread(). */
void pa_sink_detach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->detach)
            i->detach(i);

    if (s->monitor_source)
        pa_source_detach_within_thread(s->monitor_source);
}
2657
/* Called from IO thread */
/* Runs each attached input's attach() callback and reattaches the monitor
 * source's streams as well. Counterpart of
 * pa_sink_detach_within_thread(). */
void pa_sink_attach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->attach)
            i->attach(i);

    if (s->monitor_source)
        pa_source_attach_within_thread(s->monitor_source);
}
2674
2675 /* Called from IO thread */
2676 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2677 pa_sink_assert_ref(s);
2678 pa_sink_assert_io_context(s);
2679 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2680
2681 if (s->thread_info.state == PA_SINK_SUSPENDED)
2682 return;
2683
2684 if (nbytes == (size_t) -1)
2685 nbytes = s->thread_info.max_rewind;
2686
2687 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2688
2689 if (s->thread_info.rewind_requested &&
2690 nbytes <= s->thread_info.rewind_nbytes)
2691 return;
2692
2693 s->thread_info.rewind_nbytes = nbytes;
2694 s->thread_info.rewind_requested = TRUE;
2695
2696 if (s->request_rewind)
2697 s->request_rewind(s);
2698 }
2699
/* Called from IO thread */
/* Computes the effective requested latency: the minimum over all inputs'
 * requested sink latencies and the monitor source's requested latency,
 * clamped to [min_latency, max_latency]. Returns (pa_usec_t) -1 when
 * nobody requested anything. For fixed-latency sinks the clamped fixed
 * latency is returned directly. The result is cached once the sink is
 * linked. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Take the smallest latency any input asked for ... */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ... and factor in the monitor source's request too */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2738
/* Called from main thread */
/* Main-thread view of the requested latency. Returns 0 while suspended;
 * otherwise asks the IO thread, which maps "no request" (-1) to
 * max_latency before replying (see the GET_REQUESTED_LATENCY handler). */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
2754
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates thread_info.max_rewind and pushes the new value to all attached
 * inputs (only when linked) and to the monitor source. No-op when
 * unchanged. */
void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (max_rewind == s->thread_info.max_rewind)
        return;

    s->thread_info.max_rewind = max_rewind;

    if (PA_SINK_IS_LINKED(s->thread_info.state))
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);

    if (s->monitor_source)
        pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
}
2775
/* Called from main thread */
/* Sets max_rewind: routed through the IO thread when it is already
 * running (sink linked), applied directly otherwise. */
void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
    else
        pa_sink_set_max_rewind_within_thread(s, max_rewind);
}
2786
2787 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2788 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2789 void *state = NULL;
2790
2791 pa_sink_assert_ref(s);
2792 pa_sink_assert_io_context(s);
2793
2794 if (max_request == s->thread_info.max_request)
2795 return;
2796
2797 s->thread_info.max_request = max_request;
2798
2799 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2800 pa_sink_input *i;
2801
2802 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2803 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2804 }
2805 }
2806
/* Called from main thread */
/* Sets max_request: routed through the IO thread when it is already
 * running (sink linked), applied directly otherwise. */
void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
    else
        pa_sink_set_max_request_within_thread(s, max_request);
}
2817
/* Called from IO thread */
/* Drops the cached requested-latency value and tells the sink and all
 * inputs to recompute. `dynamic` indicates the change originated from a
 * dynamic-latency source; for fixed-latency sinks such changes are
 * irrelevant and the call returns early. */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        if (s->update_requested_latency)
            s->update_requested_latency(s);

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2841
/* Called from main thread */
/* Configures the sink's supported latency range, clamping to the absolute
 * bounds (0 for max_latency means "no limit"). When the sink is linked the
 * change is forwarded to the IO thread; otherwise it is applied
 * directly. */
void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* min_latency == 0: no limit
     * min_latency anything else: specified limit
     *
     * Similar for max_latency */

    if (min_latency < ABSOLUTE_MIN_LATENCY)
        min_latency = ABSOLUTE_MIN_LATENCY;

    if (max_latency <= 0 ||
        max_latency > ABSOLUTE_MAX_LATENCY)
        max_latency = ABSOLUTE_MAX_LATENCY;

    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2];

        r[0] = min_latency;
        r[1] = max_latency;

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
    } else
        pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
}
2876
2877 /* Called from main thread */
2878 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2879 pa_sink_assert_ref(s);
2880 pa_assert_ctl_context();
2881 pa_assert(min_latency);
2882 pa_assert(max_latency);
2883
2884 if (PA_SINK_IS_LINKED(s->state)) {
2885 pa_usec_t r[2] = { 0, 0 };
2886
2887 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2888
2889 *min_latency = r[0];
2890 *max_latency = r[1];
2891 } else {
2892 *min_latency = s->thread_info.min_latency;
2893 *max_latency = s->thread_info.max_latency;
2894 }
2895 }
2896
/* Called from IO thread */
/* IO-thread side of pa_sink_set_latency_range(): stores the new bounds,
 * informs all inputs, invalidates the cached requested latency and keeps
 * the monitor source's range in step. No-op when unchanged. */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2931
/* Called from main thread */
/* Sets the fixed latency of a non-dynamic-latency sink (dynamic sinks must
 * pass 0 and the call is a no-op for them). Clamped to the absolute
 * bounds; routed through the IO thread when linked; the monitor source is
 * updated as well. */
void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    if (latency < ABSOLUTE_MIN_LATENCY)
        latency = ABSOLUTE_MIN_LATENCY;

    if (latency > ABSOLUTE_MAX_LATENCY)
        latency = ABSOLUTE_MAX_LATENCY;

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
    else
        s->thread_info.fixed_latency = latency;

    pa_source_set_fixed_latency(s->monitor_source, latency);
}
2955
/* Called from main thread */
/* Returns the sink's fixed latency, or 0 for dynamic-latency sinks.
 * Fetched from the IO thread when it is running, read directly before
 * that. */
pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
    pa_usec_t latency;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (s->flags & PA_SINK_DYNAMIC_LATENCY)
        return 0;

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
    else
        latency = s->thread_info.fixed_latency;

    return latency;
}
2973
/* Called from IO thread */
/* IO-thread side of pa_sink_set_fixed_latency(): stores the new value,
 * informs all inputs, invalidates the cached requested latency and
 * updates the monitor source. No-op for dynamic-latency sinks (which must
 * pass 0) and when the value is unchanged. */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
3005
3006 /* Called from main context */
3007 size_t pa_sink_get_max_rewind(pa_sink *s) {
3008 size_t r;
3009 pa_assert_ctl_context();
3010 pa_sink_assert_ref(s);
3011
3012 if (!PA_SINK_IS_LINKED(s->state))
3013 return s->thread_info.max_rewind;
3014
3015 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3016
3017 return r;
3018 }
3019
/* Called from main context */
/* Returns the sink's max_request. Fetched from the IO thread when it is
 * running (sink linked), read from thread_info directly before that. */
size_t pa_sink_get_max_request(pa_sink *s) {
    size_t r;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (!PA_SINK_IS_LINKED(s->state))
        return s->thread_info.max_request;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);

    return r;
}
3033
3034 /* Called from main context */
3035 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
3036 pa_device_port *port;
3037 int ret;
3038
3039 pa_sink_assert_ref(s);
3040 pa_assert_ctl_context();
3041
3042 if (!s->set_port) {
3043 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3044 return -PA_ERR_NOTIMPLEMENTED;
3045 }
3046
3047 if (!s->ports)
3048 return -PA_ERR_NOENTITY;
3049
3050 if (!(port = pa_hashmap_get(s->ports, name)))
3051 return -PA_ERR_NOENTITY;
3052
3053 if (s->active_port == port) {
3054 s->save_port = s->save_port || save;
3055 return 0;
3056 }
3057
3058 if (s->flags & PA_SINK_SYNC_VOLUME) {
3059 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3060 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3061 ret = msg.ret;
3062 }
3063 else
3064 ret = s->set_port(s, port);
3065
3066 if (ret < 0)
3067 return -PA_ERR_NOENTITY;
3068
3069 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3070
3071 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3072
3073 s->active_port = port;
3074 s->save_port = save;
3075
3076 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3077
3078 return 0;
3079 }
3080
3081 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
3082 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3083
3084 pa_assert(p);
3085
3086 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3087 return TRUE;
3088
3089 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3090
3091 if (pa_streq(ff, "microphone"))
3092 t = "audio-input-microphone";
3093 else if (pa_streq(ff, "webcam"))
3094 t = "camera-web";
3095 else if (pa_streq(ff, "computer"))
3096 t = "computer";
3097 else if (pa_streq(ff, "handset"))
3098 t = "phone";
3099 else if (pa_streq(ff, "portable"))
3100 t = "multimedia-player";
3101 else if (pa_streq(ff, "tv"))
3102 t = "video-display";
3103
3104 /*
3105 * The following icons are not part of the icon naming spec,
3106 * because Rodney Dawes sucks as the maintainer of that spec.
3107 *
3108 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3109 */
3110 else if (pa_streq(ff, "headset"))
3111 t = "audio-headset";
3112 else if (pa_streq(ff, "headphone"))
3113 t = "audio-headphones";
3114 else if (pa_streq(ff, "speaker"))
3115 t = "audio-speakers";
3116 else if (pa_streq(ff, "hands-free"))
3117 t = "audio-handsfree";
3118 }
3119
3120 if (!t)
3121 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3122 if (pa_streq(c, "modem"))
3123 t = "modem";
3124
3125 if (!t) {
3126 if (is_sink)
3127 t = "audio-card";
3128 else
3129 t = "audio-input-microphone";
3130 }
3131
3132 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3133 if (strstr(profile, "analog"))
3134 s = "-analog";
3135 else if (strstr(profile, "iec958"))
3136 s = "-iec958";
3137 else if (strstr(profile, "hdmi"))
3138 s = "-hdmi";
3139 }
3140
3141 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3142
3143 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3144
3145 return TRUE;
3146 }
3147
3148 pa_bool_t pa_device_init_description(pa_proplist *p) {
3149 const char *s, *d = NULL, *k;
3150 pa_assert(p);
3151
3152 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3153 return TRUE;
3154
3155 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3156 if (pa_streq(s, "internal"))
3157 d = _("Internal Audio");
3158
3159 if (!d)
3160 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3161 if (pa_streq(s, "modem"))
3162 d = _("Modem");
3163
3164 if (!d)
3165 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3166
3167 if (!d)
3168 return FALSE;
3169
3170 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3171
3172 if (d && k)
3173 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
3174 else if (d)
3175 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3176
3177 return TRUE;
3178 }
3179
3180 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
3181 const char *s;
3182 pa_assert(p);
3183
3184 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3185 return TRUE;
3186
3187 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3188 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3189 || pa_streq(s, "headset")) {
3190 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3191 return TRUE;
3192 }
3193
3194 return FALSE;
3195 }
3196
3197 unsigned pa_device_init_priority(pa_proplist *p) {
3198 const char *s;
3199 unsigned priority = 0;
3200
3201 pa_assert(p);
3202
3203 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3204
3205 if (pa_streq(s, "sound"))
3206 priority += 9000;
3207 else if (!pa_streq(s, "modem"))
3208 priority += 1000;
3209 }
3210
3211 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3212
3213 if (pa_streq(s, "internal"))
3214 priority += 900;
3215 else if (pa_streq(s, "speaker"))
3216 priority += 500;
3217 else if (pa_streq(s, "headphone"))
3218 priority += 400;
3219 }
3220
3221 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3222
3223 if (pa_streq(s, "pci"))
3224 priority += 50;
3225 else if (pa_streq(s, "usb"))
3226 priority += 40;
3227 else if (pa_streq(s, "bluetooth"))
3228 priority += 30;
3229 }
3230
3231 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3232
3233 if (pa_startswith(s, "analog-"))
3234 priority += 9;
3235 else if (pa_startswith(s, "iec958-"))
3236 priority += 8;
3237 }
3238
3239 return priority;
3240 }
3241
/* Free list that recycles pa_sink_volume_change structs between
 * IO-thread allocations (see pa_sink_volume_change_new()/_free()). */
PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3243
3244 /* Called from the IO thread. */
3245 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3246 pa_sink_volume_change *c;
3247 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3248 c = pa_xnew(pa_sink_volume_change, 1);
3249
3250 PA_LLIST_INIT(pa_sink_volume_change, c);
3251 c->at = 0;
3252 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3253 return c;
3254 }
3255
3256 /* Called from the IO thread. */
3257 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3258 pa_assert(c);
3259 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3260 pa_xfree(c);
3261 }
3262
3263 /* Called from the IO thread. */
3264 void pa_sink_volume_change_push(pa_sink *s) {
3265 pa_sink_volume_change *c = NULL;
3266 pa_sink_volume_change *nc = NULL;
3267 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3268
3269 const char *direction = NULL;
3270
3271 pa_assert(s);
3272 nc = pa_sink_volume_change_new(s);
3273
3274 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3275 * Adding one more volume for HW would get us rid of this, but I am trying
3276 * to survive with the ones we already have. */
3277 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3278
3279 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3280 pa_log_debug("Volume not changing");
3281 pa_sink_volume_change_free(nc);
3282 return;
3283 }
3284
3285 nc->at = pa_sink_get_latency_within_thread(s);
3286 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3287
3288 if (s->thread_info.volume_changes_tail) {
3289 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3290 /* If volume is going up let's do it a bit late. If it is going
3291 * down let's do it a bit early. */
3292 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3293 if (nc->at + safety_margin > c->at) {
3294 nc->at += safety_margin;
3295 direction = "up";
3296 break;
3297 }
3298 }
3299 else if (nc->at - safety_margin > c->at) {
3300 nc->at -= safety_margin;
3301 direction = "down";
3302 break;
3303 }
3304 }
3305 }
3306
3307 if (c == NULL) {
3308 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3309 nc->at += safety_margin;
3310 direction = "up";
3311 } else {
3312 nc->at -= safety_margin;
3313 direction = "down";
3314 }
3315 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3316 }
3317 else {
3318 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3319 }
3320
3321 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3322
3323 /* We can ignore volume events that came earlier but should happen later than this. */
3324 PA_LLIST_FOREACH(c, nc->next) {
3325 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3326 pa_sink_volume_change_free(c);
3327 }
3328 nc->next = NULL;
3329 s->thread_info.volume_changes_tail = nc;
3330 }
3331
3332 /* Called from the IO thread. */
3333 static void pa_sink_volume_change_flush(pa_sink *s) {
3334 pa_sink_volume_change *c = s->thread_info.volume_changes;
3335 pa_assert(s);
3336 s->thread_info.volume_changes = NULL;
3337 s->thread_info.volume_changes_tail = NULL;
3338 while (c) {
3339 pa_sink_volume_change *next = c->next;
3340 pa_sink_volume_change_free(c);
3341 c = next;
3342 }
3343 }
3344
3345 /* Called from the IO thread. */
3346 pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3347 pa_usec_t now = pa_rtclock_now();
3348 pa_bool_t ret = FALSE;
3349
3350 pa_assert(s);
3351 pa_assert(s->write_volume);
3352
3353 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3354 pa_sink_volume_change *c = s->thread_info.volume_changes;
3355 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3356 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3357 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3358 ret = TRUE;
3359 s->thread_info.current_hw_volume = c->hw_volume;
3360 pa_sink_volume_change_free(c);
3361 }
3362
3363 if (s->write_volume && ret)
3364 s->write_volume(s);
3365
3366 if (s->thread_info.volume_changes) {
3367 if (usec_to_next)
3368 *usec_to_next = s->thread_info.volume_changes->at - now;
3369 if (pa_log_ratelimit(PA_LOG_DEBUG))
3370 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3371 }
3372 else {
3373 if (usec_to_next)
3374 *usec_to_next = 0;
3375 s->thread_info.volume_changes_tail = NULL;
3376 }
3377 return ret;
3378 }
3379
/* Called from the IO thread. */
/* Called after a stream rewind of 'nbytes': queued volume events that
 * lie beyond the current latency window are shifted to happen earlier,
 * since the audio they were scheduled against has been rewound. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
    /* Rewound bytes expressed as playback time. */
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit = pa_sink_get_latency_within_thread(s);

    pa_log_debug("latency = %lld", (long long) limit);
    /* 'limit' becomes the earliest wall-clock deadline an event may keep. */
    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        pa_usec_t modified_limit = limit;
        /* Mirror the push logic: volume going down may happen a bit
         * early, volume going up a bit late (safety margin). */
        if (prev_vol > pa_cvolume_avg(&c->hw_volume))
            modified_limit -= s->thread_info.volume_change_safety_margin;
        else
            modified_limit += s->thread_info.volume_change_safety_margin;
        if (c->at > modified_limit) {
            c->at -= rewound;
            /* Never pull an event earlier than its adjusted limit. */
            if (c->at < modified_limit)
                c->at = modified_limit;
        }
        prev_vol = pa_cvolume_avg(&c->hw_volume);
    }
    /* Some events may now be due; apply them immediately. */
    pa_sink_volume_change_apply(s, NULL);
}
3406
3407 /* Called from the main thread */
3408 /* Gets the list of formats supported by the sink. The members and idxset must
3409 * be freed by the caller. */
3410 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3411 pa_idxset *ret;
3412
3413 pa_assert(s);
3414
3415 if (s->get_formats) {
3416 /* Sink supports format query, all is good */
3417 ret = s->get_formats(s);
3418 } else {
3419 /* Sink doesn't support format query, so assume it does PCM */
3420 pa_format_info *f = pa_format_info_new();
3421 f->encoding = PA_ENCODING_PCM;
3422
3423 ret = pa_idxset_new(NULL, NULL);
3424 pa_idxset_put(ret, f, NULL);
3425 }
3426
3427 return ret;
3428 }
3429
3430 /* Called from the main thread */
3431 /* Checks if the sink can accept this format */
3432 pa_bool_t pa_sink_check_format(pa_sink *s, pa_format_info *f)
3433 {
3434 pa_idxset *formats = NULL;
3435 pa_bool_t ret = FALSE;
3436
3437 pa_assert(s);
3438 pa_assert(f);
3439
3440 formats = pa_sink_get_formats(s);
3441
3442 if (formats) {
3443 pa_format_info *finfo_device;
3444 uint32_t i;
3445
3446 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3447 if (pa_format_info_is_compatible(finfo_device, f)) {
3448 ret = TRUE;
3449 break;
3450 }
3451 }
3452
3453 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
3454 }
3455
3456 return ret;
3457 }
3458
3459 /* Called from the main thread */
3460 /* Calculates the intersection between formats supported by the sink and
3461 * in_formats, and returns these, in the order of the sink's formats. */
3462 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3463 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3464 pa_format_info *f_sink, *f_in;
3465 uint32_t i, j;
3466
3467 pa_assert(s);
3468
3469 if (!in_formats || pa_idxset_isempty(in_formats))
3470 goto done;
3471
3472 sink_formats = pa_sink_get_formats(s);
3473
3474 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3475 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3476 if (pa_format_info_is_compatible(f_sink, f_in))
3477 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3478 }
3479 }
3480
3481 done:
3482 if (sink_formats)
3483 pa_idxset_free(sink_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
3484
3485 return out_formats;
3486 }