/* pulseaudio: src/pulsecore/sink.c
 * (commit: sink-input, source-output: Don't assume that proplist has been
 *  initialized in free()) */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
39
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/mix.h>
46 #include <pulsecore/core-subscribe.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/play-memblockq.h>
50 #include <pulsecore/flist.h>
51
52 #include "sink.h"
53
/* Upper bound on sink inputs mixed in one pass — presumably limits the
 * pa_mix_info array size; usage not visible in this chunk, confirm. */
#define MAX_MIX_CHANNELS 32
/* Size of the temporary mix buffer — presumably one page; confirm at use site. */
#define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
/* Hard bounds for sink latency, in microseconds (used to seed
 * thread_info.min_latency/max_latency in pa_sink_new()). */
#define ABSOLUTE_MIN_LATENCY (500)
#define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Latency assumed when the sink lacks PA_SINK_DYNAMIC_LATENCY. */
#define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)

PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
61
/* One pending deferred hardware volume change, linked into the sink's
 * thread_info.volume_changes list. */
struct pa_sink_volume_change {
    pa_usec_t at;          /* presumably the time the change takes effect — confirm in _push() */
    pa_cvolume hw_volume;  /* hardware volume to apply */

    PA_LLIST_FIELDS(pa_sink_volume_change);
};
68
/* Payload for PA_SINK_MESSAGE_SET_PORT when the port switch must run in
 * the IO thread (deferred volume); 'ret' carries the callee's result back. */
struct sink_message_set_port {
    pa_device_port *port;
    int ret;
};
73
74 static void sink_free(pa_object *s);
75
76 static void pa_sink_volume_change_push(pa_sink *s);
77 static void pa_sink_volume_change_flush(pa_sink *s);
78 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
79
80 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
81 pa_assert(data);
82
83 pa_zero(*data);
84 data->proplist = pa_proplist_new();
85 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
86
87 return data;
88 }
89
90 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
91 pa_assert(data);
92
93 pa_xfree(data->name);
94 data->name = pa_xstrdup(name);
95 }
96
97 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
98 pa_assert(data);
99
100 if ((data->sample_spec_is_set = !!spec))
101 data->sample_spec = *spec;
102 }
103
104 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
105 pa_assert(data);
106
107 if ((data->channel_map_is_set = !!map))
108 data->channel_map = *map;
109 }
110
111 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
112 pa_assert(data);
113
114 data->alternate_sample_rate_is_set = true;
115 data->alternate_sample_rate = alternate_sample_rate;
116 }
117
118 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
119 pa_assert(data);
120
121 if ((data->volume_is_set = !!volume))
122 data->volume = *volume;
123 }
124
125 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
126 pa_assert(data);
127
128 data->muted_is_set = true;
129 data->muted = !!mute;
130 }
131
132 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
133 pa_assert(data);
134
135 pa_xfree(data->active_port);
136 data->active_port = pa_xstrdup(port);
137 }
138
139 void pa_sink_new_data_done(pa_sink_new_data *data) {
140 pa_assert(data);
141
142 pa_proplist_free(data->proplist);
143
144 if (data->ports)
145 pa_hashmap_free(data->ports);
146
147 pa_xfree(data->name);
148 pa_xfree(data->active_port);
149 }
150
/* Called from main context */
/* Clear every driver-supplied callback on the sink, returning it to a
 * state with no backend hooks installed. Used after _new() and on unlink. */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->write_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
    s->get_formats = NULL;
    s->set_formats = NULL;
    s->update_rate = NULL;
}
168
/* Called from main context */
/* Allocate and partially initialize a new sink from 'data'. The sink is
 * left in PA_SINK_INIT state; the caller must set the asyncmsgq and then
 * call pa_sink_put() to activate it. Also creates the ".monitor" source.
 * Returns NULL on validation or hook failure (NOTE: some failure paths
 * leak parts of 's' — see the FIXME below). */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the sink name; this may fail if the name is taken and
     * namereg_fail is set. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Give modules a chance to veto or adjust the new sink. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    /* Validate the fixated parameters before touching 's' any further. */
    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the sink volume is allowed to be set, like there is for sink inputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = false;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = false;

    /* Inherit properties from the owning card, then fill in standard
     * device properties. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, true);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to modify the data before it is frozen. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = data->suspend_cause;
    pa_sink_set_mixer_dirty(s, false);
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;
    s->default_sample_rate = s->sample_spec.rate;

    if (data->alternate_sample_rate_is_set)
        s->alternate_sample_rate = data->alternate_sample_rate;
    else
        s->alternate_sample_rate = s->core->alternate_sample_rate;

    /* An alternate rate equal to the default is useless; 0 disables it. */
    if (s->sample_spec.rate == s->alternate_sample_rate) {
        pa_log_warn("Default and alternate sample rates are the same.");
        s->alternate_sample_rate = 0;
    }

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->input_to_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = false;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = false;

    /* Prefer the explicitly requested port; otherwise pick the
     * highest-priority one. */
    if (data->active_port)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    if (!s->active_port) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    if (s->active_port)
        s->latency_offset = s->active_port->latency_offset;
    else
        s->latency_offset = 0;

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Seed the IO-thread-side shadow state from the main-thread values. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
                                                (pa_free_cb_t) pa_sink_input_unref);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = false;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = false;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
    s->thread_info.latency_offset = s->latency_offset;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Create the companion monitor source, mirroring the sink's spec, map,
     * alternate rate and latency-related flags. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
398
/* Called from main context */
/* Transition the sink to 'state'. The driver's set_state() callback is
 * consulted first and may veto; then the IO thread is informed via the
 * asyncmsgq (and the driver change is rolled back if that fails). Fires
 * state-change hooks/events and notifies inputs on suspend/resume.
 * Returns 0 on success or a negative error from the driver/msgq. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    bool suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* True iff this transition crosses the suspended/opened boundary. */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* Roll the driver back to the previous state, since the IO
             * thread never saw the new one. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
456
457 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
458 pa_assert(s);
459
460 s->get_volume = cb;
461 }
462
463 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
464 pa_sink_flags_t flags;
465
466 pa_assert(s);
467 pa_assert(!s->write_volume || cb);
468
469 s->set_volume = cb;
470
471 /* Save the current flags so we can tell if they've changed */
472 flags = s->flags;
473
474 if (cb) {
475 /* The sink implementor is responsible for setting decibel volume support */
476 s->flags |= PA_SINK_HW_VOLUME_CTRL;
477 } else {
478 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
479 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
480 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
481 }
482
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s->state != PA_SINK_INIT && flags != s->flags)
485 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
486 }
487
488 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
489 pa_sink_flags_t flags;
490
491 pa_assert(s);
492 pa_assert(!cb || s->set_volume);
493
494 s->write_volume = cb;
495
496 /* Save the current flags so we can tell if they've changed */
497 flags = s->flags;
498
499 if (cb)
500 s->flags |= PA_SINK_DEFERRED_VOLUME;
501 else
502 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
503
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SINK_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
507 }
508
509 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
510 pa_assert(s);
511
512 s->get_mute = cb;
513 }
514
515 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
516 pa_sink_flags_t flags;
517
518 pa_assert(s);
519
520 s->set_mute = cb;
521
522 /* Save the current flags so we can tell if they've changed */
523 flags = s->flags;
524
525 if (cb)
526 s->flags |= PA_SINK_HW_MUTE_CTRL;
527 else
528 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
529
530 /* If the flags have changed after init, let any clients know via a change event */
531 if (s->state != PA_SINK_INIT && flags != s->flags)
532 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
533 }
534
535 static void enable_flat_volume(pa_sink *s, bool enable) {
536 pa_sink_flags_t flags;
537
538 pa_assert(s);
539
540 /* Always follow the overall user preference here */
541 enable = enable && s->core->flat_volumes;
542
543 /* Save the current flags so we can tell if they've changed */
544 flags = s->flags;
545
546 if (enable)
547 s->flags |= PA_SINK_FLAT_VOLUME;
548 else
549 s->flags &= ~PA_SINK_FLAT_VOLUME;
550
551 /* If the flags have changed after init, let any clients know via a change event */
552 if (s->state != PA_SINK_INIT && flags != s->flags)
553 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
554 }
555
556 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
557 pa_sink_flags_t flags;
558
559 pa_assert(s);
560
561 /* Save the current flags so we can tell if they've changed */
562 flags = s->flags;
563
564 if (enable) {
565 s->flags |= PA_SINK_DECIBEL_VOLUME;
566 enable_flat_volume(s, true);
567 } else {
568 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
569 enable_flat_volume(s, false);
570 }
571
572 /* If the flags have changed after init, let any clients know via a change event */
573 if (s->state != PA_SINK_INIT && flags != s->flags)
574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
575 }
576
/* Called from main context */
/* Activate a sink created with pa_sink_new(): validate the flag/callback
 * invariants, finalize volume state (including volume sharing with the
 * master sink), move it from INIT to SUSPENDED or IDLE, activate the
 * monitor source, and announce the new sink to clients and modules. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the sink. */
    pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sinks that use volume
     * sharing. When the master sink supports decibel volume, it would be good
     * to have the flag also in the filter sink, but currently we don't do that
     * so that the flags of the filter sink never change when it's moved from
     * a master sink to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sinks when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the sink implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the sink. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        pa_sink_enable_decibel_volume(s, true);

    /* If the sink implementor support DB volumes by itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SINK_DECIBEL_VOLUME))
        enable_flat_volume(s, true);

    if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
        pa_sink *root_sink = pa_sink_get_master(s);

        pa_assert(root_sink);

        /* Volume-sharing sinks mirror the master's volumes, remapped to
         * this sink's channel map. */
        s->reference_volume = root_sink->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);

        s->real_volume = root_sink->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    /* Sync the IO-thread shadow copies with the now-final values. */
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    /* The monitor source must have been kept in lockstep with us. */
    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    if (s->suspend_cause)
        pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
    else
        pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
664
/* Called from main context */
/* Detach the sink from the core: unregister its name, remove it from the
 * core/card idxsets, kill all inputs, move it to UNLINKED state, clear
 * driver callbacks and unlink the monitor source. Safe to call more than
 * once; also reverses registrations done in pa_sink_new(). */
void pa_sink_unlink(pa_sink* s) {
    bool linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every remaining input; the j/i check guards against an input
     * that refuses to go away, which would loop forever otherwise. */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
714
/* Called from main context */
/* Destructor invoked via parent.parent.free when the last reference is
 * dropped. Unlinks the sink if still linked, then releases everything it
 * owns. The proplist/ports guards matter because pa_sink_new() can bail
 * out before those fields are set. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    /* Inputs were all killed in unlink; these containers are empty or
     * drop their remaining references via the registered free callbacks. */
    pa_idxset_free(s->inputs, NULL);
    pa_hashmap_free(s->thread_info.inputs);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports)
        pa_hashmap_free(s->ports);

    pa_xfree(s);
}
750
751 /* Called from main context, and not while the IO thread is active, please */
752 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
753 pa_sink_assert_ref(s);
754 pa_assert_ctl_context();
755
756 s->asyncmsgq = q;
757
758 if (s->monitor_source)
759 pa_source_set_asyncmsgq(s->monitor_source, q);
760 }
761
762 /* Called from main context, and not while the IO thread is active, please */
763 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
764 pa_sink_flags_t old_flags;
765 pa_sink_input *input;
766 uint32_t idx;
767
768 pa_sink_assert_ref(s);
769 pa_assert_ctl_context();
770
771 /* For now, allow only a minimal set of flags to be changed. */
772 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
773
774 old_flags = s->flags;
775 s->flags = (s->flags & ~mask) | (value & mask);
776
777 if (s->flags == old_flags)
778 return;
779
780 if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
781 pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
782
783 if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
784 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
785 s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
786
787 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
788 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
789
790 if (s->monitor_source)
791 pa_source_update_flags(s->monitor_source,
792 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
793 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
794 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
795 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
796
797 PA_IDXSET_FOREACH(input, s->inputs, idx) {
798 if (input->origin_sink)
799 pa_sink_update_flags(input->origin_sink, mask, value);
800 }
801 }
802
803 /* Called from IO context, or before _put() from main context */
804 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
805 pa_sink_assert_ref(s);
806 pa_sink_assert_io_context(s);
807
808 s->thread_info.rtpoll = p;
809
810 if (s->monitor_source)
811 pa_source_set_rtpoll(s->monitor_source, p);
812 }
813
814 /* Called from main context */
815 int pa_sink_update_status(pa_sink*s) {
816 pa_sink_assert_ref(s);
817 pa_assert_ctl_context();
818 pa_assert(PA_SINK_IS_LINKED(s->state));
819
820 if (s->state == PA_SINK_SUSPENDED)
821 return 0;
822
823 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
824 }
825
826 /* Called from any context - must be threadsafe */
827 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
828 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
829 }
830
/* Called from main context */
/* Add or remove 'cause' from the sink's (and its monitor's) suspend-cause
 * mask and transition the state accordingly. If the mixer became
 * accessible again (no more session suspend and mixer_dirty set), replay
 * the active port / mute / volume to the hardware first. Returns 0, or a
 * negative error from sink_set_state(). */
int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
        /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
           it'll be handled just fine. */
        pa_sink_set_mixer_dirty(s, false);
        pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
        if (s->active_port && s->set_port) {
            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                /* With deferred volume the port switch must run in the IO
                 * thread, so send it as a synchronous message. */
                struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
                pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
            }
            else
                s->set_port(s, s->active_port);
        }
        else {
            if (s->set_mute)
                s->set_mute(s);
            if (s->set_volume)
                s->set_volume(s);
        }
    }

    /* No state change needed if we are already (not) suspended. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
877
878 /* Called from main context */
879 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
880 pa_sink_input *i, *n;
881 uint32_t idx;
882
883 pa_sink_assert_ref(s);
884 pa_assert_ctl_context();
885 pa_assert(PA_SINK_IS_LINKED(s->state));
886
887 if (!q)
888 q = pa_queue_new();
889
890 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
891 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
892
893 pa_sink_input_ref(i);
894
895 if (pa_sink_input_start_move(i) >= 0)
896 pa_queue_push(q, i);
897 else
898 pa_sink_input_unref(i);
899 }
900
901 return q;
902 }
903
904 /* Called from main context */
905 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
906 pa_sink_input *i;
907
908 pa_sink_assert_ref(s);
909 pa_assert_ctl_context();
910 pa_assert(PA_SINK_IS_LINKED(s->state));
911 pa_assert(q);
912
913 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
914 if (pa_sink_input_finish_move(i, s, save) < 0)
915 pa_sink_input_fail_move(i);
916
917 pa_sink_input_unref(i);
918 }
919
920 pa_queue_free(q, NULL);
921 }
922
923 /* Called from main context */
924 void pa_sink_move_all_fail(pa_queue *q) {
925 pa_sink_input *i;
926
927 pa_assert_ctl_context();
928 pa_assert(q);
929
930 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
931 pa_sink_input_fail_move(i);
932 pa_sink_input_unref(i);
933 }
934
935 pa_queue_free(q, NULL);
936 }
937
938 /* Called from IO thread context */
939 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
940 pa_sink_input *i;
941 void *state = NULL;
942 size_t result = 0;
943
944 pa_sink_assert_ref(s);
945 pa_sink_assert_io_context(s);
946
947 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
948 size_t uf = i->thread_info.underrun_for_sink;
949 if (uf == 0)
950 continue;
951 if (uf >= left_to_play) {
952 if (pa_sink_input_process_underrun(i))
953 continue;
954 }
955 else if (uf > result)
956 result = uf;
957 }
958
959 if (result > 0)
960 pa_log_debug("Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", (long) result, (long) left_to_play - result);
961 return left_to_play - result;
962 }
963
964 /* Called from IO thread context */
965 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
966 pa_sink_input *i;
967 void *state = NULL;
968
969 pa_sink_assert_ref(s);
970 pa_sink_assert_io_context(s);
971 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
972
973 /* If nobody requested this and this is actually no real rewind
974 * then we can short cut this. Please note that this means that
975 * not all rewind requests triggered upstream will always be
976 * translated in actual requests! */
977 if (!s->thread_info.rewind_requested && nbytes <= 0)
978 return;
979
980 s->thread_info.rewind_nbytes = 0;
981 s->thread_info.rewind_requested = false;
982
983 if (nbytes > 0) {
984 pa_log_debug("Processing rewind...");
985 if (s->flags & PA_SINK_DEFERRED_VOLUME)
986 pa_sink_volume_change_rewind(s, nbytes);
987 }
988
989 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
990 pa_sink_input_assert_ref(i);
991 pa_sink_input_process_rewind(i, nbytes);
992 }
993
994 if (nbytes > 0) {
995 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
996 pa_source_process_rewind(s->monitor_source, nbytes);
997 }
998 }
999
/* Called from IO thread context.
 *
 * Peeks up to 'maxinfo' non-silent chunks from the connected inputs into
 * 'info', taking a reference on each chunk and on each contributing input
 * (stored in info->userdata). On return *length is shrunk to the shortest
 * chunk seen so that all entries can be mixed over the same span. Returns
 * the number of entries filled in. inputs_drop() releases the refs. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silent chunks still limit mixlength (above) but are not worth
         * mixing; drop them right away */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        /* Keep the input alive until inputs_drop() releases it */
        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
1039
/* Called from IO thread context.
 *
 * Counterpart of fill_mix_info(): advances every input's read pointer by
 * result->length bytes, forwards per-input (or silent) data to the direct
 * outputs of the monitor source, posts the mixed 'result' to the monitor
 * source, and releases all chunk/input references held by 'info'. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* 'p' is kept across iterations so that when input order is
         * unchanged the match is found on the first probe (round-robin
         * scan, at most n probes per input). */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                /* Direct outputs get this input's own data (with its
                 * volume applied), not the mixed result */
                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    /* Volume scaling requires a private, writable copy */
                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    /* The input contributed no chunk (e.g. it was
                     * silent), so post silence instead */
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry */
        if (m) {
            if (m->chunk.memblock) {
                pa_memblock_unref(m->chunk.memblock);
                pa_memchunk_reset(&m->chunk);
            }

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
1137
/* Called from IO thread context.
 *
 * Renders up to 'length' bytes (0 picks a default) of mixed audio from all
 * connected inputs into *result. The returned chunk carries a new
 * reference that the caller owns. A suspended sink returns silence. */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* A suspended sink delivers (a reference to) the cached silence */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);
        return;
    }

    pa_sink_ref(s);

    /* length == 0 means "pick a reasonable default" */
    if (length <= 0)
        length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);

    /* Never render more than fits into one mempool block */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No input data at all: return silence */

        *result = s->silence;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

    } else if (n == 1) {
        /* Exactly one input: no mixing needed, just apply the volume */
        pa_cvolume volume;

        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
            /* Effectively muted: replace the data with cached silence */
            pa_memblock_unref(result->memblock);
            pa_silence_memchunk_get(&s->core->silence_cache,
                                    s->core->mempool,
                                    result,
                                    &s->sample_spec,
                                    result->length);
        } else if (!pa_cvolume_is_norm(&volume)) {
            /* Volume scaling requires a private, writable copy */
            pa_memchunk_make_writable(result, 0);
            pa_volume_memchunk(result, &s->sample_spec, &volume);
        }
    } else {
        /* Multiple inputs: mix them into a freshly allocated block */
        void *ptr;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                ptr, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);
        pa_memblock_release(result->memblock);

        result->index = 0;
    }

    /* Advance the inputs' read pointers and release the mix-info refs */
    inputs_drop(s, info, n, result);

    pa_sink_unref(s);
}
1222
/* Called from IO thread context.
 *
 * Like pa_sink_render(), but renders into the caller-provided chunk
 * 'target' instead of returning a new one. May shorten target->length if
 * less data was available than requested. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* A suspended sink delivers silence only */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    /* Never render more than fits into one mempool block */
    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No input data: fill the target with silence */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        /* Exactly one input: copy it over, applying the volume */
        pa_cvolume volume;

        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                /* Volume scaling requires a private, writable copy */
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        /* Multiple inputs: mix them directly into the target block */
        void *ptr;

        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    /* Advance the inputs' read pointers and release the mix-info refs */
    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1307
1308 /* Called from IO thread context */
1309 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1310 pa_memchunk chunk;
1311 size_t l, d;
1312
1313 pa_sink_assert_ref(s);
1314 pa_sink_assert_io_context(s);
1315 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1316 pa_assert(target);
1317 pa_assert(target->memblock);
1318 pa_assert(target->length > 0);
1319 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1320
1321 pa_assert(!s->thread_info.rewind_requested);
1322 pa_assert(s->thread_info.rewind_nbytes == 0);
1323
1324 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1325 pa_silence_memchunk(target, &s->sample_spec);
1326 return;
1327 }
1328
1329 pa_sink_ref(s);
1330
1331 l = target->length;
1332 d = 0;
1333 while (l > 0) {
1334 chunk = *target;
1335 chunk.index += d;
1336 chunk.length -= d;
1337
1338 pa_sink_render_into(s, &chunk);
1339
1340 d += chunk.length;
1341 l -= chunk.length;
1342 }
1343
1344 pa_sink_unref(s);
1345 }
1346
1347 /* Called from IO thread context */
1348 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1349 pa_sink_assert_ref(s);
1350 pa_sink_assert_io_context(s);
1351 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1352 pa_assert(length > 0);
1353 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1354 pa_assert(result);
1355
1356 pa_assert(!s->thread_info.rewind_requested);
1357 pa_assert(s->thread_info.rewind_nbytes == 0);
1358
1359 pa_sink_ref(s);
1360
1361 pa_sink_render(s, length, result);
1362
1363 if (result->length < length) {
1364 pa_memchunk chunk;
1365
1366 pa_memchunk_make_writable(result, length);
1367
1368 chunk.memblock = result->memblock;
1369 chunk.index = result->index + result->length;
1370 chunk.length = length - result->length;
1371
1372 pa_sink_render_into_full(s, &chunk);
1373
1374 result->length = length;
1375 }
1376
1377 pa_sink_unref(s);
1378 }
1379
/* Called from main thread.
 *
 * Tries to switch the sink to a new sample rate: in passthrough mode the
 * stream's rate is used verbatim, otherwise the closest of the configured
 * default/alternate rates (matched by rate family) is chosen. The sink is
 * suspended around the actual switch. Returns 0 on success, -1 if the
 * rate cannot be changed right now. */
int pa_sink_update_rate(pa_sink *s, uint32_t rate, bool passthrough) {
    int ret = -1;
    uint32_t desired_rate = rate;
    uint32_t default_rate = s->default_sample_rate;
    uint32_t alternate_rate = s->alternate_sample_rate;
    uint32_t idx;
    pa_sink_input *i;
    bool use_alternate = false;

    /* Already running at the requested rate */
    if (rate == s->sample_spec.rate)
        return 0;

    /* The sink implementation must support rate switching */
    if (!s->update_rate)
        return -1;

    if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough)) {
        pa_log_debug("Default and alternate sample rates are the same.");
        return -1;
    }

    if (PA_SINK_IS_RUNNING(s->state)) {
        pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
                    s->sample_spec.rate);
        return -1;
    }

    if (s->monitor_source) {
        if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
            pa_log_info("Cannot update rate, monitor source is RUNNING");
            return -1;
        }
    }

    /* Reject rates outside the supported range */
    if (PA_UNLIKELY (desired_rate < 8000 ||
                     desired_rate > PA_RATE_MAX))
        return -1;

    if (!passthrough) {
        /* Both configured rates must belong to one of the two common
         * rate families: multiples of 4000 Hz or of 11025 Hz */
        pa_assert((default_rate % 4000 == 0) || (default_rate % 11025 == 0));
        pa_assert((alternate_rate % 4000 == 0) || (alternate_rate % 11025 == 0));

        /* Use the alternate rate only when it shares the requested
         * rate's family and the default rate does not */
        if (default_rate % 11025 == 0) {
            if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
                use_alternate=true;
        } else {
            /* default is 4000 multiple */
            if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
                use_alternate=true;
        }

        if (use_alternate)
            desired_rate = alternate_rate;
        else
            desired_rate = default_rate;
    } else {
        desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
    }

    if (desired_rate == s->sample_spec.rate)
        return -1;

    /* In non-passthrough mode never change the rate under active users */
    if (!passthrough && pa_sink_used_by(s) > 0)
        return -1;

    pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
    pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);

    if (s->update_rate(s, desired_rate) >= 0) {
        /* update monitor source as well */
        if (s->monitor_source && !passthrough)
            pa_source_update_rate(s->monitor_source, desired_rate, false);
        pa_log_info("Changed sampling rate successfully");

        /* Give corked inputs the chance to adjust their resamplers */
        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            if (i->state == PA_SINK_INPUT_CORKED)
                pa_sink_input_update_rate(i);
        }

        ret = 0;
    }

    pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);

    return ret;
}
1466
/* Called from main thread.
 *
 * Returns the sink's current playback latency (plus the configured
 * latency offset), or 0 if the sink is suspended or does not report
 * latency. */
pa_usec_t pa_sink_get_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    /* Ask the IO thread synchronously for the current latency */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);

    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->latency_offset <= (int64_t) usec)
        usec += s->latency_offset;
    else
        usec = 0;

    return usec;
}
1494
/* Called from IO thread.
 *
 * IO-thread variant of pa_sink_get_latency(): queries the latency via a
 * direct process_msg() call and applies the thread-local latency offset. */
pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        /* NOTE(review): pa_usec_t is unsigned, so this -1 wraps to the
         * maximum value — presumably an intentional "invalid" marker, but
         * callers comparing latencies should be aware; verify against
         * callers. */
        return -1;

    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->thread_info.latency_offset <= (int64_t) usec)
        usec += s->thread_info.latency_offset;
    else
        usec = 0;

    return usec;
}
1528
1529 /* Called from the main thread (and also from the IO thread while the main
1530 * thread is waiting).
1531 *
1532 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1533 * set. Instead, flat volume mode is detected by checking whether the root sink
1534 * has the flag set. */
1535 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1536 pa_sink_assert_ref(s);
1537
1538 s = pa_sink_get_master(s);
1539
1540 if (PA_LIKELY(s))
1541 return (s->flags & PA_SINK_FLAT_VOLUME);
1542 else
1543 return false;
1544 }
1545
1546 /* Called from the main thread (and also from the IO thread while the main
1547 * thread is waiting). */
1548 pa_sink *pa_sink_get_master(pa_sink *s) {
1549 pa_sink_assert_ref(s);
1550
1551 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1552 if (PA_UNLIKELY(!s->input_to_master))
1553 return NULL;
1554
1555 s = s->input_to_master->sink;
1556 }
1557
1558 return s;
1559 }
1560
1561 /* Called from main context */
1562 bool pa_sink_is_passthrough(pa_sink *s) {
1563 pa_sink_input *alt_i;
1564 uint32_t idx;
1565
1566 pa_sink_assert_ref(s);
1567
1568 /* one and only one PASSTHROUGH input can possibly be connected */
1569 if (pa_idxset_size(s->inputs) == 1) {
1570 alt_i = pa_idxset_first(s->inputs, &idx);
1571
1572 if (pa_sink_input_is_passthrough(alt_i))
1573 return true;
1574 }
1575
1576 return false;
1577 }
1578
1579 /* Called from main context */
1580 void pa_sink_enter_passthrough(pa_sink *s) {
1581 pa_cvolume volume;
1582
1583 /* disable the monitor in passthrough mode */
1584 if (s->monitor_source) {
1585 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1586 pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1587 }
1588
1589 /* set the volume to NORM */
1590 s->saved_volume = *pa_sink_get_volume(s, true);
1591 s->saved_save_volume = s->save_volume;
1592
1593 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1594 pa_sink_set_volume(s, &volume, true, false);
1595 }
1596
1597 /* Called from main context */
1598 void pa_sink_leave_passthrough(pa_sink *s) {
1599 /* Unsuspend monitor */
1600 if (s->monitor_source) {
1601 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1602 pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1603 }
1604
1605 /* Restore sink volume to what it was before we entered passthrough mode */
1606 pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1607
1608 pa_cvolume_init(&s->saved_volume);
1609 s->saved_save_volume = false;
1610 }
1611
/* Called from main context.
 *
 * Recomputes one input's reference ratio from the sink's reference
 * volume. Only valid in flat volume mode. */
static void compute_reference_ratio(pa_sink_input *i) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(i);
    pa_assert(pa_sink_flat_volume_enabled(i->sink));

    /*
     * Calculates the reference ratio from the sink's reference
     * volume. This basically calculates:
     *
     * i->reference_ratio = i->volume / i->sink->reference_volume
     */

    /* Bring the sink volume into the input's channel map first */
    remapped = i->sink->reference_volume;
    pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);

    i->reference_ratio.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++) {

        /* We don't update when the sink volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary */
        /* (avoids accumulating integer-division rounding errors when the
         * existing ratio still reproduces i->volume exactly) */
        if (pa_sw_volume_multiply(
                    i->reference_ratio.values[c],
                    remapped.values[c]) == i->volume.values[c])
            continue;

        i->reference_ratio.values[c] = pa_sw_volume_divide(
                i->volume.values[c],
                remapped.values[c]);
    }
}
1649
1650 /* Called from main context. Only called for the root sink in volume sharing
1651 * cases, except for internal recursive calls. */
1652 static void compute_reference_ratios(pa_sink *s) {
1653 uint32_t idx;
1654 pa_sink_input *i;
1655
1656 pa_sink_assert_ref(s);
1657 pa_assert_ctl_context();
1658 pa_assert(PA_SINK_IS_LINKED(s->state));
1659 pa_assert(pa_sink_flat_volume_enabled(s));
1660
1661 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1662 compute_reference_ratio(i);
1663
1664 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1665 compute_reference_ratios(i->origin_sink);
1666 }
1667 }
1668
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Recomputes every input's real ratio (input volume relative to the
 * sink's real volume) and from it the soft volume that the mixer
 * actually applies. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin sink uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
            i->soft_volume = i->volume_factor;

            compute_real_ratios(i->origin_sink);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Bring the sink's real volume into the input's channel map */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            /* (skip the division when the existing ratio still
             * reproduces i->volume exactly) */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1735
/* Remaps *v (in channel map 'from') into channel map 'to', preferring
 * 'template' when it is an exact remapping of v, and otherwise falling
 * back to a flat all-channel volume. Returns v. */
static pa_cvolume *cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from sink input to sink volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the sink to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the sink. */

    /* Identical maps: nothing to do */
    if (pa_channel_map_equal(from, to))
        return v;

    /* Check whether the template round-trips back to v */
    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Fallback: flatten to the maximum of v across all channels */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
1774
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Merges (channel-wise max) every input's volume into *max_volume, which
 * is expressed in 'channel_map'. Inputs of volume-sharing filter sinks
 * are descended into rather than counted directly. */
static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            get_maximum_input_volume(i->origin_sink, max_volume, channel_map);

            /* Ignore this input. The origin sink uses volume sharing, so this
             * input's volume will be set to be equal to the root sink's real
             * volume. Obviously this input's current volume must not then
             * affect what the root sink's real volume will be. */
            continue;
        }

        /* Bring the input volume into the target channel map with
         * minimal distortion before merging */
        remapped = i->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
    }
}
1804
1805 /* Called from main thread. Only called for the root sink in volume sharing
1806 * cases, except for internal recursive calls. */
1807 static bool has_inputs(pa_sink *s) {
1808 pa_sink_input *i;
1809 uint32_t idx;
1810
1811 pa_sink_assert_ref(s);
1812
1813 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1814 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1815 return true;
1816 }
1817
1818 return false;
1819 }
1820
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Sets s->real_volume to *new_volume (remapped from 'channel_map') and
 * propagates it down into volume-sharing filter sinks, keeping their
 * inputs' volumes in sync with the root sink's real volume. */
static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_sink_flat_volume_enabled(s)) {
                pa_cvolume old_volume = i->volume;

                /* Follow the root sink's real volume. */
                i->volume = *new_volume;
                pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
                compute_reference_ratio(i);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &i->volume)) {
                    if (i->volume_changed)
                        i->volume_changed(i);

                    pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
                }
            }

            /* Recurse into the filter sink behind this input */
            update_real_volume(i->origin_sink, new_volume, channel_map);
        }
    }
}
1857
/* Called from main thread. Only called for the root sink in shared volume
 * cases.
 *
 * Recomputes s->real_volume as the maximum of all connected stream
 * volumes, then updates every input's real ratio/soft volume to match. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_inputs(s)) {
        /* In the special case that we have no sink inputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    /* Start from silence so the merge below yields exactly the maximum
     * of the input volumes */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1888
/* Called from main thread. Only called for the root sink in shared volume
 * cases, except for internal recursive calls.
 *
 * Pushes a changed sink reference volume down to the individual inputs,
 * scaling each by its reference ratio, and notifies subscribers about
 * any input volume that actually changed. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(i->origin_sink);

            /* Since the origin sink uses volume sharing, this input's volume
             * needs to be updated to match the root sink's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        i->volume = s->reference_volume;
        pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1936
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
    pa_cvolume volume;
    bool reference_volume_changed;
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    /* Remap the new volume from the caller's channel map into ours */
    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* An unchanged volume keeps its saved flag; a changed one is saved
     * only if the caller asked for it */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        /* If the root sink's volume doesn't change, then there can't be any
         * changes in the other sinks in the sink tree either.
         *
         * It's probably theoretically possible that even if the root sink's
         * volume changes slightly, some filter sink doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root sink volume to the sinks connected to the
         * intermediate sink that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here false always if
         * reference_volume_changed is false. */
        return false;

    /* Propagate the (possibly changed) volume to filter sinks that share
     * their volume with this sink */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(i->origin_sink, v, channel_map, false);
    }

    return true;
}
1984
/* Called from main thread.
 *
 * Sets the sink's reference volume and recomputes all derived volumes
 * (real volume, soft volume and, in flat volume mode, the stream volumes).
 * In volume sharing setups the volume is always applied to the root of the
 * sink tree and propagated from there.
 *
 * volume:   the new reference volume. May be NULL, but only in flat volume
 *           mode, in which case the reference and real volumes are instead
 *           re-synchronized from the current stream volumes.
 * send_msg: if true, the IO thread is notified so the new soft/real volume
 *           takes effect.
 * save:     whether the new volume should be remembered persistently. */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        bool send_msg,
        bool save) {

    pa_cvolume new_reference_volume;
    pa_sink *root_sink;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_sink_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
        return;
    }

    /* In case of volume sharing, the volume is set for the root sink first,
     * from which it's then propagated to the sharing sinks. */
    root_sink = pa_sink_get_master(s);

    /* NOTE(review): presumably NULL only transiently while a filter sink is
     * being detached from its master — confirm with pa_sink_get_master(). */
    if (PA_UNLIKELY(!root_sink))
        return;

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Incompatible (e.g. mono) volume: keep the current channel
             * balance and scale it so the loudest channel matches the
             * requested level. */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);

        if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
            if (pa_sink_flat_volume_enabled(root_sink)) {
                /* OK, propagate this volume change back to the inputs */
                propagate_reference_volume(root_sink);

                /* And now recalculate the real volume */
                compute_real_volume(root_sink);
            } else
                update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
        }

    } else {
        /* If volume is NULL we synchronize the sink's real and
         * reference volumes with the stream volumes. */

        pa_assert(pa_sink_flat_volume_enabled(root_sink));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_sink);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
        /* If the sink and it's root don't have the same number of channels, we need to remap */
        if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
            pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
        update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_sink);
    }

    if (root_sink->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_sink->soft_volume */

        pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
        if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
            root_sink->set_volume(root_sink);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_sink->soft_volume = root_sink->real_volume;

    /* This tells the sink that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
2079
2080 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2081 * Only to be called by sink implementor */
2082 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2083
2084 pa_sink_assert_ref(s);
2085 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2086
2087 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2088 pa_sink_assert_io_context(s);
2089 else
2090 pa_assert_ctl_context();
2091
2092 if (!volume)
2093 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2094 else
2095 s->soft_volume = *volume;
2096
2097 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2098 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2099 else
2100 s->thread_info.soft_volume = s->soft_volume;
2101 }
2102
/* Called from the main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Reacts to an externally caused hardware volume change: adopts the new real
 * volume as the reference volume and rebuilds all stream volumes from the
 * (unchanged) real ratios, recursing into filter sinks that share their
 * volume with this one. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        /* Nothing changed at the hardware level, nothing to propagate. */
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, true);
    }

    if (pa_sink_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            i->volume = s->reference_volume;
            pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }

            /* Recurse into filter sinks that share our volume. */
            if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(i->origin_sink, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = true;
}
2168
/* Called from io thread */
/* Asks the main thread to re-read the hardware volume and mute state by
 * posting PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE to the main-thread queue.
 * The post is asynchronous, so this returns immediately. */
void pa_sink_update_volume_and_mute(pa_sink *s) {
    pa_assert(s);
    pa_sink_assert_io_context(s);

    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
}
2176
/* Called from main thread.
 *
 * Returns the sink's reference volume, optionally re-reading the hardware
 * volume first (when force_refresh is set or the sink asks for polling via
 * s->refresh_volume). A detected hardware change is propagated to the
 * reference volume and the streams. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Non-deferred sinks are queried directly from this thread ... */
        if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
            s->get_volume(s);

        /* ... deferred ones inside the IO thread via this synchronous
         * message (which is a no-op query for non-deferred sinks). */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
2201
2202 /* Called from main thread. In volume sharing cases, only the root sink may
2203 * call this. */
2204 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2205 pa_cvolume old_real_volume;
2206
2207 pa_sink_assert_ref(s);
2208 pa_assert_ctl_context();
2209 pa_assert(PA_SINK_IS_LINKED(s->state));
2210 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2211
2212 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2213
2214 old_real_volume = s->real_volume;
2215 update_real_volume(s, new_real_volume, &s->channel_map);
2216 propagate_real_volume(s, &old_real_volume);
2217 }
2218
2219 /* Called from main thread */
2220 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2221 bool old_muted;
2222
2223 pa_sink_assert_ref(s);
2224 pa_assert_ctl_context();
2225 pa_assert(PA_SINK_IS_LINKED(s->state));
2226
2227 old_muted = s->muted;
2228 s->muted = mute;
2229 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
2230
2231 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute)
2232 s->set_mute(s);
2233
2234 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2235
2236 if (old_muted != s->muted)
2237 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2238 }
2239
2240 /* Called from main thread */
2241 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2242
2243 pa_sink_assert_ref(s);
2244 pa_assert_ctl_context();
2245 pa_assert(PA_SINK_IS_LINKED(s->state));
2246
2247 if (s->refresh_muted || force_refresh) {
2248 bool old_muted = s->muted;
2249
2250 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_mute)
2251 s->get_mute(s);
2252
2253 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
2254
2255 if (old_muted != s->muted) {
2256 s->save_muted = true;
2257
2258 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2259
2260 /* Make sure the soft mute status stays in sync */
2261 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2262 }
2263 }
2264
2265 return s->muted;
2266 }
2267
2268 /* Called from main thread */
2269 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2270 pa_sink_assert_ref(s);
2271 pa_assert_ctl_context();
2272 pa_assert(PA_SINK_IS_LINKED(s->state));
2273
2274 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2275
2276 if (s->muted == new_muted)
2277 return;
2278
2279 s->muted = new_muted;
2280 s->save_muted = true;
2281
2282 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2283 }
2284
2285 /* Called from main thread */
2286 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2287 pa_sink_assert_ref(s);
2288 pa_assert_ctl_context();
2289
2290 if (p)
2291 pa_proplist_update(s->proplist, mode, p);
2292
2293 if (PA_SINK_IS_LINKED(s->state)) {
2294 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2295 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2296 }
2297
2298 return true;
2299 }
2300
2301 /* Called from main thread */
2302 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2303 void pa_sink_set_description(pa_sink *s, const char *description) {
2304 const char *old;
2305 pa_sink_assert_ref(s);
2306 pa_assert_ctl_context();
2307
2308 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2309 return;
2310
2311 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2312
2313 if (old && description && pa_streq(old, description))
2314 return;
2315
2316 if (description)
2317 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2318 else
2319 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2320
2321 if (s->monitor_source) {
2322 char *n;
2323
2324 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2325 pa_source_set_description(s->monitor_source, n);
2326 pa_xfree(n);
2327 }
2328
2329 if (PA_SINK_IS_LINKED(s->state)) {
2330 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2331 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2332 }
2333 }
2334
2335 /* Called from main thread */
2336 unsigned pa_sink_linked_by(pa_sink *s) {
2337 unsigned ret;
2338
2339 pa_sink_assert_ref(s);
2340 pa_assert_ctl_context();
2341 pa_assert(PA_SINK_IS_LINKED(s->state));
2342
2343 ret = pa_idxset_size(s->inputs);
2344
2345 /* We add in the number of streams connected to us here. Please
2346 * note the asymmetry to pa_sink_used_by()! */
2347
2348 if (s->monitor_source)
2349 ret += pa_source_linked_by(s->monitor_source);
2350
2351 return ret;
2352 }
2353
2354 /* Called from main thread */
2355 unsigned pa_sink_used_by(pa_sink *s) {
2356 unsigned ret;
2357
2358 pa_sink_assert_ref(s);
2359 pa_assert_ctl_context();
2360 pa_assert(PA_SINK_IS_LINKED(s->state));
2361
2362 ret = pa_idxset_size(s->inputs);
2363 pa_assert(ret >= s->n_corked);
2364
2365 /* Streams connected to our monitor source do not matter for
2366 * pa_sink_used_by()!.*/
2367
2368 return ret - s->n_corked;
2369 }
2370
2371 /* Called from main thread */
2372 unsigned pa_sink_check_suspend(pa_sink *s) {
2373 unsigned ret;
2374 pa_sink_input *i;
2375 uint32_t idx;
2376
2377 pa_sink_assert_ref(s);
2378 pa_assert_ctl_context();
2379
2380 if (!PA_SINK_IS_LINKED(s->state))
2381 return 0;
2382
2383 ret = 0;
2384
2385 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2386 pa_sink_input_state_t st;
2387
2388 st = pa_sink_input_get_state(i);
2389
2390 /* We do not assert here. It is perfectly valid for a sink input to
2391 * be in the INIT state (i.e. created, marked done but not yet put)
2392 * and we should not care if it's unlinked as it won't contribute
2393 * towards our busy status.
2394 */
2395 if (!PA_SINK_INPUT_IS_LINKED(st))
2396 continue;
2397
2398 if (st == PA_SINK_INPUT_CORKED)
2399 continue;
2400
2401 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2402 continue;
2403
2404 ret ++;
2405 }
2406
2407 if (s->monitor_source)
2408 ret += pa_source_check_suspend(s->monitor_source);
2409
2410 return ret;
2411 }
2412
2413 /* Called from the IO thread */
2414 static void sync_input_volumes_within_thread(pa_sink *s) {
2415 pa_sink_input *i;
2416 void *state = NULL;
2417
2418 pa_sink_assert_ref(s);
2419 pa_sink_assert_io_context(s);
2420
2421 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2422 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2423 continue;
2424
2425 i->thread_info.soft_volume = i->soft_volume;
2426 pa_sink_input_request_rewind(i, 0, true, false, false);
2427 }
2428 }
2429
2430 /* Called from the IO thread. Only called for the root sink in volume sharing
2431 * cases, except for internal recursive calls. */
2432 static void set_shared_volume_within_thread(pa_sink *s) {
2433 pa_sink_input *i = NULL;
2434 void *state = NULL;
2435
2436 pa_sink_assert_ref(s);
2437
2438 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2439
2440 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2441 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2442 set_shared_volume_within_thread(i->origin_sink);
2443 }
2444 }
2445
/* Called from IO thread, except when it is not */
/* Default message handler for sinks. Handles stream attach/detach, moves,
 * volume/mute synchronization between the main and IO threads, state
 * changes and latency queries/settings. Sink implementations typically
 * chain up to this from their own process_msg(). Returns 0 on success,
 * -1 for unhandled message codes. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = true;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be fixed up and
             * then configured on the sink. If this causes the sink latency to
             * go down, the sink implementor is responsible for doing a rewind
             * in the update_requested_latency() callback to ensure that the
             * sink buffer doesn't contain more data than what the new latency
             * allows.
             *
             * XXX: Does it really make sense to push this responsibility to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming.
             *
             * XXX: Does it really make sense to push this functionality to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = false;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, true);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* The old sink probably has some audio from this
                 * stream in its buffer. We want to "take it back" as
                 * much as possible and play it to the new sink. We
                 * don't know at this point how much the old sink can
                 * rewind. We have to pick something, and that
                 * something is the full latency of the old sink here.
                 * So we rewind the stream buffer by the sink latency
                 * amount, which may be more than what we should
                 * rewind. This can result in a chunk of audio being
                 * played both to the old sink and the new sink.
                 *
                 * FIXME: Fix this code so that we don't have to make
                 * guesses about how much the sink will actually be
                 * able to rewind. If someone comes up with a solution
                 * for this, something to note is that the part of the
                 * latency that the old sink couldn't rewind should
                 * ideally be compensated after the stream has moved
                 * to the new sink by adding silence. The new sink
                 * most likely can't start playing the moved stream
                 * immediately, and that gap should be removed from
                 * the "compensation silence" (at least at the time of
                 * writing this, the move finish code will actually
                 * already take care of dropping the new sink's
                 * unrewindable latency, so taking into account the
                 * unrewindable latency of the old sink is the only
                 * problem).
                 *
                 * The render_memblockq contents are discarded,
                 * because when the sink changes, the format of the
                 * audio stored in the render_memblockq may change
                 * too, making the stored audio invalid. FIXME:
                 * However, the read and write indices are moved back
                 * the same amount, so if they are not the same now,
                 * they won't be the same after the rewind either. If
                 * the write index of the render_memblockq is ahead of
                 * the read index, then the render_memblockq will feed
                 * the new sink some silence first, which it shouldn't
                 * do. The write index should be flushed to be the
                 * same as the read index. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = true;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = false;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, true);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = true;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* In the ideal case the new sink would start playing
                 * the stream immediately. That requires the sink to
                 * be able to rewind all of its latency, which usually
                 * isn't possible, so there will probably be some gap
                 * before the moved stream becomes audible. We then
                 * have two possibilities: 1) start playing the stream
                 * from where it is now, or 2) drop the unrewindable
                 * latency of the sink from the stream. With option 1
                 * we won't lose any audio but the stream will have a
                 * pause. With option 2 we may lose some audio but the
                 * stream time will be somewhat in sync with the wall
                 * clock. Lennart seems to have chosen option 2 (one
                 * of the reasons might have been that option 1 is
                 * actually much harder to implement), so we drop the
                 * latency of the new sink from the moved stream and
                 * hope that the sink will undo most of that in the
                 * rewind. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* Updating the requested sink latency has to be done
             * after the sink rewind request, not before, because
             * otherwise the sink may limit the rewind amount
             * needlessly. */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            /* Apply pending volumes on the whole sharing tree, starting
             * at the root. */
            pa_sink *root_sink = pa_sink_get_master(s);

            if (PA_LIKELY(root_sink))
                set_shared_volume_within_thread(root_sink);

            return 0;
        }

        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                s->set_volume(s);
                pa_sink_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* True when we transition between suspended and an opened state
             * in either direction. */
            bool suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                /* A suspended sink cannot rewind; drop any pending request. */
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = false;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            /* userdata points to a pa_usec_t[2] = { min, max } */
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            /* userdata points to a pa_usec_t[2] to be filled in */
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct sink_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SINK_IS_LINKED(s->state))
                return 0;

            pa_sink_get_volume(s, true);
            pa_sink_get_mute(s, true);
            return 0;

        case PA_SINK_MESSAGE_SET_LATENCY_OFFSET:
            s->thread_info.latency_offset = offset;
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    /* Unhandled message — implementations are expected to handle these
     * (e.g. GET_LATENCY) themselves before chaining up. */
    return -1;
}
2904
2905 /* Called from main thread */
2906 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2907 pa_sink *sink;
2908 uint32_t idx;
2909 int ret = 0;
2910
2911 pa_core_assert_ref(c);
2912 pa_assert_ctl_context();
2913 pa_assert(cause != 0);
2914
2915 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2916 int r;
2917
2918 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2919 ret = r;
2920 }
2921
2922 return ret;
2923 }
2924
/* Called from main thread */
/* Synchronously asks the IO thread to detach all streams from the sink
 * (handled by PA_SINK_MESSAGE_DETACH -> pa_sink_detach_within_thread()). */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2933
/* Called from main thread */
/* Synchronously asks the IO thread to reattach all streams to the sink
 * (handled by PA_SINK_MESSAGE_ATTACH -> pa_sink_attach_within_thread()). */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2942
2943 /* Called from IO thread */
2944 void pa_sink_detach_within_thread(pa_sink *s) {
2945 pa_sink_input *i;
2946 void *state = NULL;
2947
2948 pa_sink_assert_ref(s);
2949 pa_sink_assert_io_context(s);
2950 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2951
2952 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2953 if (i->detach)
2954 i->detach(i);
2955
2956 if (s->monitor_source)
2957 pa_source_detach_within_thread(s->monitor_source);
2958 }
2959
2960 /* Called from IO thread */
2961 void pa_sink_attach_within_thread(pa_sink *s) {
2962 pa_sink_input *i;
2963 void *state = NULL;
2964
2965 pa_sink_assert_ref(s);
2966 pa_sink_assert_io_context(s);
2967 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2968
2969 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2970 if (i->attach)
2971 i->attach(i);
2972
2973 if (s->monitor_source)
2974 pa_source_attach_within_thread(s->monitor_source);
2975 }
2976
2977 /* Called from IO thread */
2978 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2979 pa_sink_assert_ref(s);
2980 pa_sink_assert_io_context(s);
2981 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2982
2983 if (nbytes == (size_t) -1)
2984 nbytes = s->thread_info.max_rewind;
2985
2986 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2987
2988 if (s->thread_info.rewind_requested &&
2989 nbytes <= s->thread_info.rewind_nbytes)
2990 return;
2991
2992 s->thread_info.rewind_nbytes = nbytes;
2993 s->thread_info.rewind_requested = true;
2994
2995 if (s->request_rewind)
2996 s->request_rewind(s);
2997 }
2998
/* Called from IO thread */
/* Returns the effective latency requested from this sink: the minimum over
 * all stream requests and the monitor source's request, clamped to the
 * sink's latency range. (pa_usec_t) -1 means "nothing requested". */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Fixed-latency sinks ignore stream requests entirely. */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Use the cached value unless something invalidated it. */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Smallest latency requested by any connected stream. */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* The monitor source's streams get a say, too. */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = true;
    }

    return result;
}
3037
3038 /* Called from main thread */
3039 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3040 pa_usec_t usec = 0;
3041
3042 pa_sink_assert_ref(s);
3043 pa_assert_ctl_context();
3044 pa_assert(PA_SINK_IS_LINKED(s->state));
3045
3046 if (s->state == PA_SINK_SUSPENDED)
3047 return 0;
3048
3049 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3050
3051 return usec;
3052 }
3053
3054 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3055 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3056 pa_sink_input *i;
3057 void *state = NULL;
3058
3059 pa_sink_assert_ref(s);
3060 pa_sink_assert_io_context(s);
3061
3062 if (max_rewind == s->thread_info.max_rewind)
3063 return;
3064
3065 s->thread_info.max_rewind = max_rewind;
3066
3067 if (PA_SINK_IS_LINKED(s->thread_info.state))
3068 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3069 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3070
3071 if (s->monitor_source)
3072 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3073 }
3074
3075 /* Called from main thread */
3076 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3077 pa_sink_assert_ref(s);
3078 pa_assert_ctl_context();
3079
3080 if (PA_SINK_IS_LINKED(s->state))
3081 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3082 else
3083 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3084 }
3085
3086 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3087 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3088 void *state = NULL;
3089
3090 pa_sink_assert_ref(s);
3091 pa_sink_assert_io_context(s);
3092
3093 if (max_request == s->thread_info.max_request)
3094 return;
3095
3096 s->thread_info.max_request = max_request;
3097
3098 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3099 pa_sink_input *i;
3100
3101 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3102 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3103 }
3104 }
3105
3106 /* Called from main thread */
3107 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3108 pa_sink_assert_ref(s);
3109 pa_assert_ctl_context();
3110
3111 if (PA_SINK_IS_LINKED(s->state))
3112 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3113 else
3114 pa_sink_set_max_request_within_thread(s, max_request);
3115 }
3116
/* Called from IO thread */
/* Drops the cached requested-latency value and notifies the sink and its
 * streams so they can recompute. If 'dynamic' is true the call only applies
 * to sinks with PA_SINK_DYNAMIC_LATENCY; for fixed-latency sinks it is then
 * a no-op. */
void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = false;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        if (s->update_requested_latency)
            s->update_requested_latency(s);

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
3140
3141 /* Called from main thread */
3142 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3143 pa_sink_assert_ref(s);
3144 pa_assert_ctl_context();
3145
3146 /* min_latency == 0: no limit
3147 * min_latency anything else: specified limit
3148 *
3149 * Similar for max_latency */
3150
3151 if (min_latency < ABSOLUTE_MIN_LATENCY)
3152 min_latency = ABSOLUTE_MIN_LATENCY;
3153
3154 if (max_latency <= 0 ||
3155 max_latency > ABSOLUTE_MAX_LATENCY)
3156 max_latency = ABSOLUTE_MAX_LATENCY;
3157
3158 pa_assert(min_latency <= max_latency);
3159
3160 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3161 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3162 max_latency == ABSOLUTE_MAX_LATENCY) ||
3163 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3164
3165 if (PA_SINK_IS_LINKED(s->state)) {
3166 pa_usec_t r[2];
3167
3168 r[0] = min_latency;
3169 r[1] = max_latency;
3170
3171 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3172 } else
3173 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3174 }
3175
3176 /* Called from main thread */
3177 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3178 pa_sink_assert_ref(s);
3179 pa_assert_ctl_context();
3180 pa_assert(min_latency);
3181 pa_assert(max_latency);
3182
3183 if (PA_SINK_IS_LINKED(s->state)) {
3184 pa_usec_t r[2] = { 0, 0 };
3185
3186 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3187
3188 *min_latency = r[0];
3189 *max_latency = r[1];
3190 } else {
3191 *min_latency = s->thread_info.min_latency;
3192 *max_latency = s->thread_info.max_latency;
3193 }
3194 }
3195
/* Called from IO thread */
/* Applies a new latency range, notifies connected streams, invalidates the
 * cached requested latency and keeps the monitor source's range in sync. */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Callers are expected to have clamped the range already. */
    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Let the connected streams react to the new range. */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency may now lie outside the new range. */
    pa_sink_invalidate_requested_latency(s, false);

    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
3230
3231 /* Called from main thread */
3232 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3233 pa_sink_assert_ref(s);
3234 pa_assert_ctl_context();
3235
3236 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3237 pa_assert(latency == 0);
3238 return;
3239 }
3240
3241 if (latency < ABSOLUTE_MIN_LATENCY)
3242 latency = ABSOLUTE_MIN_LATENCY;
3243
3244 if (latency > ABSOLUTE_MAX_LATENCY)
3245 latency = ABSOLUTE_MAX_LATENCY;
3246
3247 if (PA_SINK_IS_LINKED(s->state))
3248 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3249 else
3250 s->thread_info.fixed_latency = latency;
3251
3252 pa_source_set_fixed_latency(s->monitor_source, latency);
3253 }
3254
3255 /* Called from main thread */
3256 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3257 pa_usec_t latency;
3258
3259 pa_sink_assert_ref(s);
3260 pa_assert_ctl_context();
3261
3262 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3263 return 0;
3264
3265 if (PA_SINK_IS_LINKED(s->state))
3266 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3267 else
3268 latency = s->thread_info.fixed_latency;
3269
3270 return latency;
3271 }
3272
/* Called from IO thread */
/* Applies a new fixed latency, notifies connected streams, invalidates the
 * cached requested latency and mirrors the value to the monitor source. */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Dynamic-latency sinks have no fixed latency; force 0 and mirror that
     * to the monitor source. */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        s->thread_info.fixed_latency = 0;

        if (s->monitor_source)
            pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);

        return;
    }

    /* Callers are expected to have clamped the value already. */
    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    /* Let the connected streams react to the new latency. */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* The cached requested latency was based on the old fixed latency. */
    pa_sink_invalidate_requested_latency(s, false);

    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
3309
3310 /* Called from main context */
3311 void pa_sink_set_latency_offset(pa_sink *s, int64_t offset) {
3312 pa_sink_assert_ref(s);
3313
3314 s->latency_offset = offset;
3315
3316 if (PA_SINK_IS_LINKED(s->state))
3317 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3318 else
3319 s->thread_info.latency_offset = offset;
3320 }
3321
3322 /* Called from main context */
3323 size_t pa_sink_get_max_rewind(pa_sink *s) {
3324 size_t r;
3325 pa_assert_ctl_context();
3326 pa_sink_assert_ref(s);
3327
3328 if (!PA_SINK_IS_LINKED(s->state))
3329 return s->thread_info.max_rewind;
3330
3331 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3332
3333 return r;
3334 }
3335
3336 /* Called from main context */
3337 size_t pa_sink_get_max_request(pa_sink *s) {
3338 size_t r;
3339 pa_sink_assert_ref(s);
3340 pa_assert_ctl_context();
3341
3342 if (!PA_SINK_IS_LINKED(s->state))
3343 return s->thread_info.max_request;
3344
3345 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3346
3347 return r;
3348 }
3349
/* Called from main context */
/* Switches the sink's active port to the port named 'name'. 'save' marks
 * whether the choice should be remembered. Returns 0 on success or a
 * negative PA_ERR_* code. */
int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
    pa_device_port *port;
    int ret;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (!s->set_port) {
        pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
        return -PA_ERR_NOTIMPLEMENTED;
    }

    if (!name)
        return -PA_ERR_NOENTITY;

    if (!(port = pa_hashmap_get(s->ports, name)))
        return -PA_ERR_NOENTITY;

    /* Already on that port: only the save flag may need updating. */
    if (s->active_port == port) {
        s->save_port = s->save_port || save;
        return 0;
    }

    /* With deferred volume, run the switch in the IO thread so it is
     * serialized with pending hardware volume writes. */
    if (s->flags & PA_SINK_DEFERRED_VOLUME) {
        struct sink_message_set_port msg = { .port = port, .ret = 0 };
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
        ret = msg.ret;
    }
    else
        ret = s->set_port(s, port);

    if (ret < 0)
        return -PA_ERR_NOENTITY;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

    pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);

    s->active_port = port;
    s->save_port = save;

    /* Each port carries its own latency offset. */
    pa_sink_set_latency_offset(s, s->active_port->latency_offset);

    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);

    return 0;
}
3398
3399 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3400 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3401
3402 pa_assert(p);
3403
3404 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3405 return true;
3406
3407 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3408
3409 if (pa_streq(ff, "microphone"))
3410 t = "audio-input-microphone";
3411 else if (pa_streq(ff, "webcam"))
3412 t = "camera-web";
3413 else if (pa_streq(ff, "computer"))
3414 t = "computer";
3415 else if (pa_streq(ff, "handset"))
3416 t = "phone";
3417 else if (pa_streq(ff, "portable"))
3418 t = "multimedia-player";
3419 else if (pa_streq(ff, "tv"))
3420 t = "video-display";
3421
3422 /*
3423 * The following icons are not part of the icon naming spec,
3424 * because Rodney Dawes sucks as the maintainer of that spec.
3425 *
3426 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3427 */
3428 else if (pa_streq(ff, "headset"))
3429 t = "audio-headset";
3430 else if (pa_streq(ff, "headphone"))
3431 t = "audio-headphones";
3432 else if (pa_streq(ff, "speaker"))
3433 t = "audio-speakers";
3434 else if (pa_streq(ff, "hands-free"))
3435 t = "audio-handsfree";
3436 }
3437
3438 if (!t)
3439 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3440 if (pa_streq(c, "modem"))
3441 t = "modem";
3442
3443 if (!t) {
3444 if (is_sink)
3445 t = "audio-card";
3446 else
3447 t = "audio-input-microphone";
3448 }
3449
3450 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3451 if (strstr(profile, "analog"))
3452 s = "-analog";
3453 else if (strstr(profile, "iec958"))
3454 s = "-iec958";
3455 else if (strstr(profile, "hdmi"))
3456 s = "-hdmi";
3457 }
3458
3459 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3460
3461 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3462
3463 return true;
3464 }
3465
3466 bool pa_device_init_description(pa_proplist *p) {
3467 const char *s, *d = NULL, *k;
3468 pa_assert(p);
3469
3470 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3471 return true;
3472
3473 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3474 if (pa_streq(s, "internal"))
3475 d = _("Built-in Audio");
3476
3477 if (!d)
3478 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3479 if (pa_streq(s, "modem"))
3480 d = _("Modem");
3481
3482 if (!d)
3483 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3484
3485 if (!d)
3486 return false;
3487
3488 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3489
3490 if (d && k)
3491 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3492 else if (d)
3493 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3494
3495 return true;
3496 }
3497
3498 bool pa_device_init_intended_roles(pa_proplist *p) {
3499 const char *s;
3500 pa_assert(p);
3501
3502 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3503 return true;
3504
3505 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3506 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3507 || pa_streq(s, "headset")) {
3508 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3509 return true;
3510 }
3511
3512 return false;
3513 }
3514
3515 unsigned pa_device_init_priority(pa_proplist *p) {
3516 const char *s;
3517 unsigned priority = 0;
3518
3519 pa_assert(p);
3520
3521 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3522
3523 if (pa_streq(s, "sound"))
3524 priority += 9000;
3525 else if (!pa_streq(s, "modem"))
3526 priority += 1000;
3527 }
3528
3529 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3530
3531 if (pa_streq(s, "internal"))
3532 priority += 900;
3533 else if (pa_streq(s, "speaker"))
3534 priority += 500;
3535 else if (pa_streq(s, "headphone"))
3536 priority += 400;
3537 }
3538
3539 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3540
3541 if (pa_streq(s, "pci"))
3542 priority += 50;
3543 else if (pa_streq(s, "usb"))
3544 priority += 40;
3545 else if (pa_streq(s, "bluetooth"))
3546 priority += 30;
3547 }
3548
3549 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3550
3551 if (pa_startswith(s, "analog-"))
3552 priority += 9;
3553 else if (pa_startswith(s, "iec958-"))
3554 priority += 8;
3555 }
3556
3557 return priority;
3558 }
3559
/* Static free list for recycling pa_sink_volume_change structs; entries
 * that don't fit are released with pa_xfree(). */
PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3561
3562 /* Called from the IO thread. */
3563 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3564 pa_sink_volume_change *c;
3565 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3566 c = pa_xnew(pa_sink_volume_change, 1);
3567
3568 PA_LLIST_INIT(pa_sink_volume_change, c);
3569 c->at = 0;
3570 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3571 return c;
3572 }
3573
3574 /* Called from the IO thread. */
3575 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3576 pa_assert(c);
3577 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3578 pa_xfree(c);
3579 }
3580
3581 /* Called from the IO thread. */
3582 void pa_sink_volume_change_push(pa_sink *s) {
3583 pa_sink_volume_change *c = NULL;
3584 pa_sink_volume_change *nc = NULL;
3585 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3586
3587 const char *direction = NULL;
3588
3589 pa_assert(s);
3590 nc = pa_sink_volume_change_new(s);
3591
3592 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3593 * Adding one more volume for HW would get us rid of this, but I am trying
3594 * to survive with the ones we already have. */
3595 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3596
3597 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3598 pa_log_debug("Volume not changing");
3599 pa_sink_volume_change_free(nc);
3600 return;
3601 }
3602
3603 nc->at = pa_sink_get_latency_within_thread(s);
3604 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3605
3606 if (s->thread_info.volume_changes_tail) {
3607 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3608 /* If volume is going up let's do it a bit late. If it is going
3609 * down let's do it a bit early. */
3610 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3611 if (nc->at + safety_margin > c->at) {
3612 nc->at += safety_margin;
3613 direction = "up";
3614 break;
3615 }
3616 }
3617 else if (nc->at - safety_margin > c->at) {
3618 nc->at -= safety_margin;
3619 direction = "down";
3620 break;
3621 }
3622 }
3623 }
3624
3625 if (c == NULL) {
3626 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3627 nc->at += safety_margin;
3628 direction = "up";
3629 } else {
3630 nc->at -= safety_margin;
3631 direction = "down";
3632 }
3633 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3634 }
3635 else {
3636 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3637 }
3638
3639 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3640
3641 /* We can ignore volume events that came earlier but should happen later than this. */
3642 PA_LLIST_FOREACH(c, nc->next) {
3643 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3644 pa_sink_volume_change_free(c);
3645 }
3646 nc->next = NULL;
3647 s->thread_info.volume_changes_tail = nc;
3648 }
3649
3650 /* Called from the IO thread. */
3651 static void pa_sink_volume_change_flush(pa_sink *s) {
3652 pa_sink_volume_change *c = s->thread_info.volume_changes;
3653 pa_assert(s);
3654 s->thread_info.volume_changes = NULL;
3655 s->thread_info.volume_changes_tail = NULL;
3656 while (c) {
3657 pa_sink_volume_change *next = c->next;
3658 pa_sink_volume_change_free(c);
3659 c = next;
3660 }
3661 }
3662
3663 /* Called from the IO thread. */
3664 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3665 pa_usec_t now;
3666 bool ret = false;
3667
3668 pa_assert(s);
3669
3670 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3671 if (usec_to_next)
3672 *usec_to_next = 0;
3673 return ret;
3674 }
3675
3676 pa_assert(s->write_volume);
3677
3678 now = pa_rtclock_now();
3679
3680 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3681 pa_sink_volume_change *c = s->thread_info.volume_changes;
3682 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3683 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3684 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3685 ret = true;
3686 s->thread_info.current_hw_volume = c->hw_volume;
3687 pa_sink_volume_change_free(c);
3688 }
3689
3690 if (ret)
3691 s->write_volume(s);
3692
3693 if (s->thread_info.volume_changes) {
3694 if (usec_to_next)
3695 *usec_to_next = s->thread_info.volume_changes->at - now;
3696 if (pa_log_ratelimit(PA_LOG_DEBUG))
3697 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3698 }
3699 else {
3700 if (usec_to_next)
3701 *usec_to_next = 0;
3702 s->thread_info.volume_changes_tail = NULL;
3703 }
3704 return ret;
3705 }
3706
/* Called from the IO thread. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit = pa_sink_get_latency_within_thread(s);

    pa_log_debug("latency = %lld", (long long) limit);
    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        pa_usec_t modified_limit = limit;
        /* Mirror the up-late/down-early bias used when the change was
         * queued in pa_sink_volume_change_push(). */
        if (prev_vol > pa_cvolume_avg(&c->hw_volume))
            modified_limit -= s->thread_info.volume_change_safety_margin;
        else
            modified_limit += s->thread_info.volume_change_safety_margin;
        /* Pull the event earlier by the rewound amount, but never before the
         * (biased) limit. */
        if (c->at > modified_limit) {
            c->at -= rewound;
            if (c->at < modified_limit)
                c->at = modified_limit;
        }
        prev_vol = pa_cvolume_avg(&c->hw_volume);
    }
    /* Some events may now be due immediately. */
    pa_sink_volume_change_apply(s, NULL);
}
3733
3734 /* Called from the main thread */
3735 /* Gets the list of formats supported by the sink. The members and idxset must
3736 * be freed by the caller. */
3737 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3738 pa_idxset *ret;
3739
3740 pa_assert(s);
3741
3742 if (s->get_formats) {
3743 /* Sink supports format query, all is good */
3744 ret = s->get_formats(s);
3745 } else {
3746 /* Sink doesn't support format query, so assume it does PCM */
3747 pa_format_info *f = pa_format_info_new();
3748 f->encoding = PA_ENCODING_PCM;
3749
3750 ret = pa_idxset_new(NULL, NULL);
3751 pa_idxset_put(ret, f, NULL);
3752 }
3753
3754 return ret;
3755 }
3756
3757 /* Called from the main thread */
3758 /* Allows an external source to set what formats a sink supports if the sink
3759 * permits this. The function makes a copy of the formats on success. */
3760 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3761 pa_assert(s);
3762 pa_assert(formats);
3763
3764 if (s->set_formats)
3765 /* Sink supports setting formats -- let's give it a shot */
3766 return s->set_formats(s, formats);
3767 else
3768 /* Sink doesn't support setting this -- bail out */
3769 return false;
3770 }
3771
3772 /* Called from the main thread */
3773 /* Checks if the sink can accept this format */
3774 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3775 pa_idxset *formats = NULL;
3776 bool ret = false;
3777
3778 pa_assert(s);
3779 pa_assert(f);
3780
3781 formats = pa_sink_get_formats(s);
3782
3783 if (formats) {
3784 pa_format_info *finfo_device;
3785 uint32_t i;
3786
3787 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3788 if (pa_format_info_is_compatible(finfo_device, f)) {
3789 ret = true;
3790 break;
3791 }
3792 }
3793
3794 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3795 }
3796
3797 return ret;
3798 }
3799
3800 /* Called from the main thread */
3801 /* Calculates the intersection between formats supported by the sink and
3802 * in_formats, and returns these, in the order of the sink's formats. */
3803 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3804 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3805 pa_format_info *f_sink, *f_in;
3806 uint32_t i, j;
3807
3808 pa_assert(s);
3809
3810 if (!in_formats || pa_idxset_isempty(in_formats))
3811 goto done;
3812
3813 sink_formats = pa_sink_get_formats(s);
3814
3815 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3816 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3817 if (pa_format_info_is_compatible(f_sink, f_in))
3818 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3819 }
3820 }
3821
3822 done:
3823 if (sink_formats)
3824 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3825
3826 return out_formats;
3827 }