]> code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
f4647b8bf4aba48ae8051440f5a9e15b4257ca1f
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
39
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/mix.h>
46 #include <pulsecore/core-subscribe.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/play-memblockq.h>
50 #include <pulsecore/flist.h>
51
52 #include "sink.h"
53
/* Upper bound on the number of sink inputs mixed in a single pass. */
#define MAX_MIX_CHANNELS 32
/* Size of the temporary mixing buffer: one memory page. */
#define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
/* Hard limits for configurable sink latency, in microseconds. */
#define ABSOLUTE_MIN_LATENCY (500)
#define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Latency used for sinks without PA_SINK_DYNAMIC_LATENCY. */
#define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)

PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);

/* One queued deferred hardware volume change: 'hw_volume' is to be
 * applied at time 'at' (used with PA_SINK_DEFERRED_VOLUME). */
struct pa_sink_volume_change {
    pa_usec_t at;
    pa_cvolume hw_volume;

    PA_LLIST_FIELDS(pa_sink_volume_change);
};

/* Message payload for PA_SINK_MESSAGE_SET_PORT; 'ret' carries the
 * result back to the sender. */
struct sink_message_set_port {
    pa_device_port *port;
    int ret;
};

static void sink_free(pa_object *s);

/* Deferred-volume helpers implemented later in this file. */
static void pa_sink_volume_change_push(pa_sink *s);
static void pa_sink_volume_change_flush(pa_sink *s);
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
79
80 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
81 pa_assert(data);
82
83 pa_zero(*data);
84 data->proplist = pa_proplist_new();
85 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
86
87 return data;
88 }
89
90 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
91 pa_assert(data);
92
93 pa_xfree(data->name);
94 data->name = pa_xstrdup(name);
95 }
96
97 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
98 pa_assert(data);
99
100 if ((data->sample_spec_is_set = !!spec))
101 data->sample_spec = *spec;
102 }
103
104 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
105 pa_assert(data);
106
107 if ((data->channel_map_is_set = !!map))
108 data->channel_map = *map;
109 }
110
111 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
112 pa_assert(data);
113
114 data->alternate_sample_rate_is_set = true;
115 data->alternate_sample_rate = alternate_sample_rate;
116 }
117
118 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
119 pa_assert(data);
120
121 if ((data->volume_is_set = !!volume))
122 data->volume = *volume;
123 }
124
125 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
126 pa_assert(data);
127
128 data->muted_is_set = true;
129 data->muted = !!mute;
130 }
131
132 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
133 pa_assert(data);
134
135 pa_xfree(data->active_port);
136 data->active_port = pa_xstrdup(port);
137 }
138
139 void pa_sink_new_data_done(pa_sink_new_data *data) {
140 pa_assert(data);
141
142 pa_proplist_free(data->proplist);
143
144 if (data->ports)
145 pa_hashmap_free(data->ports);
146
147 pa_xfree(data->name);
148 pa_xfree(data->active_port);
149 }
150
151 /* Called from main context */
152 static void reset_callbacks(pa_sink *s) {
153 pa_assert(s);
154
155 s->set_state = NULL;
156 s->get_volume = NULL;
157 s->set_volume = NULL;
158 s->write_volume = NULL;
159 s->get_mute = NULL;
160 s->set_mute = NULL;
161 s->request_rewind = NULL;
162 s->update_requested_latency = NULL;
163 s->set_port = NULL;
164 s->get_formats = NULL;
165 s->set_formats = NULL;
166 s->update_rate = NULL;
167 }
168
/* Called from main context */
/* Allocate and set up a new sink from the (hook-amended) 'data':
 * registers the name, fires the NEW and FIXATE hooks, validates the
 * sample spec / channel map / volume, fills in all fields, and creates
 * the matching monitor source. Returns the sink still in PA_SINK_INIT
 * state (the caller completes setup and then calls pa_sink_put()), or
 * NULL on validation or hook failure. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the sink name; may fail e.g. for duplicates when
     * namereg_fail is set. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Modules may veto or adjust the new sink here. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */
    /* NOTE(review): the pa_return_null_if_fail() checks below return
     * without freeing 's' or unregistering the name -- known leak, see
     * the FIXME above. */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map when the caller did not set one. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the sink volume is allowed to be set, like there is for sink inputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = false;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = false;

    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, true);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust the fully filled-in data. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = data->suspend_cause;
    pa_sink_set_mixer_dirty(s, false);
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;
    s->default_sample_rate = s->sample_spec.rate;

    if (data->alternate_sample_rate_is_set)
        s->alternate_sample_rate = data->alternate_sample_rate;
    else
        s->alternate_sample_rate = s->core->alternate_sample_rate;

    /* An alternate rate equal to the default is useless; disable it. */
    if (s->sample_spec.rate == s->alternate_sample_rate) {
        pa_log_warn("Default and alternate sample rates are the same.");
        s->alternate_sample_rate = 0;
    }

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->input_to_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = false;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = false;

    if (data->active_port)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* No (valid) port requested: pick the highest-priority available
     * port, falling back to the highest-priority port overall. */
    if (!s->active_port) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state) {
            if (p->available == PA_AVAILABLE_NO)
                continue;

            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
        }
        if (!s->active_port) {
            PA_HASHMAP_FOREACH(p, s->ports, state)
                if (!s->active_port || p->priority > s->active_port->priority)
                    s->active_port = p;
        }
    }

    if (s->active_port)
        s->latency_offset = s->active_port->latency_offset;
    else
        s->latency_offset = 0;

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* thread_info is the IO-thread's view of the sink; mirror the
     * main-thread state here before the IO thread starts. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
                                                (pa_free_cb_t) pa_sink_input_unref);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = false;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = false;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
    s->thread_info.latency_offset = s->latency_offset;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a monitor source mirroring what it plays back. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
407
/* Called from main context */
/* Transition the sink to 'state': notify the implementor callback and
 * the IO thread (rolling back on failure), fire hooks/subscription
 * events, and - on suspend/resume transitions - inform all inputs and
 * the monitor source. Returns 0 on success, negative on error. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    bool suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* True when crossing the suspended <-> opened boundary in either
     * direction. */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    /* Synchronously inform the IO thread; if the message fails, roll
     * back the implementor-side state change. */
    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
465
/* Install the callback used to read the hardware volume from the
 * driver. May be NULL to clear it. */
void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_assert(s);

    s->get_volume = cb;
}
471
472 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
473 pa_sink_flags_t flags;
474
475 pa_assert(s);
476 pa_assert(!s->write_volume || cb);
477
478 s->set_volume = cb;
479
480 /* Save the current flags so we can tell if they've changed */
481 flags = s->flags;
482
483 if (cb) {
484 /* The sink implementor is responsible for setting decibel volume support */
485 s->flags |= PA_SINK_HW_VOLUME_CTRL;
486 } else {
487 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
488 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
489 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
490 }
491
492 /* If the flags have changed after init, let any clients know via a change event */
493 if (s->state != PA_SINK_INIT && flags != s->flags)
494 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
495 }
496
497 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
498 pa_sink_flags_t flags;
499
500 pa_assert(s);
501 pa_assert(!cb || s->set_volume);
502
503 s->write_volume = cb;
504
505 /* Save the current flags so we can tell if they've changed */
506 flags = s->flags;
507
508 if (cb)
509 s->flags |= PA_SINK_DEFERRED_VOLUME;
510 else
511 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
512
513 /* If the flags have changed after init, let any clients know via a change event */
514 if (s->state != PA_SINK_INIT && flags != s->flags)
515 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
516 }
517
/* Install the callback used to read the hardware mute state from the
 * driver. May be NULL to clear it. */
void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_assert(s);

    s->get_mute = cb;
}
523
524 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
525 pa_sink_flags_t flags;
526
527 pa_assert(s);
528
529 s->set_mute = cb;
530
531 /* Save the current flags so we can tell if they've changed */
532 flags = s->flags;
533
534 if (cb)
535 s->flags |= PA_SINK_HW_MUTE_CTRL;
536 else
537 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
538
539 /* If the flags have changed after init, let any clients know via a change event */
540 if (s->state != PA_SINK_INIT && flags != s->flags)
541 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
542 }
543
544 static void enable_flat_volume(pa_sink *s, bool enable) {
545 pa_sink_flags_t flags;
546
547 pa_assert(s);
548
549 /* Always follow the overall user preference here */
550 enable = enable && s->core->flat_volumes;
551
552 /* Save the current flags so we can tell if they've changed */
553 flags = s->flags;
554
555 if (enable)
556 s->flags |= PA_SINK_FLAT_VOLUME;
557 else
558 s->flags &= ~PA_SINK_FLAT_VOLUME;
559
560 /* If the flags have changed after init, let any clients know via a change event */
561 if (s->state != PA_SINK_INIT && flags != s->flags)
562 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
563 }
564
565 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
566 pa_sink_flags_t flags;
567
568 pa_assert(s);
569
570 /* Save the current flags so we can tell if they've changed */
571 flags = s->flags;
572
573 if (enable) {
574 s->flags |= PA_SINK_DECIBEL_VOLUME;
575 enable_flat_volume(s, true);
576 } else {
577 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
578 enable_flat_volume(s, false);
579 }
580
581 /* If the flags have changed after init, let any clients know via a change event */
582 if (s->state != PA_SINK_INIT && flags != s->flags)
583 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
584 }
585
/* Called from main context */
/* Complete initialization of a sink created with pa_sink_new() and
 * make it live: verify the volume-related flags match the installed
 * callbacks, inherit volumes for volume-sharing (filter) sinks, enter
 * SUSPENDED or IDLE state, publish the monitor source, and announce
 * the new sink to subscribers and hooks. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the sink. */
    pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sinks that use volume
     * sharing. When the master sink supports decibel volume, it would be good
     * to have the flag also in the filter sink, but currently we don't do that
     * so that the flags of the filter sink never change when it's moved from
     * a master sink to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sinks when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the sink implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the sink. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        pa_sink_enable_decibel_volume(s, true);

    /* If the sink implementor support DB volumes by itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SINK_DECIBEL_VOLUME))
        enable_flat_volume(s, true);

    if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
        pa_sink *root_sink = pa_sink_get_master(s);

        pa_assert(root_sink);

        /* Filter sinks mirror the master's volumes, remapped to this
         * sink's channel map. */
        s->reference_volume = root_sink->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);

        s->real_volume = root_sink->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    /* Consistency checks: flags vs. volume setup and vs. the monitor
     * source's latency configuration. */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    /* A sink created with a suspend cause starts out suspended. */
    if (s->suspend_cause)
        pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
    else
        pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
673
/* Called from main context */
/* Disconnect the sink from the core: unregister the name, remove it
 * from the core/card sets, kill all remaining inputs, move to the
 * UNLINKED state, clear the callbacks and unlink the monitor source.
 * Fires the UNLINK/UNLINK_POST hooks only if the sink was linked. */
void pa_sink_unlink(pa_sink* s) {
    bool linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Killing an input is expected to remove it from s->inputs; the
     * 'j' bookkeeping asserts we never see the same input twice,
     * which would otherwise loop forever. */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
723
/* Called from main context */
/* pa_object destructor, invoked when the last reference is dropped.
 * Unlinks the sink first if needed, then releases everything it owns. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    /* s->inputs was created without a free callback (see pa_sink_new);
     * the references are held via thread_info.inputs, whose free
     * callback unrefs each input. */
    pa_idxset_free(s->inputs, NULL);
    pa_hashmap_free(s->thread_info.inputs);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports)
        pa_hashmap_free(s->ports);

    pa_xfree(s);
}
759
/* Called from main context, and not while the IO thread is active, please */
/* Attach the message queue used to talk to the IO thread; the monitor
 * source shares the same queue. */
void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    s->asyncmsgq = q;

    if (s->monitor_source)
        pa_source_set_asyncmsgq(s->monitor_source, q);
}
770
/* Called from main context, and not while the IO thread is active, please */
/* Change the subset of sink flags selected by 'mask' to 'value'. Only
 * the LATENCY and DYNAMIC_LATENCY flags may be changed this way. The
 * change is mirrored on the monitor source and propagated recursively
 * to any filter sinks stacked on this one (via their origin_sink). */
void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
    pa_sink_flags_t old_flags;
    pa_sink_input *input;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* For now, allow only a minimal set of flags to be changed. */
    pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);

    old_flags = s->flags;
    s->flags = (s->flags & ~mask) | (value & mask);

    if (s->flags == old_flags)
        return;

    if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
        pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");

    if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
        pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
                     s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);

    /* Translate the sink flags into the corresponding source flags for
     * the monitor. */
    if (s->monitor_source)
        pa_source_update_flags(s->monitor_source,
                               ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                               ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
                               ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                               ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    PA_IDXSET_FOREACH(input, s->inputs, idx) {
        if (input->origin_sink)
            pa_sink_update_flags(input->origin_sink, mask, value);
    }
}
811
/* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object driving the IO thread; the monitor source
 * shares the same rtpoll. */
void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    s->thread_info.rtpoll = p;

    if (s->monitor_source)
        pa_source_set_rtpoll(s->monitor_source, p);
}
822
823 /* Called from main context */
824 int pa_sink_update_status(pa_sink*s) {
825 pa_sink_assert_ref(s);
826 pa_assert_ctl_context();
827 pa_assert(PA_SINK_IS_LINKED(s->state));
828
829 if (s->state == PA_SINK_SUSPENDED)
830 return 0;
831
832 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
833 }
834
835 /* Called from any context - must be threadsafe */
836 void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
837 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
838 }
839
/* Called from main context */
/* Add or remove 'cause' from the sink's (and its monitor source's)
 * suspend causes and enter or leave the SUSPENDED state accordingly.
 * Additionally, if the mixer was marked dirty and has become
 * accessible again (PA_SUSPEND_SESSION cleared), re-apply the port or
 * the mute/volume settings to the hardware. Returns 0 or a negative
 * error code from sink_set_state(). */
int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
        /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
           it'll be handled just fine. */
        pa_sink_set_mixer_dirty(s, false);
        pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
        if (s->active_port && s->set_port) {
            /* With deferred volume the port change must go through the
             * IO thread. */
            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
                pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
            }
            else
                s->set_port(s, s->active_port);
        }
        else {
            if (s->set_mute)
                s->set_mute(s);
            if (s->set_volume)
                s->set_volume(s);
        }
    }

    /* Nothing to do if we are already in the matching state. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
886
/* Called from main context */
/* Begin moving every input away from 's': each input whose move starts
 * successfully is pushed (with a reference held) onto 'q', which is
 * allocated if NULL. Complete with pa_sink_move_all_finish() or
 * pa_sink_move_all_fail(). Returns the queue. */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next input before acting on the current one, since
     * starting a move can remove the input from s->inputs and would
     * otherwise invalidate the iteration. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
912
913 /* Called from main context */
914 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
915 pa_sink_input *i;
916
917 pa_sink_assert_ref(s);
918 pa_assert_ctl_context();
919 pa_assert(PA_SINK_IS_LINKED(s->state));
920 pa_assert(q);
921
922 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
923 if (pa_sink_input_finish_move(i, s, save) < 0)
924 pa_sink_input_fail_move(i);
925
926 pa_sink_input_unref(i);
927 }
928
929 pa_queue_free(q, NULL);
930 }
931
932 /* Called from main context */
933 void pa_sink_move_all_fail(pa_queue *q) {
934 pa_sink_input *i;
935
936 pa_assert_ctl_context();
937 pa_assert(q);
938
939 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
940 pa_sink_input_fail_move(i);
941 pa_sink_input_unref(i);
942 }
943
944 pa_queue_free(q, NULL);
945 }
946
947 /* Called from IO thread context */
948 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
949 pa_sink_input *i;
950 void *state = NULL;
951 size_t result = 0;
952
953 pa_sink_assert_ref(s);
954 pa_sink_assert_io_context(s);
955
956 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
957 size_t uf = i->thread_info.underrun_for_sink;
958 if (uf == 0)
959 continue;
960 if (uf >= left_to_play) {
961 if (pa_sink_input_process_underrun(i))
962 continue;
963 }
964 else if (uf > result)
965 result = uf;
966 }
967
968 if (result > 0)
969 pa_log_debug("Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", (long) result, (long) left_to_play - result);
970 return left_to_play - result;
971 }
972
973 /* Called from IO thread context */
974 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
975 pa_sink_input *i;
976 void *state = NULL;
977
978 pa_sink_assert_ref(s);
979 pa_sink_assert_io_context(s);
980 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
981
982 /* If nobody requested this and this is actually no real rewind
983 * then we can short cut this. Please note that this means that
984 * not all rewind requests triggered upstream will always be
985 * translated in actual requests! */
986 if (!s->thread_info.rewind_requested && nbytes <= 0)
987 return;
988
989 s->thread_info.rewind_nbytes = 0;
990 s->thread_info.rewind_requested = false;
991
992 if (nbytes > 0) {
993 pa_log_debug("Processing rewind...");
994 if (s->flags & PA_SINK_DEFERRED_VOLUME)
995 pa_sink_volume_change_rewind(s, nbytes);
996 }
997
998 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
999 pa_sink_input_assert_ref(i);
1000 pa_sink_input_process_rewind(i, nbytes);
1001 }
1002
1003 if (nbytes > 0) {
1004 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1005 pa_source_process_rewind(s->monitor_source, nbytes);
1006 }
1007 }
1008
/* Called from IO thread context */
/* Collect up to 'maxinfo' playable chunks, one per connected sink input,
 * into 'info'. On return *length is reduced to the shortest chunk seen
 * (the amount that can actually be mixed in one go). Returns the number
 * of entries filled in. Each filled entry holds a reference to its
 * memblock and to its sink input (stored in info->userdata); those
 * references are released later by inputs_drop(). */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Track the shortest chunk returned so far (0 means "unset").
         * Note this happens before the silence check, so silent chunks
         * still bound the mix length. */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Pure silence need not be mixed in at all; drop it here. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        /* Keep a reference to the input so inputs_drop() can match this
         * entry back to it. */
        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
1048
/* Called from IO thread context */
/* After rendering: advance every connected input past the rendered data,
 * feed the inputs' direct outputs and the monitor source, and release the
 * references (memblocks and sink inputs) that fill_mix_info() acquired in
 * the 'info' array. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* 'p' persists across iterations of the outer loop, so when the
         * input order is unchanged each match is found on the first probe. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    /* Post this input's own data, with its volume applied,
                     * to its direct outputs. */
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    /* This input contributed nothing (it was silent or had
                     * no entry); post silence of the same length instead. */
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock) {
                pa_memblock_unref(m->chunk.memblock);
                pa_memchunk_reset(&m->chunk);
            }

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
1146
1147 /* Called from IO thread context */
1148 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1149 pa_mix_info info[MAX_MIX_CHANNELS];
1150 unsigned n;
1151 size_t block_size_max;
1152
1153 pa_sink_assert_ref(s);
1154 pa_sink_assert_io_context(s);
1155 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1156 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1157 pa_assert(result);
1158
1159 pa_assert(!s->thread_info.rewind_requested);
1160 pa_assert(s->thread_info.rewind_nbytes == 0);
1161
1162 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1163 result->memblock = pa_memblock_ref(s->silence.memblock);
1164 result->index = s->silence.index;
1165 result->length = PA_MIN(s->silence.length, length);
1166 return;
1167 }
1168
1169 pa_sink_ref(s);
1170
1171 if (length <= 0)
1172 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1173
1174 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1175 if (length > block_size_max)
1176 length = pa_frame_align(block_size_max, &s->sample_spec);
1177
1178 pa_assert(length > 0);
1179
1180 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1181
1182 if (n == 0) {
1183
1184 *result = s->silence;
1185 pa_memblock_ref(result->memblock);
1186
1187 if (result->length > length)
1188 result->length = length;
1189
1190 } else if (n == 1) {
1191 pa_cvolume volume;
1192
1193 *result = info[0].chunk;
1194 pa_memblock_ref(result->memblock);
1195
1196 if (result->length > length)
1197 result->length = length;
1198
1199 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1200
1201 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1202 pa_memblock_unref(result->memblock);
1203 pa_silence_memchunk_get(&s->core->silence_cache,
1204 s->core->mempool,
1205 result,
1206 &s->sample_spec,
1207 result->length);
1208 } else if (!pa_cvolume_is_norm(&volume)) {
1209 pa_memchunk_make_writable(result, 0);
1210 pa_volume_memchunk(result, &s->sample_spec, &volume);
1211 }
1212 } else {
1213 void *ptr;
1214 result->memblock = pa_memblock_new(s->core->mempool, length);
1215
1216 ptr = pa_memblock_acquire(result->memblock);
1217 result->length = pa_mix(info, n,
1218 ptr, length,
1219 &s->sample_spec,
1220 &s->thread_info.soft_volume,
1221 s->thread_info.soft_muted);
1222 pa_memblock_release(result->memblock);
1223
1224 result->index = 0;
1225 }
1226
1227 inputs_drop(s, info, n, result);
1228
1229 pa_sink_unref(s);
1230 }
1231
/* Called from IO thread context */
/* Like pa_sink_render(), but mixes directly into the caller-provided
 * chunk 'target'. target->length may be reduced if less data was
 * available than requested. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* A suspended sink produces silence. */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    /* Never render more at once than the mempool can hand out in one block. */
    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No inputs contributed data: fill the target with silence. */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        /* One input: copy its data with the volume applied, no mixing. */
        pa_cvolume volume;

        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        /* Multiple inputs: mix straight into the target memory. */
        void *ptr;

        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    /* Advance all inputs past the rendered data and release the refs
     * fill_mix_info() took. */
    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1316
1317 /* Called from IO thread context */
1318 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1319 pa_memchunk chunk;
1320 size_t l, d;
1321
1322 pa_sink_assert_ref(s);
1323 pa_sink_assert_io_context(s);
1324 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1325 pa_assert(target);
1326 pa_assert(target->memblock);
1327 pa_assert(target->length > 0);
1328 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1329
1330 pa_assert(!s->thread_info.rewind_requested);
1331 pa_assert(s->thread_info.rewind_nbytes == 0);
1332
1333 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1334 pa_silence_memchunk(target, &s->sample_spec);
1335 return;
1336 }
1337
1338 pa_sink_ref(s);
1339
1340 l = target->length;
1341 d = 0;
1342 while (l > 0) {
1343 chunk = *target;
1344 chunk.index += d;
1345 chunk.length -= d;
1346
1347 pa_sink_render_into(s, &chunk);
1348
1349 d += chunk.length;
1350 l -= chunk.length;
1351 }
1352
1353 pa_sink_unref(s);
1354 }
1355
1356 /* Called from IO thread context */
1357 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1358 pa_sink_assert_ref(s);
1359 pa_sink_assert_io_context(s);
1360 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1361 pa_assert(length > 0);
1362 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1363 pa_assert(result);
1364
1365 pa_assert(!s->thread_info.rewind_requested);
1366 pa_assert(s->thread_info.rewind_nbytes == 0);
1367
1368 pa_sink_ref(s);
1369
1370 pa_sink_render(s, length, result);
1371
1372 if (result->length < length) {
1373 pa_memchunk chunk;
1374
1375 pa_memchunk_make_writable(result, length);
1376
1377 chunk.memblock = result->memblock;
1378 chunk.index = result->index + result->length;
1379 chunk.length = length - result->length;
1380
1381 pa_sink_render_into_full(s, &chunk);
1382
1383 result->length = length;
1384 }
1385
1386 pa_sink_unref(s);
1387 }
1388
1389 /* Called from main thread */
1390 int pa_sink_update_rate(pa_sink *s, uint32_t rate, bool passthrough) {
1391 int ret = -1;
1392 uint32_t desired_rate = rate;
1393 uint32_t default_rate = s->default_sample_rate;
1394 uint32_t alternate_rate = s->alternate_sample_rate;
1395 uint32_t idx;
1396 pa_sink_input *i;
1397 bool use_alternate = false;
1398
1399 if (rate == s->sample_spec.rate)
1400 return 0;
1401
1402 if (!s->update_rate)
1403 return -1;
1404
1405 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough)) {
1406 pa_log_debug("Default and alternate sample rates are the same.");
1407 return -1;
1408 }
1409
1410 if (PA_SINK_IS_RUNNING(s->state)) {
1411 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1412 s->sample_spec.rate);
1413 return -1;
1414 }
1415
1416 if (s->monitor_source) {
1417 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1418 pa_log_info("Cannot update rate, monitor source is RUNNING");
1419 return -1;
1420 }
1421 }
1422
1423 if (PA_UNLIKELY(!pa_sample_rate_valid(desired_rate)))
1424 return -1;
1425
1426 if (!passthrough) {
1427 pa_assert((default_rate % 4000 == 0) || (default_rate % 11025 == 0));
1428 pa_assert((alternate_rate % 4000 == 0) || (alternate_rate % 11025 == 0));
1429
1430 if (default_rate % 11025 == 0) {
1431 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
1432 use_alternate=true;
1433 } else {
1434 /* default is 4000 multiple */
1435 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
1436 use_alternate=true;
1437 }
1438
1439 if (use_alternate)
1440 desired_rate = alternate_rate;
1441 else
1442 desired_rate = default_rate;
1443 } else {
1444 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
1445 }
1446
1447 if (desired_rate == s->sample_spec.rate)
1448 return -1;
1449
1450 if (!passthrough && pa_sink_used_by(s) > 0)
1451 return -1;
1452
1453 pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
1454 pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1455
1456 if (s->update_rate(s, desired_rate) >= 0) {
1457 /* update monitor source as well */
1458 if (s->monitor_source && !passthrough)
1459 pa_source_update_rate(s->monitor_source, desired_rate, false);
1460 pa_log_info("Changed sampling rate successfully");
1461
1462 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1463 if (i->state == PA_SINK_INPUT_CORKED)
1464 pa_sink_input_update_rate(i);
1465 }
1466
1467 ret = 0;
1468 }
1469
1470 pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1471
1472 return ret;
1473 }
1474
/* Called from main thread */
/* Return the sink's current playback latency in usec, including the
 * configured latency offset. Returns 0 for suspended sinks and for sinks
 * that don't report latency. */
pa_usec_t pa_sink_get_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    /* Synchronous round trip to the IO thread to fetch the latency. */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);

    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->latency_offset <= (int64_t) usec)
        usec += s->latency_offset;
    else
        usec = 0;

    return usec;
}
1502
/* Called from IO thread */
/* IO-thread counterpart of pa_sink_get_latency(): queries the latency by
 * invoking process_msg() directly instead of via a cross-thread message. */
pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    /* NOTE(review): the return type is unsigned, so this -1 wraps to
     * (pa_usec_t) -1. Presumably callers treat that as an "invalid"
     * marker — confirm before changing. */
    if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->thread_info.latency_offset <= (int64_t) usec)
        usec += s->thread_info.latency_offset;
    else
        usec = 0;

    return usec;
}
1536
1537 /* Called from the main thread (and also from the IO thread while the main
1538 * thread is waiting).
1539 *
1540 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1541 * set. Instead, flat volume mode is detected by checking whether the root sink
1542 * has the flag set. */
1543 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1544 pa_sink_assert_ref(s);
1545
1546 s = pa_sink_get_master(s);
1547
1548 if (PA_LIKELY(s))
1549 return (s->flags & PA_SINK_FLAT_VOLUME);
1550 else
1551 return false;
1552 }
1553
1554 /* Called from the main thread (and also from the IO thread while the main
1555 * thread is waiting). */
1556 pa_sink *pa_sink_get_master(pa_sink *s) {
1557 pa_sink_assert_ref(s);
1558
1559 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1560 if (PA_UNLIKELY(!s->input_to_master))
1561 return NULL;
1562
1563 s = s->input_to_master->sink;
1564 }
1565
1566 return s;
1567 }
1568
1569 /* Called from main context */
1570 bool pa_sink_is_passthrough(pa_sink *s) {
1571 pa_sink_input *alt_i;
1572 uint32_t idx;
1573
1574 pa_sink_assert_ref(s);
1575
1576 /* one and only one PASSTHROUGH input can possibly be connected */
1577 if (pa_idxset_size(s->inputs) == 1) {
1578 alt_i = pa_idxset_first(s->inputs, &idx);
1579
1580 if (pa_sink_input_is_passthrough(alt_i))
1581 return true;
1582 }
1583
1584 return false;
1585 }
1586
/* Called from main context */
/* Switch the sink into passthrough mode: suspend the monitor source and
 * force the volume to (at most) PA_VOLUME_NORM. The previous volume and
 * its save flag are stashed in s->saved_volume / s->saved_save_volume so
 * that pa_sink_leave_passthrough() can restore them. */
void pa_sink_enter_passthrough(pa_sink *s) {
    pa_cvolume volume;

    /* disable the monitor in passthrough mode */
    if (s->monitor_source) {
        pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
        pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
    }

    /* set the volume to NORM */
    s->saved_volume = *pa_sink_get_volume(s, true);
    s->saved_save_volume = s->save_volume;

    /* Clamp at the hardware base volume so the level never exceeds it. */
    pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
    pa_sink_set_volume(s, &volume, true, false);
}
1604
/* Called from main context */
/* Undo pa_sink_enter_passthrough(): resume the monitor source and restore
 * the volume (and its save flag) that were saved when entering. */
void pa_sink_leave_passthrough(pa_sink *s) {
    /* Unsuspend monitor */
    if (s->monitor_source) {
        pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
        pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
    }

    /* Restore sink volume to what it was before we entered passthrough mode */
    pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);

    /* Clear the stash so stale values can't be restored twice. */
    pa_cvolume_init(&s->saved_volume);
    s->saved_save_volume = false;
}
1619
/* Called from main context. */
/* Recompute i->reference_ratio, the per-channel ratio between the
 * stream's volume and its sink's reference volume (flat-volume mode). */
static void compute_reference_ratio(pa_sink_input *i) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(i);
    pa_assert(pa_sink_flat_volume_enabled(i->sink));

    /*
     * Calculates the reference ratio from the sink's reference
     * volume. This basically calculates:
     *
     * i->reference_ratio = i->volume / i->sink->reference_volume
     */

    /* Map the sink's reference volume into the stream's channel map. */
    remapped = i->sink->reference_volume;
    pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);

    i->reference_ratio.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++) {

        /* We don't update when the sink volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                    i->reference_ratio.values[c],
                    remapped.values[c]) == i->volume.values[c])
            continue;

        i->reference_ratio.values[c] = pa_sw_volume_divide(
                i->volume.values[c],
                remapped.values[c]);
    }
}
1657
1658 /* Called from main context. Only called for the root sink in volume sharing
1659 * cases, except for internal recursive calls. */
1660 static void compute_reference_ratios(pa_sink *s) {
1661 uint32_t idx;
1662 pa_sink_input *i;
1663
1664 pa_sink_assert_ref(s);
1665 pa_assert_ctl_context();
1666 pa_assert(PA_SINK_IS_LINKED(s->state));
1667 pa_assert(pa_sink_flat_volume_enabled(s));
1668
1669 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1670 compute_reference_ratio(i);
1671
1672 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1673 compute_reference_ratios(i->origin_sink);
1674 }
1675 }
1676
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Recompute, for every input of this sink, the real ratio (input volume
 * relative to the sink's real volume) and from it the input's soft
 * volume. Descends into volume-sharing filter sinks. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin sink uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
            i->soft_volume = i->volume_factor;

            compute_real_ratios(i->origin_sink);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            /* Note: the soft volume below is recomputed on every pass,
             * regardless of whether the real ratio was updated above. */
            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1743
1744 static pa_cvolume *cvolume_remap_minimal_impact(
1745 pa_cvolume *v,
1746 const pa_cvolume *template,
1747 const pa_channel_map *from,
1748 const pa_channel_map *to) {
1749
1750 pa_cvolume t;
1751
1752 pa_assert(v);
1753 pa_assert(template);
1754 pa_assert(from);
1755 pa_assert(to);
1756 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1757 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1758
1759 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1760 * mapping from sink input to sink volumes:
1761 *
1762 * If template is a possible remapping from v it is used instead
1763 * of remapping anew.
1764 *
1765 * If the channel maps don't match we set an all-channel volume on
1766 * the sink to ensure that changing a volume on one stream has no
1767 * effect that cannot be compensated for in another stream that
1768 * does not have the same channel map as the sink. */
1769
1770 if (pa_channel_map_equal(from, to))
1771 return v;
1772
1773 t = *template;
1774 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1775 *v = *template;
1776 return v;
1777 }
1778
1779 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1780 return v;
1781 }
1782
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Merge the volumes of all streams attached to this sink (and to any
 * volume-sharing filter sinks below it) into *max_volume, remapping each
 * stream volume into 'channel_map' with minimal impact. */
static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* Recurse into the filter sink's own inputs instead. */
            get_maximum_input_volume(i->origin_sink, max_volume, channel_map);

            /* Ignore this input. The origin sink uses volume sharing, so this
             * input's volume will be set to be equal to the root sink's real
             * volume. Obviously this input's current volume must not then
             * affect what the root sink's real volume will be. */
            continue;
        }

        remapped = i->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
    }
}
1812
1813 /* Called from main thread. Only called for the root sink in volume sharing
1814 * cases, except for internal recursive calls. */
1815 static bool has_inputs(pa_sink *s) {
1816 pa_sink_input *i;
1817 uint32_t idx;
1818
1819 pa_sink_assert_ref(s);
1820
1821 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1822 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1823 return true;
1824 }
1825
1826 return false;
1827 }
1828
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
/* Set s->real_volume from *new_volume (expressed in 'channel_map') and
 * propagate it down the chain of volume-sharing filter sinks, keeping the
 * connecting inputs' volumes in sync with the root's real volume. */
static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_sink_flat_volume_enabled(s)) {
                pa_cvolume old_volume = i->volume;

                /* Follow the root sink's real volume. */
                i->volume = *new_volume;
                pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
                compute_reference_ratio(i);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &i->volume)) {
                    if (i->volume_changed)
                        i->volume_changed(i);

                    pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
                }
            }

            /* Recurse so the filter sink (and its own filter sinks) get
             * the same real volume. */
            update_real_volume(i->origin_sink, new_volume, channel_map);
        }
    }
}
1865
/* Called from main thread. Only called for the root sink in shared volume
 * cases. */
/* Derive s->real_volume from the volumes of all connected streams (flat
 * volume mode), then refresh the inputs' real ratios/soft volumes. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_inputs(s)) {
        /* In the special case that we have no sink inputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    /* Start from silence; the merge below can only raise channel levels. */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1896
/* Called from main thread. Only called for the root sink in shared volume
 * cases, except for internal recursive calls. */
/* Push a changed sink reference volume back down to the individual sink
 * inputs (i->volume := s->reference_volume * i->reference_ratio). */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(i->origin_sink);

            /* Since the origin sink uses volume sharing, this input's volume
             * needs to be updated to match the root sink's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        i->volume = s->reference_volume;
        pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1944
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
    pa_cvolume volume;
    bool reference_volume_changed;
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    /* Remap the new volume (given in 'channel_map') into this sink's own
     * channel map before comparing/storing. */
    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* Keep the existing save flag when nothing changed; otherwise take
     * the caller's. */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        /* If the root sink's volume doesn't change, then there can't be any
         * changes in the other sinks in the sink tree either.
         *
         * It's probably theoretically possible that even if the root sink's
         * volume changes slightly, some filter sink doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root sink volume to the sinks connected to the
         * intermediate sink that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here false always if
         * reference_volume_changed is false. */
        return false;

    /* Recurse into volume-sharing filter sinks; their own save flag is
     * always passed as false (only the root's setting is saved). */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(i->origin_sink, v, channel_map, false);
    }

    return true;
}
1992
/* Called from main thread.
 *
 * Sets the reference volume of 's'. If 'volume' is NULL, the sink's real and
 * reference volumes are instead synchronized from the stream volumes (only
 * valid when flat volumes are enabled). 'send_msg' controls whether the IO
 * thread is notified of the change; 'save' marks the new volume as worth
 * persisting. In volume-sharing setups the volume is applied to the root
 * sink and propagated from there. */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        bool send_msg,
        bool save) {

    pa_cvolume new_reference_volume;
    pa_sink *root_sink;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_sink_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
        return;
    }

    /* In case of volume sharing, the volume is set for the root sink first,
     * from which it's then propagated to the sharing sinks. */
    root_sink = pa_sink_get_master(s);

    if (PA_UNLIKELY(!root_sink))
        return;

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current reference volume so its maximum
             * channel matches the requested level. */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        /* Translate from this sink's channel map to the root sink's map. */
        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);

        if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
            if (pa_sink_flat_volume_enabled(root_sink)) {
                /* OK, propagate this volume change back to the inputs */
                propagate_reference_volume(root_sink);

                /* And now recalculate the real volume */
                compute_real_volume(root_sink);
            } else
                update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
        }

    } else {
        /* If volume is NULL we synchronize the sink's real and
         * reference volumes with the stream volumes. */

        pa_assert(pa_sink_flat_volume_enabled(root_sink));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_sink);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
        /* If the sink and it's root don't have the same number of channels, we need to remap */
        if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
            pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
        update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_sink);
    }

    if (root_sink->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_sink->soft_volume */

        pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
        if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
            root_sink->set_volume(root_sink);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_sink->soft_volume = root_sink->real_volume;

    /* This tells the sink that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
2087
2088 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2089 * Only to be called by sink implementor */
2090 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2091
2092 pa_sink_assert_ref(s);
2093 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2094
2095 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2096 pa_sink_assert_io_context(s);
2097 else
2098 pa_assert_ctl_context();
2099
2100 if (!volume)
2101 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2102 else
2103 s->soft_volume = *volume;
2104
2105 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2106 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2107 else
2108 s->thread_info.soft_volume = s->soft_volume;
2109 }
2110
/* Called from the main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        /* Nothing to propagate if the hardware volume didn't actually
         * change. */
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, true);
    }

    if (pa_sink_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            i->volume = s->reference_volume;
            pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }

            /* Recurse into filter sinks that share their volume with us. */
            if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(i->origin_sink, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = true;
}
2176
2177 /* Called from io thread */
2178 void pa_sink_update_volume_and_mute(pa_sink *s) {
2179 pa_assert(s);
2180 pa_sink_assert_io_context(s);
2181
2182 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2183 }
2184
/* Called from main thread.
 *
 * Returns the sink's reference volume. If 'force_refresh' is true (or the
 * sink has refresh_volume set), the real volume is first re-read from the
 * driver/IO thread and any externally made change is propagated to the
 * reference volume and the connected streams. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Non-deferred-volume sinks can be queried right here in the main
         * thread ... */
        if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
            s->get_volume(s);

        /* ... the deferred-volume case is handled by the IO thread in the
         * PA_SINK_MESSAGE_GET_VOLUME handler. */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
2209
/* Called from main thread. In volume sharing cases, only the root sink may
 * call this. */
void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
    pa_cvolume old_real_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* The sink implementor may call this if the volume changed to make sure everyone is notified */

    /* Remember the old real volume so propagate_real_volume() can detect
     * whether anything actually changed. */
    old_real_volume = s->real_volume;
    update_real_volume(s, new_real_volume, &s->channel_map);
    propagate_real_volume(s, &old_real_volume);
}
2226
2227 /* Called from main thread */
2228 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2229 bool old_muted;
2230
2231 pa_sink_assert_ref(s);
2232 pa_assert_ctl_context();
2233 pa_assert(PA_SINK_IS_LINKED(s->state));
2234
2235 old_muted = s->muted;
2236 s->muted = mute;
2237 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
2238
2239 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute)
2240 s->set_mute(s);
2241
2242 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2243
2244 if (old_muted != s->muted)
2245 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2246 }
2247
/* Called from main thread.
 *
 * Returns the current mute state. If 'force_refresh' is true (or
 * refresh_muted is set), the state is first re-read from the driver and any
 * externally made change is published to subscribers. */
bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_muted || force_refresh) {
        bool old_muted = s->muted;

        /* Non-deferred-volume sinks can be queried right here ... */
        if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_mute)
            s->get_mute(s);

        /* ... deferred-volume sinks are queried in the IO thread via
         * PA_SINK_MESSAGE_GET_MUTE. */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            /* The change came from outside PA, i.e. most likely from the
             * user, so remember it. */
            s->save_muted = true;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
2275
/* Called from main thread */
void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The sink implementor may call this if the mute state changed to make sure everyone is notified */

    if (s->muted == new_muted)
        return;

    s->muted = new_muted;
    /* A change not triggered by PA is almost certainly the user's doing, so
     * mark it for saving. */
    s->save_muted = true;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
2292
2293 /* Called from main thread */
2294 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2295 pa_sink_assert_ref(s);
2296 pa_assert_ctl_context();
2297
2298 if (p)
2299 pa_proplist_update(s->proplist, mode, p);
2300
2301 if (PA_SINK_IS_LINKED(s->state)) {
2302 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2303 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2304 }
2305
2306 return true;
2307 }
2308
2309 /* Called from main thread */
2310 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2311 void pa_sink_set_description(pa_sink *s, const char *description) {
2312 const char *old;
2313 pa_sink_assert_ref(s);
2314 pa_assert_ctl_context();
2315
2316 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2317 return;
2318
2319 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2320
2321 if (old && description && pa_streq(old, description))
2322 return;
2323
2324 if (description)
2325 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2326 else
2327 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2328
2329 if (s->monitor_source) {
2330 char *n;
2331
2332 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2333 pa_source_set_description(s->monitor_source, n);
2334 pa_xfree(n);
2335 }
2336
2337 if (PA_SINK_IS_LINKED(s->state)) {
2338 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2339 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2340 }
2341 }
2342
2343 /* Called from main thread */
2344 unsigned pa_sink_linked_by(pa_sink *s) {
2345 unsigned ret;
2346
2347 pa_sink_assert_ref(s);
2348 pa_assert_ctl_context();
2349 pa_assert(PA_SINK_IS_LINKED(s->state));
2350
2351 ret = pa_idxset_size(s->inputs);
2352
2353 /* We add in the number of streams connected to us here. Please
2354 * note the asymmetry to pa_sink_used_by()! */
2355
2356 if (s->monitor_source)
2357 ret += pa_source_linked_by(s->monitor_source);
2358
2359 return ret;
2360 }
2361
2362 /* Called from main thread */
2363 unsigned pa_sink_used_by(pa_sink *s) {
2364 unsigned ret;
2365
2366 pa_sink_assert_ref(s);
2367 pa_assert_ctl_context();
2368 pa_assert(PA_SINK_IS_LINKED(s->state));
2369
2370 ret = pa_idxset_size(s->inputs);
2371 pa_assert(ret >= s->n_corked);
2372
2373 /* Streams connected to our monitor source do not matter for
2374 * pa_sink_used_by()!.*/
2375
2376 return ret - s->n_corked;
2377 }
2378
2379 /* Called from main thread */
2380 unsigned pa_sink_check_suspend(pa_sink *s) {
2381 unsigned ret;
2382 pa_sink_input *i;
2383 uint32_t idx;
2384
2385 pa_sink_assert_ref(s);
2386 pa_assert_ctl_context();
2387
2388 if (!PA_SINK_IS_LINKED(s->state))
2389 return 0;
2390
2391 ret = 0;
2392
2393 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2394 pa_sink_input_state_t st;
2395
2396 st = pa_sink_input_get_state(i);
2397
2398 /* We do not assert here. It is perfectly valid for a sink input to
2399 * be in the INIT state (i.e. created, marked done but not yet put)
2400 * and we should not care if it's unlinked as it won't contribute
2401 * towards our busy status.
2402 */
2403 if (!PA_SINK_INPUT_IS_LINKED(st))
2404 continue;
2405
2406 if (st == PA_SINK_INPUT_CORKED)
2407 continue;
2408
2409 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2410 continue;
2411
2412 ret ++;
2413 }
2414
2415 if (s->monitor_source)
2416 ret += pa_source_check_suspend(s->monitor_source);
2417
2418 return ret;
2419 }
2420
2421 /* Called from the IO thread */
2422 static void sync_input_volumes_within_thread(pa_sink *s) {
2423 pa_sink_input *i;
2424 void *state = NULL;
2425
2426 pa_sink_assert_ref(s);
2427 pa_sink_assert_io_context(s);
2428
2429 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2430 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2431 continue;
2432
2433 i->thread_info.soft_volume = i->soft_volume;
2434 pa_sink_input_request_rewind(i, 0, true, false, false);
2435 }
2436 }
2437
2438 /* Called from the IO thread. Only called for the root sink in volume sharing
2439 * cases, except for internal recursive calls. */
2440 static void set_shared_volume_within_thread(pa_sink *s) {
2441 pa_sink_input *i = NULL;
2442 void *state = NULL;
2443
2444 pa_sink_assert_ref(s);
2445
2446 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2447
2448 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2449 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2450 set_shared_volume_within_thread(i->origin_sink);
2451 }
2452 }
2453
/* Called from IO thread, except when it is not.
 *
 * Default message handler for pa_sink. Dispatches on the message code and
 * returns 0 when the message was handled, -1 for codes not handled here
 * (PA_SINK_MESSAGE_GET_LATENCY and PA_SINK_MESSAGE_MAX). */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Wire up the thread-side synchronization links, mirroring the
             * main-thread sync_prev/sync_next pointers. */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = true;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be fixed up and
             * then configured on the sink. If this causes the sink latency to
             * go down, the sink implementor is responsible for doing a rewind
             * in the update_requested_latency() callback to ensure that the
             * sink buffer doesn't contain more data than what the new latency
             * allows.
             *
             * XXX: Does it really make sense to push this responsibility to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming.
             *
             * XXX: Does it really make sense to push this functionality to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = false;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unlink this input from the thread-side synchronization chain. */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, true);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* The old sink probably has some audio from this
                 * stream in its buffer. We want to "take it back" as
                 * much as possible and play it to the new sink. We
                 * don't know at this point how much the old sink can
                 * rewind. We have to pick something, and that
                 * something is the full latency of the old sink here.
                 * So we rewind the stream buffer by the sink latency
                 * amount, which may be more than what we should
                 * rewind. This can result in a chunk of audio being
                 * played both to the old sink and the new sink.
                 *
                 * FIXME: Fix this code so that we don't have to make
                 * guesses about how much the sink will actually be
                 * able to rewind. If someone comes up with a solution
                 * for this, something to note is that the part of the
                 * latency that the old sink couldn't rewind should
                 * ideally be compensated after the stream has moved
                 * to the new sink by adding silence. The new sink
                 * most likely can't start playing the moved stream
                 * immediately, and that gap should be removed from
                 * the "compensation silence" (at least at the time of
                 * writing this, the move finish code will actually
                 * already take care of dropping the new sink's
                 * unrewindable latency, so taking into account the
                 * unrewindable latency of the old sink is the only
                 * problem).
                 *
                 * The render_memblockq contents are discarded,
                 * because when the sink changes, the format of the
                 * audio stored in the render_memblockq may change
                 * too, making the stored audio invalid. FIXME:
                 * However, the read and write indices are moved back
                 * the same amount, so if they are not the same now,
                 * they won't be the same after the rewind either. If
                 * the write index of the render_memblockq is ahead of
                 * the read index, then the render_memblockq will feed
                 * the new sink some silence first, which it shouldn't
                 * do. The write index should be flushed to be the
                 * same as the read index. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = true;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = false;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, true);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = true;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* In the ideal case the new sink would start playing
                 * the stream immediately. That requires the sink to
                 * be able to rewind all of its latency, which usually
                 * isn't possible, so there will probably be some gap
                 * before the moved stream becomes audible. We then
                 * have two possibilities: 1) start playing the stream
                 * from where it is now, or 2) drop the unrewindable
                 * latency of the sink from the stream. With option 1
                 * we won't lose any audio but the stream will have a
                 * pause. With option 2 we may lose some audio but the
                 * stream time will be somewhat in sync with the wall
                 * clock. Lennart seems to have chosen option 2 (one
                 * of the reasons might have been that option 1 is
                 * actually much harder to implement), so we drop the
                 * latency of the new sink from the moved stream and
                 * hope that the sink will undo most of that in the
                 * rewind. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* Updating the requested sink latency has to be done
             * after the sink rewind request, not before, because
             * otherwise the sink may limit the rewind amount
             * needlessly. */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* In flat volume mode we need to update the volume as well. */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            /* Apply the volume on the whole volume-sharing sink tree,
             * starting at the root. */
            pa_sink *root_sink = pa_sink_get_master(s);

            if (PA_LIKELY(root_sink))
                set_shared_volume_within_thread(root_sink);

            return 0;
        }

        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            /* Deferred-volume sinks apply the hardware volume here, in the
             * IO thread, and queue the change for smooth application. */
            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                s->set_volume(s);
                pa_sink_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            /* Pick up the new soft volume and rewind so that it also applies
             * to audio that is already rendered. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:

            /* Deferred-volume sinks read the hardware volume here, in the IO
             * thread, flushing any pending smooth volume changes first. */
            if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            /* Pick up the new mute state and rewind so that it also applies
             * to audio that is already rendered. */
            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:

            /* Deferred-volume sinks read the mute state in the IO thread. */
            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Are we transitioning between "suspended" and "opened" (in
             * either direction)? */
            bool suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A suspended sink can't carry a pending rewind request. */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = false;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                /* Notify the inputs about the suspend state flip. */
                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            /* userdata points at a two-element array: {min, max}. */
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            /* userdata points at a two-element array: {min, max}. */
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            /* The value is carried in the message's offset field. */
            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_PORT:

            /* The result is passed back via msg_data->ret. */
            pa_assert(userdata);
            if (s->set_port) {
                struct sink_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SINK_IS_LINKED(s->state))
                return 0;

            pa_sink_get_volume(s, true);
            pa_sink_get_mute(s, true);
            return 0;

        case PA_SINK_MESSAGE_SET_LATENCY_OFFSET:
            /* The offset is carried in the message's offset field. */
            s->thread_info.latency_offset = offset;
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            /* Not handled here; fall out and return -1. */
            ;
    }

    return -1;
}
2900
2901 /* Called from main thread */
2902 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2903 pa_sink *sink;
2904 uint32_t idx;
2905 int ret = 0;
2906
2907 pa_core_assert_ref(c);
2908 pa_assert_ctl_context();
2909 pa_assert(cause != 0);
2910
2911 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2912 int r;
2913
2914 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2915 ret = r;
2916 }
2917
2918 return ret;
2919 }
2920
2921 /* Called from IO thread */
2922 void pa_sink_detach_within_thread(pa_sink *s) {
2923 pa_sink_input *i;
2924 void *state = NULL;
2925
2926 pa_sink_assert_ref(s);
2927 pa_sink_assert_io_context(s);
2928 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2929
2930 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2931 if (i->detach)
2932 i->detach(i);
2933
2934 if (s->monitor_source)
2935 pa_source_detach_within_thread(s->monitor_source);
2936 }
2937
2938 /* Called from IO thread */
2939 void pa_sink_attach_within_thread(pa_sink *s) {
2940 pa_sink_input *i;
2941 void *state = NULL;
2942
2943 pa_sink_assert_ref(s);
2944 pa_sink_assert_io_context(s);
2945 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2946
2947 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2948 if (i->attach)
2949 i->attach(i);
2950
2951 if (s->monitor_source)
2952 pa_source_attach_within_thread(s->monitor_source);
2953 }
2954
2955 /* Called from IO thread */
2956 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2957 pa_sink_assert_ref(s);
2958 pa_sink_assert_io_context(s);
2959 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2960
2961 if (nbytes == (size_t) -1)
2962 nbytes = s->thread_info.max_rewind;
2963
2964 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2965
2966 if (s->thread_info.rewind_requested &&
2967 nbytes <= s->thread_info.rewind_nbytes)
2968 return;
2969
2970 s->thread_info.rewind_nbytes = nbytes;
2971 s->thread_info.rewind_requested = true;
2972
2973 if (s->request_rewind)
2974 s->request_rewind(s);
2975 }
2976
/* Called from IO thread. Computes the effective latency requested of this
 * sink: the minimum of all latencies requested by its inputs and by the
 * monitor source, clamped to the sink's configured latency range. Returns
 * (pa_usec_t) -1 if nobody requested anything. The result is cached once the
 * sink is linked. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Without dynamic latency support the (clamped) fixed latency is all we
     * can ever offer. */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Return the cached value if it is still valid (invalidated via
     * pa_sink_invalidate_requested_latency()). */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Take the smallest latency any input asked for; (pa_usec_t) -1 means
     * "no request". */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* The monitor source's request counts too. */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = true;
    }

    return result;
}
3015
3016 /* Called from main thread */
3017 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3018 pa_usec_t usec = 0;
3019
3020 pa_sink_assert_ref(s);
3021 pa_assert_ctl_context();
3022 pa_assert(PA_SINK_IS_LINKED(s->state));
3023
3024 if (s->state == PA_SINK_SUSPENDED)
3025 return 0;
3026
3027 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3028
3029 return usec;
3030 }
3031
3032 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3033 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3034 pa_sink_input *i;
3035 void *state = NULL;
3036
3037 pa_sink_assert_ref(s);
3038 pa_sink_assert_io_context(s);
3039
3040 if (max_rewind == s->thread_info.max_rewind)
3041 return;
3042
3043 s->thread_info.max_rewind = max_rewind;
3044
3045 if (PA_SINK_IS_LINKED(s->thread_info.state))
3046 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3047 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3048
3049 if (s->monitor_source)
3050 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3051 }
3052
3053 /* Called from main thread */
3054 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3055 pa_sink_assert_ref(s);
3056 pa_assert_ctl_context();
3057
3058 if (PA_SINK_IS_LINKED(s->state))
3059 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3060 else
3061 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3062 }
3063
3064 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3065 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3066 void *state = NULL;
3067
3068 pa_sink_assert_ref(s);
3069 pa_sink_assert_io_context(s);
3070
3071 if (max_request == s->thread_info.max_request)
3072 return;
3073
3074 s->thread_info.max_request = max_request;
3075
3076 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3077 pa_sink_input *i;
3078
3079 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3080 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3081 }
3082 }
3083
3084 /* Called from main thread */
3085 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3086 pa_sink_assert_ref(s);
3087 pa_assert_ctl_context();
3088
3089 if (PA_SINK_IS_LINKED(s->state))
3090 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3091 else
3092 pa_sink_set_max_request_within_thread(s, max_request);
3093 }
3094
/* Called from IO thread. Invalidates the cached requested latency and lets
 * the implementation and all inputs recompute. 'dynamic' marks an
 * invalidation caused by a dynamic latency change; those are irrelevant for
 * sinks without PA_SINK_DYNAMIC_LATENCY. */
void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Only dynamic-latency sinks actually cache a requested latency. For
     * fixed-latency sinks a purely dynamic invalidation is a no-op. */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = false;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let the implementation react first... */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        /* ...then notify every input that registered a callback. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
3118
3119 /* Called from main thread */
3120 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3121 pa_sink_assert_ref(s);
3122 pa_assert_ctl_context();
3123
3124 /* min_latency == 0: no limit
3125 * min_latency anything else: specified limit
3126 *
3127 * Similar for max_latency */
3128
3129 if (min_latency < ABSOLUTE_MIN_LATENCY)
3130 min_latency = ABSOLUTE_MIN_LATENCY;
3131
3132 if (max_latency <= 0 ||
3133 max_latency > ABSOLUTE_MAX_LATENCY)
3134 max_latency = ABSOLUTE_MAX_LATENCY;
3135
3136 pa_assert(min_latency <= max_latency);
3137
3138 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3139 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3140 max_latency == ABSOLUTE_MAX_LATENCY) ||
3141 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3142
3143 if (PA_SINK_IS_LINKED(s->state)) {
3144 pa_usec_t r[2];
3145
3146 r[0] = min_latency;
3147 r[1] = max_latency;
3148
3149 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3150 } else
3151 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3152 }
3153
3154 /* Called from main thread */
3155 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3156 pa_sink_assert_ref(s);
3157 pa_assert_ctl_context();
3158 pa_assert(min_latency);
3159 pa_assert(max_latency);
3160
3161 if (PA_SINK_IS_LINKED(s->state)) {
3162 pa_usec_t r[2] = { 0, 0 };
3163
3164 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3165
3166 *min_latency = r[0];
3167 *max_latency = r[1];
3168 } else {
3169 *min_latency = s->thread_info.min_latency;
3170 *max_latency = s->thread_info.max_latency;
3171 }
3172 }
3173
/* Called from IO thread. Applies a new latency range, notifies interested
 * inputs, invalidates the cached requested latency, and keeps the monitor
 * source's range in sync. Callers must pass values already clamped to the
 * absolute range. */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* Nothing to do if the range is unchanged. */
    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Notify every input that registered a range-change callback. */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency may now fall outside the new range. */
    pa_sink_invalidate_requested_latency(s, false);

    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
3208
3209 /* Called from main thread */
3210 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3211 pa_sink_assert_ref(s);
3212 pa_assert_ctl_context();
3213
3214 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3215 pa_assert(latency == 0);
3216 return;
3217 }
3218
3219 if (latency < ABSOLUTE_MIN_LATENCY)
3220 latency = ABSOLUTE_MIN_LATENCY;
3221
3222 if (latency > ABSOLUTE_MAX_LATENCY)
3223 latency = ABSOLUTE_MAX_LATENCY;
3224
3225 if (PA_SINK_IS_LINKED(s->state))
3226 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3227 else
3228 s->thread_info.fixed_latency = latency;
3229
3230 pa_source_set_fixed_latency(s->monitor_source, latency);
3231 }
3232
3233 /* Called from main thread */
3234 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3235 pa_usec_t latency;
3236
3237 pa_sink_assert_ref(s);
3238 pa_assert_ctl_context();
3239
3240 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3241 return 0;
3242
3243 if (PA_SINK_IS_LINKED(s->state))
3244 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3245 else
3246 latency = s->thread_info.fixed_latency;
3247
3248 return latency;
3249 }
3250
/* Called from IO thread. Applies a new fixed latency, notifies interested
 * inputs, invalidates the cached requested latency, and mirrors the value
 * onto the monitor source. */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Dynamic-latency sinks have no fixed latency: force it (and the
     * monitor source's) to 0. */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        s->thread_info.fixed_latency = 0;

        if (s->monitor_source)
            pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);

        return;
    }

    /* Callers must have clamped the value into the absolute range. */
    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    /* Nothing to do if unchanged. */
    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    /* Notify every input that registered a fixed-latency callback. */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* The cached requested latency is derived from the fixed latency. */
    pa_sink_invalidate_requested_latency(s, false);

    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
3287
3288 /* Called from main context */
3289 void pa_sink_set_latency_offset(pa_sink *s, int64_t offset) {
3290 pa_sink_assert_ref(s);
3291
3292 s->latency_offset = offset;
3293
3294 if (PA_SINK_IS_LINKED(s->state))
3295 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3296 else
3297 s->thread_info.latency_offset = offset;
3298 }
3299
3300 /* Called from main context */
3301 size_t pa_sink_get_max_rewind(pa_sink *s) {
3302 size_t r;
3303 pa_assert_ctl_context();
3304 pa_sink_assert_ref(s);
3305
3306 if (!PA_SINK_IS_LINKED(s->state))
3307 return s->thread_info.max_rewind;
3308
3309 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3310
3311 return r;
3312 }
3313
3314 /* Called from main context */
3315 size_t pa_sink_get_max_request(pa_sink *s) {
3316 size_t r;
3317 pa_sink_assert_ref(s);
3318 pa_assert_ctl_context();
3319
3320 if (!PA_SINK_IS_LINKED(s->state))
3321 return s->thread_info.max_request;
3322
3323 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3324
3325 return r;
3326 }
3327
/* Called from main context. Switches the sink's active port to the port
 * registered under 'name'. 'save' marks whether the choice should be
 * remembered. Returns 0 on success or a negative PA_ERR_* code. */
int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
    pa_device_port *port;
    int ret;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (!s->set_port) {
        pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
        return -PA_ERR_NOTIMPLEMENTED;
    }

    if (!name)
        return -PA_ERR_NOENTITY;

    if (!(port = pa_hashmap_get(s->ports, name)))
        return -PA_ERR_NOENTITY;

    /* Already active: just upgrade the save flag if requested. */
    if (s->active_port == port) {
        s->save_port = s->save_port || save;
        return 0;
    }

    /* With deferred volume the set_port() callback must run in the IO
     * thread, so route the request through a message. */
    if (s->flags & PA_SINK_DEFERRED_VOLUME) {
        struct sink_message_set_port msg = { .port = port, .ret = 0 };
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
        ret = msg.ret;
    }
    else
        ret = s->set_port(s, port);

    if (ret < 0)
        return -PA_ERR_NOENTITY;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

    pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);

    s->active_port = port;
    s->save_port = save;

    /* Each port carries its own latency offset. */
    pa_sink_set_latency_offset(s, s->active_port->latency_offset);

    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);

    return 0;
}
3376
3377 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3378 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3379
3380 pa_assert(p);
3381
3382 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3383 return true;
3384
3385 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3386
3387 if (pa_streq(ff, "microphone"))
3388 t = "audio-input-microphone";
3389 else if (pa_streq(ff, "webcam"))
3390 t = "camera-web";
3391 else if (pa_streq(ff, "computer"))
3392 t = "computer";
3393 else if (pa_streq(ff, "handset"))
3394 t = "phone";
3395 else if (pa_streq(ff, "portable"))
3396 t = "multimedia-player";
3397 else if (pa_streq(ff, "tv"))
3398 t = "video-display";
3399
3400 /*
3401 * The following icons are not part of the icon naming spec,
3402 * because Rodney Dawes sucks as the maintainer of that spec.
3403 *
3404 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3405 */
3406 else if (pa_streq(ff, "headset"))
3407 t = "audio-headset";
3408 else if (pa_streq(ff, "headphone"))
3409 t = "audio-headphones";
3410 else if (pa_streq(ff, "speaker"))
3411 t = "audio-speakers";
3412 else if (pa_streq(ff, "hands-free"))
3413 t = "audio-handsfree";
3414 }
3415
3416 if (!t)
3417 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3418 if (pa_streq(c, "modem"))
3419 t = "modem";
3420
3421 if (!t) {
3422 if (is_sink)
3423 t = "audio-card";
3424 else
3425 t = "audio-input-microphone";
3426 }
3427
3428 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3429 if (strstr(profile, "analog"))
3430 s = "-analog";
3431 else if (strstr(profile, "iec958"))
3432 s = "-iec958";
3433 else if (strstr(profile, "hdmi"))
3434 s = "-hdmi";
3435 }
3436
3437 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3438
3439 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3440
3441 return true;
3442 }
3443
3444 bool pa_device_init_description(pa_proplist *p) {
3445 const char *s, *d = NULL, *k;
3446 pa_assert(p);
3447
3448 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3449 return true;
3450
3451 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3452 if (pa_streq(s, "internal"))
3453 d = _("Built-in Audio");
3454
3455 if (!d)
3456 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3457 if (pa_streq(s, "modem"))
3458 d = _("Modem");
3459
3460 if (!d)
3461 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3462
3463 if (!d)
3464 return false;
3465
3466 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3467
3468 if (d && k)
3469 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3470 else if (d)
3471 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3472
3473 return true;
3474 }
3475
3476 bool pa_device_init_intended_roles(pa_proplist *p) {
3477 const char *s;
3478 pa_assert(p);
3479
3480 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3481 return true;
3482
3483 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3484 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3485 || pa_streq(s, "headset")) {
3486 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3487 return true;
3488 }
3489
3490 return false;
3491 }
3492
3493 unsigned pa_device_init_priority(pa_proplist *p) {
3494 const char *s;
3495 unsigned priority = 0;
3496
3497 pa_assert(p);
3498
3499 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3500
3501 if (pa_streq(s, "sound"))
3502 priority += 9000;
3503 else if (!pa_streq(s, "modem"))
3504 priority += 1000;
3505 }
3506
3507 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3508
3509 if (pa_streq(s, "internal"))
3510 priority += 900;
3511 else if (pa_streq(s, "speaker"))
3512 priority += 500;
3513 else if (pa_streq(s, "headphone"))
3514 priority += 400;
3515 }
3516
3517 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3518
3519 if (pa_streq(s, "pci"))
3520 priority += 50;
3521 else if (pa_streq(s, "usb"))
3522 priority += 40;
3523 else if (pa_streq(s, "bluetooth"))
3524 priority += 30;
3525 }
3526
3527 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3528
3529 if (pa_startswith(s, "analog-"))
3530 priority += 9;
3531 else if (pa_startswith(s, "iec958-"))
3532 priority += 8;
3533 }
3534
3535 return priority;
3536 }
3537
/* Free list used to recycle pa_sink_volume_change structs (see _new()/_free()
 * below) instead of hitting the allocator on every volume change. */
PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3539
3540 /* Called from the IO thread. */
3541 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3542 pa_sink_volume_change *c;
3543 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3544 c = pa_xnew(pa_sink_volume_change, 1);
3545
3546 PA_LLIST_INIT(pa_sink_volume_change, c);
3547 c->at = 0;
3548 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3549 return c;
3550 }
3551
3552 /* Called from the IO thread. */
3553 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3554 pa_assert(c);
3555 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3556 pa_xfree(c);
3557 }
3558
3559 /* Called from the IO thread. */
3560 void pa_sink_volume_change_push(pa_sink *s) {
3561 pa_sink_volume_change *c = NULL;
3562 pa_sink_volume_change *nc = NULL;
3563 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3564
3565 const char *direction = NULL;
3566
3567 pa_assert(s);
3568 nc = pa_sink_volume_change_new(s);
3569
3570 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3571 * Adding one more volume for HW would get us rid of this, but I am trying
3572 * to survive with the ones we already have. */
3573 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3574
3575 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3576 pa_log_debug("Volume not changing");
3577 pa_sink_volume_change_free(nc);
3578 return;
3579 }
3580
3581 nc->at = pa_sink_get_latency_within_thread(s);
3582 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3583
3584 if (s->thread_info.volume_changes_tail) {
3585 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3586 /* If volume is going up let's do it a bit late. If it is going
3587 * down let's do it a bit early. */
3588 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3589 if (nc->at + safety_margin > c->at) {
3590 nc->at += safety_margin;
3591 direction = "up";
3592 break;
3593 }
3594 }
3595 else if (nc->at - safety_margin > c->at) {
3596 nc->at -= safety_margin;
3597 direction = "down";
3598 break;
3599 }
3600 }
3601 }
3602
3603 if (c == NULL) {
3604 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3605 nc->at += safety_margin;
3606 direction = "up";
3607 } else {
3608 nc->at -= safety_margin;
3609 direction = "down";
3610 }
3611 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3612 }
3613 else {
3614 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3615 }
3616
3617 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3618
3619 /* We can ignore volume events that came earlier but should happen later than this. */
3620 PA_LLIST_FOREACH(c, nc->next) {
3621 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3622 pa_sink_volume_change_free(c);
3623 }
3624 nc->next = NULL;
3625 s->thread_info.volume_changes_tail = nc;
3626 }
3627
3628 /* Called from the IO thread. */
3629 static void pa_sink_volume_change_flush(pa_sink *s) {
3630 pa_sink_volume_change *c = s->thread_info.volume_changes;
3631 pa_assert(s);
3632 s->thread_info.volume_changes = NULL;
3633 s->thread_info.volume_changes_tail = NULL;
3634 while (c) {
3635 pa_sink_volume_change *next = c->next;
3636 pa_sink_volume_change_free(c);
3637 c = next;
3638 }
3639 }
3640
3641 /* Called from the IO thread. */
3642 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3643 pa_usec_t now;
3644 bool ret = false;
3645
3646 pa_assert(s);
3647
3648 if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3649 if (usec_to_next)
3650 *usec_to_next = 0;
3651 return ret;
3652 }
3653
3654 pa_assert(s->write_volume);
3655
3656 now = pa_rtclock_now();
3657
3658 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3659 pa_sink_volume_change *c = s->thread_info.volume_changes;
3660 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3661 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3662 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3663 ret = true;
3664 s->thread_info.current_hw_volume = c->hw_volume;
3665 pa_sink_volume_change_free(c);
3666 }
3667
3668 if (ret)
3669 s->write_volume(s);
3670
3671 if (s->thread_info.volume_changes) {
3672 if (usec_to_next)
3673 *usec_to_next = s->thread_info.volume_changes->at - now;
3674 if (pa_log_ratelimit(PA_LOG_DEBUG))
3675 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3676 }
3677 else {
3678 if (usec_to_next)
3679 *usec_to_next = 0;
3680 s->thread_info.volume_changes_tail = NULL;
3681 }
3682 return ret;
3683 }
3684
/* Called from the IO thread. After a rewind of nbytes, shifts all queued
 * volume change events that lie beyond the current latency horizon earlier
 * by the rewound duration, then applies any that became due. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit = pa_sink_get_latency_within_thread(s);

    pa_log_debug("latency = %lld", (long long) limit);
    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        pa_usec_t modified_limit = limit;
        /* Mirror the safety-margin logic of _push(): downward changes may
         * happen a bit early, upward changes a bit late. */
        if (prev_vol > pa_cvolume_avg(&c->hw_volume))
            modified_limit -= s->thread_info.volume_change_safety_margin;
        else
            modified_limit += s->thread_info.volume_change_safety_margin;
        /* Shift events beyond the limit earlier by the rewound duration,
         * but never to before the limit itself. */
        if (c->at > modified_limit) {
            c->at -= rewound;
            if (c->at < modified_limit)
                c->at = modified_limit;
        }
        prev_vol = pa_cvolume_avg(&c->hw_volume);
    }
    pa_sink_volume_change_apply(s, NULL);
}
3711
3712 /* Called from the main thread */
3713 /* Gets the list of formats supported by the sink. The members and idxset must
3714 * be freed by the caller. */
3715 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3716 pa_idxset *ret;
3717
3718 pa_assert(s);
3719
3720 if (s->get_formats) {
3721 /* Sink supports format query, all is good */
3722 ret = s->get_formats(s);
3723 } else {
3724 /* Sink doesn't support format query, so assume it does PCM */
3725 pa_format_info *f = pa_format_info_new();
3726 f->encoding = PA_ENCODING_PCM;
3727
3728 ret = pa_idxset_new(NULL, NULL);
3729 pa_idxset_put(ret, f, NULL);
3730 }
3731
3732 return ret;
3733 }
3734
3735 /* Called from the main thread */
3736 /* Allows an external source to set what formats a sink supports if the sink
3737 * permits this. The function makes a copy of the formats on success. */
3738 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3739 pa_assert(s);
3740 pa_assert(formats);
3741
3742 if (s->set_formats)
3743 /* Sink supports setting formats -- let's give it a shot */
3744 return s->set_formats(s, formats);
3745 else
3746 /* Sink doesn't support setting this -- bail out */
3747 return false;
3748 }
3749
3750 /* Called from the main thread */
3751 /* Checks if the sink can accept this format */
3752 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3753 pa_idxset *formats = NULL;
3754 bool ret = false;
3755
3756 pa_assert(s);
3757 pa_assert(f);
3758
3759 formats = pa_sink_get_formats(s);
3760
3761 if (formats) {
3762 pa_format_info *finfo_device;
3763 uint32_t i;
3764
3765 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3766 if (pa_format_info_is_compatible(finfo_device, f)) {
3767 ret = true;
3768 break;
3769 }
3770 }
3771
3772 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3773 }
3774
3775 return ret;
3776 }
3777
3778 /* Called from the main thread */
3779 /* Calculates the intersection between formats supported by the sink and
3780 * in_formats, and returns these, in the order of the sink's formats. */
3781 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3782 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3783 pa_format_info *f_sink, *f_in;
3784 uint32_t i, j;
3785
3786 pa_assert(s);
3787
3788 if (!in_formats || pa_idxset_isempty(in_formats))
3789 goto done;
3790
3791 sink_formats = pa_sink_get_formats(s);
3792
3793 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3794 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3795 if (pa_format_info_is_compatible(f_sink, f_in))
3796 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3797 }
3798 }
3799
3800 done:
3801 if (sink_formats)
3802 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3803
3804 return out_formats;
3805 }