]> code.delx.au - pulseaudio/blob - src/pulsecore/source.c
sink, source: Fix setting the latency offset when the sink/source is unlinked.
[pulseaudio] / src / pulsecore / source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
37
38 #include <pulsecore/core-util.h>
39 #include <pulsecore/source-output.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-subscribe.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/flist.h>
45
46 #include "source.h"
47
/* Hard bounds enforced on source latency values, plus the default used
 * when a source does not support dynamic latency (values in usec). */
#define ABSOLUTE_MIN_LATENCY (500)
#define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
#define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
51
52 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
53
/* One entry in the per-source queue of deferred hardware volume changes
 * (used with PA_SOURCE_DEFERRED_VOLUME; see pa_source_volume_change_push/
 * _flush below). */
struct pa_source_volume_change {
    pa_usec_t at;           /* timestamp for the change — presumably when it should take effect; see _push() */
    pa_cvolume hw_volume;   /* hardware volume associated with this entry */

    PA_LLIST_FIELDS(pa_source_volume_change);
};
60
/* Payload for PA_SOURCE_MESSAGE_SET_PORT: carries the port to activate
 * into the IO thread and returns the set_port() callback's result in
 * 'ret' (see pa_source_suspend() for a sender). */
struct source_message_set_port {
    pa_device_port *port;
    int ret;
};
65
66 static void source_free(pa_object *o);
67
68 static void pa_source_volume_change_push(pa_source *s);
69 static void pa_source_volume_change_flush(pa_source *s);
70
71 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
72 pa_assert(data);
73
74 pa_zero(*data);
75 data->proplist = pa_proplist_new();
76
77 return data;
78 }
79
80 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
81 pa_assert(data);
82
83 pa_xfree(data->name);
84 data->name = pa_xstrdup(name);
85 }
86
87 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
88 pa_assert(data);
89
90 if ((data->sample_spec_is_set = !!spec))
91 data->sample_spec = *spec;
92 }
93
94 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
95 pa_assert(data);
96
97 if ((data->channel_map_is_set = !!map))
98 data->channel_map = *map;
99 }
100
101 void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
102 pa_assert(data);
103
104 data->alternate_sample_rate_is_set = TRUE;
105 data->alternate_sample_rate = alternate_sample_rate;
106 }
107
108 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
109 pa_assert(data);
110
111 if ((data->volume_is_set = !!volume))
112 data->volume = *volume;
113 }
114
115 void pa_source_new_data_set_muted(pa_source_new_data *data, pa_bool_t mute) {
116 pa_assert(data);
117
118 data->muted_is_set = TRUE;
119 data->muted = !!mute;
120 }
121
122 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
123 pa_assert(data);
124
125 pa_xfree(data->active_port);
126 data->active_port = pa_xstrdup(port);
127 }
128
129 void pa_source_new_data_done(pa_source_new_data *data) {
130 pa_assert(data);
131
132 pa_proplist_free(data->proplist);
133
134 if (data->ports)
135 pa_device_port_hashmap_free(data->ports);
136
137 pa_xfree(data->name);
138 pa_xfree(data->active_port);
139 }
140
/* Called from main context */
/* Clear every implementation callback on the source. Used both when a
 * fresh source is created (pa_source_new) and when a source is torn
 * down (pa_source_unlink), so no stale driver callbacks can fire. */
static void reset_callbacks(pa_source *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->write_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
    s->get_formats = NULL;
    s->update_rate = NULL;
}
156
/* Called from main context */
/* Allocate and initialize a new source object from the validated
 * contents of *data. The object is left in PA_SOURCE_INIT state; the
 * implementor must still set the asyncmsgq, latency bounds and
 * callbacks and then call pa_source_put(). Returns NULL when name
 * registration fails, when validation of *data fails, or when the
 * SOURCE_NEW/SOURCE_FIXATE hooks veto creation. */
pa_source* pa_source_new(
        pa_core *core,
        pa_source_new_data *data,
        pa_source_flags_t flags) {

    pa_source *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_source);

    /* Reserve the name in the core's name registry (may fail, e.g. on
     * duplicates when namereg_fail is set) */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_source_new_data_set_name(data, name);

    /* Give modules a chance to adjust or veto the new source */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    /* Validate the caller-supplied parameters; each failing check
     * returns NULL (pa_return_null_if_fail), leaking s — see FIXME */
    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the source volume is allowed to be set, like there is for source outputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, then fill in generic
     * device properties */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, FALSE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to veto, now that everything is fixed up */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = source_free;
    s->parent.process_msg = pa_source_process_msg;

    s->core = core;
    s->state = PA_SOURCE_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    pa_source_set_mixer_dirty(s, FALSE);
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;
    s->default_sample_rate = s->sample_spec.rate;

    if (data->alternate_sample_rate_is_set)
        s->alternate_sample_rate = data->alternate_sample_rate;
    else
        s->alternate_sample_rate = s->core->alternate_sample_rate;

    /* An alternate rate identical to the default is useless; disable it */
    if (s->sample_spec.rate == s->alternate_sample_rate) {
        pa_log_warn("Default and alternate sample rates are the same.");
        s->alternate_sample_rate = 0;
    }

    s->outputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->monitor_of = NULL;
    s->output_from_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Honor the explicitly requested port if it exists... */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise pick the highest-priority port as the default */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    /* The latency offset follows the active port */
    if (s->active_port)
        s->latency_offset = s->active_port->latency_offset;
    else
        s->latency_offset = 0;

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Seed the IO-thread-side shadow state from the main-thread values */
    s->thread_info.rtpoll = NULL;
    s->thread_info.outputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.max_rewind = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
    s->thread_info.latency_offset = s->latency_offset;

    /* FIXME: This should probably be moved to pa_source_put() */
    pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    return s;
}
350
/* Called from main context */
/* Move the source to 'state'. First the implementor's set_state()
 * callback is invoked, then the IO thread is informed synchronously via
 * PA_SOURCE_MESSAGE_SET_STATE; if the message fails, the callback is
 * rolled back to the original state. Fires state-changed hooks and a
 * subscription event (unless entering UNLINKED, where pa_source_unlink()
 * fires its own events), and notifies outputs of suspend/resume.
 * Returns 0 on success or a negative error code. */
static int source_set_state(pa_source *s, pa_source_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_source_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* Are we crossing the suspended/opened boundary in either direction? */
    suspend_change =
        (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
        (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* Undo the callback's effect so main-thread and driver state
             * stay consistent */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_source_output *o;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(o, s->outputs, idx)
            if (s->state == PA_SOURCE_SUSPENDED &&
                (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
                pa_source_output_kill(o);
            else if (o->suspend)
                o->suspend(o, state == PA_SOURCE_SUSPENDED);
    }

    return 0;
}
405
406 void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
407 pa_assert(s);
408
409 s->get_volume = cb;
410 }
411
412 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
413 pa_source_flags_t flags;
414
415 pa_assert(s);
416 pa_assert(!s->write_volume || cb);
417
418 s->set_volume = cb;
419
420 /* Save the current flags so we can tell if they've changed */
421 flags = s->flags;
422
423 if (cb) {
424 /* The source implementor is responsible for setting decibel volume support */
425 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
426 } else {
427 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
428 /* See note below in pa_source_put() about volume sharing and decibel volumes */
429 pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
430 }
431
432 /* If the flags have changed after init, let any clients know via a change event */
433 if (s->state != PA_SOURCE_INIT && flags != s->flags)
434 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
435 }
436
437 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
438 pa_source_flags_t flags;
439
440 pa_assert(s);
441 pa_assert(!cb || s->set_volume);
442
443 s->write_volume = cb;
444
445 /* Save the current flags so we can tell if they've changed */
446 flags = s->flags;
447
448 if (cb)
449 s->flags |= PA_SOURCE_DEFERRED_VOLUME;
450 else
451 s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;
452
453 /* If the flags have changed after init, let any clients know via a change event */
454 if (s->state != PA_SOURCE_INIT && flags != s->flags)
455 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
456 }
457
458 void pa_source_set_get_mute_callback(pa_source *s, pa_source_cb_t cb) {
459 pa_assert(s);
460
461 s->get_mute = cb;
462 }
463
464 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
465 pa_source_flags_t flags;
466
467 pa_assert(s);
468
469 s->set_mute = cb;
470
471 /* Save the current flags so we can tell if they've changed */
472 flags = s->flags;
473
474 if (cb)
475 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
476 else
477 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
478
479 /* If the flags have changed after init, let any clients know via a change event */
480 if (s->state != PA_SOURCE_INIT && flags != s->flags)
481 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
482 }
483
484 static void enable_flat_volume(pa_source *s, pa_bool_t enable) {
485 pa_source_flags_t flags;
486
487 pa_assert(s);
488
489 /* Always follow the overall user preference here */
490 enable = enable && s->core->flat_volumes;
491
492 /* Save the current flags so we can tell if they've changed */
493 flags = s->flags;
494
495 if (enable)
496 s->flags |= PA_SOURCE_FLAT_VOLUME;
497 else
498 s->flags &= ~PA_SOURCE_FLAT_VOLUME;
499
500 /* If the flags have changed after init, let any clients know via a change event */
501 if (s->state != PA_SOURCE_INIT && flags != s->flags)
502 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
503 }
504
505 void pa_source_enable_decibel_volume(pa_source *s, pa_bool_t enable) {
506 pa_source_flags_t flags;
507
508 pa_assert(s);
509
510 /* Save the current flags so we can tell if they've changed */
511 flags = s->flags;
512
513 if (enable) {
514 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
515 enable_flat_volume(s, TRUE);
516 } else {
517 s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
518 enable_flat_volume(s, FALSE);
519 }
520
521 /* If the flags have changed after init, let any clients know via a change event */
522 if (s->state != PA_SOURCE_INIT && flags != s->flags)
523 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
524 }
525
/* Called from main context */
/* Finish construction of a source created with pa_source_new(): verify
 * the implementor filled in the mandatory fields, fix up volume-related
 * flags, inherit volumes from the master for volume-sharing (filter)
 * sources, move the source to IDLE and announce it via a subscription
 * event and the SOURCE_PUT hook. */
void pa_source_put(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SOURCE_INIT);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || s->output_from_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_source_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the source. */
    pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sources that use volume
     * sharing. When the master source supports decibel volume, it would be good
     * to have the flag also in the filter source, but currently we don't do that
     * so that the flags of the filter source never change when it's moved from
     * a master source to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sources when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the source implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the source. */
    if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        pa_source_enable_decibel_volume(s, TRUE);

    /* If the source implementor support DB volumes by itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
        enable_flat_volume(s, TRUE);

    if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
        pa_source *root_source = pa_source_get_master(s);

        pa_assert(PA_LIKELY(root_source));

        /* A volume-sharing source mirrors the master's volumes, remapped
         * to its own channel map */
        s->reference_volume = root_source->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);

        s->real_volume = root_source->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    /* Re-sync the IO-thread shadow copies with the (possibly updated)
     * main-thread values */
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));

    pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
}
602
/* Called from main context */
/* Disconnect the source from the core: fire the UNLINK hook, drop it
 * from the name registry and the core/card idxsets, kill all remaining
 * outputs, move it to UNLINKED state and fire the removal events.
 * Idempotent enough to be called from source_free() as well. */
void pa_source_unlink(pa_source *s) {
    pa_bool_t linked;
    pa_source_output *o, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* See pa_sink_unlink() for a couple of comments how this function
     * works. */

    linked = PA_SOURCE_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);

    if (s->state != PA_SOURCE_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sources, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sources, s, NULL);

    /* Kill every remaining output; 'j' guards against an output that
     * refuses to go away, which would otherwise loop forever */
    while ((o = pa_idxset_first(s->outputs, NULL))) {
        pa_assert(o != j);
        pa_source_output_kill(o);
        j = o;
    }

    if (linked)
        source_set_state(s, PA_SOURCE_UNLINKED);
    else
        s->state = PA_SOURCE_UNLINKED;

    reset_callbacks(s);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
    }
}
644
/* Called from main context */
/* Destructor installed in parent.parent.free; runs when the last
 * reference is dropped. Unlinks the source first if needed, then
 * releases all owned resources. */
static void source_free(pa_object *o) {
    pa_source_output *so;
    pa_source *s = PA_SOURCE(o);

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_source_refcnt(s) == 0);

    if (PA_SOURCE_IS_LINKED(s->state))
        pa_source_unlink(s);

    pa_log_info("Freeing source %u \"%s\"", s->index, s->name);

    pa_idxset_free(s->outputs, NULL, NULL);

    /* Drop the references the IO-thread hashmap still holds on outputs */
    while ((so = pa_hashmap_steal_first(s->thread_info.outputs)))
        pa_source_output_unref(so);

    pa_hashmap_free(s->thread_info.outputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports)
        pa_device_port_hashmap_free(s->ports);

    pa_xfree(s);
}
680
681 /* Called from main context, and not while the IO thread is active, please */
682 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
683 pa_source_assert_ref(s);
684 pa_assert_ctl_context();
685
686 s->asyncmsgq = q;
687 }
688
689 /* Called from main context, and not while the IO thread is active, please */
690 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
691 pa_source_assert_ref(s);
692 pa_assert_ctl_context();
693
694 if (mask == 0)
695 return;
696
697 /* For now, allow only a minimal set of flags to be changed. */
698 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
699
700 s->flags = (s->flags & ~mask) | (value & mask);
701 }
702
703 /* Called from IO context, or before _put() from main context */
704 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
705 pa_source_assert_ref(s);
706 pa_source_assert_io_context(s);
707
708 s->thread_info.rtpoll = p;
709 }
710
711 /* Called from main context */
712 int pa_source_update_status(pa_source*s) {
713 pa_source_assert_ref(s);
714 pa_assert_ctl_context();
715 pa_assert(PA_SOURCE_IS_LINKED(s->state));
716
717 if (s->state == PA_SOURCE_SUSPENDED)
718 return 0;
719
720 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
721 }
722
723 /* Called from any context - must be threadsafe */
724 void pa_source_set_mixer_dirty(pa_source *s, pa_bool_t is_dirty)
725 {
726 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
727 }
728
/* Called from main context */
/* Add or remove 'cause' from the source's suspend-cause bitmask and
 * switch between SUSPENDED and RUNNING/IDLE accordingly. On resume from
 * a session suspend, a pending "mixer dirty" flag triggers re-applying
 * the port or volume/mute settings to the hardware. Returns 0 or a
 * negative error code. Monitor sources only accept PA_SUSPEND_PASSTHROUGH. */
int pa_source_suspend(pa_source *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
        return -PA_ERR_NOTSUPPORTED;

    if (suspend)
        s->suspend_cause |= cause;
    else
        s->suspend_cause &= ~cause;

    if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
        /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
           it'll be handled just fine. */
        pa_source_set_mixer_dirty(s, FALSE);
        pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
        if (s->active_port && s->set_port) {
            /* With deferred volume the port switch must run in the IO
             * thread, so send it as a synchronous message */
            if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
                struct source_message_set_port msg = { .port = s->active_port, .ret = 0 };
                pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
            }
            else
                s->set_port(s, s->active_port);
        }
        else {
            /* No port to restore; push mute and volume to the hardware */
            if (s->set_mute)
                s->set_mute(s);
            if (s->set_volume)
                s->set_volume(s);
        }
    }

    /* No state transition needed if we are already on the right side of
     * the suspended/not-suspended divide */
    if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return source_set_state(s, PA_SOURCE_SUSPENDED);
    else
        return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
}
775
776 /* Called from main context */
777 int pa_source_sync_suspend(pa_source *s) {
778 pa_sink_state_t state;
779
780 pa_source_assert_ref(s);
781 pa_assert_ctl_context();
782 pa_assert(PA_SOURCE_IS_LINKED(s->state));
783 pa_assert(s->monitor_of);
784
785 state = pa_sink_get_state(s->monitor_of);
786
787 if (state == PA_SINK_SUSPENDED)
788 return source_set_state(s, PA_SOURCE_SUSPENDED);
789
790 pa_assert(PA_SINK_IS_OPENED(state));
791
792 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
793 }
794
795 /* Called from main context */
796 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
797 pa_source_output *o, *n;
798 uint32_t idx;
799
800 pa_source_assert_ref(s);
801 pa_assert_ctl_context();
802 pa_assert(PA_SOURCE_IS_LINKED(s->state));
803
804 if (!q)
805 q = pa_queue_new();
806
807 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
808 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
809
810 pa_source_output_ref(o);
811
812 if (pa_source_output_start_move(o) >= 0)
813 pa_queue_push(q, o);
814 else
815 pa_source_output_unref(o);
816 }
817
818 return q;
819 }
820
821 /* Called from main context */
822 void pa_source_move_all_finish(pa_source *s, pa_queue *q, pa_bool_t save) {
823 pa_source_output *o;
824
825 pa_source_assert_ref(s);
826 pa_assert_ctl_context();
827 pa_assert(PA_SOURCE_IS_LINKED(s->state));
828 pa_assert(q);
829
830 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
831 if (pa_source_output_finish_move(o, s, save) < 0)
832 pa_source_output_fail_move(o);
833
834 pa_source_output_unref(o);
835 }
836
837 pa_queue_free(q, NULL);
838 }
839
840 /* Called from main context */
841 void pa_source_move_all_fail(pa_queue *q) {
842 pa_source_output *o;
843
844 pa_assert_ctl_context();
845 pa_assert(q);
846
847 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
848 pa_source_output_fail_move(o);
849 pa_source_output_unref(o);
850 }
851
852 pa_queue_free(q, NULL);
853 }
854
855 /* Called from IO thread context */
856 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
857 pa_source_output *o;
858 void *state = NULL;
859
860 pa_source_assert_ref(s);
861 pa_source_assert_io_context(s);
862 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
863
864 if (nbytes <= 0)
865 return;
866
867 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
868 return;
869
870 pa_log_debug("Processing rewind...");
871
872 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
873 pa_source_output_assert_ref(o);
874 pa_source_output_process_rewind(o, nbytes);
875 }
876 }
877
/* Called from IO thread context */
/* Deliver a chunk of captured audio to every attached output that is
 * not in direct-on-input mode. If software mute or a non-unity soft
 * volume is active, the chunk is copied once, silenced or attenuated,
 * and the adjusted copy is pushed instead; otherwise the original chunk
 * is pushed as-is. Dropped entirely while suspended. */
void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
    pa_assert(chunk);

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return;

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
        /* Work on a writable private copy so the caller's chunk stays
         * untouched */
        pa_memchunk vchunk = *chunk;

        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
        else
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            /* direct-on-input outputs are fed via pa_source_post_direct() */
            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, &vchunk);
        }

        pa_memblock_unref(vchunk.memblock);
    } else {

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, chunk);
        }
    }
}
920
/* Called from IO thread context */
/* Deliver a chunk to a single direct-on-input output, applying the same
 * soft mute / soft volume handling as pa_source_post(). Dropped while
 * suspended. */
void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
    pa_source_output_assert_ref(o);
    pa_assert(o->thread_info.direct_on_input);
    pa_assert(chunk);

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return;

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
        /* Work on a writable private copy so the caller's chunk stays
         * untouched */
        pa_memchunk vchunk = *chunk;

        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
        else
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        pa_source_output_push(o, &vchunk);

        pa_memblock_unref(vchunk.memblock);
    } else
        pa_source_output_push(o, chunk);
}
950
951 /* Called from main thread */
952 pa_bool_t pa_source_update_rate(pa_source *s, uint32_t rate, pa_bool_t passthrough)
953 {
954 if (s->update_rate) {
955 uint32_t desired_rate = rate;
956 uint32_t default_rate = s->default_sample_rate;
957 uint32_t alternate_rate = s->alternate_sample_rate;
958 uint32_t idx;
959 pa_source_output *o;
960 pa_bool_t use_alternate = FALSE;
961
962 if (PA_UNLIKELY(default_rate == alternate_rate)) {
963 pa_log_warn("Default and alternate sample rates are the same.");
964 return FALSE;
965 }
966
967 if (PA_SOURCE_IS_RUNNING(s->state)) {
968 pa_log_info("Cannot update rate, SOURCE_IS_RUNNING, will keep using %u Hz",
969 s->sample_spec.rate);
970 return FALSE;
971 }
972
973 if (PA_UNLIKELY (desired_rate < 8000 ||
974 desired_rate > PA_RATE_MAX))
975 return FALSE;
976
977 if (!passthrough) {
978 pa_assert(default_rate % 4000 || default_rate % 11025);
979 pa_assert(alternate_rate % 4000 || alternate_rate % 11025);
980
981 if (default_rate % 4000) {
982 /* default is a 11025 multiple */
983 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
984 use_alternate=TRUE;
985 } else {
986 /* default is 4000 multiple */
987 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
988 use_alternate=TRUE;
989 }
990
991 if (use_alternate)
992 desired_rate = alternate_rate;
993 else
994 desired_rate = default_rate;
995 } else {
996 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
997 }
998
999 if (!passthrough && pa_source_used_by(s) > 0)
1000 return FALSE;
1001
1002 pa_source_suspend(s, TRUE, PA_SUSPEND_IDLE); /* needed before rate update, will be resumed automatically */
1003
1004 if (s->update_rate(s, desired_rate) == TRUE) {
1005 pa_log_info("Changed sampling rate successfully ");
1006
1007 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1008 if (o->state == PA_SOURCE_OUTPUT_CORKED)
1009 pa_source_output_update_rate(o);
1010 }
1011 return TRUE;
1012 }
1013 }
1014 return FALSE;
1015 }
1016
/* Called from main thread */
/* Report the current record latency of the source in usec, including the
 * user-set latency offset. Returns 0 for suspended sources and for sources
 * that don't advertise latency reporting. */
pa_usec_t pa_source_get_latency(pa_source *s) {
    pa_usec_t usec;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* A suspended source has no meaningful latency. */
    if (s->state == PA_SOURCE_SUSPENDED)
        return 0;

    /* The source can't report a latency at all without PA_SOURCE_LATENCY. */
    if (!(s->flags & PA_SOURCE_LATENCY))
        return 0;

    /* Query the device latency synchronously from the IO thread. */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);

    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->latency_offset <= (int64_t) usec)
        usec += s->latency_offset;
    else
        usec = 0;

    return usec;
}
1042
/* Called from IO thread */
/* Same as pa_source_get_latency() but callable from the IO thread: the
 * latency query is dispatched directly through process_msg() instead of a
 * synchronous asyncmsgq round trip. */
pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SOURCE_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    /* NOTE(review): pa_usec_t is unsigned, so this actually returns
     * (pa_usec_t) -1 (all-ones) as the failure indicator. */
    if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->thread_info.latency_offset <= (int64_t) usec)
        usec += s->thread_info.latency_offset;
    else
        usec = 0;

    return usec;
}
1076
1077 /* Called from the main thread (and also from the IO thread while the main
1078 * thread is waiting).
1079 *
1080 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
1081 * set. Instead, flat volume mode is detected by checking whether the root source
1082 * has the flag set. */
1083 pa_bool_t pa_source_flat_volume_enabled(pa_source *s) {
1084 pa_source_assert_ref(s);
1085
1086 s = pa_source_get_master(s);
1087
1088 if (PA_LIKELY(s))
1089 return (s->flags & PA_SOURCE_FLAT_VOLUME);
1090 else
1091 return FALSE;
1092 }
1093
1094 /* Called from the main thread (and also from the IO thread while the main
1095 * thread is waiting). */
1096 pa_source *pa_source_get_master(pa_source *s) {
1097 pa_source_assert_ref(s);
1098
1099 while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1100 if (PA_UNLIKELY(!s->output_from_master))
1101 return NULL;
1102
1103 s = s->output_from_master->source;
1104 }
1105
1106 return s;
1107 }
1108
1109 /* Called from main context */
1110 pa_bool_t pa_source_is_passthrough(pa_source *s) {
1111
1112 pa_source_assert_ref(s);
1113
1114 /* NB Currently only monitor sources support passthrough mode */
1115 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
1116 }
1117
/* Called from main context */
/* Save the current volume state (so pa_source_leave_passthrough() can
 * restore it) and force the volume to an effective 0 dB for passthrough. */
void pa_source_enter_passthrough(pa_source *s) {
    pa_cvolume volume;

    /* set the volume to NORM */
    s->saved_volume = *pa_source_get_volume(s, TRUE);
    s->saved_save_volume = s->save_volume;

    /* Clamp to base_volume so the hw amplification stays at/below 0 dB. */
    pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
    pa_source_set_volume(s, &volume, TRUE, FALSE);
}
1129
1130 /* Called from main context */
1131 void pa_source_leave_passthrough(pa_source *s) {
1132 /* Restore source volume to what it was before we entered passthrough mode */
1133 pa_source_set_volume(s, &s->saved_volume, TRUE, s->saved_save_volume);
1134
1135 pa_cvolume_init(&s->saved_volume);
1136 s->saved_save_volume = FALSE;
1137 }
1138
/* Called from main context. */
static void compute_reference_ratio(pa_source_output *o) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(o);
    pa_assert(pa_source_flat_volume_enabled(o->source));

    /*
     * Calculates the reference ratio from the source's reference
     * volume. This basically calculates:
     *
     * o->reference_ratio = o->volume / o->source->reference_volume
     */

    /* Bring the source volume into the output's channel map first. */
    remapped = o->source->reference_volume;
    pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);

    o->reference_ratio.channels = o->sample_spec.channels;

    for (c = 0; c < o->sample_spec.channels; c++) {

        /* We don't update when the source volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                    o->reference_ratio.values[c],
                    remapped.values[c]) == o->volume.values[c])
            continue;

        o->reference_ratio.values[c] = pa_sw_volume_divide(
                o->volume.values[c],
                remapped.values[c]);
    }
}
1176
1177 /* Called from main context. Only called for the root source in volume sharing
1178 * cases, except for internal recursive calls. */
1179 static void compute_reference_ratios(pa_source *s) {
1180 uint32_t idx;
1181 pa_source_output *o;
1182
1183 pa_source_assert_ref(s);
1184 pa_assert_ctl_context();
1185 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1186 pa_assert(pa_source_flat_volume_enabled(s));
1187
1188 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1189 compute_reference_ratio(o);
1190
1191 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1192 compute_reference_ratios(o->destination_source);
1193 }
1194 }
1195
/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Derive each output's real_ratio and soft_volume from the source's
 * real_volume. Volume-sharing filter sources are handled specially (0 dB
 * real ratio) and recursed into. */
static void compute_real_ratios(pa_source *s) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin source uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
            o->soft_volume = o->volume_factor;

            compute_real_ratios(o->destination_source);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Bring the source's real volume into the output's channel map. */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);

        o->real_ratio.channels = o->sample_spec.channels;
        o->soft_volume.channels = o->sample_spec.channels;

        for (c = 0; c < o->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave o->real_ratio untouched */
                o->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        o->real_ratio.values[c],
                        remapped.values[c]) != o->volume.values[c])

                o->real_ratio.values[c] = pa_sw_volume_divide(
                        o->volume.values[c],
                        remapped.values[c]);

            o->soft_volume.values[c] = pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    o->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1262
/* Remap volume *v from channel map 'from' to 'to' with as little information
 * loss as possible; 'template' is a candidate result (in 'to') that is reused
 * when it is consistent with v. Returns v (modified in place). */
static pa_cvolume *cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from source output to source volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the source to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the source. */

    /* Identical maps: nothing to do. */
    if (pa_channel_map_equal(from, to))
        return v;

    /* If remapping the template back to 'from' reproduces v, the template
     * itself is a faithful representation of v in 'to'. */
    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Fallback: flatten to a uniform volume at v's maximum. */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
1301
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Merge into *max_volume (given in *channel_map) the per-channel maximum of
 * all output volumes attached to s, recursing through volume-sharing filter
 * sources. */
static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            get_maximum_output_volume(o->destination_source, max_volume, channel_map);

            /* Ignore this output. The origin source uses volume sharing, so this
             * output's volume will be set to be equal to the root source's real
             * volume. Obviously this output's current volume must not then
             * affect what the root source's real volume will be. */
            continue;
        }

        remapped = o->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
    }
}
1331
1332 /* Called from main thread. Only called for the root source in volume sharing
1333 * cases, except for internal recursive calls. */
1334 static pa_bool_t has_outputs(pa_source *s) {
1335 pa_source_output *o;
1336 uint32_t idx;
1337
1338 pa_source_assert_ref(s);
1339
1340 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1341 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1342 return TRUE;
1343 }
1344
1345 return FALSE;
1346 }
1347
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Store *new_volume (given in *channel_map) as s->real_volume and push it
 * down into all volume-sharing filter sources, notifying their outputs when
 * their volume changes as a result. */
static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    /* Adopt the new volume, remapped into this source's channel map. */
    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_source_flat_volume_enabled(s)) {
                pa_cvolume old_volume = o->volume;

                /* Follow the root source's real volume. */
                o->volume = *new_volume;
                pa_cvolume_remap(&o->volume, channel_map, &o->channel_map);
                compute_reference_ratio(o);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &o->volume)) {
                    if (o->volume_changed)
                        o->volume_changed(o);

                    pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
                }
            }

            update_real_volume(o->destination_source, new_volume, channel_map);
        }
    }
}
1384
/* Called from main thread. Only called for the root source in shared volume
 * cases. */
/* Set s->real_volume to the per-channel maximum of all connected output
 * volumes (flat volume mode), then refresh the outputs' real ratios. */
static void compute_real_volume(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_outputs(s)) {
        /* In the special case that we have no source outputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all outputs
     * connected to this source */
    get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all outputs
     * connected to this source */
    compute_real_ratios(s);
}
1415
/* Called from main thread. Only called for the root source in shared volume
 * cases, except for internal recursive calls. */
/* After the source's reference volume changed, recompute every output's
 * volume from its (fixed) reference ratio and notify about changes. */
static void propagate_reference_volume(pa_source *s) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    /* This is called whenever the source volume changes that is not
     * caused by a source output volume change. We need to fix up the
     * source output volumes accordingly */

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume old_volume;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(o->destination_source);

            /* Since the origin source uses volume sharing, this output's volume
             * needs to be updated to match the root source's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = o->volume;

        /* This basically calculates:
         *
         * o->volume := o->reference_volume * o->reference_ratio */

        o->volume = s->reference_volume;
        pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
        pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &o->volume)) {

            if (o->volume_changed)
                o->volume_changed(o);

            pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
        }
    }
}
1463
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
static pa_bool_t update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
    pa_cvolume volume;
    pa_bool_t reference_volume_changed;
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    /* Remap the requested volume into this source's channel map. */
    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* A no-op update keeps the previous save flag unless 'save' forces it. */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        /* If the root source's volume doesn't change, then there can't be any
         * changes in the other source in the source tree either.
         *
         * It's probably theoretically possible that even if the root source's
         * volume changes slightly, some filter source doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root source volume to the sources connected to the
         * intermediate source that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here FALSE always if
         * reference_volume_changed is FALSE. */
        return FALSE;

    /* Propagate the (possibly unchanged) reference volume into the
     * volume-sharing filter sources below us. */
    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(o->destination_source, v, channel_map, FALSE);
    }

    return TRUE;
}
1511
/* Called from main thread */
/* Set the source's volume. With a non-NULL 'volume' the new reference volume
 * is applied (to the root source in volume-sharing setups); with NULL the
 * reference/real volumes are re-synchronized from the stream volumes (flat
 * volume mode only). 'send_msg' controls whether the IO thread is notified;
 * 'save' marks the volume for persistence. */
void pa_source_set_volume(
        pa_source *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume new_reference_volume;
    pa_source *root_source;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_source_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume in PASSTHROUGH mode ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");
        return;
    }

    /* In case of volume sharing, the volume is set for the root source first,
     * from which it's then propagated to the sharing sources. */
    root_source = pa_source_get_master(s);

    if (PA_UNLIKELY(!root_source))
        return;

    /* As a special exception we accept mono volumes on all sources --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current reference volume uniformly. */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);

        if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
            if (pa_source_flat_volume_enabled(root_source)) {
                /* OK, propagate this volume change back to the outputs */
                propagate_reference_volume(root_source);

                /* And now recalculate the real volume */
                compute_real_volume(root_source);
            } else
                update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
        }

    } else {
        /* If volume is NULL we synchronize the source's real and
         * reference volumes with the stream volumes. */

        pa_assert(pa_source_flat_volume_enabled(root_source));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_source);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_source->real_volume);
        /* If the source and it's root don't have the same number of channels, we need to remap */
        if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
            pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
        update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_source);
    }

    if (root_source->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_source->soft_volume */

        pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
        if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
            root_source->set_volume(root_source);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_source->soft_volume = root_source->real_volume;

    /* This tells the source that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
1606
/* Called from the io thread if sync volume is used, otherwise from the main thread.
 * Only to be called by source implementor */
/* Set the software attenuation applied to captured samples; NULL resets it
 * to 0 dB (no attenuation). */
void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {

    pa_source_assert_ref(s);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* Deferred-volume sources call this from the IO thread. */
    if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
        pa_source_assert_io_context(s);
    else
        pa_assert_ctl_context();

    if (!volume)
        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    else
        s->soft_volume = *volume;

    /* Linked, non-deferred sources hand the value to the IO thread via a
     * synchronous message; otherwise thread_info can be written directly. */
    if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
    else
        s->thread_info.soft_volume = s->soft_volume;
}
1629
/* Called from the main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* React to an externally-caused hardware volume change: adopt the new real
 * volume as reference volume and rebuild the stream volumes from it. */
static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
    }

    if (pa_source_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(o, s->outputs, idx) {
            pa_cvolume old_volume = o->volume;

            /* 2. Since the source's reference and real volumes are equal
             * now our ratios should be too. */
            o->reference_ratio = o->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * o->volume = s->reference_volume * o->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            o->volume = s->reference_volume;
            pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
            pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &o->volume)) {

                if (o->volume_changed)
                    o->volume_changed(o);

                pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
            }

            if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(o->destination_source, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = TRUE;
}
1695
1696 /* Called from io thread */
1697 void pa_source_update_volume_and_mute(pa_source *s) {
1698 pa_assert(s);
1699 pa_source_assert_io_context(s);
1700
1701 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1702 }
1703
/* Called from main thread */
/* Return the source's reference volume, optionally refreshing the hardware
 * volume first (when force_refresh is set or the source requests refreshes). */
const pa_cvolume *pa_source_get_volume(pa_source *s, pa_bool_t force_refresh) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Non-deferred sources read the hw volume directly... */
        if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)
            s->get_volume(s);

        /* ...and the IO thread is asked for its copy in any case. */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
1728
1729 /* Called from main thread. In volume sharing cases, only the root source may
1730 * call this. */
1731 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1732 pa_cvolume old_real_volume;
1733
1734 pa_source_assert_ref(s);
1735 pa_assert_ctl_context();
1736 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1737 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1738
1739 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1740
1741 old_real_volume = s->real_volume;
1742 update_real_volume(s, new_real_volume, &s->channel_map);
1743 propagate_real_volume(s, &old_real_volume);
1744 }
1745
1746 /* Called from main thread */
1747 void pa_source_set_mute(pa_source *s, pa_bool_t mute, pa_bool_t save) {
1748 pa_bool_t old_muted;
1749
1750 pa_source_assert_ref(s);
1751 pa_assert_ctl_context();
1752 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1753
1754 old_muted = s->muted;
1755 s->muted = mute;
1756 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1757
1758 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute)
1759 s->set_mute(s);
1760
1761 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1762
1763 if (old_muted != s->muted)
1764 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1765 }
1766
/* Called from main thread */
/* Return the current mute state, optionally re-reading it from the hardware
 * first. An externally-changed mute state is adopted, saved and announced. */
pa_bool_t pa_source_get_mute(pa_source *s, pa_bool_t force_refresh) {

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_mute)
            s->get_mute(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
1794
1795 /* Called from main thread */
1796 void pa_source_mute_changed(pa_source *s, pa_bool_t new_muted) {
1797 pa_source_assert_ref(s);
1798 pa_assert_ctl_context();
1799 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1800
1801 /* The source implementor may call this if the mute state changed to make sure everyone is notified */
1802
1803 if (s->muted == new_muted)
1804 return;
1805
1806 s->muted = new_muted;
1807 s->save_muted = TRUE;
1808
1809 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1810 }
1811
1812 /* Called from main thread */
1813 pa_bool_t pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1814 pa_source_assert_ref(s);
1815 pa_assert_ctl_context();
1816
1817 if (p)
1818 pa_proplist_update(s->proplist, mode, p);
1819
1820 if (PA_SOURCE_IS_LINKED(s->state)) {
1821 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1822 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1823 }
1824
1825 return TRUE;
1826 }
1827
1828 /* Called from main thread */
1829 /* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
1830 void pa_source_set_description(pa_source *s, const char *description) {
1831 const char *old;
1832 pa_source_assert_ref(s);
1833 pa_assert_ctl_context();
1834
1835 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1836 return;
1837
1838 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1839
1840 if (old && description && pa_streq(old, description))
1841 return;
1842
1843 if (description)
1844 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1845 else
1846 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1847
1848 if (PA_SOURCE_IS_LINKED(s->state)) {
1849 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1850 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1851 }
1852 }
1853
1854 /* Called from main thread */
1855 unsigned pa_source_linked_by(pa_source *s) {
1856 pa_source_assert_ref(s);
1857 pa_assert_ctl_context();
1858 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1859
1860 return pa_idxset_size(s->outputs);
1861 }
1862
1863 /* Called from main thread */
1864 unsigned pa_source_used_by(pa_source *s) {
1865 unsigned ret;
1866
1867 pa_source_assert_ref(s);
1868 pa_assert_ctl_context();
1869 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1870
1871 ret = pa_idxset_size(s->outputs);
1872 pa_assert(ret >= s->n_corked);
1873
1874 return ret - s->n_corked;
1875 }
1876
1877 /* Called from main thread */
1878 unsigned pa_source_check_suspend(pa_source *s) {
1879 unsigned ret;
1880 pa_source_output *o;
1881 uint32_t idx;
1882
1883 pa_source_assert_ref(s);
1884 pa_assert_ctl_context();
1885
1886 if (!PA_SOURCE_IS_LINKED(s->state))
1887 return 0;
1888
1889 ret = 0;
1890
1891 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1892 pa_source_output_state_t st;
1893
1894 st = pa_source_output_get_state(o);
1895
1896 /* We do not assert here. It is perfectly valid for a source output to
1897 * be in the INIT state (i.e. created, marked done but not yet put)
1898 * and we should not care if it's unlinked as it won't contribute
1899 * towards our busy status.
1900 */
1901 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1902 continue;
1903
1904 if (st == PA_SOURCE_OUTPUT_CORKED)
1905 continue;
1906
1907 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1908 continue;
1909
1910 ret ++;
1911 }
1912
1913 return ret;
1914 }
1915
/* Called from the IO thread */
/* Copy each output's control-thread soft volume into its IO-thread
 * ("thread_info") copy, skipping outputs that are already in sync. */
static void sync_output_volumes_within_thread(pa_source *s) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
        if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
            continue;

        o->thread_info.soft_volume = o->soft_volume;
        //pa_source_output_request_rewind(o, 0, TRUE, FALSE, FALSE);
    }
}
1932
1933 /* Called from the IO thread. Only called for the root source in volume sharing
1934 * cases, except for internal recursive calls. */
1935 static void set_shared_volume_within_thread(pa_source *s) {
1936 pa_source_output *o;
1937 void *state = NULL;
1938
1939 pa_source_assert_ref(s);
1940
1941 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1942
1943 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1944 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1945 set_shared_volume_within_thread(o->destination_source);
1946 }
1947 }
1948
/* Called from IO thread, except when it is not */
/* Central message handler of the source: dispatches control messages sent
 * (mostly) from the main thread to the IO thread. Returns 0 on success,
 * -1 for unknown messages or for GET_LATENCY when not overridden. */
int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_source *s = PA_SOURCE(object);
    pa_source_assert_ref(s);

    switch ((pa_source_message_t) code) {

        case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            /* The IO-thread hashmap holds its own reference; it is dropped
             * again in REMOVE_OUTPUT. */
            pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));

            if (o->direct_on_input) {
                o->thread_info.direct_on_input = o->direct_on_input;
                pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
            }

            pa_assert(!o->thread_info.attached);
            o->thread_info.attached = TRUE;

            if (o->attach)
                o->attach(o);

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
                pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);

            /* We don't just invalidate the requested latency here,
             * because if we are in a move we might need to fix up the
             * requested latency. */
            pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->detach)
                o->detach(o);

            pa_assert(o->thread_info.attached);
            o->thread_info.attached = FALSE;

            if (o->thread_info.direct_on_input) {
                pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
                o->thread_info.direct_on_input = NULL;
            }

            /* Drop the reference taken in ADD_OUTPUT. */
            if (pa_hashmap_remove(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index)))
                pa_source_output_unref(o);

            pa_source_invalidate_requested_latency(s, TRUE);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
            /* Volume sharing: always applied starting from the root source. */
            pa_source *root_source = pa_source_get_master(s);

            if (PA_LIKELY(root_source))
                set_shared_volume_within_thread(root_source);

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:

            /* Deferred volume: hand the new HW volume to the driver and queue
             * the timed change, then fall through to the soft-volume update. */
            if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
                s->set_volume(s);
                pa_source_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
            sync_output_volumes_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_source_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case source implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            return 0;

        case PA_SOURCE_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
            }

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SOURCE_MESSAGE_GET_MUTE:

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SOURCE_MESSAGE_SET_STATE: {

            /* A transition into or out of SUSPENDED must be propagated to the
             * outputs via their suspend_within_thread() callbacks. */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (suspend_change) {
                pa_source_output *o;
                void *state = NULL;

                while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
                    if (o->suspend_within_thread)
                        o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
            }

            return 0;
        }

        case PA_SOURCE_MESSAGE_DETACH:

            /* Detach all streams */
            pa_source_detach_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_source_attach_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_source_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
            /* userdata points at a two-element array: { min, max }. */
            pa_usec_t *r = userdata;

            pa_source_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:

            /* The value travels in the message's offset field, not userdata. */
            pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SOURCE_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SOURCE_MESSAGE_SET_MAX_REWIND:

            pa_source_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SOURCE_MESSAGE_GET_LATENCY:

            /* Monitor sources report no latency of their own. */
            if (s->monitor_of) {
                *((pa_usec_t*) userdata) = 0;
                return 0;
            }

            /* Implementors need to overwrite this implementation! */
            return -1;

        case PA_SOURCE_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct source_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SOURCE_IS_LINKED(s->state))
                return 0;

            pa_source_get_volume(s, TRUE);
            pa_source_get_mute(s, TRUE);
            return 0;

        case PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET:
            /* Value is carried in the offset field. */
            s->thread_info.latency_offset = offset;
            return 0;

        case PA_SOURCE_MESSAGE_MAX:
            ;
    }

    return -1;
}
2201
2202 /* Called from main thread */
2203 int pa_source_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2204 pa_source *source;
2205 uint32_t idx;
2206 int ret = 0;
2207
2208 pa_core_assert_ref(c);
2209 pa_assert_ctl_context();
2210 pa_assert(cause != 0);
2211
2212 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
2213 int r;
2214
2215 if (source->monitor_of)
2216 continue;
2217
2218 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2219 ret = r;
2220 }
2221
2222 return ret;
2223 }
2224
2225 /* Called from main thread */
2226 void pa_source_detach(pa_source *s) {
2227 pa_source_assert_ref(s);
2228 pa_assert_ctl_context();
2229 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2230
2231 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2232 }
2233
2234 /* Called from main thread */
2235 void pa_source_attach(pa_source *s) {
2236 pa_source_assert_ref(s);
2237 pa_assert_ctl_context();
2238 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2239
2240 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2241 }
2242
2243 /* Called from IO thread */
2244 void pa_source_detach_within_thread(pa_source *s) {
2245 pa_source_output *o;
2246 void *state = NULL;
2247
2248 pa_source_assert_ref(s);
2249 pa_source_assert_io_context(s);
2250 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2251
2252 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2253 if (o->detach)
2254 o->detach(o);
2255 }
2256
2257 /* Called from IO thread */
2258 void pa_source_attach_within_thread(pa_source *s) {
2259 pa_source_output *o;
2260 void *state = NULL;
2261
2262 pa_source_assert_ref(s);
2263 pa_source_assert_io_context(s);
2264 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2265
2266 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2267 if (o->attach)
2268 o->attach(o);
2269 }
2270
2271 /* Called from IO thread */
2272 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2273 pa_usec_t result = (pa_usec_t) -1;
2274 pa_source_output *o;
2275 void *state = NULL;
2276
2277 pa_source_assert_ref(s);
2278 pa_source_assert_io_context(s);
2279
2280 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2281 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2282
2283 if (s->thread_info.requested_latency_valid)
2284 return s->thread_info.requested_latency;
2285
2286 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2287 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2288 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2289 result = o->thread_info.requested_source_latency;
2290
2291 if (result != (pa_usec_t) -1)
2292 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2293
2294 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2295 /* Only cache this if we are fully set up */
2296 s->thread_info.requested_latency = result;
2297 s->thread_info.requested_latency_valid = TRUE;
2298 }
2299
2300 return result;
2301 }
2302
2303 /* Called from main thread */
2304 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2305 pa_usec_t usec = 0;
2306
2307 pa_source_assert_ref(s);
2308 pa_assert_ctl_context();
2309 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2310
2311 if (s->state == PA_SOURCE_SUSPENDED)
2312 return 0;
2313
2314 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2315
2316 return usec;
2317 }
2318
2319 /* Called from IO thread */
2320 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2321 pa_source_output *o;
2322 void *state = NULL;
2323
2324 pa_source_assert_ref(s);
2325 pa_source_assert_io_context(s);
2326
2327 if (max_rewind == s->thread_info.max_rewind)
2328 return;
2329
2330 s->thread_info.max_rewind = max_rewind;
2331
2332 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2333 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2334 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2335 }
2336
2337 /* Called from main thread */
2338 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2339 pa_source_assert_ref(s);
2340 pa_assert_ctl_context();
2341
2342 if (PA_SOURCE_IS_LINKED(s->state))
2343 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2344 else
2345 pa_source_set_max_rewind_within_thread(s, max_rewind);
2346 }
2347
2348 /* Called from IO thread */
2349 void pa_source_invalidate_requested_latency(pa_source *s, pa_bool_t dynamic) {
2350 pa_source_output *o;
2351 void *state = NULL;
2352
2353 pa_source_assert_ref(s);
2354 pa_source_assert_io_context(s);
2355
2356 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2357 s->thread_info.requested_latency_valid = FALSE;
2358 else if (dynamic)
2359 return;
2360
2361 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2362
2363 if (s->update_requested_latency)
2364 s->update_requested_latency(s);
2365
2366 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2367 if (o->update_source_requested_latency)
2368 o->update_source_requested_latency(o);
2369 }
2370
2371 if (s->monitor_of)
2372 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2373 }
2374
2375 /* Called from main thread */
2376 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2377 pa_source_assert_ref(s);
2378 pa_assert_ctl_context();
2379
2380 /* min_latency == 0: no limit
2381 * min_latency anything else: specified limit
2382 *
2383 * Similar for max_latency */
2384
2385 if (min_latency < ABSOLUTE_MIN_LATENCY)
2386 min_latency = ABSOLUTE_MIN_LATENCY;
2387
2388 if (max_latency <= 0 ||
2389 max_latency > ABSOLUTE_MAX_LATENCY)
2390 max_latency = ABSOLUTE_MAX_LATENCY;
2391
2392 pa_assert(min_latency <= max_latency);
2393
2394 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2395 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2396 max_latency == ABSOLUTE_MAX_LATENCY) ||
2397 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2398
2399 if (PA_SOURCE_IS_LINKED(s->state)) {
2400 pa_usec_t r[2];
2401
2402 r[0] = min_latency;
2403 r[1] = max_latency;
2404
2405 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2406 } else
2407 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
2408 }
2409
2410 /* Called from main thread */
2411 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2412 pa_source_assert_ref(s);
2413 pa_assert_ctl_context();
2414 pa_assert(min_latency);
2415 pa_assert(max_latency);
2416
2417 if (PA_SOURCE_IS_LINKED(s->state)) {
2418 pa_usec_t r[2] = { 0, 0 };
2419
2420 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2421
2422 *min_latency = r[0];
2423 *max_latency = r[1];
2424 } else {
2425 *min_latency = s->thread_info.min_latency;
2426 *max_latency = s->thread_info.max_latency;
2427 }
2428 }
2429
2430 /* Called from IO thread, and from main thread before pa_source_put() is called */
2431 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2432 pa_source_assert_ref(s);
2433 pa_source_assert_io_context(s);
2434
2435 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2436 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2437 pa_assert(min_latency <= max_latency);
2438
2439 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2440 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2441 max_latency == ABSOLUTE_MAX_LATENCY) ||
2442 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2443 s->monitor_of);
2444
2445 if (s->thread_info.min_latency == min_latency &&
2446 s->thread_info.max_latency == max_latency)
2447 return;
2448
2449 s->thread_info.min_latency = min_latency;
2450 s->thread_info.max_latency = max_latency;
2451
2452 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2453 pa_source_output *o;
2454 void *state = NULL;
2455
2456 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2457 if (o->update_source_latency_range)
2458 o->update_source_latency_range(o);
2459 }
2460
2461 pa_source_invalidate_requested_latency(s, FALSE);
2462 }
2463
2464 /* Called from main thread, before the source is put */
2465 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2466 pa_source_assert_ref(s);
2467 pa_assert_ctl_context();
2468
2469 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2470 pa_assert(latency == 0);
2471 return;
2472 }
2473
2474 if (latency < ABSOLUTE_MIN_LATENCY)
2475 latency = ABSOLUTE_MIN_LATENCY;
2476
2477 if (latency > ABSOLUTE_MAX_LATENCY)
2478 latency = ABSOLUTE_MAX_LATENCY;
2479
2480 if (PA_SOURCE_IS_LINKED(s->state))
2481 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2482 else
2483 s->thread_info.fixed_latency = latency;
2484 }
2485
2486 /* Called from main thread */
2487 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2488 pa_usec_t latency;
2489
2490 pa_source_assert_ref(s);
2491 pa_assert_ctl_context();
2492
2493 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2494 return 0;
2495
2496 if (PA_SOURCE_IS_LINKED(s->state))
2497 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2498 else
2499 latency = s->thread_info.fixed_latency;
2500
2501 return latency;
2502 }
2503
2504 /* Called from IO thread */
2505 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2506 pa_source_assert_ref(s);
2507 pa_source_assert_io_context(s);
2508
2509 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2510 pa_assert(latency == 0);
2511 return;
2512 }
2513
2514 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2515 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2516
2517 if (s->thread_info.fixed_latency == latency)
2518 return;
2519
2520 s->thread_info.fixed_latency = latency;
2521
2522 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2523 pa_source_output *o;
2524 void *state = NULL;
2525
2526 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2527 if (o->update_source_fixed_latency)
2528 o->update_source_fixed_latency(o);
2529 }
2530
2531 pa_source_invalidate_requested_latency(s, FALSE);
2532 }
2533
2534 /* Called from main thread */
2535 void pa_source_set_latency_offset(pa_source *s, int64_t offset) {
2536 pa_source_assert_ref(s);
2537
2538 s->latency_offset = offset;
2539
2540 if (PA_SOURCE_IS_LINKED(s->state))
2541 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
2542 else
2543 s->thread_info.latency_offset = offset;
2544 }
2545
2546 /* Called from main thread */
2547 size_t pa_source_get_max_rewind(pa_source *s) {
2548 size_t r;
2549 pa_assert_ctl_context();
2550 pa_source_assert_ref(s);
2551
2552 if (!PA_SOURCE_IS_LINKED(s->state))
2553 return s->thread_info.max_rewind;
2554
2555 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2556
2557 return r;
2558 }
2559
2560 /* Called from main context */
2561 int pa_source_set_port(pa_source *s, const char *name, pa_bool_t save) {
2562 pa_device_port *port;
2563 int ret;
2564
2565 pa_source_assert_ref(s);
2566 pa_assert_ctl_context();
2567
2568 if (!s->set_port) {
2569 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2570 return -PA_ERR_NOTIMPLEMENTED;
2571 }
2572
2573 if (!s->ports || !name)
2574 return -PA_ERR_NOENTITY;
2575
2576 if (!(port = pa_hashmap_get(s->ports, name)))
2577 return -PA_ERR_NOENTITY;
2578
2579 if (s->active_port == port) {
2580 s->save_port = s->save_port || save;
2581 return 0;
2582 }
2583
2584 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2585 struct source_message_set_port msg = { .port = port, .ret = 0 };
2586 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2587 ret = msg.ret;
2588 }
2589 else
2590 ret = s->set_port(s, port);
2591
2592 if (ret < 0)
2593 return -PA_ERR_NOENTITY;
2594
2595 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2596
2597 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2598
2599 s->active_port = port;
2600 s->save_port = save;
2601
2602 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
2603
2604 return 0;
2605 }
2606
2607 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2608
2609 /* Called from the IO thread. */
2610 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2611 pa_source_volume_change *c;
2612 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2613 c = pa_xnew(pa_source_volume_change, 1);
2614
2615 PA_LLIST_INIT(pa_source_volume_change, c);
2616 c->at = 0;
2617 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2618 return c;
2619 }
2620
2621 /* Called from the IO thread. */
2622 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2623 pa_assert(c);
2624 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2625 pa_xfree(c);
2626 }
2627
/* Called from the IO thread. */
/* Queues the current requested HW volume as a timed change so that it is
 * written to the hardware when the samples recorded at the old volume have
 * been fully delivered. The queue is kept sorted by deadline; entries that
 * would apply later than this one are dropped as obsolete. */
void pa_source_volume_change_push(pa_source *s) {
    pa_source_volume_change *c = NULL;
    pa_source_volume_change *nc = NULL;
    uint32_t safety_margin = s->thread_info.volume_change_safety_margin;

    const char *direction = NULL;

    pa_assert(s);
    nc = pa_source_volume_change_new(s);

    /* NOTE: There is already more different volumes in pa_source that I can remember.
     * Adding one more volume for HW would get us rid of this, but I am trying
     * to survive with the ones we already have. */
    pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);

    /* Nothing pending and the HW volume is already what we want: drop it. */
    if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
        pa_log_debug("Volume not changing");
        pa_source_volume_change_free(nc);
        return;
    }

    /* Deadline: current latency from now, plus any configured extra delay. */
    nc->at = pa_source_get_latency_within_thread(s);
    nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    /* Walk the queue from the tail to find the insertion point; apply the
     * safety margin so increases happen a bit late and decreases a bit
     * early, avoiding audible glitches at the boundary. */
    if (s->thread_info.volume_changes_tail) {
        for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
            /* If volume is going up let's do it a bit late. If it is going
             * down let's do it a bit early. */
            if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
                if (nc->at + safety_margin > c->at) {
                    nc->at += safety_margin;
                    direction = "up";
                    break;
                }
            }
            else if (nc->at - safety_margin > c->at) {
                nc->at -= safety_margin;
                direction = "down";
                break;
            }
        }
    }

    if (c == NULL) {
        /* No suitable predecessor found: this change goes to the head. */
        if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
            nc->at += safety_margin;
            direction = "up";
        } else {
            nc->at -= safety_margin;
            direction = "down";
        }
        PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
    }
    else {
        PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
    }

    pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);

    /* We can ignore volume events that came earlier but should happen later than this. */
    PA_LLIST_FOREACH(c, nc->next) {
        pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
        pa_source_volume_change_free(c);
    }
    nc->next = NULL;
    s->thread_info.volume_changes_tail = nc;
}
2696
2697 /* Called from the IO thread. */
2698 static void pa_source_volume_change_flush(pa_source *s) {
2699 pa_source_volume_change *c = s->thread_info.volume_changes;
2700 pa_assert(s);
2701 s->thread_info.volume_changes = NULL;
2702 s->thread_info.volume_changes_tail = NULL;
2703 while (c) {
2704 pa_source_volume_change *next = c->next;
2705 pa_source_volume_change_free(c);
2706 c = next;
2707 }
2708 }
2709
/* Called from the IO thread. */
/* Applies all queued volume changes whose deadline has passed and writes the
 * resulting HW volume to the device. Returns TRUE if a volume was written.
 * If usec_to_next is non-NULL it receives the time until the next pending
 * change, or 0 if the queue is empty. */
pa_bool_t pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
    pa_usec_t now;
    pa_bool_t ret = FALSE;

    pa_assert(s);

    /* NOTE(review): this reads s->state (the main-thread copy) from the IO
     * thread rather than s->thread_info.state — presumably tolerable as a
     * shutdown guard, but worth confirming. */
    if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
        if (usec_to_next)
            *usec_to_next = 0;
        return ret;
    }

    pa_assert(s->write_volume);

    now = pa_rtclock_now();

    /* Pop every change that is already due; only the last one popped
     * determines the final current_hw_volume. */
    while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
        pa_source_volume_change *c = s->thread_info.volume_changes;
        PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
        pa_log_debug("Volume change to %d at %llu was written %llu usec late",
                     pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
        ret = TRUE;
        s->thread_info.current_hw_volume = c->hw_volume;
        pa_source_volume_change_free(c);
    }

    /* Write the accumulated result to the hardware in one go. */
    if (ret)
        s->write_volume(s);

    if (s->thread_info.volume_changes) {
        if (usec_to_next)
            *usec_to_next = s->thread_info.volume_changes->at - now;
        if (pa_log_ratelimit(PA_LOG_DEBUG))
            pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
    }
    else {
        if (usec_to_next)
            *usec_to_next = 0;
        /* Queue drained: the tail pointer must be reset as well. */
        s->thread_info.volume_changes_tail = NULL;
    }
    return ret;
}
2753
2754
2755 /* Called from the main thread */
2756 /* Gets the list of formats supported by the source. The members and idxset must
2757 * be freed by the caller. */
2758 pa_idxset* pa_source_get_formats(pa_source *s) {
2759 pa_idxset *ret;
2760
2761 pa_assert(s);
2762
2763 if (s->get_formats) {
2764 /* Source supports format query, all is good */
2765 ret = s->get_formats(s);
2766 } else {
2767 /* Source doesn't support format query, so assume it does PCM */
2768 pa_format_info *f = pa_format_info_new();
2769 f->encoding = PA_ENCODING_PCM;
2770
2771 ret = pa_idxset_new(NULL, NULL);
2772 pa_idxset_put(ret, f, NULL);
2773 }
2774
2775 return ret;
2776 }
2777
2778 /* Called from the main thread */
2779 /* Checks if the source can accept this format */
2780 pa_bool_t pa_source_check_format(pa_source *s, pa_format_info *f)
2781 {
2782 pa_idxset *formats = NULL;
2783 pa_bool_t ret = FALSE;
2784
2785 pa_assert(s);
2786 pa_assert(f);
2787
2788 formats = pa_source_get_formats(s);
2789
2790 if (formats) {
2791 pa_format_info *finfo_device;
2792 uint32_t i;
2793
2794 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2795 if (pa_format_info_is_compatible(finfo_device, f)) {
2796 ret = TRUE;
2797 break;
2798 }
2799 }
2800
2801 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2802 }
2803
2804 return ret;
2805 }
2806
2807 /* Called from the main thread */
2808 /* Calculates the intersection between formats supported by the source and
2809 * in_formats, and returns these, in the order of the source's formats. */
2810 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2811 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2812 pa_format_info *f_source, *f_in;
2813 uint32_t i, j;
2814
2815 pa_assert(s);
2816
2817 if (!in_formats || pa_idxset_isempty(in_formats))
2818 goto done;
2819
2820 source_formats = pa_source_get_formats(s);
2821
2822 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2823 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2824 if (pa_format_info_is_compatible(f_source, f_in))
2825 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2826 }
2827 }
2828
2829 done:
2830 if (source_formats)
2831 pa_idxset_free(source_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2832
2833 return out_formats;
2834 }