/* pulseaudio — src/pulsecore/source.c
 * (blob viewed at commit "devices: Set certain sink/source flags automatically.") */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29
30 #include <pulse/utf8.h>
31 #include <pulse/xmalloc.h>
32 #include <pulse/timeval.h>
33 #include <pulse/util.h>
34 #include <pulse/rtclock.h>
35 #include <pulse/internal.h>
36
37 #include <pulsecore/core-util.h>
38 #include <pulsecore/source-output.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-subscribe.h>
41 #include <pulsecore/log.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/flist.h>
44
45 #include "source.h"
46
47 #define ABSOLUTE_MIN_LATENCY (500)
48 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
49 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
50
51 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
52
/* One queued hardware volume change for deferred/synchronized volume
 * handling (see the thread_info.volume_changes list and
 * pa_source_volume_change_push()/_flush() above). */
struct pa_source_volume_change {
    pa_usec_t at;            /* time at which to apply the change — presumably in card time, TODO confirm */
    pa_cvolume hw_volume;    /* hardware volume to write at that time */

    PA_LLIST_FIELDS(pa_source_volume_change);
};
59
/* Parameter/result container for the "set port" message sent to the IO
 * thread (handler not visible in this chunk). */
struct source_message_set_port {
    pa_device_port *port;  /* port to activate */
    int ret;               /* result code — presumably filled in by the message handler, TODO confirm */
};
64
65 static void source_free(pa_object *o);
66
67 static void pa_source_volume_change_push(pa_source *s);
68 static void pa_source_volume_change_flush(pa_source *s);
69
70 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
71 pa_assert(data);
72
73 pa_zero(*data);
74 data->proplist = pa_proplist_new();
75
76 return data;
77 }
78
79 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
80 pa_assert(data);
81
82 pa_xfree(data->name);
83 data->name = pa_xstrdup(name);
84 }
85
86 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
87 pa_assert(data);
88
89 if ((data->sample_spec_is_set = !!spec))
90 data->sample_spec = *spec;
91 }
92
93 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
94 pa_assert(data);
95
96 if ((data->channel_map_is_set = !!map))
97 data->channel_map = *map;
98 }
99
100 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
101 pa_assert(data);
102
103 if ((data->volume_is_set = !!volume))
104 data->volume = *volume;
105 }
106
107 void pa_source_new_data_set_muted(pa_source_new_data *data, pa_bool_t mute) {
108 pa_assert(data);
109
110 data->muted_is_set = TRUE;
111 data->muted = !!mute;
112 }
113
114 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
115 pa_assert(data);
116
117 pa_xfree(data->active_port);
118 data->active_port = pa_xstrdup(port);
119 }
120
121 void pa_source_new_data_done(pa_source_new_data *data) {
122 pa_assert(data);
123
124 pa_proplist_free(data->proplist);
125
126 if (data->ports) {
127 pa_device_port *p;
128
129 while ((p = pa_hashmap_steal_first(data->ports)))
130 pa_device_port_free(p);
131
132 pa_hashmap_free(data->ports, NULL, NULL);
133 }
134
135 pa_xfree(data->name);
136 pa_xfree(data->active_port);
137 }
138
139 /* Called from main context */
140 static void reset_callbacks(pa_source *s) {
141 pa_assert(s);
142
143 s->set_state = NULL;
144 s->get_volume = NULL;
145 s->set_volume = NULL;
146 s->get_mute = NULL;
147 s->set_mute = NULL;
148 s->update_requested_latency = NULL;
149 s->set_port = NULL;
150 s->get_formats = NULL;
151 }
152
/* Called from main context */

/* Allocate and partially initialize a new source from the fields collected
 * in 'data'. Registers the name, fires the SOURCE_NEW and SOURCE_FIXATE
 * hooks (which may veto creation), validates sample spec / channel map /
 * volume, fills in all struct fields and puts the source into the core's
 * idxset. The implementor must still set the asyncmsgq and callbacks and
 * then call pa_source_put(). Returns NULL on failure. */
pa_source* pa_source_new(
        pa_core *core,
        pa_source_new_data *data,
        pa_source_flags_t flags) {

    pa_source *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_source);

    /* Reserve the name first; the registry may mangle it, so the possibly
     * rewritten 'name' is written back into 'data' below. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_source_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map from the channel count if none was given. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the source volume is allowed to be set, like there is for source outputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, FALSE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to veto or adjust the fully-filled-in data. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = source_free;
    s->parent.process_msg = pa_source_process_msg;

    s->core = core;
    s->state = PA_SOURCE_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->outputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->monitor_of = NULL;
    s->output_from_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* No (valid) port requested: pick the highest-priority one. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    s->thread_info.rtpoll = NULL;
    s->thread_info.outputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.max_rewind = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;

    /* FIXME: This should probably be moved to pa_source_put() */
    pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    return s;
}
328
/* Called from main context */

/* Transition the source to 'state'. The backend is told first via the
 * set_state() callback, then the IO thread via a SET_STATE message (with a
 * rollback of the backend change if the IO thread refuses); finally hooks
 * and subscription events are fired and outputs are informed about
 * suspend/resume transitions. Returns 0 or a negative error code. */
static int source_set_state(pa_source *s, pa_source_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_source_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* Are we entering or leaving the SUSPENDED state? */
    suspend_change =
        (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
        (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* The IO thread refused: roll the backend back to where it was. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the apropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_source_output *o;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(o, s->outputs, idx)
            if (s->state == PA_SOURCE_SUSPENDED &&
                (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
                pa_source_output_kill(o);
            else if (o->suspend)
                o->suspend(o, state == PA_SOURCE_SUSPENDED);
    }

    return 0;
}
383
/* Set the callback used to read the hardware volume. Unlike the set_volume
 * setter below, this does not derive any flag. */
void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
    pa_assert(s);

    s->get_volume = cb;
}
389
390 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
391 pa_assert(s);
392
393 pa_assert(!s->write_volume || cb);
394
395 s->set_volume = cb;
396
397 if (cb)
398 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
399 else
400 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
401 }
402
403 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
404 pa_assert(s);
405
406 pa_assert(!cb || s->set_volume);
407
408 s->write_volume = cb;
409
410 if (cb)
411 s->flags |= PA_SOURCE_SYNC_VOLUME;
412 else
413 s->flags &= ~PA_SOURCE_SYNC_VOLUME;
414 }
415
/* Set the callback used to read the hardware mute state. No flag is
 * derived from this one. */
void pa_source_set_get_mute_callback(pa_source *s, pa_source_cb_t cb) {
    pa_assert(s);

    s->get_mute = cb;
}
421
422 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
423 pa_assert(s);
424
425 s->set_mute = cb;
426
427 if (cb)
428 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
429 else
430 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
431 }
432
/* Called from main context */

/* Complete initialization of a source created with pa_source_new(): verify
 * the implementor filled in the mandatory fields, derive the volume flags
 * (decibel/flat), initialize shared volumes for filter sources, move the
 * source to IDLE and announce it via subscription event and PUT hook. */
void pa_source_put(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SOURCE_INIT);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || s->output_from_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_source_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the source. */
    pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SOURCE_SYNC_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sources that use volume
     * sharing. When the master source supports decibel volume, it would be good
     * to have the flag also in the filter source, but currently we don't do that
     * so that the flags of the filter source never change when it's moved from
     * a master source to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sources when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the source implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the source. */
    if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        s->flags |= PA_SOURCE_DECIBEL_VOLUME;

    if ((s->flags & PA_SOURCE_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SOURCE_FLAT_VOLUME;

    if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
        pa_source *root_source = s->output_from_master->source;

        /* Walk up the filter chain to the real device source and copy its
         * volumes, remapped into our channel map. */
        while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
            root_source = root_source->output_from_master->source;

        s->reference_volume = root_source->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);

        s->real_volume = root_source->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));

    pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
}
508
/* Called from main context */

/* Detach the source from the core: fire the UNLINK hook, unregister its
 * name, remove it from the core/card idxsets, kill all remaining outputs
 * and enter the UNLINKED state. Does not free the source — that happens in
 * source_free() once the last reference is dropped. */
void pa_source_unlink(pa_source *s) {
    pa_bool_t linked;
    pa_source_output *o, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* See pa_sink_unlink() for a couple of comments how this function
     * works. */

    linked = PA_SOURCE_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);

    if (s->state != PA_SOURCE_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sources, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sources, s, NULL);

    while ((o = pa_idxset_first(s->outputs, NULL))) {
        /* 'j' catches an output that survived its own kill, which would
         * otherwise make this loop spin forever. */
        pa_assert(o != j);
        pa_source_output_kill(o);
        j = o;
    }

    if (linked)
        source_set_state(s, PA_SOURCE_UNLINKED);
    else
        s->state = PA_SOURCE_UNLINKED;

    reset_callbacks(s);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
    }
}
550
/* Called from main context */

/* Final destructor, installed as s->parent.parent.free in pa_source_new()
 * and invoked when the refcount hits zero. Unlinks the source if that has
 * not happened yet and releases everything pa_source_new() allocated. */
static void source_free(pa_object *o) {
    pa_source_output *so;
    pa_source *s = PA_SOURCE(o);

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_source_refcnt(s) == 0);

    if (PA_SOURCE_IS_LINKED(s->state))
        pa_source_unlink(s);

    pa_log_info("Freeing source %u \"%s\"", s->index, s->name);

    pa_idxset_free(s->outputs, NULL, NULL);

    /* Drop the references still held on behalf of the IO thread. */
    while ((so = pa_hashmap_steal_first(s->thread_info.outputs)))
        pa_source_output_unref(so);

    pa_hashmap_free(s->thread_info.outputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
592
/* Called from main context, and not while the IO thread is active, please */

/* Attach the message queue used to talk to the source's IO thread. Must be
 * set before pa_source_put(), which asserts on it. */
void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    s->asyncmsgq = q;
}
600
601 /* Called from main context, and not while the IO thread is active, please */
602 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
603 pa_source_assert_ref(s);
604 pa_assert_ctl_context();
605
606 if (mask == 0)
607 return;
608
609 /* For now, allow only a minimal set of flags to be changed. */
610 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
611
612 s->flags = (s->flags & ~mask) | (value & mask);
613 }
614
/* Called from IO context, or before _put() from main context */

/* Attach the rtpoll object the IO thread runs on. */
void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    s->thread_info.rtpoll = p;
}
622
623 /* Called from main context */
624 int pa_source_update_status(pa_source*s) {
625 pa_source_assert_ref(s);
626 pa_assert_ctl_context();
627 pa_assert(PA_SOURCE_IS_LINKED(s->state));
628
629 if (s->state == PA_SOURCE_SUSPENDED)
630 return 0;
631
632 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
633 }
634
635 /* Called from main context */
636 int pa_source_suspend(pa_source *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
637 pa_source_assert_ref(s);
638 pa_assert_ctl_context();
639 pa_assert(PA_SOURCE_IS_LINKED(s->state));
640 pa_assert(cause != 0);
641
642 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
643 return -PA_ERR_NOTSUPPORTED;
644
645 if (suspend)
646 s->suspend_cause |= cause;
647 else
648 s->suspend_cause &= ~cause;
649
650 if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
651 return 0;
652
653 pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
654
655 if (s->suspend_cause)
656 return source_set_state(s, PA_SOURCE_SUSPENDED);
657 else
658 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
659 }
660
661 /* Called from main context */
662 int pa_source_sync_suspend(pa_source *s) {
663 pa_sink_state_t state;
664
665 pa_source_assert_ref(s);
666 pa_assert_ctl_context();
667 pa_assert(PA_SOURCE_IS_LINKED(s->state));
668 pa_assert(s->monitor_of);
669
670 state = pa_sink_get_state(s->monitor_of);
671
672 if (state == PA_SINK_SUSPENDED)
673 return source_set_state(s, PA_SOURCE_SUSPENDED);
674
675 pa_assert(PA_SINK_IS_OPENED(state));
676
677 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
678 }
679
680 /* Called from main context */
681 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
682 pa_source_output *o, *n;
683 uint32_t idx;
684
685 pa_source_assert_ref(s);
686 pa_assert_ctl_context();
687 pa_assert(PA_SOURCE_IS_LINKED(s->state));
688
689 if (!q)
690 q = pa_queue_new();
691
692 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
693 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
694
695 pa_source_output_ref(o);
696
697 if (pa_source_output_start_move(o) >= 0)
698 pa_queue_push(q, o);
699 else
700 pa_source_output_unref(o);
701 }
702
703 return q;
704 }
705
706 /* Called from main context */
707 void pa_source_move_all_finish(pa_source *s, pa_queue *q, pa_bool_t save) {
708 pa_source_output *o;
709
710 pa_source_assert_ref(s);
711 pa_assert_ctl_context();
712 pa_assert(PA_SOURCE_IS_LINKED(s->state));
713 pa_assert(q);
714
715 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
716 if (pa_source_output_finish_move(o, s, save) < 0)
717 pa_source_output_fail_move(o);
718
719 pa_source_output_unref(o);
720 }
721
722 pa_queue_free(q, NULL, NULL);
723 }
724
725 /* Called from main context */
726 void pa_source_move_all_fail(pa_queue *q) {
727 pa_source_output *o;
728
729 pa_assert_ctl_context();
730 pa_assert(q);
731
732 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
733 pa_source_output_fail_move(o);
734 pa_source_output_unref(o);
735 }
736
737 pa_queue_free(q, NULL, NULL);
738 }
739
740 /* Called from IO thread context */
741 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
742 pa_source_output *o;
743 void *state = NULL;
744
745 pa_source_assert_ref(s);
746 pa_source_assert_io_context(s);
747 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
748
749 if (nbytes <= 0)
750 return;
751
752 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
753 return;
754
755 pa_log_debug("Processing rewind...");
756
757 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
758 pa_source_output_assert_ref(o);
759 pa_source_output_process_rewind(o, nbytes);
760 }
761 }
762
/* Called from IO thread context */

/* Distribute a freshly captured chunk to all outputs that are NOT directly
 * attached (those get their data via pa_source_post_direct()). When soft
 * volume/mute is in effect the chunk is copied once, adjusted, and the
 * adjusted copy is pushed instead of the original. */
void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
    pa_assert(chunk);

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return;

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
        pa_memchunk vchunk = *chunk;

        /* Take our own reference and make the block writable so the
         * caller's data is left untouched. */
        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
        else
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, &vchunk);
        }

        pa_memblock_unref(vchunk.memblock);
    } else {

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, chunk);
        }
    }
}
805
/* Called from IO thread context */

/* Push a chunk to a single directly-attached output (one with
 * direct_on_input set), applying soft volume/mute on a private writable
 * copy when necessary — the same adjustment pa_source_post() performs for
 * the non-direct outputs. */
void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
    pa_source_output_assert_ref(o);
    pa_assert(o->thread_info.direct_on_input);
    pa_assert(chunk);

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return;

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
        pa_memchunk vchunk = *chunk;

        /* Copy-on-write: adjust a private reference, not the caller's data. */
        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
        else
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        pa_source_output_push(o, &vchunk);

        pa_memblock_unref(vchunk.memblock);
    } else
        pa_source_output_push(o, chunk);
}
835
836 /* Called from main thread */
837 pa_usec_t pa_source_get_latency(pa_source *s) {
838 pa_usec_t usec;
839
840 pa_source_assert_ref(s);
841 pa_assert_ctl_context();
842 pa_assert(PA_SOURCE_IS_LINKED(s->state));
843
844 if (s->state == PA_SOURCE_SUSPENDED)
845 return 0;
846
847 if (!(s->flags & PA_SOURCE_LATENCY))
848 return 0;
849
850 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
851
852 return usec;
853 }
854
/* Called from IO thread */

/* Like pa_source_get_latency(), but callable from the IO thread itself:
 * the GET_LATENCY message is dispatched synchronously through
 * process_msg() instead of the asyncmsgq. */
pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SOURCE_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    /* NOTE(review): pa_usec_t is unsigned, so this -1 becomes (pa_usec_t)-1;
     * presumably callers treat that as an error marker — confirm. */
    if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    return usec;
}
881
882 /* Called from the main thread (and also from the IO thread while the main
883 * thread is waiting).
884 *
885 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
886 * set. Instead, flat volume mode is detected by checking whether the root source
887 * has the flag set. */
888 pa_bool_t pa_source_flat_volume_enabled(pa_source *s) {
889 pa_source_assert_ref(s);
890
891 while (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
892 s = s->output_from_master->source;
893
894 return (s->flags & PA_SOURCE_FLAT_VOLUME);
895 }
896
897 /* Called from main context */
898 pa_bool_t pa_source_is_passthrough(pa_source *s) {
899
900 pa_source_assert_ref(s);
901
902 /* NB Currently only monitor sources support passthrough mode */
903 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
904 }
905
/* Called from main context. */

/* Update o->reference_ratio so that, per channel,
 * reference_ratio * (source reference volume) == o->volume, with the
 * source volume remapped into the output's channel map first. */
static void compute_reference_ratio(pa_source_output *o) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(o);
    pa_assert(pa_source_flat_volume_enabled(o->source));

    /*
     * Calculates the reference ratio from the source's reference
     * volume. This basically calculates:
     *
     * o->reference_ratio = o->volume / o->source->reference_volume
     */

    remapped = o->source->reference_volume;
    pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);

    o->reference_ratio.channels = o->sample_spec.channels;

    for (c = 0; c < o->sample_spec.channels; c++) {

        /* We don't update when the source volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                    o->reference_ratio.values[c],
                    remapped.values[c]) == o->volume.values[c])
            continue;

        o->reference_ratio.values[c] = pa_sw_volume_divide(
                o->volume.values[c],
                remapped.values[c]);
    }
}
943
944 /* Called from main context. Only called for the root source in volume sharing
945 * cases, except for internal recursive calls. */
946 static void compute_reference_ratios(pa_source *s) {
947 uint32_t idx;
948 pa_source_output *o;
949
950 pa_source_assert_ref(s);
951 pa_assert_ctl_context();
952 pa_assert(PA_SOURCE_IS_LINKED(s->state));
953 pa_assert(pa_source_flat_volume_enabled(s));
954
955 PA_IDXSET_FOREACH(o, s->outputs, idx) {
956 compute_reference_ratio(o);
957
958 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
959 compute_reference_ratios(o->destination_source);
960 }
961 }
962
/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */

/* Recompute real_ratio and soft_volume for every output of 's' from the
 * source's real volume, recursing into volume-sharing filter sources. */
static void compute_real_ratios(pa_source *s) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin source uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
            o->soft_volume = o->volume_factor;

            compute_real_ratios(o->destination_source);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);

        o->real_ratio.channels = o->sample_spec.channels;
        o->soft_volume.channels = o->sample_spec.channels;

        for (c = 0; c < o->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave o->real_ratio untouched */
                o->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        o->real_ratio.values[c],
                        remapped.values[c]) != o->volume.values[c])

                o->real_ratio.values[c] = pa_sw_volume_divide(
                        o->volume.values[c],
                        remapped.values[c]);

            o->soft_volume.values[c] = pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    o->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1029
/* Remap 'v' from channel map 'from' to 'to' while disturbing the existing
 * source volume ('template') as little as possible; see the detailed
 * explanation below. Modifies and returns 'v'. */
static pa_cvolume *cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from source output to source volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the source to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the source. */

    /* Identical maps: nothing to remap. */
    if (pa_channel_map_equal(from, to))
        return v;

    /* If remapping the template back yields v, the template is an exact
     * pre-image — reuse it verbatim. */
    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Otherwise flatten to a uniform volume (the maximum of v). */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
1068
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            /* Recurse into the volume-sharing filter source so that
             * streams anywhere in the sharing tree are considered. */
            get_maximum_output_volume(o->destination_source, max_volume, channel_map);

            /* Ignore this output. The origin source uses volume sharing, so this
             * output's volume will be set to be equal to the root source's real
             * volume. Obviously this output's current volume must not then
             * affect what the root source's real volume will be. */
            continue;
        }

        /* Remap the stream volume with minimal impact and merge it into
         * the running channel-wise maximum. */
        remapped = o->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
    }
}
1098
1099 /* Called from main thread. Only called for the root source in volume sharing
1100 * cases, except for internal recursive calls. */
1101 static pa_bool_t has_outputs(pa_source *s) {
1102 pa_source_output *o;
1103 uint32_t idx;
1104
1105 pa_source_assert_ref(s);
1106
1107 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1108 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1109 return TRUE;
1110 }
1111
1112 return FALSE;
1113 }
1114
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    /* Store the new volume, remapped from the caller's channel map into
     * this source's own map. */
    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    /* Propagate the same (unremapped) volume recursively to every
     * volume-sharing filter source connected to us. */
    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_source_flat_volume_enabled(s)) {
                pa_cvolume old_volume = o->volume;

                /* Follow the root source's real volume. */
                o->volume = *new_volume;
                pa_cvolume_remap(&o->volume, channel_map, &o->channel_map);
                compute_reference_ratio(o);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &o->volume)) {
                    if (o->volume_changed)
                        o->volume_changed(o);

                    pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
                }
            }

            update_real_volume(o->destination_source, new_volume, channel_map);
        }
    }
}
1151
/* Called from main thread. Only called for the root source in shared volume
 * cases. */
static void compute_real_volume(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_outputs(s)) {
        /* In the special case that we have no source outputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    /* Start from silence; the stream volumes are merged on top below. */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all outputs
     * connected to this source */
    get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all outputs
     * connected to this source */
    compute_real_ratios(s);
}
1182
/* Called from main thread. Only called for the root source in shared volume
 * cases, except for internal recursive calls. */
static void propagate_reference_volume(pa_source *s) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    /* This is called whenever the source volume changes that is not
     * caused by a source output volume change. We need to fix up the
     * source output volumes accordingly */

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume old_volume;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            /* Recurse so the filter source's own outputs get fixed up
             * as well. */
            propagate_reference_volume(o->destination_source);

            /* Since the origin source uses volume sharing, this output's volume
             * needs to be updated to match the root source's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = o->volume;

        /* This basically calculates:
         *
         * o->volume := o->reference_volume * o->reference_ratio */

        o->volume = s->reference_volume;
        pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
        pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &o->volume)) {

            if (o->volume_changed)
                o->volume_changed(o);

            pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
        }
    }
}
1230
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
static pa_bool_t update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
    pa_cvolume volume;
    pa_bool_t reference_volume_changed;
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    /* Remap the requested volume into this source's own channel map
     * before comparing and storing it. */
    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* The save flag stays set if it was set and the volume is unchanged,
     * or when the caller explicitly requests saving. */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        /* If the root source's volume doesn't change, then there can't be any
         * changes in the other sources in the source tree either.
         *
         * It's probably theoretically possible that even if the root source's
         * volume changes slightly, some filter source doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root source volume to the sources connected to the
         * intermediate source that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here FALSE always if
         * reference_volume_changed is FALSE. */
        return FALSE;

    /* Recurse into the volume-sharing filter sources attached to us. */
    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(o->destination_source, v, channel_map, FALSE);
    }

    return TRUE;
}
1278
/* Called from main thread. Sets the (reference) volume of a source.
 *
 * volume   - new volume, or NULL to resynchronize the source's reference/real
 *            volumes from the stream volumes (flat volume mode only). A mono
 *            volume is accepted for any channel map.
 * send_msg - whether to notify the IO thread about the change.
 * save     - whether the new volume should be considered worth saving. */
void pa_source_set_volume(
        pa_source *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume new_reference_volume;
    pa_source *root_source = s;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_source_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH output is connected */
    if (pa_source_is_passthrough(s)) {
        /* FIXME: Need to notify client that volume control is disabled */
        pa_log_warn("Cannot change volume, Source is monitor of a PASSTHROUGH sink");
        return;
    }

    /* In case of volume sharing, the volume is set for the root source first,
     * from which it's then propagated to the sharing sources. */
    while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
        root_source = root_source->output_from_master->source;

    /* As a special exception we accept mono volumes on all sources --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current reference volume so its
             * maximum channel matches the requested value. */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
    }

    /* If volume is NULL we synchronize the source's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    if (volume) {
        if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
            if (pa_source_flat_volume_enabled(root_source)) {
                /* OK, propagate this volume change back to the outputs */
                propagate_reference_volume(root_source);

                /* And now recalculate the real volume */
                compute_real_volume(root_source);
            } else
                update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
        }

    } else {
        pa_assert(pa_source_flat_volume_enabled(root_source));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_source);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_source->real_volume);
        update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_source);
    }

    if (root_source->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_source->soft_volume */

        pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
        if (!(root_source->flags & PA_SOURCE_SYNC_VOLUME))
            root_source->set_volume(root_source);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_source->soft_volume = root_source->real_volume;

    /* This tells the source that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
1371
1372 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1373 * Only to be called by source implementor */
1374 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1375
1376 pa_source_assert_ref(s);
1377 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1378
1379 if (s->flags & PA_SOURCE_SYNC_VOLUME)
1380 pa_source_assert_io_context(s);
1381 else
1382 pa_assert_ctl_context();
1383
1384 if (!volume)
1385 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1386 else
1387 s->soft_volume = *volume;
1388
1389 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_SYNC_VOLUME))
1390 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1391 else
1392 s->thread_info.soft_volume = s->soft_volume;
1393 }
1394
/* Called from the main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        /* Nothing actually changed, nothing to do. */
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
    }

    if (pa_source_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(o, s->outputs, idx) {
            pa_cvolume old_volume = o->volume;

            /* 2. Since the source's reference and real volumes are equal
             * now our ratios should be too. */
            o->reference_ratio = o->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the source's reference volume.
             *
             * This basically calculates:
             *
             * o->volume = s->reference_volume * o->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            o->volume = s->reference_volume;
            pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
            pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &o->volume)) {

                if (o->volume_changed)
                    o->volume_changed(o);

                pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
            }

            if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(o->destination_source, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = TRUE;
}
1460
/* Called from io thread */
void pa_source_update_volume_and_mute(pa_source *s) {
    pa_assert(s);
    pa_source_assert_io_context(s);

    /* Fire-and-forget: post a message to the main thread, where the
     * UPDATE_VOLUME_AND_MUTE handler forces a hw volume/mute refresh. */
    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
}
1468
/* Called from main thread. Returns the source's reference volume,
 * re-reading the hardware volume first when refresh is requested. */
const pa_cvolume *pa_source_get_volume(pa_source *s, pa_bool_t force_refresh) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        /* Volume-sharing filter sources have no volume of their own to
         * refresh; only the root source may get here. */
        pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Without deferred (SYNC) volume, query the hw directly here;
         * with it, the GET_VOLUME handler calls get_volume() in the IO
         * thread instead. */
        if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_volume)
            s->get_volume(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        /* Push the possibly changed real volume through the sharing tree
         * and notify listeners if it actually changed. */
        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
1493
/* Called from main thread. In volume sharing cases, only the root source may
 * call this. */
void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
    pa_cvolume old_real_volume;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* The source implementor may call this if the volume changed to make sure everyone is notified */

    old_real_volume = s->real_volume;
    /* Adopt the new hardware volume and rebuild reference and stream
     * volumes from it. */
    update_real_volume(s, new_real_volume, &s->channel_map);
    propagate_real_volume(s, &old_real_volume);
}
1510
1511 /* Called from main thread */
1512 void pa_source_set_mute(pa_source *s, pa_bool_t mute, pa_bool_t save) {
1513 pa_bool_t old_muted;
1514
1515 pa_source_assert_ref(s);
1516 pa_assert_ctl_context();
1517 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1518
1519 old_muted = s->muted;
1520 s->muted = mute;
1521 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1522
1523 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->set_mute)
1524 s->set_mute(s);
1525
1526 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1527
1528 if (old_muted != s->muted)
1529 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1530 }
1531
1532 /* Called from main thread */
1533 pa_bool_t pa_source_get_mute(pa_source *s, pa_bool_t force_refresh) {
1534
1535 pa_source_assert_ref(s);
1536 pa_assert_ctl_context();
1537 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1538
1539 if (s->refresh_muted || force_refresh) {
1540 pa_bool_t old_muted = s->muted;
1541
1542 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_mute)
1543 s->get_mute(s);
1544
1545 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1546
1547 if (old_muted != s->muted) {
1548 s->save_muted = TRUE;
1549
1550 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1551
1552 /* Make sure the soft mute status stays in sync */
1553 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1554 }
1555 }
1556
1557 return s->muted;
1558 }
1559
1560 /* Called from main thread */
1561 void pa_source_mute_changed(pa_source *s, pa_bool_t new_muted) {
1562 pa_source_assert_ref(s);
1563 pa_assert_ctl_context();
1564 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1565
1566 /* The source implementor may call this if the mute state changed to make sure everyone is notified */
1567
1568 if (s->muted == new_muted)
1569 return;
1570
1571 s->muted = new_muted;
1572 s->save_muted = TRUE;
1573
1574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1575 }
1576
1577 /* Called from main thread */
1578 pa_bool_t pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1579 pa_source_assert_ref(s);
1580 pa_assert_ctl_context();
1581
1582 if (p)
1583 pa_proplist_update(s->proplist, mode, p);
1584
1585 if (PA_SOURCE_IS_LINKED(s->state)) {
1586 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1587 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1588 }
1589
1590 return TRUE;
1591 }
1592
1593 /* Called from main thread */
1594 /* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
1595 void pa_source_set_description(pa_source *s, const char *description) {
1596 const char *old;
1597 pa_source_assert_ref(s);
1598 pa_assert_ctl_context();
1599
1600 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1601 return;
1602
1603 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1604
1605 if (old && description && pa_streq(old, description))
1606 return;
1607
1608 if (description)
1609 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1610 else
1611 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1612
1613 if (PA_SOURCE_IS_LINKED(s->state)) {
1614 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1615 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1616 }
1617 }
1618
/* Called from main thread */
unsigned pa_source_linked_by(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* Number of source outputs connected to this source, including
     * corked ones (contrast with pa_source_used_by()). */
    return pa_idxset_size(s->outputs);
}
1627
1628 /* Called from main thread */
1629 unsigned pa_source_used_by(pa_source *s) {
1630 unsigned ret;
1631
1632 pa_source_assert_ref(s);
1633 pa_assert_ctl_context();
1634 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1635
1636 ret = pa_idxset_size(s->outputs);
1637 pa_assert(ret >= s->n_corked);
1638
1639 return ret - s->n_corked;
1640 }
1641
1642 /* Called from main thread */
1643 unsigned pa_source_check_suspend(pa_source *s) {
1644 unsigned ret;
1645 pa_source_output *o;
1646 uint32_t idx;
1647
1648 pa_source_assert_ref(s);
1649 pa_assert_ctl_context();
1650
1651 if (!PA_SOURCE_IS_LINKED(s->state))
1652 return 0;
1653
1654 ret = 0;
1655
1656 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1657 pa_source_output_state_t st;
1658
1659 st = pa_source_output_get_state(o);
1660
1661 /* We do not assert here. It is perfectly valid for a source output to
1662 * be in the INIT state (i.e. created, marked done but not yet put)
1663 * and we should not care if it's unlinked as it won't contribute
1664 * towarards our busy status.
1665 */
1666 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1667 continue;
1668
1669 if (st == PA_SOURCE_OUTPUT_CORKED)
1670 continue;
1671
1672 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1673 continue;
1674
1675 ret ++;
1676 }
1677
1678 return ret;
1679 }
1680
/* Called from the IO thread */
static void sync_output_volumes_within_thread(pa_source *s) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    /* Copy each output's main-thread soft volume into its thread_info
     * copy so that the IO thread starts applying it. */
    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
        if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
            continue;

        o->thread_info.soft_volume = o->soft_volume;
        /* NOTE(review): the rewind request below is deliberately left
         * disabled — presumably because sources, unlike sinks, have no
         * rewind concept here; confirm before re-enabling. */
        //pa_source_output_request_rewind(o, 0, TRUE, FALSE, FALSE);
    }
}
1697
/* Called from the IO thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void set_shared_volume_within_thread(pa_source *s) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);

    /* Process SET_VOLUME_SYNCED synchronously for this source first
     * (applies hw and soft volumes in this thread)... */
    PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);

    /* ...then recurse into every volume-sharing filter source attached
     * to us, so the whole sharing tree gets updated. */
    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            set_shared_volume_within_thread(o->destination_source);
    }
}
1713
/* Called from IO thread, except when it is not. Default message handler for
 * sources; implementors chain up to this from their own process_msg().
 * Returns 0 on success, -1 for unhandled/unsupported messages. */
int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_source *s = PA_SOURCE(object);
    pa_source_assert_ref(s);

    switch ((pa_source_message_t) code) {

        case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            /* The hashmap entry holds a reference; dropped again in
             * REMOVE_OUTPUT. */
            pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));

            if (o->direct_on_input) {
                o->thread_info.direct_on_input = o->direct_on_input;
                pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
            }

            pa_assert(!o->thread_info.attached);
            o->thread_info.attached = TRUE;

            if (o->attach)
                o->attach(o);

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
                pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);

            /* We don't just invalidate the requested latency here,
             * because if we are in a move we might need to fix up the
             * requested latency. */
            pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->detach)
                o->detach(o);

            pa_assert(o->thread_info.attached);
            o->thread_info.attached = FALSE;

            if (o->thread_info.direct_on_input) {
                pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
                o->thread_info.direct_on_input = NULL;
            }

            /* Drop the reference taken in ADD_OUTPUT. */
            if (pa_hashmap_remove(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index)))
                pa_source_output_unref(o);

            pa_source_invalidate_requested_latency(s, TRUE);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
            pa_source *root_source = s;

            /* Volume handling always starts at the root of the
             * volume-sharing tree. */
            while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
                root_source = root_source->output_from_master->source;

            set_shared_volume_within_thread(root_source);
            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:

            /* Deferred-volume path: apply the hw volume from the IO
             * thread and queue the change. */
            if (s->flags & PA_SOURCE_SYNC_VOLUME) {
                s->set_volume(s);
                pa_source_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
            sync_output_volumes_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_GET_VOLUME:

            /* Deferred-volume path: query the hw from the IO thread and
             * derive the current hw volume. */
            if ((s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_source_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case source implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            return 0;

        case PA_SOURCE_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
            }

            if (s->flags & PA_SOURCE_SYNC_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SOURCE_MESSAGE_GET_MUTE:

            if (s->flags & PA_SOURCE_SYNC_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SOURCE_MESSAGE_SET_STATE: {

            /* Did we transition between an opened state and SUSPENDED
             * (in either direction)? */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (suspend_change) {
                pa_source_output *o;
                void *state = NULL;

                /* Give every output a chance to react to the suspend
                 * state change. */
                while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
                    if (o->suspend_within_thread)
                        o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
            }

            return 0;
        }

        case PA_SOURCE_MESSAGE_DETACH:

            /* Detach all streams */
            pa_source_detach_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_source_attach_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_source_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            /* r[0] = min latency, r[1] = max latency */
            pa_source_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:

            pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SOURCE_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SOURCE_MESSAGE_SET_MAX_REWIND:

            pa_source_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SOURCE_MESSAGE_GET_LATENCY:

            /* Monitor sources report zero latency of their own. */
            if (s->monitor_of) {
                *((pa_usec_t*) userdata) = 0;
                return 0;
            }

            /* Implementors need to overwrite this implementation! */
            return -1;

        case PA_SOURCE_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct source_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Force-refresh both volume and mute from the hardware. */
            pa_source_get_volume(s, TRUE);
            pa_source_get_mute(s, TRUE);
            return 0;

        case PA_SOURCE_MESSAGE_MAX:
            ;
    }

    return -1;
}
1959
1960 /* Called from main thread */
1961 int pa_source_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1962 pa_source *source;
1963 uint32_t idx;
1964 int ret = 0;
1965
1966 pa_core_assert_ref(c);
1967 pa_assert_ctl_context();
1968 pa_assert(cause != 0);
1969
1970 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
1971 int r;
1972
1973 if (source->monitor_of)
1974 continue;
1975
1976 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
1977 ret = r;
1978 }
1979
1980 return ret;
1981 }
1982
/* Called from main thread */
/* Synchronously asks the source's IO thread to detach; the send blocks
 * until PA_SOURCE_MESSAGE_DETACH has been processed (presumably dispatched
 * to pa_source_detach_within_thread() — handler not visible here). */
void pa_source_detach(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
1991
/* Called from main thread */
/* Synchronously asks the source's IO thread to (re)attach; the send blocks
 * until PA_SOURCE_MESSAGE_ATTACH has been processed (presumably dispatched
 * to pa_source_attach_within_thread() — handler not visible here). */
void pa_source_attach(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2000
2001 /* Called from IO thread */
2002 void pa_source_detach_within_thread(pa_source *s) {
2003 pa_source_output *o;
2004 void *state = NULL;
2005
2006 pa_source_assert_ref(s);
2007 pa_source_assert_io_context(s);
2008 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2009
2010 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2011 if (o->detach)
2012 o->detach(o);
2013 }
2014
2015 /* Called from IO thread */
2016 void pa_source_attach_within_thread(pa_source *s) {
2017 pa_source_output *o;
2018 void *state = NULL;
2019
2020 pa_source_assert_ref(s);
2021 pa_source_assert_io_context(s);
2022 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2023
2024 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2025 if (o->attach)
2026 o->attach(o);
2027 }
2028
2029 /* Called from IO thread */
2030 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2031 pa_usec_t result = (pa_usec_t) -1;
2032 pa_source_output *o;
2033 void *state = NULL;
2034
2035 pa_source_assert_ref(s);
2036 pa_source_assert_io_context(s);
2037
2038 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2039 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2040
2041 if (s->thread_info.requested_latency_valid)
2042 return s->thread_info.requested_latency;
2043
2044 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2045 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2046 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2047 result = o->thread_info.requested_source_latency;
2048
2049 if (result != (pa_usec_t) -1)
2050 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2051
2052 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2053 /* Only cache this if we are fully set up */
2054 s->thread_info.requested_latency = result;
2055 s->thread_info.requested_latency_valid = TRUE;
2056 }
2057
2058 return result;
2059 }
2060
/* Called from main thread */
/* Returns the latency currently requested from the source, queried
 * synchronously from the IO thread. A suspended source processes no
 * audio, so 0 is returned for it without messaging the IO thread. */
pa_usec_t pa_source_get_requested_latency(pa_source *s) {
    pa_usec_t usec = 0;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->state == PA_SOURCE_SUSPENDED)
        return 0;

    /* The handler fills in 'usec' before the send returns. */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
2076
2077 /* Called from IO thread */
2078 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2079 pa_source_output *o;
2080 void *state = NULL;
2081
2082 pa_source_assert_ref(s);
2083 pa_source_assert_io_context(s);
2084
2085 if (max_rewind == s->thread_info.max_rewind)
2086 return;
2087
2088 s->thread_info.max_rewind = max_rewind;
2089
2090 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2091 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2092 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2093 }
2094
2095 /* Called from main thread */
2096 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2097 pa_source_assert_ref(s);
2098 pa_assert_ctl_context();
2099
2100 if (PA_SOURCE_IS_LINKED(s->state))
2101 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2102 else
2103 pa_source_set_max_rewind_within_thread(s, max_rewind);
2104 }
2105
2106 /* Called from IO thread */
2107 void pa_source_invalidate_requested_latency(pa_source *s, pa_bool_t dynamic) {
2108 pa_source_output *o;
2109 void *state = NULL;
2110
2111 pa_source_assert_ref(s);
2112 pa_source_assert_io_context(s);
2113
2114 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2115 s->thread_info.requested_latency_valid = FALSE;
2116 else if (dynamic)
2117 return;
2118
2119 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2120
2121 if (s->update_requested_latency)
2122 s->update_requested_latency(s);
2123
2124 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2125 if (o->update_source_requested_latency)
2126 o->update_source_requested_latency(o);
2127 }
2128
2129 if (s->monitor_of)
2130 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2131 }
2132
2133 /* Called from main thread */
2134 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2135 pa_source_assert_ref(s);
2136 pa_assert_ctl_context();
2137
2138 /* min_latency == 0: no limit
2139 * min_latency anything else: specified limit
2140 *
2141 * Similar for max_latency */
2142
2143 if (min_latency < ABSOLUTE_MIN_LATENCY)
2144 min_latency = ABSOLUTE_MIN_LATENCY;
2145
2146 if (max_latency <= 0 ||
2147 max_latency > ABSOLUTE_MAX_LATENCY)
2148 max_latency = ABSOLUTE_MAX_LATENCY;
2149
2150 pa_assert(min_latency <= max_latency);
2151
2152 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2153 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2154 max_latency == ABSOLUTE_MAX_LATENCY) ||
2155 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2156
2157 if (PA_SOURCE_IS_LINKED(s->state)) {
2158 pa_usec_t r[2];
2159
2160 r[0] = min_latency;
2161 r[1] = max_latency;
2162
2163 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2164 } else
2165 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
2166 }
2167
2168 /* Called from main thread */
2169 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2170 pa_source_assert_ref(s);
2171 pa_assert_ctl_context();
2172 pa_assert(min_latency);
2173 pa_assert(max_latency);
2174
2175 if (PA_SOURCE_IS_LINKED(s->state)) {
2176 pa_usec_t r[2] = { 0, 0 };
2177
2178 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2179
2180 *min_latency = r[0];
2181 *max_latency = r[1];
2182 } else {
2183 *min_latency = s->thread_info.min_latency;
2184 *max_latency = s->thread_info.max_latency;
2185 }
2186 }
2187
2188 /* Called from IO thread, and from main thread before pa_source_put() is called */
2189 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2190 pa_source_assert_ref(s);
2191 pa_source_assert_io_context(s);
2192
2193 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2194 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2195 pa_assert(min_latency <= max_latency);
2196
2197 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2198 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2199 max_latency == ABSOLUTE_MAX_LATENCY) ||
2200 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2201 s->monitor_of);
2202
2203 if (s->thread_info.min_latency == min_latency &&
2204 s->thread_info.max_latency == max_latency)
2205 return;
2206
2207 s->thread_info.min_latency = min_latency;
2208 s->thread_info.max_latency = max_latency;
2209
2210 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2211 pa_source_output *o;
2212 void *state = NULL;
2213
2214 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2215 if (o->update_source_latency_range)
2216 o->update_source_latency_range(o);
2217 }
2218
2219 pa_source_invalidate_requested_latency(s, FALSE);
2220 }
2221
2222 /* Called from main thread, before the source is put */
2223 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2224 pa_source_assert_ref(s);
2225 pa_assert_ctl_context();
2226
2227 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2228 pa_assert(latency == 0);
2229 return;
2230 }
2231
2232 if (latency < ABSOLUTE_MIN_LATENCY)
2233 latency = ABSOLUTE_MIN_LATENCY;
2234
2235 if (latency > ABSOLUTE_MAX_LATENCY)
2236 latency = ABSOLUTE_MAX_LATENCY;
2237
2238 if (PA_SOURCE_IS_LINKED(s->state))
2239 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2240 else
2241 s->thread_info.fixed_latency = latency;
2242 }
2243
2244 /* Called from main thread */
2245 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2246 pa_usec_t latency;
2247
2248 pa_source_assert_ref(s);
2249 pa_assert_ctl_context();
2250
2251 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2252 return 0;
2253
2254 if (PA_SOURCE_IS_LINKED(s->state))
2255 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2256 else
2257 latency = s->thread_info.fixed_latency;
2258
2259 return latency;
2260 }
2261
2262 /* Called from IO thread */
2263 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2264 pa_source_assert_ref(s);
2265 pa_source_assert_io_context(s);
2266
2267 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2268 pa_assert(latency == 0);
2269 return;
2270 }
2271
2272 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2273 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2274
2275 if (s->thread_info.fixed_latency == latency)
2276 return;
2277
2278 s->thread_info.fixed_latency = latency;
2279
2280 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2281 pa_source_output *o;
2282 void *state = NULL;
2283
2284 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2285 if (o->update_source_fixed_latency)
2286 o->update_source_fixed_latency(o);
2287 }
2288
2289 pa_source_invalidate_requested_latency(s, FALSE);
2290 }
2291
2292 /* Called from main thread */
2293 size_t pa_source_get_max_rewind(pa_source *s) {
2294 size_t r;
2295 pa_assert_ctl_context();
2296 pa_source_assert_ref(s);
2297
2298 if (!PA_SOURCE_IS_LINKED(s->state))
2299 return s->thread_info.max_rewind;
2300
2301 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2302
2303 return r;
2304 }
2305
2306 /* Called from main context */
2307 int pa_source_set_port(pa_source *s, const char *name, pa_bool_t save) {
2308 pa_device_port *port;
2309 int ret;
2310
2311 pa_source_assert_ref(s);
2312 pa_assert_ctl_context();
2313
2314 if (!s->set_port) {
2315 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2316 return -PA_ERR_NOTIMPLEMENTED;
2317 }
2318
2319 if (!s->ports)
2320 return -PA_ERR_NOENTITY;
2321
2322 if (!(port = pa_hashmap_get(s->ports, name)))
2323 return -PA_ERR_NOENTITY;
2324
2325 if (s->active_port == port) {
2326 s->save_port = s->save_port || save;
2327 return 0;
2328 }
2329
2330 if (s->flags & PA_SOURCE_SYNC_VOLUME) {
2331 struct source_message_set_port msg = { .port = port, .ret = 0 };
2332 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2333 ret = msg.ret;
2334 }
2335 else
2336 ret = s->set_port(s, port);
2337
2338 if (ret < 0)
2339 return -PA_ERR_NOENTITY;
2340
2341 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2342
2343 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2344
2345 s->active_port = port;
2346 s->save_port = save;
2347
2348 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
2349
2350 return 0;
2351 }
2352
2353 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2354
2355 /* Called from the IO thread. */
2356 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2357 pa_source_volume_change *c;
2358 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2359 c = pa_xnew(pa_source_volume_change, 1);
2360
2361 PA_LLIST_INIT(pa_source_volume_change, c);
2362 c->at = 0;
2363 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2364 return c;
2365 }
2366
2367 /* Called from the IO thread. */
2368 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2369 pa_assert(c);
2370 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2371 pa_xfree(c);
2372 }
2373
2374 /* Called from the IO thread. */
2375 void pa_source_volume_change_push(pa_source *s) {
2376 pa_source_volume_change *c = NULL;
2377 pa_source_volume_change *nc = NULL;
2378 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2379
2380 const char *direction = NULL;
2381
2382 pa_assert(s);
2383 nc = pa_source_volume_change_new(s);
2384
2385 /* NOTE: There is already more different volumes in pa_source that I can remember.
2386 * Adding one more volume for HW would get us rid of this, but I am trying
2387 * to survive with the ones we already have. */
2388 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2389
2390 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2391 pa_log_debug("Volume not changing");
2392 pa_source_volume_change_free(nc);
2393 return;
2394 }
2395
2396 nc->at = pa_source_get_latency_within_thread(s);
2397 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2398
2399 if (s->thread_info.volume_changes_tail) {
2400 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2401 /* If volume is going up let's do it a bit late. If it is going
2402 * down let's do it a bit early. */
2403 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2404 if (nc->at + safety_margin > c->at) {
2405 nc->at += safety_margin;
2406 direction = "up";
2407 break;
2408 }
2409 }
2410 else if (nc->at - safety_margin > c->at) {
2411 nc->at -= safety_margin;
2412 direction = "down";
2413 break;
2414 }
2415 }
2416 }
2417
2418 if (c == NULL) {
2419 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2420 nc->at += safety_margin;
2421 direction = "up";
2422 } else {
2423 nc->at -= safety_margin;
2424 direction = "down";
2425 }
2426 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2427 }
2428 else {
2429 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2430 }
2431
2432 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2433
2434 /* We can ignore volume events that came earlier but should happen later than this. */
2435 PA_LLIST_FOREACH(c, nc->next) {
2436 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2437 pa_source_volume_change_free(c);
2438 }
2439 nc->next = NULL;
2440 s->thread_info.volume_changes_tail = nc;
2441 }
2442
2443 /* Called from the IO thread. */
2444 static void pa_source_volume_change_flush(pa_source *s) {
2445 pa_source_volume_change *c = s->thread_info.volume_changes;
2446 pa_assert(s);
2447 s->thread_info.volume_changes = NULL;
2448 s->thread_info.volume_changes_tail = NULL;
2449 while (c) {
2450 pa_source_volume_change *next = c->next;
2451 pa_source_volume_change_free(c);
2452 c = next;
2453 }
2454 }
2455
2456 /* Called from the IO thread. */
2457 pa_bool_t pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2458 pa_usec_t now = pa_rtclock_now();
2459 pa_bool_t ret = FALSE;
2460
2461 pa_assert(s);
2462 pa_assert(s->write_volume);
2463
2464 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2465 pa_source_volume_change *c = s->thread_info.volume_changes;
2466 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2467 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2468 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2469 ret = TRUE;
2470 s->thread_info.current_hw_volume = c->hw_volume;
2471 pa_source_volume_change_free(c);
2472 }
2473
2474 if (s->write_volume && ret)
2475 s->write_volume(s);
2476
2477 if (s->thread_info.volume_changes) {
2478 if (usec_to_next)
2479 *usec_to_next = s->thread_info.volume_changes->at - now;
2480 if (pa_log_ratelimit(PA_LOG_DEBUG))
2481 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2482 }
2483 else {
2484 if (usec_to_next)
2485 *usec_to_next = 0;
2486 s->thread_info.volume_changes_tail = NULL;
2487 }
2488 return ret;
2489 }
2490
2491
2492 /* Called from the main thread */
2493 /* Gets the list of formats supported by the source. The members and idxset must
2494 * be freed by the caller. */
2495 pa_idxset* pa_source_get_formats(pa_source *s) {
2496 pa_idxset *ret;
2497
2498 pa_assert(s);
2499
2500 if (s->get_formats) {
2501 /* Source supports format query, all is good */
2502 ret = s->get_formats(s);
2503 } else {
2504 /* Source doesn't support format query, so assume it does PCM */
2505 pa_format_info *f = pa_format_info_new();
2506 f->encoding = PA_ENCODING_PCM;
2507
2508 ret = pa_idxset_new(NULL, NULL);
2509 pa_idxset_put(ret, f, NULL);
2510 }
2511
2512 return ret;
2513 }
2514
2515 /* Called from the main thread */
2516 /* Checks if the source can accept this format */
2517 pa_bool_t pa_source_check_format(pa_source *s, pa_format_info *f)
2518 {
2519 pa_idxset *formats = NULL;
2520 pa_bool_t ret = FALSE;
2521
2522 pa_assert(s);
2523 pa_assert(f);
2524
2525 formats = pa_source_get_formats(s);
2526
2527 if (formats) {
2528 pa_format_info *finfo_device;
2529 uint32_t i;
2530
2531 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2532 if (pa_format_info_is_compatible(finfo_device, f)) {
2533 ret = TRUE;
2534 break;
2535 }
2536 }
2537
2538 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2539 }
2540
2541 return ret;
2542 }
2543
2544 /* Called from the main thread */
2545 /* Calculates the intersection between formats supported by the source and
2546 * in_formats, and returns these, in the order of the source's formats. */
2547 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2548 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2549 pa_format_info *f_source, *f_in;
2550 uint32_t i, j;
2551
2552 pa_assert(s);
2553
2554 if (!in_formats || pa_idxset_isempty(in_formats))
2555 goto done;
2556
2557 source_formats = pa_source_get_formats(s);
2558
2559 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2560 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2561 if (pa_format_info_is_compatible(f_source, f_in))
2562 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2563 }
2564 }
2565
2566 done:
2567 if (source_formats)
2568 pa_idxset_free(source_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2569
2570 return out_formats;
2571 }