/*
 * Scraped from the code.delx.au PulseAudio repository browser:
 *   pulseaudio / src / pulsecore / source.c
 * Commit: "capture: Implement per-stream volume control for capture streams."
 */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
37
38 #include <pulsecore/core-util.h>
39 #include <pulsecore/source-output.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-subscribe.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/flist.h>
45
46 #include "source.h"
47
48 #define ABSOLUTE_MIN_LATENCY (500)
49 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
50 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
51
52 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
53
/* One queued hardware volume change for the deferred ("sync") volume
 * machinery: scheduled to be written to the hardware at time 'at'.
 * Entries are kept in a linked list on s->thread_info.volume_changes. */
struct pa_source_volume_change {
    pa_usec_t at;         /* when this change should take effect */
    pa_cvolume hw_volume; /* hardware volume to apply at that time */

    PA_LLIST_FIELDS(pa_source_volume_change);
};
60
/* Payload for the SET_PORT message sent to the IO thread: carries the
 * port to activate; 'ret' receives the implementor callback's result. */
struct source_message_set_port {
    pa_device_port *port;
    int ret; /* filled in with the set_port() return code */
};
65
66 static void source_free(pa_object *o);
67
68 static void pa_source_volume_change_push(pa_source *s);
69 static void pa_source_volume_change_flush(pa_source *s);
70
71 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
72 pa_assert(data);
73
74 pa_zero(*data);
75 data->proplist = pa_proplist_new();
76
77 return data;
78 }
79
80 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
81 pa_assert(data);
82
83 pa_xfree(data->name);
84 data->name = pa_xstrdup(name);
85 }
86
87 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
88 pa_assert(data);
89
90 if ((data->sample_spec_is_set = !!spec))
91 data->sample_spec = *spec;
92 }
93
94 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
95 pa_assert(data);
96
97 if ((data->channel_map_is_set = !!map))
98 data->channel_map = *map;
99 }
100
101 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
102 pa_assert(data);
103
104 if ((data->volume_is_set = !!volume))
105 data->volume = *volume;
106 }
107
108 void pa_source_new_data_set_muted(pa_source_new_data *data, pa_bool_t mute) {
109 pa_assert(data);
110
111 data->muted_is_set = TRUE;
112 data->muted = !!mute;
113 }
114
115 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
116 pa_assert(data);
117
118 pa_xfree(data->active_port);
119 data->active_port = pa_xstrdup(port);
120 }
121
122 void pa_source_new_data_done(pa_source_new_data *data) {
123 pa_assert(data);
124
125 pa_proplist_free(data->proplist);
126
127 if (data->ports) {
128 pa_device_port *p;
129
130 while ((p = pa_hashmap_steal_first(data->ports)))
131 pa_device_port_free(p);
132
133 pa_hashmap_free(data->ports, NULL, NULL);
134 }
135
136 pa_xfree(data->name);
137 pa_xfree(data->active_port);
138 }
139
140 /* Called from main context */
141 static void reset_callbacks(pa_source *s) {
142 pa_assert(s);
143
144 s->set_state = NULL;
145 s->get_volume = NULL;
146 s->set_volume = NULL;
147 s->get_mute = NULL;
148 s->set_mute = NULL;
149 s->update_requested_latency = NULL;
150 s->set_port = NULL;
151 s->get_formats = NULL;
152 }
153
/* Called from main context */
/* Allocate and partially initialize a new source from 'data'.
 *
 * Registers the name, fires the SOURCE_NEW and SOURCE_FIXATE hooks
 * (either may veto creation), validates/completes the sample spec,
 * channel map, volume and mute state, then fills in both the
 * main-thread fields and the IO-thread (thread_info) mirror.
 *
 * The returned source is in PA_SOURCE_INIT state; the caller must set
 * the asyncmsgq and implementor callbacks and then call
 * pa_source_put(). Returns NULL on failure. */
pa_source* pa_source_new(
        pa_core *core,
        pa_source_new_data *data,
        pa_source_flags_t flags) {

    pa_source *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_source);

    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    /* The namereg may have altered the requested name; keep the final one */
    pa_source_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */
    /* NOTE(review): the pa_return_null_if_fail() paths below leak 's' and
     * leave the name registered, as the FIXME above acknowledges. */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* If no channel map was supplied, derive a default from the channel count */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the source volume is allowed to be set, like there is for source outputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, if any */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, FALSE);
    pa_device_init_intended_roles(data->proplist);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = source_free;
    s->parent.process_msg = pa_source_process_msg;

    s->core = core;
    s->state = PA_SOURCE_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->outputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->monitor_of = NULL;
    s->output_from_master = NULL;

    /* Volumes: reference/real start at the configured volume, soft volume
     * starts flat; implementors adjust these between _new() and _put() */
    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Prefer the explicitly requested port, if it exists... */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise fall back to the highest-priority port */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* IO-thread state, mirroring the main-thread fields set above */
    s->thread_info.rtpoll = NULL;
    s->thread_info.outputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.max_rewind = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    /* Deferred hardware-volume-change queue (sync volume) */
    PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;

    /* FIXME: This should probably be moved to pa_source_put() */
    pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    return s;
}
329
/* Called from main context */
/* Transition the source to 'state'.
 *
 * Order matters here: the implementor's set_state() callback runs
 * first, then the new state is sent synchronously to the IO thread; if
 * the IO thread rejects it, the implementor callback is rolled back to
 * the original state. Only after both succeed is s->state updated and
 * the change announced. On a suspend/resume edge the outputs are
 * notified (or killed, if they asked for that). */
static int source_set_state(pa_source *s, pa_source_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_source_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* TRUE iff this transition crosses the suspended/opened boundary */
    suspend_change =
        (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
        (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* IO thread refused: undo the implementor callback */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_source_output *o;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(o, s->outputs, idx)
            if (s->state == PA_SOURCE_SUSPENDED &&
                (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
                pa_source_output_kill(o);
            else if (o->suspend)
                o->suspend(o, state == PA_SOURCE_SUSPENDED);
    }

    return 0;
}
384
/* Called from main context */
/* Finish the initialization started by pa_source_new() and make the
 * source live: fix up volume-related flags, inherit volumes from the
 * root source when volume sharing is used, sanity-check the
 * implementor's flag/callback combinations, enter IDLE state, and
 * announce the new source via subscription event and hook. */
void pa_source_put(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SOURCE_INIT);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || s->output_from_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_source_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* XXX: Currently decibel volume is disabled for all sources that use volume
     * sharing. When the master source supports decibel volume, it would be good
     * to have the flag also in the filter source, but currently we don't do that
     * so that the flags of the filter source never change when it's moved from
     * a master source to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sources when volume
     * sharing is used, but the current approach was easier to implement... */
    if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        s->flags |= PA_SOURCE_DECIBEL_VOLUME;

    if ((s->flags & PA_SOURCE_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SOURCE_FLAT_VOLUME;

    if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
        /* Walk up the filter chain to the root source and copy its
         * volumes, remapped into our own channel map */
        pa_source *root_source = s->output_from_master->source;

        while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
            root_source = root_source->output_from_master->source;

        s->reference_volume = root_source->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);

        s->real_volume = root_source->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
    } else
        /* We assume that if the source implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    /* Mirror the final volume/mute into the IO-thread state */
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    /* Validate the implementor's setup: flags must be consistent with the
     * callbacks and volume parameters that were provided */
    pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SOURCE_SYNC_VOLUME) || (s->flags & PA_SOURCE_HW_VOLUME_CTRL));
    pa_assert(!(s->flags & PA_SOURCE_SYNC_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);

    pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
}
450
/* Called from main context */
/* Detach the source from the core: fire the UNLINK hook, unregister
 * the name, remove it from the core and card idxsets, kill every
 * remaining output, enter UNLINKED state and clear the implementor
 * callbacks. May be called more than once; later calls are mostly
 * no-ops. The object itself is freed later by source_free() when the
 * refcount drops to zero. */
void pa_source_unlink(pa_source *s) {
    pa_bool_t linked;
    pa_source_output *o, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* See pa_sink_unlink() for a couple of comments how this function
     * works. */

    linked = PA_SOURCE_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);

    if (s->state != PA_SOURCE_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sources, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sources, s, NULL);

    while ((o = pa_idxset_first(s->outputs, NULL))) {
        /* pa_source_output_kill() must remove 'o' from s->outputs; the
         * 'j' check guards against a kill implementation that fails to
         * do so, which would loop forever. */
        pa_assert(o != j);
        pa_source_output_kill(o);
        j = o;
    }

    if (linked)
        source_set_state(s, PA_SOURCE_UNLINKED);
    else
        s->state = PA_SOURCE_UNLINKED;

    reset_callbacks(s);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
    }
}
492
/* Called from main context */
/* Destructor invoked by the refcounting machinery (installed as
 * parent.parent.free in pa_source_new()) once the last reference is
 * dropped. Unlinks first if that has not happened yet, then releases
 * all owned resources. */
static void source_free(pa_object *o) {
    pa_source_output *so;
    pa_source *s = PA_SOURCE(o);

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_source_refcnt(s) == 0);

    if (PA_SOURCE_IS_LINKED(s->state))
        pa_source_unlink(s);

    pa_log_info("Freeing source %u \"%s\"", s->index, s->name);

    /* Outputs themselves are not owned here; unlink killed them already */
    pa_idxset_free(s->outputs, NULL, NULL);

    /* The thread_info map holds its own references to the outputs */
    while ((so = pa_hashmap_steal_first(s->thread_info.outputs)))
        pa_source_output_unref(so);

    pa_hashmap_free(s->thread_info.outputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
534
535 /* Called from main context, and not while the IO thread is active, please */
536 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
537 pa_source_assert_ref(s);
538 pa_assert_ctl_context();
539
540 s->asyncmsgq = q;
541 }
542
543 /* Called from main context, and not while the IO thread is active, please */
544 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
545 pa_source_assert_ref(s);
546 pa_assert_ctl_context();
547
548 if (mask == 0)
549 return;
550
551 /* For now, allow only a minimal set of flags to be changed. */
552 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
553
554 s->flags = (s->flags & ~mask) | (value & mask);
555 }
556
557 /* Called from IO context, or before _put() from main context */
558 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
559 pa_source_assert_ref(s);
560 pa_source_assert_io_context(s);
561
562 s->thread_info.rtpoll = p;
563 }
564
565 /* Called from main context */
566 int pa_source_update_status(pa_source*s) {
567 pa_source_assert_ref(s);
568 pa_assert_ctl_context();
569 pa_assert(PA_SOURCE_IS_LINKED(s->state));
570
571 if (s->state == PA_SOURCE_SUSPENDED)
572 return 0;
573
574 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
575 }
576
577 /* Called from main context */
578 int pa_source_suspend(pa_source *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
579 pa_source_assert_ref(s);
580 pa_assert_ctl_context();
581 pa_assert(PA_SOURCE_IS_LINKED(s->state));
582 pa_assert(cause != 0);
583
584 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
585 return -PA_ERR_NOTSUPPORTED;
586
587 if (suspend)
588 s->suspend_cause |= cause;
589 else
590 s->suspend_cause &= ~cause;
591
592 if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
593 return 0;
594
595 pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
596
597 if (s->suspend_cause)
598 return source_set_state(s, PA_SOURCE_SUSPENDED);
599 else
600 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
601 }
602
603 /* Called from main context */
604 int pa_source_sync_suspend(pa_source *s) {
605 pa_sink_state_t state;
606
607 pa_source_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SOURCE_IS_LINKED(s->state));
610 pa_assert(s->monitor_of);
611
612 state = pa_sink_get_state(s->monitor_of);
613
614 if (state == PA_SINK_SUSPENDED)
615 return source_set_state(s, PA_SOURCE_SUSPENDED);
616
617 pa_assert(PA_SINK_IS_OPENED(state));
618
619 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
620 }
621
/* Called from main context */
/* Begin moving all outputs away from this source. Every output whose
 * move starts successfully is pushed (with a reference held) onto 'q',
 * which is allocated here if the caller passed NULL. The queue is later
 * consumed by pa_source_move_all_finish() or pa_source_move_all_fail(). */
pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
    pa_source_output *o, *n;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next output BEFORE starting the move: a successful
     * pa_source_output_start_move() detaches 'o' from s->outputs, which
     * would invalidate plain iteration. */
    for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
        n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));

        pa_source_output_ref(o);

        if (pa_source_output_start_move(o) >= 0)
            pa_queue_push(q, o);
        else
            pa_source_output_unref(o);
    }

    return q;
}
647
648 /* Called from main context */
649 void pa_source_move_all_finish(pa_source *s, pa_queue *q, pa_bool_t save) {
650 pa_source_output *o;
651
652 pa_source_assert_ref(s);
653 pa_assert_ctl_context();
654 pa_assert(PA_SOURCE_IS_LINKED(s->state));
655 pa_assert(q);
656
657 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
658 if (pa_source_output_finish_move(o, s, save) < 0)
659 pa_source_output_fail_move(o);
660
661 pa_source_output_unref(o);
662 }
663
664 pa_queue_free(q, NULL, NULL);
665 }
666
667 /* Called from main context */
668 void pa_source_move_all_fail(pa_queue *q) {
669 pa_source_output *o;
670
671 pa_assert_ctl_context();
672 pa_assert(q);
673
674 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
675 pa_source_output_fail_move(o);
676 pa_source_output_unref(o);
677 }
678
679 pa_queue_free(q, NULL, NULL);
680 }
681
682 /* Called from IO thread context */
683 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
684 pa_source_output *o;
685 void *state = NULL;
686
687 pa_source_assert_ref(s);
688 pa_source_assert_io_context(s);
689 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
690
691 if (nbytes <= 0)
692 return;
693
694 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
695 return;
696
697 pa_log_debug("Processing rewind...");
698
699 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
700 pa_source_output_assert_ref(o);
701 pa_source_output_process_rewind(o, nbytes);
702 }
703 }
704
/* Called from IO thread context */
/* Distribute a freshly captured chunk to every attached output that is
 * not directly wired to a specific input (direct-on-input outputs are
 * fed via pa_source_post_direct() instead).
 *
 * When soft mute or a non-flat soft volume applies, the adjustment is
 * performed once on a writable copy of the chunk that is then shared by
 * all outputs, instead of per output. */
void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
    pa_assert(chunk);

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return;

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
        /* Copy-on-write: take our own reference and make the block
         * writable before touching the samples */
        pa_memchunk vchunk = *chunk;

        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
        else
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, &vchunk);
        }

        pa_memblock_unref(vchunk.memblock);
    } else {

        /* No adjustment needed: push the original chunk untouched */
        while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
            pa_source_output_assert_ref(o);

            if (!o->thread_info.direct_on_input)
                pa_source_output_push(o, chunk);
        }
    }
}
747
/* Called from IO thread context */
/* Deliver a chunk to a single direct-on-input output, applying the
 * source's soft volume/mute on a writable copy first if necessary.
 * Counterpart of pa_source_post() for outputs that bypass the common
 * distribution path. */
void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
    pa_source_output_assert_ref(o);
    pa_assert(o->thread_info.direct_on_input);
    pa_assert(chunk);

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return;

    if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
        /* Copy-on-write before adjusting the samples */
        pa_memchunk vchunk = *chunk;

        pa_memblock_ref(vchunk.memblock);
        pa_memchunk_make_writable(&vchunk, 0);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
            pa_silence_memchunk(&vchunk, &s->sample_spec);
        else
            pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);

        pa_source_output_push(o, &vchunk);

        pa_memblock_unref(vchunk.memblock);
    } else
        pa_source_output_push(o, chunk);
}
777
778 /* Called from main thread */
779 pa_usec_t pa_source_get_latency(pa_source *s) {
780 pa_usec_t usec;
781
782 pa_source_assert_ref(s);
783 pa_assert_ctl_context();
784 pa_assert(PA_SOURCE_IS_LINKED(s->state));
785
786 if (s->state == PA_SOURCE_SUSPENDED)
787 return 0;
788
789 if (!(s->flags & PA_SOURCE_LATENCY))
790 return 0;
791
792 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
793
794 return usec;
795 }
796
/* Called from IO thread */
/* IO-thread variant of pa_source_get_latency(): invokes the
 * GET_LATENCY handler directly through process_msg() instead of
 * sending a message across threads.
 *
 * The returned value is supposed to be in the time domain of the sound card! */
pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SOURCE_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1; /* NOTE(review): pa_usec_t is unsigned, so this wraps to a
                    * huge value — presumably an "invalid" marker for callers;
                    * confirm before relying on it */

    return usec;
}
823
824 /* Called from the main thread (and also from the IO thread while the main
825 * thread is waiting).
826 *
827 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
828 * set. Instead, flat volume mode is detected by checking whether the root source
829 * has the flag set. */
830 pa_bool_t pa_source_flat_volume_enabled(pa_source *s) {
831 pa_source_assert_ref(s);
832
833 while (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
834 s = s->output_from_master->source;
835
836 return (s->flags & PA_SOURCE_FLAT_VOLUME);
837 }
838
839 /* Called from main context */
840 pa_bool_t pa_source_is_passthrough(pa_source *s) {
841
842 pa_source_assert_ref(s);
843
844 /* NB Currently only monitor sources support passthrough mode */
845 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
846 }
847
/* Called from main context. */
/* Recalculate a single output's reference ratio, i.e. its volume
 * relative to its source's reference volume, per channel in the
 * output's channel map. Channels are skipped both when the remapped
 * source volume is muted (division undefined/pointless) and when the
 * existing ratio already reproduces o->volume exactly, which avoids
 * accumulating rounding error from repeated divide/multiply cycles. */
static void compute_reference_ratio(pa_source_output *o) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(o);
    pa_assert(pa_source_flat_volume_enabled(o->source));

    /*
     * Calculates the reference ratio from the source's reference
     * volume. This basically calculates:
     *
     * o->reference_ratio = o->volume / o->source->reference_volume
     */

    remapped = o->source->reference_volume;
    pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);

    o->reference_ratio.channels = o->sample_spec.channels;

    for (c = 0; c < o->sample_spec.channels; c++) {

        /* We don't update when the source volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                    o->reference_ratio.values[c],
                    remapped.values[c]) == o->volume.values[c])
            continue;

        o->reference_ratio.values[c] = pa_sw_volume_divide(
                o->volume.values[c],
                remapped.values[c]);
    }
}
885
886 /* Called from main context. Only called for the root source in volume sharing
887 * cases, except for internal recursive calls. */
888 static void compute_reference_ratios(pa_source *s) {
889 uint32_t idx;
890 pa_source_output *o;
891
892 pa_source_assert_ref(s);
893 pa_assert_ctl_context();
894 pa_assert(PA_SOURCE_IS_LINKED(s->state));
895 pa_assert(pa_source_flat_volume_enabled(s));
896
897 PA_IDXSET_FOREACH(o, s->outputs, idx) {
898 compute_reference_ratio(o);
899
900 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
901 compute_reference_ratios(o->destination_source);
902 }
903 }
904
/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Recompute, for every output of 's', the real ratio (output volume
 * relative to the source's real volume) and from it the soft volume
 * actually applied to the stream's samples. */
static void compute_real_ratios(pa_source *s) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin source uses volume sharing, so this output's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result o->soft_volume must equal o->volume_factor. */
            pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
            o->soft_volume = o->volume_factor;

            compute_real_ratios(o->destination_source);

            continue;
        }

        /*
         * This basically calculates:
         *
         * o->real_ratio := o->volume / s->real_volume
         * o->soft_volume := o->real_ratio * o->volume_factor
         */

        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);

        o->real_ratio.channels = o->sample_spec.channels;
        o->soft_volume.channels = o->sample_spec.channels;

        for (c = 0; c < o->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave o->real_ratio untouched */
                o->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        o->real_ratio.values[c],
                        remapped.values[c]) != o->volume.values[c])

                o->real_ratio.values[c] = pa_sw_volume_divide(
                        o->volume.values[c],
                        remapped.values[c]);

            o->soft_volume.values[c] = pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    o->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
971
/* Remap *v from channel map 'from' to 'to', choosing the remapping
 * with the least side effects on other streams; 'template' is an
 * already-remapped candidate volume in the 'to' map. Returns v. */
static pa_cvolume *cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from source output to source volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the source to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the source. */

    /* Identical maps: nothing to remap at all */
    if (pa_channel_map_equal(from, to))
        return v;

    /* If remapping the template back to 'from' reproduces v exactly,
     * the template is a valid (and preferred) remapping of v */
    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Otherwise flatten to a uniform volume: the per-channel max of v */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
1010
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Fold every output's volume into *max_volume (per-channel maximum),
 * remapping each into 'channel_map' with minimal impact first. Outputs
 * whose destination source shares volume with us are skipped themselves
 * but recursed into, since their own outputs do count. */
static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            get_maximum_output_volume(o->destination_source, max_volume, channel_map);

            /* Ignore this output. The origin source uses volume sharing, so this
             * output's volume will be set to be equal to the root source's real
             * volume. Obviously this outputs's current volume must not then
             * affect what the root source's real volume will be. */
            continue;
        }

        remapped = o->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
    }
}
1040
1041 /* Called from main thread. Only called for the root source in volume sharing
1042 * cases, except for internal recursive calls. */
1043 static pa_bool_t has_outputs(pa_source *s) {
1044 pa_source_output *o;
1045 uint32_t idx;
1046
1047 pa_source_assert_ref(s);
1048
1049 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1050 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1051 return TRUE;
1052 }
1053
1054 return FALSE;
1055 }
1056
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls.
 *
 * Copies *new_volume (expressed in *channel_map) into s->real_volume and
 * pushes it down to every volume-sharing filter source attached to s,
 * updating the affected source outputs' volumes and notifying listeners. */
static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    /* Store the new volume remapped into this source's own channel map. */
    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_source_flat_volume_enabled(s)) {
                pa_cvolume old_volume = o->volume;

                /* Follow the root source's real volume. */
                o->volume = *new_volume;
                pa_cvolume_remap(&o->volume, channel_map, &o->channel_map);
                compute_reference_ratio(o);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &o->volume)) {
                    if (o->volume_changed)
                        o->volume_changed(o);

                    pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
                }
            }

            /* Recurse so the filter source also adopts the root's real
             * volume. */
            update_real_volume(o->destination_source, new_volume, channel_map);
        }
    }
}
1093
/* Called from main thread. Only called for the root source in shared volume
 * cases.
 *
 * Determines the per-channel maximum volume of all attached streams, stores
 * it (propagated down the sharing tree) as s->real_volume, and refreshes the
 * outputs' real ratios / soft volumes accordingly. */
static void compute_real_volume(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_outputs(s)) {
        /* In the special case that we have no source outputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    /* Start from silence; the maximum of the stream volumes gets merged
     * into this accumulator below. */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all outputs
     * connected to this source */
    get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all outputs
     * connected to this source */
    compute_real_ratios(s);
}
1124
/* Called from main thread. Only called for the root source in shared volume
 * cases, except for internal recursive calls.
 *
 * Pushes the source's reference volume down to the connected streams so that
 * each stream's volume becomes reference_volume * its reference ratio. */
static void propagate_reference_volume(pa_source *s) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    /* This is called whenever the source volume changes that is not
     * caused by a source output volume change. We need to fix up the
     * source output volumes accordingly */

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume old_volume;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(o->destination_source);

            /* Since the origin source uses volume sharing, this output's volume
             * needs to be updated to match the root source's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = o->volume;

        /* This basically calculates:
         *
         * o->volume := o->reference_volume * o->reference_ratio */

        o->volume = s->reference_volume;
        pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
        pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &o->volume)) {

            if (o->volume_changed)
                o->volume_changed(o);

            pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
        }
    }
}
1172
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
static pa_bool_t update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
    pa_cvolume volume;
    pa_bool_t reference_volume_changed;
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    /* Remap the new volume into this source's own channel map before
     * comparing and storing it. */
    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* Keep the saved flag if nothing changed; an explicit save request
     * always wins. */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    /* NOTE: the "return FALSE" below belongs to the else-if branch — on the
     * root source an unchanged reference volume short-circuits the whole
     * recursion. */
    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        /* If the root source's volume doesn't change, then there can't be any
         * changes in the other source in the source tree either.
         *
         * It's probably theoretically possible that even if the root source's
         * volume changes slightly, some filter source doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root source volume to the sources connected to the
         * intermediate source that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here FALSE always if
         * reference_volume_changed is FALSE. */
        return FALSE;

    /* Recurse into every volume-sharing filter source attached to us. */
    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(o->destination_source, v, channel_map, FALSE);
    }

    return TRUE;
}
1220
/* Called from main thread.
 *
 * Sets the volume of the source. If "volume" is NULL (only valid in flat
 * volume mode) the source's real and reference volumes are instead
 * re-synchronized from the connected streams' volumes. In volume sharing
 * setups the operation is always applied to the root source of the filter
 * chain. "send_msg" controls whether the IO thread is informed; "save"
 * requests persisting the new value. */
void pa_source_set_volume(
        pa_source *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume new_reference_volume;
    pa_source *root_source = s;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_source_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH output is connected */
    if (pa_source_is_passthrough(s)) {
        /* FIXME: Need to notify client that volume control is disabled */
        pa_log_warn("Cannot change volume, Source is monitor of a PASSTHROUGH sink");
        return;
    }

    /* In case of volume sharing, the volume is set for the root source first,
     * from which it's then propagated to the sharing sources. */
    while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
        root_source = root_source->output_from_master->source;

    /* As a special exception we accept mono volumes on all sources --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current reference volume so its maximum
             * matches the requested level while keeping the balance. */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
    }

    /* If volume is NULL we synchronize the source's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    if (volume) {
        if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
            if (pa_source_flat_volume_enabled(root_source)) {
                /* OK, propagate this volume change back to the outputs */
                propagate_reference_volume(root_source);

                /* And now recalculate the real volume */
                compute_real_volume(root_source);
            } else
                update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
        }

    } else {
        pa_assert(pa_source_flat_volume_enabled(root_source));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_source);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_source->real_volume);
        update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_source);
    }

    if (root_source->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_source->soft_volume */

        pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
        if (!(root_source->flags & PA_SOURCE_SYNC_VOLUME))
            root_source->set_volume(root_source);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_source->soft_volume = root_source->real_volume;

    /* This tells the source that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
1313
1314 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1315 * Only to be called by source implementor */
1316 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1317
1318 pa_source_assert_ref(s);
1319 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1320
1321 if (s->flags & PA_SOURCE_SYNC_VOLUME)
1322 pa_source_assert_io_context(s);
1323 else
1324 pa_assert_ctl_context();
1325
1326 if (!volume)
1327 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1328 else
1329 s->soft_volume = *volume;
1330
1331 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_SYNC_VOLUME))
1332 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1333 else
1334 s->thread_info.soft_volume = s->soft_volume;
1335 }
1336
/* Called from the main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * o->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        /* Nothing to do if the hardware volume didn't actually change. */
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
    }

    if (pa_source_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(o, s->outputs, idx) {
            pa_cvolume old_volume = o->volume;

            /* 2. Since the source's reference and real volumes are equal
             * now our ratios should be too. */
            o->reference_ratio = o->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the source's reference volume.
             *
             * This basically calculates:
             *
             * o->volume = s->reference_volume * o->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            o->volume = s->reference_volume;
            pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
            pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &o->volume)) {

                if (o->volume_changed)
                    o->volume_changed(o);

                pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
            }

            /* Recurse into volume-sharing filter sources as well. */
            if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(o->destination_source, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = TRUE;
}
1402
/* Called from io thread.
 *
 * Asks the main thread to re-read the hardware volume and mute state; the
 * posted message is handled in pa_source_process_msg() in the control
 * context. */
void pa_source_update_volume_and_mute(pa_source *s) {
    pa_assert(s);
    pa_source_assert_io_context(s);

    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
}
1410
1411 /* Called from main thread */
1412 const pa_cvolume *pa_source_get_volume(pa_source *s, pa_bool_t force_refresh) {
1413 pa_source_assert_ref(s);
1414 pa_assert_ctl_context();
1415 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1416
1417 if (s->refresh_volume || force_refresh) {
1418 struct pa_cvolume old_real_volume;
1419
1420 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1421
1422 old_real_volume = s->real_volume;
1423
1424 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_volume)
1425 s->get_volume(s);
1426
1427 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1428
1429 update_real_volume(s, &s->real_volume, &s->channel_map);
1430 propagate_real_volume(s, &old_real_volume);
1431 }
1432
1433 return &s->reference_volume;
1434 }
1435
1436 /* Called from main thread. In volume sharing cases, only the root source may
1437 * call this. */
1438 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1439 pa_cvolume old_real_volume;
1440
1441 pa_source_assert_ref(s);
1442 pa_assert_ctl_context();
1443 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1444 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1445
1446 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1447
1448 old_real_volume = s->real_volume;
1449 update_real_volume(s, new_real_volume, &s->channel_map);
1450 propagate_real_volume(s, &old_real_volume);
1451 }
1452
1453 /* Called from main thread */
1454 void pa_source_set_mute(pa_source *s, pa_bool_t mute, pa_bool_t save) {
1455 pa_bool_t old_muted;
1456
1457 pa_source_assert_ref(s);
1458 pa_assert_ctl_context();
1459 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1460
1461 old_muted = s->muted;
1462 s->muted = mute;
1463 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1464
1465 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->set_mute)
1466 s->set_mute(s);
1467
1468 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1469
1470 if (old_muted != s->muted)
1471 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1472 }
1473
1474 /* Called from main thread */
1475 pa_bool_t pa_source_get_mute(pa_source *s, pa_bool_t force_refresh) {
1476
1477 pa_source_assert_ref(s);
1478 pa_assert_ctl_context();
1479 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1480
1481 if (s->refresh_muted || force_refresh) {
1482 pa_bool_t old_muted = s->muted;
1483
1484 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_mute)
1485 s->get_mute(s);
1486
1487 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1488
1489 if (old_muted != s->muted) {
1490 s->save_muted = TRUE;
1491
1492 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1493
1494 /* Make sure the soft mute status stays in sync */
1495 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1496 }
1497 }
1498
1499 return s->muted;
1500 }
1501
1502 /* Called from main thread */
1503 void pa_source_mute_changed(pa_source *s, pa_bool_t new_muted) {
1504 pa_source_assert_ref(s);
1505 pa_assert_ctl_context();
1506 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1507
1508 /* The source implementor may call this if the mute state changed to make sure everyone is notified */
1509
1510 if (s->muted == new_muted)
1511 return;
1512
1513 s->muted = new_muted;
1514 s->save_muted = TRUE;
1515
1516 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1517 }
1518
1519 /* Called from main thread */
1520 pa_bool_t pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1521 pa_source_assert_ref(s);
1522 pa_assert_ctl_context();
1523
1524 if (p)
1525 pa_proplist_update(s->proplist, mode, p);
1526
1527 if (PA_SOURCE_IS_LINKED(s->state)) {
1528 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1529 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1530 }
1531
1532 return TRUE;
1533 }
1534
1535 /* Called from main thread */
1536 /* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
1537 void pa_source_set_description(pa_source *s, const char *description) {
1538 const char *old;
1539 pa_source_assert_ref(s);
1540 pa_assert_ctl_context();
1541
1542 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1543 return;
1544
1545 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1546
1547 if (old && description && pa_streq(old, description))
1548 return;
1549
1550 if (description)
1551 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1552 else
1553 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1554
1555 if (PA_SOURCE_IS_LINKED(s->state)) {
1556 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1557 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1558 }
1559 }
1560
/* Called from main thread.
 *
 * Returns the total number of source outputs connected to this source,
 * including corked ones. */
unsigned pa_source_linked_by(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    return pa_idxset_size(s->outputs);
}
1569
1570 /* Called from main thread */
1571 unsigned pa_source_used_by(pa_source *s) {
1572 unsigned ret;
1573
1574 pa_source_assert_ref(s);
1575 pa_assert_ctl_context();
1576 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1577
1578 ret = pa_idxset_size(s->outputs);
1579 pa_assert(ret >= s->n_corked);
1580
1581 return ret - s->n_corked;
1582 }
1583
1584 /* Called from main thread */
1585 unsigned pa_source_check_suspend(pa_source *s) {
1586 unsigned ret;
1587 pa_source_output *o;
1588 uint32_t idx;
1589
1590 pa_source_assert_ref(s);
1591 pa_assert_ctl_context();
1592
1593 if (!PA_SOURCE_IS_LINKED(s->state))
1594 return 0;
1595
1596 ret = 0;
1597
1598 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1599 pa_source_output_state_t st;
1600
1601 st = pa_source_output_get_state(o);
1602
1603 /* We do not assert here. It is perfectly valid for a source output to
1604 * be in the INIT state (i.e. created, marked done but not yet put)
1605 * and we should not care if it's unlinked as it won't contribute
1606 * towarards our busy status.
1607 */
1608 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1609 continue;
1610
1611 if (st == PA_SOURCE_OUTPUT_CORKED)
1612 continue;
1613
1614 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1615 continue;
1616
1617 ret ++;
1618 }
1619
1620 return ret;
1621 }
1622
1623 /* Called from the IO thread */
1624 static void sync_output_volumes_within_thread(pa_source *s) {
1625 pa_source_output *o;
1626 void *state = NULL;
1627
1628 pa_source_assert_ref(s);
1629 pa_source_assert_io_context(s);
1630
1631 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1632 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
1633 continue;
1634
1635 o->thread_info.soft_volume = o->soft_volume;
1636 //pa_source_output_request_rewind(o, 0, TRUE, FALSE, FALSE);
1637 }
1638 }
1639
1640 /* Called from the IO thread. Only called for the root source in volume sharing
1641 * cases, except for internal recursive calls. */
1642 static void set_shared_volume_within_thread(pa_source *s) {
1643 pa_source_output *o;
1644 void *state = NULL;
1645
1646 pa_source_assert_ref(s);
1647
1648 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1649
1650 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1651 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1652 set_shared_volume_within_thread(o->destination_source);
1653 }
1654 }
1655
/* Called from IO thread, except when it is not.
 *
 * Default message handler for sources. Source implementors chain up to this
 * from their own process_msg(). Returns 0 on success, -1 for unknown codes
 * or for GET_LATENCY when no implementation-specific handler exists. */
int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_source *s = PA_SOURCE(object);
    pa_source_assert_ref(s);

    switch ((pa_source_message_t) code) {

        case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            /* Take a reference for the thread-side hashmap; it is dropped
             * again in REMOVE_OUTPUT. */
            pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));

            /* Since the caller sleeps in pa_source_output_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Mirror the main-thread sync_prev/sync_next links into
             * thread_info. */
            if ((o->thread_info.sync_prev = o->sync_prev)) {
                pa_assert(o->source == o->thread_info.sync_prev->source);
                pa_assert(o->sync_prev->sync_next == o);
                o->thread_info.sync_prev->thread_info.sync_next = o;
            }

            if ((o->thread_info.sync_next = o->sync_next)) {
                pa_assert(o->source == o->thread_info.sync_next->source);
                pa_assert(o->sync_next->sync_prev == o);
                o->thread_info.sync_next->thread_info.sync_prev = o;
            }

            if (o->direct_on_input) {
                o->thread_info.direct_on_input = o->direct_on_input;
                pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
            }

            pa_assert(!o->thread_info.attached);
            o->thread_info.attached = TRUE;

            if (o->attach)
                o->attach(o);

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
                pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);

            /* We don't just invalidate the requested latency here,
             * because if we are in a move we might need to fix up the
             * requested latency. */
            pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->detach)
                o->detach(o);

            pa_assert(o->thread_info.attached);
            o->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_source_output_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!o->sync_prev);
            pa_assert(!o->sync_next);

            /* Unhook this output from any sync chain it was part of. */
            if (o->thread_info.sync_prev) {
                o->thread_info.sync_prev->thread_info.sync_next = o->thread_info.sync_prev->sync_next;
                o->thread_info.sync_prev = NULL;
            }

            if (o->thread_info.sync_next) {
                o->thread_info.sync_next->thread_info.sync_prev = o->thread_info.sync_next->sync_prev;
                o->thread_info.sync_next = NULL;
            }

            if (o->thread_info.direct_on_input) {
                pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
                o->thread_info.direct_on_input = NULL;
            }

            /* Drop the reference taken in ADD_OUTPUT. */
            if (pa_hashmap_remove(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index)))
                pa_source_output_unref(o);

            pa_source_invalidate_requested_latency(s, TRUE);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
            pa_source *root_source = s;

            /* In volume sharing setups the volume is always managed on the
             * root of the filter chain. */
            while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
                root_source = root_source->output_from_master->source;

            set_shared_volume_within_thread(root_source);
            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:

            /* Sync-volume mode: apply the hw volume from the IO thread and
             * queue the deferred hardware write. */
            if (s->flags & PA_SOURCE_SYNC_VOLUME) {
                s->set_volume(s);
                pa_source_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
            sync_output_volumes_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_GET_VOLUME:

            /* Sync-volume mode: query the hw volume here in the IO thread
             * and derive the current hw volume from real/soft volume. */
            if ((s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_source_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case source implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            return 0;

        case PA_SOURCE_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
            }

            if (s->flags & PA_SOURCE_SYNC_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SOURCE_MESSAGE_GET_MUTE:

            if (s->flags & PA_SOURCE_SYNC_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SOURCE_MESSAGE_SET_STATE: {

            /* A suspend state transition in either direction? */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* Let interested outputs know about the suspend transition. */
            if (suspend_change) {
                pa_source_output *o;
                void *state = NULL;

                while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
                    if (o->suspend_within_thread)
                        o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
            }

            return 0;
        }

        case PA_SOURCE_MESSAGE_DETACH:

            /* Detach all streams */
            pa_source_detach_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_source_attach_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_source_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            /* r[0] = min latency, r[1] = max latency */
            pa_source_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:

            pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SOURCE_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SOURCE_MESSAGE_SET_MAX_REWIND:

            pa_source_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SOURCE_MESSAGE_GET_LATENCY:

            /* Monitor sources have no latency of their own. */
            if (s->monitor_of) {
                *((pa_usec_t*) userdata) = 0;
                return 0;
            }

            /* Implementors need to overwrite this implementation! */
            return -1;

        case PA_SOURCE_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct source_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            pa_source_get_volume(s, TRUE);
            pa_source_get_mute(s, TRUE);
            return 0;

        case PA_SOURCE_MESSAGE_MAX:
            ;
    }

    return -1;
}
1934
1935 /* Called from main thread */
1936 int pa_source_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1937 pa_source *source;
1938 uint32_t idx;
1939 int ret = 0;
1940
1941 pa_core_assert_ref(c);
1942 pa_assert_ctl_context();
1943 pa_assert(cause != 0);
1944
1945 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
1946 int r;
1947
1948 if (source->monitor_of)
1949 continue;
1950
1951 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
1952 ret = r;
1953 }
1954
1955 return ret;
1956 }
1957
/* Called from main thread.
 *
 * Synchronously asks the IO thread to detach all streams from this source. */
void pa_source_detach(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
1966
/* Called from main thread */
/* Synchronously sends PA_SOURCE_MESSAGE_ATTACH to the source's IO thread;
 * counterpart of pa_source_detach(). Only valid while the source is linked. */
void pa_source_attach(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
1975
1976 /* Called from IO thread */
1977 void pa_source_detach_within_thread(pa_source *s) {
1978 pa_source_output *o;
1979 void *state = NULL;
1980
1981 pa_source_assert_ref(s);
1982 pa_source_assert_io_context(s);
1983 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1984
1985 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
1986 if (o->detach)
1987 o->detach(o);
1988 }
1989
1990 /* Called from IO thread */
1991 void pa_source_attach_within_thread(pa_source *s) {
1992 pa_source_output *o;
1993 void *state = NULL;
1994
1995 pa_source_assert_ref(s);
1996 pa_source_assert_io_context(s);
1997 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1998
1999 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2000 if (o->attach)
2001 o->attach(o);
2002 }
2003
2004 /* Called from IO thread */
2005 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2006 pa_usec_t result = (pa_usec_t) -1;
2007 pa_source_output *o;
2008 void *state = NULL;
2009
2010 pa_source_assert_ref(s);
2011 pa_source_assert_io_context(s);
2012
2013 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2014 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2015
2016 if (s->thread_info.requested_latency_valid)
2017 return s->thread_info.requested_latency;
2018
2019 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2020 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2021 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2022 result = o->thread_info.requested_source_latency;
2023
2024 if (result != (pa_usec_t) -1)
2025 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2026
2027 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2028 /* Only cache this if we are fully set up */
2029 s->thread_info.requested_latency = result;
2030 s->thread_info.requested_latency_valid = TRUE;
2031 }
2032
2033 return result;
2034 }
2035
2036 /* Called from main thread */
2037 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2038 pa_usec_t usec = 0;
2039
2040 pa_source_assert_ref(s);
2041 pa_assert_ctl_context();
2042 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2043
2044 if (s->state == PA_SOURCE_SUSPENDED)
2045 return 0;
2046
2047 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2048
2049 return usec;
2050 }
2051
2052 /* Called from IO thread */
2053 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2054 pa_source_output *o;
2055 void *state = NULL;
2056
2057 pa_source_assert_ref(s);
2058 pa_source_assert_io_context(s);
2059
2060 if (max_rewind == s->thread_info.max_rewind)
2061 return;
2062
2063 s->thread_info.max_rewind = max_rewind;
2064
2065 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2066 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2067 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2068 }
2069
2070 /* Called from main thread */
2071 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2072 pa_source_assert_ref(s);
2073 pa_assert_ctl_context();
2074
2075 if (PA_SOURCE_IS_LINKED(s->state))
2076 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2077 else
2078 pa_source_set_max_rewind_within_thread(s, max_rewind);
2079 }
2080
2081 /* Called from IO thread */
2082 void pa_source_invalidate_requested_latency(pa_source *s, pa_bool_t dynamic) {
2083 pa_source_output *o;
2084 void *state = NULL;
2085
2086 pa_source_assert_ref(s);
2087 pa_source_assert_io_context(s);
2088
2089 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2090 s->thread_info.requested_latency_valid = FALSE;
2091 else if (dynamic)
2092 return;
2093
2094 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2095
2096 if (s->update_requested_latency)
2097 s->update_requested_latency(s);
2098
2099 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2100 if (o->update_source_requested_latency)
2101 o->update_source_requested_latency(o);
2102 }
2103
2104 if (s->monitor_of)
2105 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2106 }
2107
2108 /* Called from main thread */
2109 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2110 pa_source_assert_ref(s);
2111 pa_assert_ctl_context();
2112
2113 /* min_latency == 0: no limit
2114 * min_latency anything else: specified limit
2115 *
2116 * Similar for max_latency */
2117
2118 if (min_latency < ABSOLUTE_MIN_LATENCY)
2119 min_latency = ABSOLUTE_MIN_LATENCY;
2120
2121 if (max_latency <= 0 ||
2122 max_latency > ABSOLUTE_MAX_LATENCY)
2123 max_latency = ABSOLUTE_MAX_LATENCY;
2124
2125 pa_assert(min_latency <= max_latency);
2126
2127 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2128 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2129 max_latency == ABSOLUTE_MAX_LATENCY) ||
2130 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2131
2132 if (PA_SOURCE_IS_LINKED(s->state)) {
2133 pa_usec_t r[2];
2134
2135 r[0] = min_latency;
2136 r[1] = max_latency;
2137
2138 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2139 } else
2140 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
2141 }
2142
2143 /* Called from main thread */
2144 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2145 pa_source_assert_ref(s);
2146 pa_assert_ctl_context();
2147 pa_assert(min_latency);
2148 pa_assert(max_latency);
2149
2150 if (PA_SOURCE_IS_LINKED(s->state)) {
2151 pa_usec_t r[2] = { 0, 0 };
2152
2153 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2154
2155 *min_latency = r[0];
2156 *max_latency = r[1];
2157 } else {
2158 *min_latency = s->thread_info.min_latency;
2159 *max_latency = s->thread_info.max_latency;
2160 }
2161 }
2162
2163 /* Called from IO thread, and from main thread before pa_source_put() is called */
2164 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2165 pa_source_assert_ref(s);
2166 pa_source_assert_io_context(s);
2167
2168 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2169 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2170 pa_assert(min_latency <= max_latency);
2171
2172 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2173 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2174 max_latency == ABSOLUTE_MAX_LATENCY) ||
2175 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2176 s->monitor_of);
2177
2178 if (s->thread_info.min_latency == min_latency &&
2179 s->thread_info.max_latency == max_latency)
2180 return;
2181
2182 s->thread_info.min_latency = min_latency;
2183 s->thread_info.max_latency = max_latency;
2184
2185 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2186 pa_source_output *o;
2187 void *state = NULL;
2188
2189 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2190 if (o->update_source_latency_range)
2191 o->update_source_latency_range(o);
2192 }
2193
2194 pa_source_invalidate_requested_latency(s, FALSE);
2195 }
2196
2197 /* Called from main thread, before the source is put */
2198 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2199 pa_source_assert_ref(s);
2200 pa_assert_ctl_context();
2201
2202 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2203 pa_assert(latency == 0);
2204 return;
2205 }
2206
2207 if (latency < ABSOLUTE_MIN_LATENCY)
2208 latency = ABSOLUTE_MIN_LATENCY;
2209
2210 if (latency > ABSOLUTE_MAX_LATENCY)
2211 latency = ABSOLUTE_MAX_LATENCY;
2212
2213 if (PA_SOURCE_IS_LINKED(s->state))
2214 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2215 else
2216 s->thread_info.fixed_latency = latency;
2217 }
2218
2219 /* Called from main thread */
2220 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2221 pa_usec_t latency;
2222
2223 pa_source_assert_ref(s);
2224 pa_assert_ctl_context();
2225
2226 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2227 return 0;
2228
2229 if (PA_SOURCE_IS_LINKED(s->state))
2230 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2231 else
2232 latency = s->thread_info.fixed_latency;
2233
2234 return latency;
2235 }
2236
2237 /* Called from IO thread */
2238 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2239 pa_source_assert_ref(s);
2240 pa_source_assert_io_context(s);
2241
2242 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2243 pa_assert(latency == 0);
2244 return;
2245 }
2246
2247 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2248 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2249
2250 if (s->thread_info.fixed_latency == latency)
2251 return;
2252
2253 s->thread_info.fixed_latency = latency;
2254
2255 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2256 pa_source_output *o;
2257 void *state = NULL;
2258
2259 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2260 if (o->update_source_fixed_latency)
2261 o->update_source_fixed_latency(o);
2262 }
2263
2264 pa_source_invalidate_requested_latency(s, FALSE);
2265 }
2266
2267 /* Called from main thread */
2268 size_t pa_source_get_max_rewind(pa_source *s) {
2269 size_t r;
2270 pa_assert_ctl_context();
2271 pa_source_assert_ref(s);
2272
2273 if (!PA_SOURCE_IS_LINKED(s->state))
2274 return s->thread_info.max_rewind;
2275
2276 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2277
2278 return r;
2279 }
2280
2281 /* Called from main context */
2282 int pa_source_set_port(pa_source *s, const char *name, pa_bool_t save) {
2283 pa_device_port *port;
2284 int ret;
2285
2286 pa_source_assert_ref(s);
2287 pa_assert_ctl_context();
2288
2289 if (!s->set_port) {
2290 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2291 return -PA_ERR_NOTIMPLEMENTED;
2292 }
2293
2294 if (!s->ports)
2295 return -PA_ERR_NOENTITY;
2296
2297 if (!(port = pa_hashmap_get(s->ports, name)))
2298 return -PA_ERR_NOENTITY;
2299
2300 if (s->active_port == port) {
2301 s->save_port = s->save_port || save;
2302 return 0;
2303 }
2304
2305 if (s->flags & PA_SOURCE_SYNC_VOLUME) {
2306 struct source_message_set_port msg = { .port = port, .ret = 0 };
2307 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2308 ret = msg.ret;
2309 }
2310 else
2311 ret = s->set_port(s, port);
2312
2313 if (ret < 0)
2314 return -PA_ERR_NOENTITY;
2315
2316 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2317
2318 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2319
2320 s->active_port = port;
2321 s->save_port = save;
2322
2323 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
2324
2325 return 0;
2326 }
2327
2328 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2329
2330 /* Called from the IO thread. */
2331 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2332 pa_source_volume_change *c;
2333 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2334 c = pa_xnew(pa_source_volume_change, 1);
2335
2336 PA_LLIST_INIT(pa_source_volume_change, c);
2337 c->at = 0;
2338 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2339 return c;
2340 }
2341
/* Called from the IO thread. */
/* Returns a volume-change entry to the static free list, freeing it
 * outright when the free list has no slot left. */
static void pa_source_volume_change_free(pa_source_volume_change *c) {
    pa_assert(c);
    /* pa_flist_push() returns < 0 when the free list is full */
    if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
        pa_xfree(c);
}
2348
2349 /* Called from the IO thread. */
2350 void pa_source_volume_change_push(pa_source *s) {
2351 pa_source_volume_change *c = NULL;
2352 pa_source_volume_change *nc = NULL;
2353 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2354
2355 const char *direction = NULL;
2356
2357 pa_assert(s);
2358 nc = pa_source_volume_change_new(s);
2359
2360 /* NOTE: There is already more different volumes in pa_source that I can remember.
2361 * Adding one more volume for HW would get us rid of this, but I am trying
2362 * to survive with the ones we already have. */
2363 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2364
2365 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2366 pa_log_debug("Volume not changing");
2367 pa_source_volume_change_free(nc);
2368 return;
2369 }
2370
2371 nc->at = pa_source_get_latency_within_thread(s);
2372 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2373
2374 if (s->thread_info.volume_changes_tail) {
2375 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2376 /* If volume is going up let's do it a bit late. If it is going
2377 * down let's do it a bit early. */
2378 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2379 if (nc->at + safety_margin > c->at) {
2380 nc->at += safety_margin;
2381 direction = "up";
2382 break;
2383 }
2384 }
2385 else if (nc->at - safety_margin > c->at) {
2386 nc->at -= safety_margin;
2387 direction = "down";
2388 break;
2389 }
2390 }
2391 }
2392
2393 if (c == NULL) {
2394 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2395 nc->at += safety_margin;
2396 direction = "up";
2397 } else {
2398 nc->at -= safety_margin;
2399 direction = "down";
2400 }
2401 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2402 }
2403 else {
2404 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2405 }
2406
2407 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2408
2409 /* We can ignore volume events that came earlier but should happen later than this. */
2410 PA_LLIST_FOREACH(c, nc->next) {
2411 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2412 pa_source_volume_change_free(c);
2413 }
2414 nc->next = NULL;
2415 s->thread_info.volume_changes_tail = nc;
2416 }
2417
2418 /* Called from the IO thread. */
2419 static void pa_source_volume_change_flush(pa_source *s) {
2420 pa_source_volume_change *c = s->thread_info.volume_changes;
2421 pa_assert(s);
2422 s->thread_info.volume_changes = NULL;
2423 s->thread_info.volume_changes_tail = NULL;
2424 while (c) {
2425 pa_source_volume_change *next = c->next;
2426 pa_source_volume_change_free(c);
2427 c = next;
2428 }
2429 }
2430
2431 /* Called from the IO thread. */
2432 pa_bool_t pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2433 pa_usec_t now = pa_rtclock_now();
2434 pa_bool_t ret = FALSE;
2435
2436 pa_assert(s);
2437 pa_assert(s->write_volume);
2438
2439 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2440 pa_source_volume_change *c = s->thread_info.volume_changes;
2441 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2442 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2443 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2444 ret = TRUE;
2445 s->thread_info.current_hw_volume = c->hw_volume;
2446 pa_source_volume_change_free(c);
2447 }
2448
2449 if (s->write_volume && ret)
2450 s->write_volume(s);
2451
2452 if (s->thread_info.volume_changes) {
2453 if (usec_to_next)
2454 *usec_to_next = s->thread_info.volume_changes->at - now;
2455 if (pa_log_ratelimit(PA_LOG_DEBUG))
2456 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2457 }
2458 else {
2459 if (usec_to_next)
2460 *usec_to_next = 0;
2461 s->thread_info.volume_changes_tail = NULL;
2462 }
2463 return ret;
2464 }
2465
2466
2467 /* Called from the main thread */
2468 /* Gets the list of formats supported by the source. The members and idxset must
2469 * be freed by the caller. */
2470 pa_idxset* pa_source_get_formats(pa_source *s) {
2471 pa_idxset *ret;
2472
2473 pa_assert(s);
2474
2475 if (s->get_formats) {
2476 /* Source supports format query, all is good */
2477 ret = s->get_formats(s);
2478 } else {
2479 /* Source doesn't support format query, so assume it does PCM */
2480 pa_format_info *f = pa_format_info_new();
2481 f->encoding = PA_ENCODING_PCM;
2482
2483 ret = pa_idxset_new(NULL, NULL);
2484 pa_idxset_put(ret, f, NULL);
2485 }
2486
2487 return ret;
2488 }
2489
2490 /* Called from the main thread */
2491 /* Checks if the source can accept this format */
2492 pa_bool_t pa_source_check_format(pa_source *s, pa_format_info *f)
2493 {
2494 pa_idxset *formats = NULL;
2495 pa_bool_t ret = FALSE;
2496
2497 pa_assert(s);
2498 pa_assert(f);
2499
2500 formats = pa_source_get_formats(s);
2501
2502 if (formats) {
2503 pa_format_info *finfo_device;
2504 uint32_t i;
2505
2506 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2507 if (pa_format_info_is_compatible(finfo_device, f)) {
2508 ret = TRUE;
2509 break;
2510 }
2511 }
2512
2513 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2514 }
2515
2516 return ret;
2517 }
2518
2519 /* Called from the main thread */
2520 /* Calculates the intersection between formats supported by the source and
2521 * in_formats, and returns these, in the order of the source's formats. */
2522 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2523 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2524 pa_format_info *f_source, *f_in;
2525 uint32_t i, j;
2526
2527 pa_assert(s);
2528
2529 if (!in_formats || pa_idxset_isempty(in_formats))
2530 goto done;
2531
2532 source_formats = pa_source_get_formats(s);
2533
2534 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2535 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2536 if (pa_format_info_is_compatible(f_source, f_in))
2537 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2538 }
2539 }
2540
2541 done:
2542 if (source_formats)
2543 pa_idxset_free(source_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2544
2545 return out_formats;
2546 }