/* Source: pulseaudio / src / pulsecore / sink.c
 * (blob f7e4b6a273b8ca8399b803e082bee55d432f5a0e, retrieved via code.delx.au gitweb;
 * navigation header converted to a comment so the file remains compilable) */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
39
40 #include <pulsecore/sink-input.h>
41 #include <pulsecore/namereg.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/core-subscribe.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/play-memblockq.h>
48 #include <pulsecore/flist.h>
49
50 #include "sink.h"
51
52 #define MAX_MIX_CHANNELS 32
53 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
54 #define ABSOLUTE_MIN_LATENCY (500)
55 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
56 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
57
58 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
59
/* One queued hardware volume change, to be applied at time 'at' (used by
 * the timed/sync volume machinery, see pa_sink_volume_change_push/flush/
 * rewind below). Kept in a linked list per sink. */
struct pa_sink_volume_change {
    pa_usec_t at;         /* when this change should take effect */
    pa_cvolume hw_volume; /* the hardware volume to apply at that time */

    PA_LLIST_FIELDS(pa_sink_volume_change);
};
66
/* Message payload for a set-port request: 'port' is the port to activate,
 * 'ret' carries the result back to the sender. NOTE(review): presumably
 * used with a SET_PORT sink message handled elsewhere — confirm against
 * pa_sink_set_port(). */
struct sink_message_set_port {
    pa_device_port *port;
    int ret;
};
71
72 static void sink_free(pa_object *s);
73
74 static void pa_sink_volume_change_push(pa_sink *s);
75 static void pa_sink_volume_change_flush(pa_sink *s);
76 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
77
/* Initialize a pa_sink_new_data structure: zero every field and attach a
 * fresh, empty property list. Returns 'data' for call chaining. Pair with
 * pa_sink_new_data_done(). */
pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
    pa_assert(data);

    pa_zero(*data);
    data->proplist = pa_proplist_new();

    return data;
}
86
87 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
88 pa_assert(data);
89
90 pa_xfree(data->name);
91 data->name = pa_xstrdup(name);
92 }
93
94 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
95 pa_assert(data);
96
97 if ((data->sample_spec_is_set = !!spec))
98 data->sample_spec = *spec;
99 }
100
101 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
102 pa_assert(data);
103
104 if ((data->channel_map_is_set = !!map))
105 data->channel_map = *map;
106 }
107
108 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
109 pa_assert(data);
110
111 if ((data->volume_is_set = !!volume))
112 data->volume = *volume;
113 }
114
115 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
116 pa_assert(data);
117
118 data->muted_is_set = TRUE;
119 data->muted = !!mute;
120 }
121
122 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
123 pa_assert(data);
124
125 pa_xfree(data->active_port);
126 data->active_port = pa_xstrdup(port);
127 }
128
/* Release everything owned by a pa_sink_new_data structure (but not the
 * structure itself). Safe after pa_sink_new() has stolen fields such as
 * 'ports' — stolen pointers are NULL by then. */
void pa_sink_new_data_done(pa_sink_new_data *data) {
    pa_assert(data);

    pa_proplist_free(data->proplist);

    /* The ports hashmap owns its values: free each port before the map. */
    if (data->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(data->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(data->ports, NULL, NULL);
    }

    pa_xfree(data->name);
    pa_xfree(data->active_port);
}
146
/* Allocate a device port with 'extra' bytes of trailing storage for the
 * caller (the struct size is PA_ALIGN'ed so the extra area is aligned too;
 * that area is left uninitialized). 'name' is required; 'description' is
 * copied (presumably NULL-safe via pa_xstrdup — confirm). Free with
 * pa_device_port_free(). */
pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
    pa_device_port *p;

    pa_assert(name);

    p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
    p->name = pa_xstrdup(name);
    p->description = pa_xstrdup(description);

    p->priority = 0;

    return p;
}
160
161 void pa_device_port_free(pa_device_port *p) {
162 pa_assert(p);
163
164 pa_xfree(p->name);
165 pa_xfree(p->description);
166 pa_xfree(p);
167 }
168
169 /* Called from main context */
170 static void reset_callbacks(pa_sink *s) {
171 pa_assert(s);
172
173 s->set_state = NULL;
174 s->get_volume = NULL;
175 s->set_volume = NULL;
176 s->get_mute = NULL;
177 s->set_mute = NULL;
178 s->request_rewind = NULL;
179 s->update_requested_latency = NULL;
180 s->set_port = NULL;
181 s->get_formats = NULL;
182 }
183
/* Called from main context */
/* Instantiate a new sink: validates and fixates 'data', registers the
 * sink's name, fires the NEW and FIXATE hooks, initializes all main- and
 * IO-thread fields, and creates the matching "<name>.monitor" source.
 * Returns NULL on any validation or hook failure. The caller must still
 * call pa_sink_put() to publish the sink. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the name first; the registry may hand back a modified name. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* No channel map supplied: derive a default from the channel count. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the sink volume is allowed to be set, like there is for sink inputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->input_to_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* No port explicitly requested: pick the highest-priority one. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Seed the IO-thread-side view of the sink's state. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a monitor source mirroring its playback stream. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
393
/* Called from main context */
/* Move the sink into 'state'. The implementor is notified first via
 * set_state(); then the IO thread is synchronized with a SET_STATE message
 * (on failure the implementor callback is rolled back to the original
 * state). Fires the state-changed hook/subscription event, and on a
 * suspend/resume boundary crossing informs all inputs and the monitor. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* TRUE iff this transition crosses the suspended/opened boundary. */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* IO thread rejected the change: undo the implementor side. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
451
/* Called from main context: install the callback used to read the device
 * volume into the sink. */
void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_assert(s);

    s->get_volume = cb;
}

/* Called from main context: install the callback used to apply the sink's
 * volume to the device (required when PA_SINK_HW_VOLUME_CTRL is set, see
 * the asserts in pa_sink_put()). */
void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_assert(s);

    s->set_volume = cb;
}

/* Called from main context: install the deferred volume-write callback
 * (required when PA_SINK_SYNC_VOLUME is set, see pa_sink_put()). */
void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_assert(s);

    s->write_volume = cb;
}

/* Called from main context: install the callback used to read the device
 * mute state into the sink. */
void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_assert(s);

    s->get_mute = cb;
}

/* Called from main context: install the callback used to apply the sink's
 * mute state to the device (required when PA_SINK_HW_MUTE_CTRL is set). */
void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_assert(s);

    s->set_mute = cb;
}
481
/* Called from main context */
/* Finish sink construction: fix up volume-related flags, seed the volumes
 * (inherited from the root sink when volume sharing is in use), sanity-
 * check the implementor-supplied flag/callback combinations, move the sink
 * to IDLE and publish it (subscription event + PUT hook). Must be called
 * once after pa_sink_new(), with asyncmsgq and latencies set up. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* XXX: Currently decibel volume is disabled for all sinks that use volume
     * sharing. When the master sink supports decibel volume, it would be good
     * to have the flag also in the filter sink, but currently we don't do that
     * so that the flags of the filter sink never change when it's moved from
     * a master sink to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sinks when volume
     * sharing is used, but the current approach was easier to implement... */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
        pa_sink *root_sink = s->input_to_master->sink;

        /* Walk up the chain of filter sinks to the real device sink. */
        while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
            root_sink = root_sink->input_to_master->sink;

        s->reference_volume = root_sink->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);

        s->real_volume = root_sink->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    /* Sanity-check the flag/callback combinations the implementor set up. */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
    pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || (s->flags & PA_SINK_HW_VOLUME_CTRL));
    pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
555
/* Called from main context */
/* Disconnect the sink from the core: fires the UNLINK hook, unregisters
 * the name, removes the sink from the core and card sets, kills all
 * remaining inputs, transitions to UNLINKED, clears the callbacks and
 * unlinks the monitor source. Idempotent. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every remaining input. 'j' asserts that each kill actually
     * removed the input — otherwise this loop would never terminate. */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
605
/* Called from main context */
/* Destructor, invoked when the last reference is dropped: unlinks if still
 * linked, then releases the monitor source, the input sets, the cached
 * silence block, the name/driver strings, the proplist and the ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* The IO-thread hashmap holds its own input references; drop them. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
652
/* Called from main context, and not while the IO thread is active, please */
/* Attach the asynchronous message queue used for main<->IO thread
 * communication, propagating it to the monitor source as well. */
void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    s->asyncmsgq = q;

    if (s->monitor_source)
        pa_source_set_asyncmsgq(s->monitor_source, q);
}
663
664 /* Called from main context, and not while the IO thread is active, please */
665 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
666 pa_sink_assert_ref(s);
667 pa_assert_ctl_context();
668
669 if (mask == 0)
670 return;
671
672 /* For now, allow only a minimal set of flags to be changed. */
673 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
674
675 s->flags = (s->flags & ~mask) | (value & mask);
676
677 pa_source_update_flags(s->monitor_source,
678 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
679 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
680 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
681 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
682 }
683
/* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object that drives the IO thread, propagating it to
 * the monitor source as well. */
void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    s->thread_info.rtpoll = p;

    if (s->monitor_source)
        pa_source_set_rtpoll(s->monitor_source, p);
}
694
695 /* Called from main context */
696 int pa_sink_update_status(pa_sink*s) {
697 pa_sink_assert_ref(s);
698 pa_assert_ctl_context();
699 pa_assert(PA_SINK_IS_LINKED(s->state));
700
701 if (s->state == PA_SINK_SUSPENDED)
702 return 0;
703
704 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
705 }
706
/* Called from main context */
/* Add or remove 'cause' from the sink's (and its monitor's) suspend-cause
 * bitmask and enter/leave SUSPENDED accordingly. Several causes may be
 * active at once; the sink only resumes when all of them are cleared. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Nothing to do when the current state already matches the mask. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
732
/* Called from main context */
/* Begin moving all inputs away from 's': every input that can be moved is
 * detached (pa_sink_input_start_move) and pushed, with a reference held,
 * onto queue 'q' (allocated here when NULL). Returns the queue for a later
 * pa_sink_move_all_finish() or pa_sink_move_all_fail(). */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next input before detaching the current one, since
     * start_move removes it from s->inputs and would break iteration. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
758
759 /* Called from main context */
760 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
761 pa_sink_input *i;
762
763 pa_sink_assert_ref(s);
764 pa_assert_ctl_context();
765 pa_assert(PA_SINK_IS_LINKED(s->state));
766 pa_assert(q);
767
768 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
769 if (pa_sink_input_finish_move(i, s, save) < 0)
770 pa_sink_input_fail_move(i);
771
772 pa_sink_input_unref(i);
773 }
774
775 pa_queue_free(q, NULL, NULL);
776 }
777
778 /* Called from main context */
779 void pa_sink_move_all_fail(pa_queue *q) {
780 pa_sink_input *i;
781
782 pa_assert_ctl_context();
783 pa_assert(q);
784
785 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
786 pa_sink_input_fail_move(i);
787 pa_sink_input_unref(i);
788 }
789
790 pa_queue_free(q, NULL, NULL);
791 }
792
793 /* Called from IO thread context */
794 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
795 pa_sink_input *i;
796 void *state = NULL;
797
798 pa_sink_assert_ref(s);
799 pa_sink_assert_io_context(s);
800 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
801
802 /* If nobody requested this and this is actually no real rewind
803 * then we can short cut this. Please note that this means that
804 * not all rewind requests triggered upstream will always be
805 * translated in actual requests! */
806 if (!s->thread_info.rewind_requested && nbytes <= 0)
807 return;
808
809 s->thread_info.rewind_nbytes = 0;
810 s->thread_info.rewind_requested = FALSE;
811
812 if (s->thread_info.state == PA_SINK_SUSPENDED)
813 return;
814
815 if (nbytes > 0) {
816 pa_log_debug("Processing rewind...");
817 if (s->flags & PA_SINK_SYNC_VOLUME)
818 pa_sink_volume_change_rewind(s, nbytes);
819 }
820
821 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
822 pa_sink_input_assert_ref(i);
823 pa_sink_input_process_rewind(i, nbytes);
824 }
825
826 if (nbytes > 0) {
827 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
828 pa_source_process_rewind(s->monitor_source, nbytes);
829 }
830 }
831
/* Called from IO thread context */
/* Collect up to 'maxinfo' chunks of pending audio from the sink's inputs
 * into 'info'. On entry '*length' is the amount requested; on return it is
 * lowered to the shortest chunk peeked, so all entries can be mixed over a
 * uniform length. Silent chunks are dropped and do not occupy an info
 * slot. Each filled entry holds a reference to its input (in userdata) and
 * to its memblock; inputs_drop() releases them. Returns the number of
 * entries filled in. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Track the shortest chunk seen so far (0 means "none yet"). */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Pure silence contributes nothing to the mix: skip it without
         * consuming an info slot. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
871
/* Called from IO thread context */
/* Post-mix bookkeeping: advance every input by result->length bytes, feed
 * per-input (volume-adjusted) data to any direct source outputs on the
 * monitor, and release the references (input + memblock) acquired by
 * fill_mix_info(). Finally posts the mixed 'result' to the monitor source.
 * 'info'/'n' are the entries fill_mix_info() produced for this round. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* 'p' persists across outer iterations, so when input order is
         * unchanged the very first probe hits. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                /* Direct outputs get this input's own chunk with its
                 * volume applied, or silence if it contributed none. */
                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
968
/* Called from IO thread context */
/* Render (at most) 'length' bytes of mixed audio into '*result'; 0 picks a
 * default block size. Fast paths: suspended -> cached silence; no inputs
 * -> silence; exactly one input -> reuse its chunk and apply volume/mute
 * in place; otherwise mix all inputs into a new memblock. The returned
 * chunk may be shorter than requested. */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);
        return;
    }

    /* Hold a reference: dropping input data below runs arbitrary
     * callbacks that could otherwise free us mid-function. */
    pa_sink_ref(s);

    if (length <= 0)
        length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);

    /* Never render more than a single memblock can hold. */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {

        *result = s->silence;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

    } else if (n == 1) {
        pa_cvolume volume;

        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        /* Muted: hand out silence instead; non-norm volume: scale in place. */
        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
            pa_memblock_unref(result->memblock);
            pa_silence_memchunk_get(&s->core->silence_cache,
                                    s->core->mempool,
                                    result,
                                    &s->sample_spec,
                                    result->length);
        } else if (!pa_cvolume_is_norm(&volume)) {
            pa_memchunk_make_writable(result, 0);
            pa_volume_memchunk(result, &s->sample_spec, &volume);
        }
    } else {
        void *ptr;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                ptr, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);
        pa_memblock_release(result->memblock);

        result->index = 0;
    }

    inputs_drop(s, info, n, result);

    pa_sink_unref(s);
}
1053
/* Called from IO thread context.
 *
 * Fills the caller-provided chunk 'target' with mixed audio from all
 * inputs connected to the sink, overwriting its previous contents. On
 * return target->length may have been reduced to the number of bytes
 * actually rendered. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* A suspended sink produces silence only */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    /* Never render more than fits into a single mempool block */
    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    /* Collect the next chunk of every connected input; 'length' is
     * clamped to what all inputs can provide */
    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No inputs at all: render silence */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        /* Exactly one input: copy it directly, no mixing required */
        pa_cvolume volume;

        if (target->length > length)
            target->length = length;

        /* Combined volume of the sink's soft volume and the input's volume */
        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            /* Take our own reference so we may scale the chunk in place */
            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        /* Several inputs: mix them straight into the target block */
        void *ptr;

        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    /* Advance all inputs past the data we just consumed and release the
     * references fill_mix_info() took */
    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1138
1139 /* Called from IO thread context */
1140 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1141 pa_memchunk chunk;
1142 size_t l, d;
1143
1144 pa_sink_assert_ref(s);
1145 pa_sink_assert_io_context(s);
1146 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1147 pa_assert(target);
1148 pa_assert(target->memblock);
1149 pa_assert(target->length > 0);
1150 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1151
1152 pa_assert(!s->thread_info.rewind_requested);
1153 pa_assert(s->thread_info.rewind_nbytes == 0);
1154
1155 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1156 pa_silence_memchunk(target, &s->sample_spec);
1157 return;
1158 }
1159
1160 pa_sink_ref(s);
1161
1162 l = target->length;
1163 d = 0;
1164 while (l > 0) {
1165 chunk = *target;
1166 chunk.index += d;
1167 chunk.length -= d;
1168
1169 pa_sink_render_into(s, &chunk);
1170
1171 d += chunk.length;
1172 l -= chunk.length;
1173 }
1174
1175 pa_sink_unref(s);
1176 }
1177
1178 /* Called from IO thread context */
1179 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1180 pa_sink_assert_ref(s);
1181 pa_sink_assert_io_context(s);
1182 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1183 pa_assert(length > 0);
1184 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1185 pa_assert(result);
1186
1187 pa_assert(!s->thread_info.rewind_requested);
1188 pa_assert(s->thread_info.rewind_nbytes == 0);
1189
1190 pa_sink_ref(s);
1191
1192 pa_sink_render(s, length, result);
1193
1194 if (result->length < length) {
1195 pa_memchunk chunk;
1196
1197 pa_memchunk_make_writable(result, length);
1198
1199 chunk.memblock = result->memblock;
1200 chunk.index = result->index + result->length;
1201 chunk.length = length - result->length;
1202
1203 pa_sink_render_into_full(s, &chunk);
1204
1205 result->length = length;
1206 }
1207
1208 pa_sink_unref(s);
1209 }
1210
1211 /* Called from main thread */
1212 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1213 pa_usec_t usec = 0;
1214
1215 pa_sink_assert_ref(s);
1216 pa_assert_ctl_context();
1217 pa_assert(PA_SINK_IS_LINKED(s->state));
1218
1219 /* The returned value is supposed to be in the time domain of the sound card! */
1220
1221 if (s->state == PA_SINK_SUSPENDED)
1222 return 0;
1223
1224 if (!(s->flags & PA_SINK_LATENCY))
1225 return 0;
1226
1227 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1228
1229 return usec;
1230 }
1231
1232 /* Called from IO thread */
1233 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1234 pa_usec_t usec = 0;
1235 pa_msgobject *o;
1236
1237 pa_sink_assert_ref(s);
1238 pa_sink_assert_io_context(s);
1239 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1240
1241 /* The returned value is supposed to be in the time domain of the sound card! */
1242
1243 if (s->thread_info.state == PA_SINK_SUSPENDED)
1244 return 0;
1245
1246 if (!(s->flags & PA_SINK_LATENCY))
1247 return 0;
1248
1249 o = PA_MSGOBJECT(s);
1250
1251 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1252
1253 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1254 return -1;
1255
1256 return usec;
1257 }
1258
1259 /* Called from the main thread (and also from the IO thread while the main
1260 * thread is waiting).
1261 *
1262 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1263 * set. Instead, flat volume mode is detected by checking whether the root sink
1264 * has the flag set. */
1265 pa_bool_t pa_sink_flat_volume_enabled(pa_sink *s) {
1266 pa_sink_assert_ref(s);
1267
1268 while (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1269 s = s->input_to_master->sink;
1270
1271 return (s->flags & PA_SINK_FLAT_VOLUME);
1272 }
1273
1274 /* Called from main context */
1275 pa_bool_t pa_sink_is_passthrough(pa_sink *s) {
1276 pa_sink_input *alt_i;
1277 uint32_t idx;
1278
1279 pa_sink_assert_ref(s);
1280
1281 /* one and only one PASSTHROUGH input can possibly be connected */
1282 if (pa_idxset_size(s->inputs) == 1) {
1283 alt_i = pa_idxset_first(s->inputs, &idx);
1284
1285 if (pa_sink_input_is_passthrough(alt_i))
1286 return TRUE;
1287 }
1288
1289 return FALSE;
1290 }
1291
/* Called from main context.
 *
 * Recomputes i->reference_ratio, the per-channel ratio between the
 * stream's own volume and its sink's reference volume. */
static void compute_reference_ratio(pa_sink_input *i) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(i);
    pa_assert(pa_sink_flat_volume_enabled(i->sink));

    /*
     * Calculates the reference ratio from the sink's reference
     * volume. This basically calculates:
     *
     * i->reference_ratio = i->volume / i->sink->reference_volume
     */

    /* Translate the sink's reference volume into the stream's channel map */
    remapped = i->sink->reference_volume;
    pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);

    i->reference_ratio.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++) {

        /* We don't update when the sink volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary
         * (i.e. the old ratio already reproduces i->volume exactly) */
        if (pa_sw_volume_multiply(
                    i->reference_ratio.values[c],
                    remapped.values[c]) == i->volume.values[c])
            continue;

        i->reference_ratio.values[c] = pa_sw_volume_divide(
                i->volume.values[c],
                remapped.values[c]);
    }
}
1329
1330 /* Called from main context. Only called for the root sink in volume sharing
1331 * cases, except for internal recursive calls. */
1332 static void compute_reference_ratios(pa_sink *s) {
1333 uint32_t idx;
1334 pa_sink_input *i;
1335
1336 pa_sink_assert_ref(s);
1337 pa_assert_ctl_context();
1338 pa_assert(PA_SINK_IS_LINKED(s->state));
1339 pa_assert(pa_sink_flat_volume_enabled(s));
1340
1341 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1342 compute_reference_ratio(i);
1343
1344 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1345 compute_reference_ratios(i->origin_sink);
1346 }
1347 }
1348
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Recomputes i->real_ratio and i->soft_volume for every input connected
 * to this sink, based on the sink's current real volume. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin sink uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
            i->soft_volume = i->volume_factor;

            compute_real_ratios(i->origin_sink);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Translate the sink's real volume into the stream's channel map */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary: only recompute the
             * ratio when the old one no longer reproduces i->volume */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1415
/* Remaps *v from channel map 'from' to channel map 'to', trying to keep
 * the result as predictable as possible (see comment in the body).
 * Modifies *v in place and returns it. */
static pa_cvolume *cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from sink input to sink volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the sink to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the sink. */

    /* Identical maps: nothing to do */
    if (pa_channel_map_equal(from, to))
        return v;

    /* If remapping the template back to 'from' reproduces v exactly,
     * the template is a valid (lossless) answer — use it as-is */
    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Otherwise fall back to a flat all-channel volume at v's maximum */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
1454
1455 /* Called from main thread. Only called for the root sink in volume sharing
1456 * cases, except for internal recursive calls. */
1457 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1458 pa_sink_input *i;
1459 uint32_t idx;
1460
1461 pa_sink_assert_ref(s);
1462 pa_assert(max_volume);
1463 pa_assert(channel_map);
1464 pa_assert(pa_sink_flat_volume_enabled(s));
1465
1466 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1467 pa_cvolume remapped;
1468
1469 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1470 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1471
1472 /* Ignore this input. The origin sink uses volume sharing, so this
1473 * input's volume will be set to be equal to the root sink's real
1474 * volume. Obviously this input's current volume must not then
1475 * affect what the root sink's real volume will be. */
1476 continue;
1477 }
1478
1479 remapped = i->volume;
1480 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1481 pa_cvolume_merge(max_volume, max_volume, &remapped);
1482 }
1483 }
1484
1485 /* Called from main thread. Only called for the root sink in volume sharing
1486 * cases, except for internal recursive calls. */
1487 static pa_bool_t has_inputs(pa_sink *s) {
1488 pa_sink_input *i;
1489 uint32_t idx;
1490
1491 pa_sink_assert_ref(s);
1492
1493 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1494 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1495 return TRUE;
1496 }
1497
1498 return FALSE;
1499 }
1500
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Sets s->real_volume from *new_volume (given in 'channel_map') and
 * pushes the same volume down to all volume-sharing filter sinks
 * attached to s, updating their inputs' volumes on the way. */
static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    /* Adopt the new real volume, translated into our own channel map */
    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_sink_flat_volume_enabled(s)) {
                pa_cvolume old_volume = i->volume;

                /* Follow the root sink's real volume. */
                i->volume = *new_volume;
                pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
                compute_reference_ratio(i);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &i->volume)) {
                    if (i->volume_changed)
                        i->volume_changed(i);

                    pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
                }
            }

            /* Recurse into the filter sink so its whole subtree follows */
            update_real_volume(i->origin_sink, new_volume, channel_map);
        }
    }
}
1537
/* Called from main thread. Only called for the root sink in shared volume
 * cases.
 *
 * Derives s->real_volume from the maximum of all connected stream
 * volumes, then refreshes the inputs' real ratios/soft volumes. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_inputs(s)) {
        /* In the special case that we have no sink inputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    /* Start from silence so the merge below yields exactly the maximum */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1568
/* Called from main thread. Only called for the root sink in shared volume
 * cases, except for internal recursive calls.
 *
 * Re-derives every input's volume from the sink's reference volume and
 * the input's stored reference ratio, notifying on changes. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(i->origin_sink);

            /* Since the origin sink uses volume sharing, this input's volume
             * needs to be updated to match the root sink's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        i->volume = s->reference_volume;
        pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1616
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
static pa_bool_t update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
    pa_cvolume volume;
    pa_bool_t reference_volume_changed;
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    /* Translate the new volume into our own channel map */
    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* An unchanged volume keeps the old save flag; a change adopts 'save' */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        /* If the root sink's volume doesn't change, then there can't be any
         * changes in the other sinks in the sink tree either.
         *
         * It's probably theoretically possible that even if the root sink's
         * volume changes slightly, some filter sink doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root sink volume to the sinks connected to the
         * intermediate sink that didn't change its volume. This theoretical
         * possiblity is the reason why we have that !(s->flags &
         * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here FALSE always if
         * reference_volume_changed is FALSE. */
        return FALSE;

    /* Push the (possibly unchanged) volume down into volume-sharing
     * filter sinks; their own save flag is never set from here */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(i->origin_sink, v, channel_map, FALSE);
    }

    return TRUE;
}
1664
/* Called from main thread.
 *
 * Sets the sink's volume. If 'volume' is NULL the sink's reference/real
 * volumes are instead re-synchronized from the stream volumes (flat
 * volume mode only). 'send_msg' controls whether the IO thread is told
 * about the change; 'save' whether the new volume should be persisted. */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume new_reference_volume;
    pa_sink *root_sink = s;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_sink_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH input is connected */
    if (pa_sink_is_passthrough(s)) {
        /* FIXME: Need to notify client that volume control is disabled */
        pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
        return;
    }

    /* In case of volume sharing, the volume is set for the root sink first,
     * from which it's then propagated to the sharing sinks. */
    while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
        root_sink = root_sink->input_to_master->sink;

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current reference volume so its
             * maximum matches the requested level */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
    }

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    if (volume) {
        if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
            if (pa_sink_flat_volume_enabled(root_sink)) {
                /* OK, propagate this volume change back to the inputs */
                propagate_reference_volume(root_sink);

                /* And now recalculate the real volume */
                compute_real_volume(root_sink);
            } else
                update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
        }

    } else {
        pa_assert(pa_sink_flat_volume_enabled(root_sink));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_sink);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
        update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_sink);
    }

    if (root_sink->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_sink->soft_volume */

        pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
        if (!(root_sink->flags & PA_SINK_SYNC_VOLUME))
            root_sink->set_volume(root_sink);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_sink->soft_volume = root_sink->real_volume;

    /* This tells the sink that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
1757
1758 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1759 * Only to be called by sink implementor */
1760 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1761
1762 pa_sink_assert_ref(s);
1763 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1764
1765 if (s->flags & PA_SINK_SYNC_VOLUME)
1766 pa_sink_assert_io_context(s);
1767 else
1768 pa_assert_ctl_context();
1769
1770 if (!volume)
1771 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1772 else
1773 s->soft_volume = *volume;
1774
1775 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_SYNC_VOLUME))
1776 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1777 else
1778 s->thread_info.soft_volume = s->soft_volume;
1779 }
1780
/* Called from the main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Reacts to an externally caused change of the hardware's real volume:
 * adopts it as the new reference volume and rebuilds stream volumes. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        /* Nothing actually changed: bail out early */
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
    }

    if (pa_sink_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            i->volume = s->reference_volume;
            pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }

            /* Recurse into volume-sharing filter sinks attached to us */
            if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(i->origin_sink, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = TRUE;
}
1846
/* Called from io thread.
 *
 * Asynchronously posts a request on the thread mq's outbound queue to
 * re-read the sink's volume and mute state; does not wait for a reply.
 * NOTE(review): presumably the outq is consumed by the main thread —
 * confirm against pa_thread_mq usage elsewhere. */
void pa_sink_update_volume_and_mute(pa_sink *s) {
    pa_assert(s);
    pa_sink_assert_io_context(s);

    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
}
1854
/* Called from main thread.
 *
 * Returns the sink's reference volume, optionally refreshing it from
 * the hardware first. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Without sync volume we query the hardware from this thread;
         * with sync volume the IO thread does it while processing the
         * GET_VOLUME message sent below */
        if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume)
            s->get_volume(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        /* Fold any hardware-side change back into our volume state */
        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
1879
1880 /* Called from main thread. In volume sharing cases, only the root sink may
1881 * call this. */
1882 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1883 pa_cvolume old_real_volume;
1884
1885 pa_sink_assert_ref(s);
1886 pa_assert_ctl_context();
1887 pa_assert(PA_SINK_IS_LINKED(s->state));
1888 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1889
1890 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1891
1892 old_real_volume = s->real_volume;
1893 update_real_volume(s, new_real_volume, &s->channel_map);
1894 propagate_real_volume(s, &old_real_volume);
1895 }
1896
1897 /* Called from main thread */
1898 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1899 pa_bool_t old_muted;
1900
1901 pa_sink_assert_ref(s);
1902 pa_assert_ctl_context();
1903 pa_assert(PA_SINK_IS_LINKED(s->state));
1904
1905 old_muted = s->muted;
1906 s->muted = mute;
1907 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1908
1909 if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->set_mute)
1910 s->set_mute(s);
1911
1912 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1913
1914 if (old_muted != s->muted)
1915 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1916 }
1917
/* Called from main thread.
 *
 * Returns the sink's mute state, optionally refreshing it from the
 * hardware first. */
pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        /* Without sync volume we query the hardware from this thread;
         * with sync volume the IO thread does it while processing the
         * GET_MUTE message sent below */
        if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_mute)
            s->get_mute(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            /* Externally changed hw mute state is worth saving */
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
1945
1946 /* Called from main thread */
1947 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1948 pa_sink_assert_ref(s);
1949 pa_assert_ctl_context();
1950 pa_assert(PA_SINK_IS_LINKED(s->state));
1951
1952 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1953
1954 if (s->muted == new_muted)
1955 return;
1956
1957 s->muted = new_muted;
1958 s->save_muted = TRUE;
1959
1960 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1961 }
1962
1963 /* Called from main thread */
1964 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1965 pa_sink_assert_ref(s);
1966 pa_assert_ctl_context();
1967
1968 if (p)
1969 pa_proplist_update(s->proplist, mode, p);
1970
1971 if (PA_SINK_IS_LINKED(s->state)) {
1972 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1973 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1974 }
1975
1976 return TRUE;
1977 }
1978
1979 /* Called from main thread */
1980 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1981 void pa_sink_set_description(pa_sink *s, const char *description) {
1982 const char *old;
1983 pa_sink_assert_ref(s);
1984 pa_assert_ctl_context();
1985
1986 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1987 return;
1988
1989 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1990
1991 if (old && description && pa_streq(old, description))
1992 return;
1993
1994 if (description)
1995 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1996 else
1997 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1998
1999 if (s->monitor_source) {
2000 char *n;
2001
2002 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2003 pa_source_set_description(s->monitor_source, n);
2004 pa_xfree(n);
2005 }
2006
2007 if (PA_SINK_IS_LINKED(s->state)) {
2008 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2009 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2010 }
2011 }
2012
2013 /* Called from main thread */
2014 unsigned pa_sink_linked_by(pa_sink *s) {
2015 unsigned ret;
2016
2017 pa_sink_assert_ref(s);
2018 pa_assert_ctl_context();
2019 pa_assert(PA_SINK_IS_LINKED(s->state));
2020
2021 ret = pa_idxset_size(s->inputs);
2022
2023 /* We add in the number of streams connected to us here. Please
2024 * note the asymmmetry to pa_sink_used_by()! */
2025
2026 if (s->monitor_source)
2027 ret += pa_source_linked_by(s->monitor_source);
2028
2029 return ret;
2030 }
2031
2032 /* Called from main thread */
2033 unsigned pa_sink_used_by(pa_sink *s) {
2034 unsigned ret;
2035
2036 pa_sink_assert_ref(s);
2037 pa_assert_ctl_context();
2038 pa_assert(PA_SINK_IS_LINKED(s->state));
2039
2040 ret = pa_idxset_size(s->inputs);
2041 pa_assert(ret >= s->n_corked);
2042
2043 /* Streams connected to our monitor source do not matter for
2044 * pa_sink_used_by()!.*/
2045
2046 return ret - s->n_corked;
2047 }
2048
2049 /* Called from main thread */
2050 unsigned pa_sink_check_suspend(pa_sink *s) {
2051 unsigned ret;
2052 pa_sink_input *i;
2053 uint32_t idx;
2054
2055 pa_sink_assert_ref(s);
2056 pa_assert_ctl_context();
2057
2058 if (!PA_SINK_IS_LINKED(s->state))
2059 return 0;
2060
2061 ret = 0;
2062
2063 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2064 pa_sink_input_state_t st;
2065
2066 st = pa_sink_input_get_state(i);
2067
2068 /* We do not assert here. It is perfectly valid for a sink input to
2069 * be in the INIT state (i.e. created, marked done but not yet put)
2070 * and we should not care if it's unlinked as it won't contribute
2071 * towarards our busy status.
2072 */
2073 if (!PA_SINK_INPUT_IS_LINKED(st))
2074 continue;
2075
2076 if (st == PA_SINK_INPUT_CORKED)
2077 continue;
2078
2079 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2080 continue;
2081
2082 ret ++;
2083 }
2084
2085 if (s->monitor_source)
2086 ret += pa_source_check_suspend(s->monitor_source);
2087
2088 return ret;
2089 }
2090
2091 /* Called from the IO thread */
2092 static void sync_input_volumes_within_thread(pa_sink *s) {
2093 pa_sink_input *i;
2094 void *state = NULL;
2095
2096 pa_sink_assert_ref(s);
2097 pa_sink_assert_io_context(s);
2098
2099 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2100 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2101 continue;
2102
2103 i->thread_info.soft_volume = i->soft_volume;
2104 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
2105 }
2106 }
2107
2108 /* Called from the IO thread. Only called for the root sink in volume sharing
2109 * cases, except for internal recursive calls. */
2110 static void set_shared_volume_within_thread(pa_sink *s) {
2111 pa_sink_input *i = NULL;
2112 void *state = NULL;
2113
2114 pa_sink_assert_ref(s);
2115
2116 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2117
2118 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2119 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2120 set_shared_volume_within_thread(i->origin_sink);
2121 }
2122 }
2123
/* Called from IO thread, except when it is not */
/* Central message handler for the sink's asyncmsgq. Dispatches on the
 * pa_sink_message_t code; most messages are answered synchronously while the
 * sending thread sleeps in pa_asyncmsgq_send(), which is what makes the
 * accesses to non-thread_info data below safe. Returns 0 on success, -1 for
 * unhandled codes (sink implementors are expected to handle those in their
 * own process_msg before/after chaining to this one -- NOTE(review):
 * presumably the case for PA_SINK_MESSAGE_GET_LATENCY; confirm per driver). */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        /* A new sink input was created or is being attached to us. */
        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Mirror the main-thread sync_prev/sync_next links into the
             * thread_info copies and stitch the neighbours' links. */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        /* A sink input is going away for good (unlink, not a move). */
        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* NOTE(review): the asserts above say the main-thread links are
             * already cleared; the thread_info links are unspliced here in
             * case they still point at neighbours. */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            /* Drop the reference taken in ADD_INPUT/FINISH_MOVE. */
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        /* First half of moving a sink input to another sink: detach it
         * from us and rewind away its already-rendered data. */
        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                /* Reclaim both the data buffered in the sink and in the
                 * input's own render queue (converted back through the
                 * resampler, if any). */
                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        /* Second half of a move: the input arrives at its new sink.
         * Largely mirrors PA_SINK_MESSAGE_ADD_INPUT above. */
        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Skip the part of the stream that would otherwise be
                 * played twice (it was rewound away on the old sink). */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        /* Re-apply the shared volume starting from the root of the
         * volume-sharing hierarchy. */
        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            pa_sink *root_sink = s;

            /* Walk up through filter sinks to the real device sink. */
            while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
                root_sink = root_sink->input_to_master->sink;

            set_shared_volume_within_thread(root_sink);
            return 0;
        }

        /* Like SET_VOLUME, but additionally pushes the volume to the
         * hardware synchronously for PA_SINK_SYNC_VOLUME sinks. */
        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            if (s->flags & PA_SINK_SYNC_VOLUME) {
                s->set_volume(s);
                pa_sink_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        /* Read back the current volume from the implementor/hardware. */
        case PA_SINK_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (s->flags & PA_SINK_SYNC_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:

            if (s->flags & PA_SINK_SYNC_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        /* The sink changes state (e.g. RUNNING <-> SUSPENDED). */
        case PA_SINK_MESSAGE_SET_STATE: {

            /* TRUE iff we cross the suspended/opened boundary in either
             * direction. */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A pending rewind is meaningless once we are suspended. */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                /* Give every input a chance to react to the suspend/resume. */
                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            /* userdata points to a two-element array: { min, max }. */
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            /* userdata points to a two-element array filled with { min, max }. */
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            /* The value travels in the message's offset field. */
            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            /* The value travels in the message's offset field. */
            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        /* Switch the active port from within the IO thread (used by
         * pa_sink_set_port() when PA_SINK_SYNC_VOLUME is set). */
        case PA_SINK_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct sink_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* force_refresh=TRUE: re-read volume and mute from the driver. */
            pa_sink_get_volume(s, TRUE);
            pa_sink_get_mute(s, TRUE);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    /* Unknown/unhandled message code. */
    return -1;
}
2500
2501 /* Called from main thread */
2502 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2503 pa_sink *sink;
2504 uint32_t idx;
2505 int ret = 0;
2506
2507 pa_core_assert_ref(c);
2508 pa_assert_ctl_context();
2509 pa_assert(cause != 0);
2510
2511 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2512 int r;
2513
2514 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2515 ret = r;
2516 }
2517
2518 return ret;
2519 }
2520
2521 /* Called from main thread */
2522 void pa_sink_detach(pa_sink *s) {
2523 pa_sink_assert_ref(s);
2524 pa_assert_ctl_context();
2525 pa_assert(PA_SINK_IS_LINKED(s->state));
2526
2527 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2528 }
2529
2530 /* Called from main thread */
2531 void pa_sink_attach(pa_sink *s) {
2532 pa_sink_assert_ref(s);
2533 pa_assert_ctl_context();
2534 pa_assert(PA_SINK_IS_LINKED(s->state));
2535
2536 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2537 }
2538
2539 /* Called from IO thread */
2540 void pa_sink_detach_within_thread(pa_sink *s) {
2541 pa_sink_input *i;
2542 void *state = NULL;
2543
2544 pa_sink_assert_ref(s);
2545 pa_sink_assert_io_context(s);
2546 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2547
2548 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2549 if (i->detach)
2550 i->detach(i);
2551
2552 if (s->monitor_source)
2553 pa_source_detach_within_thread(s->monitor_source);
2554 }
2555
2556 /* Called from IO thread */
2557 void pa_sink_attach_within_thread(pa_sink *s) {
2558 pa_sink_input *i;
2559 void *state = NULL;
2560
2561 pa_sink_assert_ref(s);
2562 pa_sink_assert_io_context(s);
2563 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2564
2565 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2566 if (i->attach)
2567 i->attach(i);
2568
2569 if (s->monitor_source)
2570 pa_source_attach_within_thread(s->monitor_source);
2571 }
2572
2573 /* Called from IO thread */
2574 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2575 pa_sink_assert_ref(s);
2576 pa_sink_assert_io_context(s);
2577 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2578
2579 if (s->thread_info.state == PA_SINK_SUSPENDED)
2580 return;
2581
2582 if (nbytes == (size_t) -1)
2583 nbytes = s->thread_info.max_rewind;
2584
2585 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2586
2587 if (s->thread_info.rewind_requested &&
2588 nbytes <= s->thread_info.rewind_nbytes)
2589 return;
2590
2591 s->thread_info.rewind_nbytes = nbytes;
2592 s->thread_info.rewind_requested = TRUE;
2593
2594 if (s->request_rewind)
2595 s->request_rewind(s);
2596 }
2597
2598 /* Called from IO thread */
2599 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2600 pa_usec_t result = (pa_usec_t) -1;
2601 pa_sink_input *i;
2602 void *state = NULL;
2603 pa_usec_t monitor_latency;
2604
2605 pa_sink_assert_ref(s);
2606 pa_sink_assert_io_context(s);
2607
2608 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2609 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2610
2611 if (s->thread_info.requested_latency_valid)
2612 return s->thread_info.requested_latency;
2613
2614 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2615 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2616 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2617 result = i->thread_info.requested_sink_latency;
2618
2619 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2620
2621 if (monitor_latency != (pa_usec_t) -1 &&
2622 (result == (pa_usec_t) -1 || result > monitor_latency))
2623 result = monitor_latency;
2624
2625 if (result != (pa_usec_t) -1)
2626 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2627
2628 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2629 /* Only cache if properly initialized */
2630 s->thread_info.requested_latency = result;
2631 s->thread_info.requested_latency_valid = TRUE;
2632 }
2633
2634 return result;
2635 }
2636
2637 /* Called from main thread */
2638 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2639 pa_usec_t usec = 0;
2640
2641 pa_sink_assert_ref(s);
2642 pa_assert_ctl_context();
2643 pa_assert(PA_SINK_IS_LINKED(s->state));
2644
2645 if (s->state == PA_SINK_SUSPENDED)
2646 return 0;
2647
2648 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2649
2650 return usec;
2651 }
2652
2653 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2654 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2655 pa_sink_input *i;
2656 void *state = NULL;
2657
2658 pa_sink_assert_ref(s);
2659 pa_sink_assert_io_context(s);
2660
2661 if (max_rewind == s->thread_info.max_rewind)
2662 return;
2663
2664 s->thread_info.max_rewind = max_rewind;
2665
2666 if (PA_SINK_IS_LINKED(s->thread_info.state))
2667 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2668 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2669
2670 if (s->monitor_source)
2671 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2672 }
2673
2674 /* Called from main thread */
2675 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2676 pa_sink_assert_ref(s);
2677 pa_assert_ctl_context();
2678
2679 if (PA_SINK_IS_LINKED(s->state))
2680 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2681 else
2682 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2683 }
2684
2685 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2686 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2687 void *state = NULL;
2688
2689 pa_sink_assert_ref(s);
2690 pa_sink_assert_io_context(s);
2691
2692 if (max_request == s->thread_info.max_request)
2693 return;
2694
2695 s->thread_info.max_request = max_request;
2696
2697 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2698 pa_sink_input *i;
2699
2700 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2701 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2702 }
2703 }
2704
2705 /* Called from main thread */
2706 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2707 pa_sink_assert_ref(s);
2708 pa_assert_ctl_context();
2709
2710 if (PA_SINK_IS_LINKED(s->state))
2711 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2712 else
2713 pa_sink_set_max_request_within_thread(s, max_request);
2714 }
2715
2716 /* Called from IO thread */
2717 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2718 pa_sink_input *i;
2719 void *state = NULL;
2720
2721 pa_sink_assert_ref(s);
2722 pa_sink_assert_io_context(s);
2723
2724 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2725 s->thread_info.requested_latency_valid = FALSE;
2726 else if (dynamic)
2727 return;
2728
2729 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2730
2731 if (s->update_requested_latency)
2732 s->update_requested_latency(s);
2733
2734 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2735 if (i->update_sink_requested_latency)
2736 i->update_sink_requested_latency(i);
2737 }
2738 }
2739
2740 /* Called from main thread */
2741 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2742 pa_sink_assert_ref(s);
2743 pa_assert_ctl_context();
2744
2745 /* min_latency == 0: no limit
2746 * min_latency anything else: specified limit
2747 *
2748 * Similar for max_latency */
2749
2750 if (min_latency < ABSOLUTE_MIN_LATENCY)
2751 min_latency = ABSOLUTE_MIN_LATENCY;
2752
2753 if (max_latency <= 0 ||
2754 max_latency > ABSOLUTE_MAX_LATENCY)
2755 max_latency = ABSOLUTE_MAX_LATENCY;
2756
2757 pa_assert(min_latency <= max_latency);
2758
2759 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2760 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2761 max_latency == ABSOLUTE_MAX_LATENCY) ||
2762 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2763
2764 if (PA_SINK_IS_LINKED(s->state)) {
2765 pa_usec_t r[2];
2766
2767 r[0] = min_latency;
2768 r[1] = max_latency;
2769
2770 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2771 } else
2772 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2773 }
2774
2775 /* Called from main thread */
2776 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2777 pa_sink_assert_ref(s);
2778 pa_assert_ctl_context();
2779 pa_assert(min_latency);
2780 pa_assert(max_latency);
2781
2782 if (PA_SINK_IS_LINKED(s->state)) {
2783 pa_usec_t r[2] = { 0, 0 };
2784
2785 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2786
2787 *min_latency = r[0];
2788 *max_latency = r[1];
2789 } else {
2790 *min_latency = s->thread_info.min_latency;
2791 *max_latency = s->thread_info.max_latency;
2792 }
2793 }
2794
2795 /* Called from IO thread */
2796 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2797 pa_sink_assert_ref(s);
2798 pa_sink_assert_io_context(s);
2799
2800 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2801 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2802 pa_assert(min_latency <= max_latency);
2803
2804 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2805 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2806 max_latency == ABSOLUTE_MAX_LATENCY) ||
2807 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2808
2809 if (s->thread_info.min_latency == min_latency &&
2810 s->thread_info.max_latency == max_latency)
2811 return;
2812
2813 s->thread_info.min_latency = min_latency;
2814 s->thread_info.max_latency = max_latency;
2815
2816 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2817 pa_sink_input *i;
2818 void *state = NULL;
2819
2820 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2821 if (i->update_sink_latency_range)
2822 i->update_sink_latency_range(i);
2823 }
2824
2825 pa_sink_invalidate_requested_latency(s, FALSE);
2826
2827 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2828 }
2829
2830 /* Called from main thread */
2831 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2832 pa_sink_assert_ref(s);
2833 pa_assert_ctl_context();
2834
2835 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2836 pa_assert(latency == 0);
2837 return;
2838 }
2839
2840 if (latency < ABSOLUTE_MIN_LATENCY)
2841 latency = ABSOLUTE_MIN_LATENCY;
2842
2843 if (latency > ABSOLUTE_MAX_LATENCY)
2844 latency = ABSOLUTE_MAX_LATENCY;
2845
2846 if (PA_SINK_IS_LINKED(s->state))
2847 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2848 else
2849 s->thread_info.fixed_latency = latency;
2850
2851 pa_source_set_fixed_latency(s->monitor_source, latency);
2852 }
2853
2854 /* Called from main thread */
2855 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2856 pa_usec_t latency;
2857
2858 pa_sink_assert_ref(s);
2859 pa_assert_ctl_context();
2860
2861 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2862 return 0;
2863
2864 if (PA_SINK_IS_LINKED(s->state))
2865 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2866 else
2867 latency = s->thread_info.fixed_latency;
2868
2869 return latency;
2870 }
2871
2872 /* Called from IO thread */
2873 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2874 pa_sink_assert_ref(s);
2875 pa_sink_assert_io_context(s);
2876
2877 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2878 pa_assert(latency == 0);
2879 return;
2880 }
2881
2882 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2883 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2884
2885 if (s->thread_info.fixed_latency == latency)
2886 return;
2887
2888 s->thread_info.fixed_latency = latency;
2889
2890 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2891 pa_sink_input *i;
2892 void *state = NULL;
2893
2894 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2895 if (i->update_sink_fixed_latency)
2896 i->update_sink_fixed_latency(i);
2897 }
2898
2899 pa_sink_invalidate_requested_latency(s, FALSE);
2900
2901 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2902 }
2903
2904 /* Called from main context */
2905 size_t pa_sink_get_max_rewind(pa_sink *s) {
2906 size_t r;
2907 pa_assert_ctl_context();
2908 pa_sink_assert_ref(s);
2909
2910 if (!PA_SINK_IS_LINKED(s->state))
2911 return s->thread_info.max_rewind;
2912
2913 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2914
2915 return r;
2916 }
2917
2918 /* Called from main context */
2919 size_t pa_sink_get_max_request(pa_sink *s) {
2920 size_t r;
2921 pa_sink_assert_ref(s);
2922 pa_assert_ctl_context();
2923
2924 if (!PA_SINK_IS_LINKED(s->state))
2925 return s->thread_info.max_request;
2926
2927 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2928
2929 return r;
2930 }
2931
2932 /* Called from main context */
2933 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2934 pa_device_port *port;
2935 int ret;
2936
2937 pa_sink_assert_ref(s);
2938 pa_assert_ctl_context();
2939
2940 if (!s->set_port) {
2941 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2942 return -PA_ERR_NOTIMPLEMENTED;
2943 }
2944
2945 if (!s->ports)
2946 return -PA_ERR_NOENTITY;
2947
2948 if (!(port = pa_hashmap_get(s->ports, name)))
2949 return -PA_ERR_NOENTITY;
2950
2951 if (s->active_port == port) {
2952 s->save_port = s->save_port || save;
2953 return 0;
2954 }
2955
2956 if (s->flags & PA_SINK_SYNC_VOLUME) {
2957 struct sink_message_set_port msg = { .port = port, .ret = 0 };
2958 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2959 ret = msg.ret;
2960 }
2961 else
2962 ret = s->set_port(s, port);
2963
2964 if (ret < 0)
2965 return -PA_ERR_NOENTITY;
2966
2967 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2968
2969 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2970
2971 s->active_port = port;
2972 s->save_port = save;
2973
2974 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
2975
2976 return 0;
2977 }
2978
2979 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2980 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2981
2982 pa_assert(p);
2983
2984 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2985 return TRUE;
2986
2987 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2988
2989 if (pa_streq(ff, "microphone"))
2990 t = "audio-input-microphone";
2991 else if (pa_streq(ff, "webcam"))
2992 t = "camera-web";
2993 else if (pa_streq(ff, "computer"))
2994 t = "computer";
2995 else if (pa_streq(ff, "handset"))
2996 t = "phone";
2997 else if (pa_streq(ff, "portable"))
2998 t = "multimedia-player";
2999 else if (pa_streq(ff, "tv"))
3000 t = "video-display";
3001
3002 /*
3003 * The following icons are not part of the icon naming spec,
3004 * because Rodney Dawes sucks as the maintainer of that spec.
3005 *
3006 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3007 */
3008 else if (pa_streq(ff, "headset"))
3009 t = "audio-headset";
3010 else if (pa_streq(ff, "headphone"))
3011 t = "audio-headphones";
3012 else if (pa_streq(ff, "speaker"))
3013 t = "audio-speakers";
3014 else if (pa_streq(ff, "hands-free"))
3015 t = "audio-handsfree";
3016 }
3017
3018 if (!t)
3019 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3020 if (pa_streq(c, "modem"))
3021 t = "modem";
3022
3023 if (!t) {
3024 if (is_sink)
3025 t = "audio-card";
3026 else
3027 t = "audio-input-microphone";
3028 }
3029
3030 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3031 if (strstr(profile, "analog"))
3032 s = "-analog";
3033 else if (strstr(profile, "iec958"))
3034 s = "-iec958";
3035 else if (strstr(profile, "hdmi"))
3036 s = "-hdmi";
3037 }
3038
3039 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3040
3041 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3042
3043 return TRUE;
3044 }
3045
3046 pa_bool_t pa_device_init_description(pa_proplist *p) {
3047 const char *s, *d = NULL, *k;
3048 pa_assert(p);
3049
3050 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3051 return TRUE;
3052
3053 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3054 if (pa_streq(s, "internal"))
3055 d = _("Internal Audio");
3056
3057 if (!d)
3058 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3059 if (pa_streq(s, "modem"))
3060 d = _("Modem");
3061
3062 if (!d)
3063 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3064
3065 if (!d)
3066 return FALSE;
3067
3068 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3069
3070 if (d && k)
3071 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
3072 else if (d)
3073 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3074
3075 return TRUE;
3076 }
3077
3078 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
3079 const char *s;
3080 pa_assert(p);
3081
3082 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3083 return TRUE;
3084
3085 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3086 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3087 || pa_streq(s, "headset")) {
3088 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3089 return TRUE;
3090 }
3091
3092 return FALSE;
3093 }
3094
3095 unsigned pa_device_init_priority(pa_proplist *p) {
3096 const char *s;
3097 unsigned priority = 0;
3098
3099 pa_assert(p);
3100
3101 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3102
3103 if (pa_streq(s, "sound"))
3104 priority += 9000;
3105 else if (!pa_streq(s, "modem"))
3106 priority += 1000;
3107 }
3108
3109 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3110
3111 if (pa_streq(s, "internal"))
3112 priority += 900;
3113 else if (pa_streq(s, "speaker"))
3114 priority += 500;
3115 else if (pa_streq(s, "headphone"))
3116 priority += 400;
3117 }
3118
3119 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3120
3121 if (pa_streq(s, "pci"))
3122 priority += 50;
3123 else if (pa_streq(s, "usb"))
3124 priority += 40;
3125 else if (pa_streq(s, "bluetooth"))
3126 priority += 30;
3127 }
3128
3129 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3130
3131 if (pa_startswith(s, "analog-"))
3132 priority += 9;
3133 else if (pa_startswith(s, "iec958-"))
3134 priority += 8;
3135 }
3136
3137 return priority;
3138 }
3139
/* Free list used to recycle pa_sink_volume_change structs between the
 * IO-thread new/free helpers below; leftover entries are released with
 * pa_xfree when the list itself is destroyed. */
PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3141
3142 /* Called from the IO thread. */
/* Allocate a volume-change entry for sink s. Reuses an entry from the
 * static free list when one is available, otherwise allocates fresh. The
 * entry comes back detached from any list, with no scheduled time, and
 * with hw_volume reset for the sink's channel count. */
static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
    pa_sink_volume_change *c;
    /* Recycle from the free list if possible, else heap-allocate */
    if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
        c = pa_xnew(pa_sink_volume_change, 1);

    PA_LLIST_INIT(pa_sink_volume_change, c);
    c->at = 0;
    pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
    return c;
}
3153
3154 /* Called from the IO thread. */
3155 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3156 pa_assert(c);
3157 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3158 pa_xfree(c);
3159 }
3160
3161 /* Called from the IO thread. */
/* Queue a hardware volume change for sink s. The target HW volume is
 * derived from real_volume and soft_volume (the divide below, i.e.
 * hw = real / soft). The change is timestamped for when audio written
 * now will actually be played (now + sink latency + extra delay) and
 * inserted into the sink's time-ordered change list; any already-queued
 * changes scheduled after the new one are dropped. */
void pa_sink_volume_change_push(pa_sink *s) {
    pa_sink_volume_change *c = NULL;
    pa_sink_volume_change *nc = NULL;
    uint32_t safety_margin = s->thread_info.volume_change_safety_margin;

    const char *direction = NULL;

    pa_assert(s);
    nc = pa_sink_volume_change_new(s);

    /* NOTE: There is already more different volumes in pa_sink that I can remember.
     * Adding one more volume for HW would get us rid of this, but I am trying
     * to survive with the ones we already have. */
    pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);

    /* Nothing queued and the HW volume already matches: nothing to do. */
    if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
        pa_log_debug("Volume not changing");
        pa_sink_volume_change_free(nc);
        return;
    }

    /* Schedule for the playout time of data written right now. */
    nc->at = pa_sink_get_latency_within_thread(s);
    nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    /* Find the insertion point, scanning from the newest queued change
     * backwards. The safety margin biases the timing so that increases
     * happen slightly late and decreases slightly early. */
    if (s->thread_info.volume_changes_tail) {
        for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
            /* If volume is going up let's do it a bit late. If it is going
             * down let's do it a bit early. */
            if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
                if (nc->at + safety_margin > c->at) {
                    nc->at += safety_margin;
                    direction = "up";
                    break;
                }
            }
            else if (nc->at - safety_margin > c->at) {
                nc->at -= safety_margin;
                direction = "down";
                break;
            }
        }
    }

    /* No predecessor found (or list empty): decide the bias against the
     * currently active HW volume and prepend at the head. */
    if (c == NULL) {
        if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
            nc->at += safety_margin;
            direction = "up";
        } else {
            nc->at -= safety_margin;
            direction = "down";
        }
        PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
    }
    else {
        PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
    }

    pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);

    /* We can ignore volume events that came earlier but should happen later than this. */
    PA_LLIST_FOREACH(c, nc->next) {
        pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
        pa_sink_volume_change_free(c);
    }
    nc->next = NULL;
    /* The new change is now the last one in the queue. */
    s->thread_info.volume_changes_tail = nc;
}
3229
3230 /* Called from the IO thread. */
3231 static void pa_sink_volume_change_flush(pa_sink *s) {
3232 pa_sink_volume_change *c = s->thread_info.volume_changes;
3233 pa_assert(s);
3234 s->thread_info.volume_changes = NULL;
3235 s->thread_info.volume_changes_tail = NULL;
3236 while (c) {
3237 pa_sink_volume_change *next = c->next;
3238 pa_sink_volume_change_free(c);
3239 c = next;
3240 }
3241 }
3242
3243 /* Called from the IO thread. */
3244 pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3245 pa_usec_t now = pa_rtclock_now();
3246 pa_bool_t ret = FALSE;
3247
3248 pa_assert(s);
3249 pa_assert(s->write_volume);
3250
3251 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3252 pa_sink_volume_change *c = s->thread_info.volume_changes;
3253 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3254 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3255 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3256 ret = TRUE;
3257 s->thread_info.current_hw_volume = c->hw_volume;
3258 pa_sink_volume_change_free(c);
3259 }
3260
3261 if (s->write_volume && ret)
3262 s->write_volume(s);
3263
3264 if (s->thread_info.volume_changes) {
3265 if (usec_to_next)
3266 *usec_to_next = s->thread_info.volume_changes->at - now;
3267 if (pa_log_ratelimit(PA_LOG_DEBUG))
3268 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3269 }
3270 else {
3271 if (usec_to_next)
3272 *usec_to_next = 0;
3273 s->thread_info.volume_changes_tail = NULL;
3274 }
3275 return ret;
3276 }
3277
3278 /* Called from the IO thread. */
/* After a rewind of nbytes, data already queued will be played sooner,
 * so shift every pending volume change that lies beyond the current
 * latency window earlier by the rewound duration — clamped so it never
 * moves inside that window — and then apply anything now due. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit = pa_sink_get_latency_within_thread(s);

    pa_log_debug("latency = %lld", (long long) limit);
    /* Absolute time before which no change may be rescheduled. */
    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        pa_usec_t modified_limit = limit;
        /* Same up-late/down-early bias as in pa_sink_volume_change_push(). */
        if (prev_vol > pa_cvolume_avg(&c->hw_volume))
            modified_limit -= s->thread_info.volume_change_safety_margin;
        else
            modified_limit += s->thread_info.volume_change_safety_margin;
        if (c->at > modified_limit) {
            c->at -= rewound;
            /* Never pull a change earlier than its biased limit. */
            if (c->at < modified_limit)
                c->at = modified_limit;
        }
        prev_vol = pa_cvolume_avg(&c->hw_volume);
    }
    pa_sink_volume_change_apply(s, NULL);
}
3304
3305 /* Called from the main thread */
3306 /* Gets the list of formats supported by the sink. The members and idxset must
3307 * be freed by the caller. */
3308 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3309 pa_idxset *ret;
3310
3311 pa_assert(s);
3312
3313 if (s->get_formats) {
3314 /* Sink supports format query, all is good */
3315 ret = s->get_formats(s);
3316 } else {
3317 /* Sink doesn't support format query, so assume it does PCM */
3318 pa_format_info *f = pa_format_info_new();
3319 f->encoding = PA_ENCODING_PCM;
3320
3321 ret = pa_idxset_new(NULL, NULL);
3322 pa_idxset_put(ret, f, NULL);
3323 }
3324
3325 return ret;
3326 }
3327
3328 /* Called from the main thread */
3329 /* Checks if the sink can accept this format */
3330 pa_bool_t pa_sink_check_format(pa_sink *s, pa_format_info *f)
3331 {
3332 pa_idxset *formats = NULL;
3333 pa_bool_t ret = FALSE;
3334
3335 pa_assert(s);
3336 pa_assert(f);
3337
3338 formats = pa_sink_get_formats(s);
3339
3340 if (formats) {
3341 pa_format_info *finfo_device;
3342 uint32_t i;
3343
3344 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3345 if (pa_format_info_is_compatible(finfo_device, f)) {
3346 ret = TRUE;
3347 break;
3348 }
3349 }
3350
3351 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
3352 }
3353
3354 return ret;
3355 }
3356
3357 /* Called from the main thread */
3358 /* Calculates the intersection between formats supported by the sink and
3359 * in_formats, and returns these, in the order of the sink's formats. */
3360 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3361 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3362 pa_format_info *f_sink, *f_in;
3363 uint32_t i, j;
3364
3365 pa_assert(s);
3366
3367 if (!in_formats || pa_idxset_isempty(in_formats))
3368 goto done;
3369
3370 sink_formats = pa_sink_get_formats(s);
3371
3372 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3373 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3374 if (pa_format_info_is_compatible(f_sink, f_in))
3375 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3376 }
3377 }
3378
3379 done:
3380 if (sink_formats)
3381 pa_idxset_free(sink_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
3382
3383 return out_formats;
3384 }