1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
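
/* Editor's note: an illustrative sketch (not part of the original file) of how
 * a sink implementor might hand ports to pa_sink_new() through
 * pa_sink_new_data. The hashmap constructor arguments and the "analog-output"
 * port are assumptions for the example:
 *
 *     pa_sink_new_data data;
 *     pa_device_port *port;
 *
 *     pa_sink_new_data_init(&data);
 *     data.ports = pa_hashmap_new(pa_idxset_string_hash_func,
 *                                 pa_idxset_string_compare_func);
 *
 *     port = pa_device_port_new("analog-output", "Analog Output", 0);
 *     port->priority = 100;
 *     pa_hashmap_put(data.ports, port->name, port);
 *     pa_sink_new_data_set_port(&data, "analog-output");
 *
 * pa_sink_new() steals data.ports, and pa_sink_new_data_done() frees whatever
 * is left over if sink creation fails. */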
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
169
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
176
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180 pa_assert_ctl_context();
181
182 s = pa_msgobject_new(pa_sink);
183
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
186 pa_xfree(s);
187 return NULL;
188 }
189
190 pa_sink_new_data_set_name(data, name);
191
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
193 pa_xfree(s);
194 pa_namereg_unregister(core, name);
195 return NULL;
196 }
197
198 /* FIXME, need to free s here on failure */
199
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
202
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
204
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
207
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
210
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
213
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
216
217 if (!data->muted_is_set)
218 data->muted = FALSE;
219
220 if (data->card)
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
222
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
226
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
228 pa_xfree(s);
229 pa_namereg_unregister(core, name);
230 return NULL;
231 }
232
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
235
236 s->core = core;
237 s->state = PA_SINK_INIT;
238 s->flags = flags;
239 s->priority = 0;
240 s->suspend_cause = 0;
241 s->name = pa_xstrdup(name);
242 s->proplist = pa_proplist_copy(data->proplist);
243 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
244 s->module = data->module;
245 s->card = data->card;
246
247 s->priority = pa_device_init_priority(s->proplist);
248
249 s->sample_spec = data->sample_spec;
250 s->channel_map = data->channel_map;
251
252 s->inputs = pa_idxset_new(NULL, NULL);
253 s->n_corked = 0;
254
255 s->reference_volume = s->real_volume = data->volume;
256 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
257 s->base_volume = PA_VOLUME_NORM;
258 s->n_volume_steps = PA_VOLUME_NORM+1;
259 s->muted = data->muted;
260 s->refresh_volume = s->refresh_muted = FALSE;
261
262 reset_callbacks(s);
263 s->userdata = NULL;
264
265 s->asyncmsgq = NULL;
266
267 /* As a minor optimization we just steal the ports hashmap instead
268 * of copying it here */
269 s->ports = data->ports;
270 data->ports = NULL;
271
272 s->active_port = NULL;
273 s->save_port = FALSE;
274
275 if (data->active_port && s->ports)
276 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
277 s->save_port = data->save_port;
278
279 if (!s->active_port && s->ports) {
280 void *state;
281 pa_device_port *p;
282
283 PA_HASHMAP_FOREACH(p, s->ports, state)
284 if (!s->active_port || p->priority > s->active_port->priority)
285 s->active_port = p;
286 }
287
288 s->save_volume = data->save_volume;
289 s->save_muted = data->save_muted;
290
291 pa_silence_memchunk_get(
292 &core->silence_cache,
293 core->mempool,
294 &s->silence,
295 &s->sample_spec,
296 0);
297
298 s->thread_info.rtpoll = NULL;
299 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
300 s->thread_info.soft_volume = s->soft_volume;
301 s->thread_info.soft_muted = s->muted;
302 s->thread_info.state = s->state;
303 s->thread_info.rewind_nbytes = 0;
304 s->thread_info.rewind_requested = FALSE;
305 s->thread_info.max_rewind = 0;
306 s->thread_info.max_request = 0;
307 s->thread_info.requested_latency_valid = FALSE;
308 s->thread_info.requested_latency = 0;
309 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
310 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
311 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
312
313 /* FIXME: This should probably be moved to pa_sink_put() */
314 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
315
316 if (s->card)
317 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
318
319 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
320 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
321 s->index,
322 s->name,
323 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
324 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
325 pt);
326 pa_xfree(pt);
327
328 pa_source_new_data_init(&source_data);
329 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
330 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
331 source_data.name = pa_sprintf_malloc("%s.monitor", name);
332 source_data.driver = data->driver;
333 source_data.module = data->module;
334 source_data.card = data->card;
335
336 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
337 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
338 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
339
340 s->monitor_source = pa_source_new(core, &source_data,
341 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
342 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
343
344 pa_source_new_data_done(&source_data);
345
346 if (!s->monitor_source) {
347 pa_sink_unlink(s);
348 pa_sink_unref(s);
349 return NULL;
350 }
351
352 s->monitor_source->monitor_of = s;
353
354 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
355 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
356 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
357
358 return s;
359 }
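
/* Editor's note: a minimal, hedged sketch of the usual call sequence in a sink
 * module around pa_sink_new()/pa_sink_put(). The userdata struct "u", the
 * module pointer "m" and the chosen names are placeholders, not part of this
 * file's API:
 *
 *     pa_sink_new_data data;
 *     pa_sample_spec ss;
 *     pa_channel_map map;
 *
 *     ss.format = PA_SAMPLE_S16NE;
 *     ss.rate = 44100;
 *     ss.channels = 2;
 *     pa_channel_map_init_stereo(&map);
 *
 *     pa_sink_new_data_init(&data);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     pa_sink_new_data_set_name(&data, "example");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     pa_sink_new_data_set_channel_map(&data, &map);
 *
 *     u->sink = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);
 *
 *     if (u->sink) {
 *         u->sink->userdata = u;
 *         pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
 *         pa_sink_set_rtpoll(u->sink, u->rtpoll);
 *         // ... start the IO thread, then ...
 *         pa_sink_put(u->sink);
 *     }
 */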
360
361 /* Called from main context */
362 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
363 int ret;
364 pa_bool_t suspend_change;
365 pa_sink_state_t original_state;
366
367 pa_assert(s);
368 pa_assert_ctl_context();
369
370 if (s->state == state)
371 return 0;
372
373 original_state = s->state;
374
375 suspend_change =
376 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
377 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
378
379 if (s->set_state)
380 if ((ret = s->set_state(s, state)) < 0)
381 return ret;
382
383 if (s->asyncmsgq)
384 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
385
386 if (s->set_state)
387 s->set_state(s, original_state);
388
389 return ret;
390 }
391
392 s->state = state;
393
394 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
395 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
396 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
397 }
398
399 if (suspend_change) {
400 pa_sink_input *i;
401 uint32_t idx;
402
403 /* We're suspending or resuming, tell everyone about it */
404
405 PA_IDXSET_FOREACH(i, s->inputs, idx)
406 if (s->state == PA_SINK_SUSPENDED &&
407 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
408 pa_sink_input_kill(i);
409 else if (i->suspend)
410 i->suspend(i, state == PA_SINK_SUSPENDED);
411
412 if (s->monitor_source)
413 pa_source_sync_suspend(s->monitor_source);
414 }
415
416 return 0;
417 }
418
419 /* Called from main context */
420 void pa_sink_put(pa_sink* s) {
421 pa_sink_assert_ref(s);
422 pa_assert_ctl_context();
423
424 pa_assert(s->state == PA_SINK_INIT);
425
426 /* The following fields must be initialized properly when calling _put() */
427 pa_assert(s->asyncmsgq);
428 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
429
430 /* Generally, flags should be initialized via pa_sink_new(). As a
431 * special exception we allow volume related flags to be set
432 * between _new() and _put(). */
433
434 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
435 s->flags |= PA_SINK_DECIBEL_VOLUME;
436
437 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
438 s->flags |= PA_SINK_FLAT_VOLUME;
439
440 /* We assume that if the sink implementor changed the default
441 * volume he did so in real_volume, because that is the usual
442 * place where he is supposed to place his changes. */
443 s->reference_volume = s->real_volume;
444
445 s->thread_info.soft_volume = s->soft_volume;
446 s->thread_info.soft_muted = s->muted;
447
448 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
449 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
450 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
451 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
452 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
453
454 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
455 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
456 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
457
458 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
459
460 pa_source_put(s->monitor_source);
461
462 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
463 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
464 }
465
466 /* Called from main context */
467 void pa_sink_unlink(pa_sink* s) {
468 pa_bool_t linked;
469 pa_sink_input *i, *j = NULL;
470
471 pa_assert(s);
472 pa_assert_ctl_context();
473
474 /* Please note that pa_sink_unlink() does more than simply
475 * reversing pa_sink_put(). It also undoes the registrations
476 * already done in pa_sink_new()! */
477
478 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
479 * may be called multiple times on the same sink without bad
480 * effects. */
481
482 linked = PA_SINK_IS_LINKED(s->state);
483
484 if (linked)
485 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
486
487 if (s->state != PA_SINK_UNLINKED)
488 pa_namereg_unregister(s->core, s->name);
489 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
490
491 if (s->card)
492 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
493
494 while ((i = pa_idxset_first(s->inputs, NULL))) {
495 pa_assert(i != j);
496 pa_sink_input_kill(i);
497 j = i;
498 }
499
500 if (linked)
501 sink_set_state(s, PA_SINK_UNLINKED);
502 else
503 s->state = PA_SINK_UNLINKED;
504
505 reset_callbacks(s);
506
507 if (s->monitor_source)
508 pa_source_unlink(s->monitor_source);
509
510 if (linked) {
511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
512 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
513 }
514 }
515
516 /* Called from main context */
517 static void sink_free(pa_object *o) {
518 pa_sink *s = PA_SINK(o);
519 pa_sink_input *i;
520
521 pa_assert(s);
522 pa_assert_ctl_context();
523 pa_assert(pa_sink_refcnt(s) == 0);
524
525 if (PA_SINK_IS_LINKED(s->state))
526 pa_sink_unlink(s);
527
528 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
529
530 if (s->monitor_source) {
531 pa_source_unref(s->monitor_source);
532 s->monitor_source = NULL;
533 }
534
535 pa_idxset_free(s->inputs, NULL, NULL);
536
537 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
538 pa_sink_input_unref(i);
539
540 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
541
542 if (s->silence.memblock)
543 pa_memblock_unref(s->silence.memblock);
544
545 pa_xfree(s->name);
546 pa_xfree(s->driver);
547
548 if (s->proplist)
549 pa_proplist_free(s->proplist);
550
551 if (s->ports) {
552 pa_device_port *p;
553
554 while ((p = pa_hashmap_steal_first(s->ports)))
555 pa_device_port_free(p);
556
557 pa_hashmap_free(s->ports, NULL, NULL);
558 }
559
560 pa_xfree(s);
561 }
562
563 /* Called from main context, and not while the IO thread is active, please */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
567
568 s->asyncmsgq = q;
569
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
572 }
573
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
578
579 if (mask == 0)
580 return;
581
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
584
585 s->flags = (s->flags & ~mask) | (value & mask);
586
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
592 }
593
594 /* Called from IO context, or before _put() from main context */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
598
599 s->thread_info.rtpoll = p;
600
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
603 }
604
605 /* Called from main context */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
610
611 if (s->state == PA_SINK_SUSPENDED)
612 return 0;
613
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
615 }
616
617 /* Called from main context */
618 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
619 pa_sink_assert_ref(s);
620 pa_assert_ctl_context();
621 pa_assert(PA_SINK_IS_LINKED(s->state));
622 pa_assert(cause != 0);
623
624 if (suspend) {
625 s->suspend_cause |= cause;
626 s->monitor_source->suspend_cause |= cause;
627 } else {
628 s->suspend_cause &= ~cause;
629 s->monitor_source->suspend_cause &= ~cause;
630 }
631
632 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
633 return 0;
634
635 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
636
637 if (s->suspend_cause)
638 return sink_set_state(s, PA_SINK_SUSPENDED);
639 else
640 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
641 }
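
/* Editor's note: an illustrative example of how callers use the suspend cause
 * accounting above. Causes accumulate in a bitmask and the sink only resumes
 * once the last cause has been cleared again:
 *
 *     pa_sink_suspend(s, TRUE, PA_SUSPEND_IDLE);     // suspended
 *     pa_sink_suspend(s, TRUE, PA_SUSPEND_USER);     // still suspended
 *     pa_sink_suspend(s, FALSE, PA_SUSPEND_IDLE);    // still suspended
 *     pa_sink_suspend(s, FALSE, PA_SUSPEND_USER);    // resumes now
 */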
642
643 /* Called from main context */
644 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
645 pa_sink_input *i, *n;
646 uint32_t idx;
647
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
651
652 if (!q)
653 q = pa_queue_new();
654
655 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
656 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
657
658 pa_sink_input_ref(i);
659
660 if (pa_sink_input_start_move(i) >= 0)
661 pa_queue_push(q, i);
662 else
663 pa_sink_input_unref(i);
664 }
665
666 return q;
667 }
668
669 /* Called from main context */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
671 pa_sink_input *i;
672
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
676 pa_assert(q);
677
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
681
682 pa_sink_input_unref(i);
683 }
684
685 pa_queue_free(q, NULL, NULL);
686 }
687
688 /* Called from main context */
689 void pa_sink_move_all_fail(pa_queue *q) {
690 pa_sink_input *i;
691
692 pa_assert_ctl_context();
693 pa_assert(q);
694
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
698 }
699
700 pa_queue_free(q, NULL, NULL);
701 }
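
/* Editor's note: a hedged sketch of the move-all protocol implemented by the
 * three functions above, as a caller such as a card-profile switcher might use
 * it; "old_sink" and "new_sink" are placeholders:
 *
 *     pa_queue *q = pa_sink_move_all_start(old_sink, NULL);
 *
 *     // ... tear down old_sink, bring up new_sink ...
 *
 *     if (new_sink)
 *         pa_sink_move_all_finish(new_sink, q, FALSE);
 *     else
 *         pa_sink_move_all_fail(q);
 */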
702
703 /* Called from IO thread context */
704 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
705 pa_sink_input *i;
706 void *state = NULL;
707
708 pa_sink_assert_ref(s);
709 pa_sink_assert_io_context(s);
710 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
711
712 /* If nobody requested a rewind and this is not actually a real
713 * rewind either, we can short-cut this. Note that this means
714 * that not all rewind requests triggered upstream will be
715 * translated into actual rewinds! */
716 if (!s->thread_info.rewind_requested && nbytes <= 0)
717 return;
718
719 s->thread_info.rewind_nbytes = 0;
720 s->thread_info.rewind_requested = FALSE;
721
722 if (s->thread_info.state == PA_SINK_SUSPENDED)
723 return;
724
725 if (nbytes > 0)
726 pa_log_debug("Processing rewind...");
727
728 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
729 pa_sink_input_assert_ref(i);
730 pa_sink_input_process_rewind(i, nbytes);
731 }
732
733 if (nbytes > 0)
734 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
735 pa_source_process_rewind(s->monitor_source, nbytes);
736 }
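
/* Editor's note: an illustrative fragment of how a sink's IO thread typically
 * reacts to a pending rewind request before rendering the next block; the
 * userdata pointer "u" is a placeholder:
 *
 *     if (u->sink->thread_info.rewind_requested)
 *         pa_sink_process_rewind(u->sink, 0);
 *
 * Passing 0 means nothing can actually be rewound here (e.g. there is no
 * device buffer to unwrite); the call still clears the request and notifies
 * the inputs. */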
737
738 /* Called from IO thread context */
739 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
740 pa_sink_input *i;
741 unsigned n = 0;
742 void *state = NULL;
743 size_t mixlength = *length;
744
745 pa_sink_assert_ref(s);
746 pa_sink_assert_io_context(s);
747 pa_assert(info);
748
749 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
750 pa_sink_input_assert_ref(i);
751
752 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
753
754 if (mixlength == 0 || info->chunk.length < mixlength)
755 mixlength = info->chunk.length;
756
757 if (pa_memblock_is_silence(info->chunk.memblock)) {
758 pa_memblock_unref(info->chunk.memblock);
759 continue;
760 }
761
762 info->userdata = pa_sink_input_ref(i);
763
764 pa_assert(info->chunk.memblock);
765 pa_assert(info->chunk.length > 0);
766
767 info++;
768 n++;
769 maxinfo--;
770 }
771
772 if (mixlength > 0)
773 *length = mixlength;
774
775 return n;
776 }
777
778 /* Called from IO thread context */
779 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
780 pa_sink_input *i;
781 void *state = NULL;
782 unsigned p = 0;
783 unsigned n_unreffed = 0;
784
785 pa_sink_assert_ref(s);
786 pa_sink_assert_io_context(s);
787 pa_assert(result);
788 pa_assert(result->memblock);
789 pa_assert(result->length > 0);
790
791 /* We optimize for the case where the order of the inputs has not changed */
792
793 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
794 unsigned j;
795 pa_mix_info* m = NULL;
796
797 pa_sink_input_assert_ref(i);
798
799 /* Let's try to find the matching entry in the pa_mix_info array */
800 for (j = 0; j < n; j ++) {
801
802 if (info[p].userdata == i) {
803 m = info + p;
804 break;
805 }
806
807 p++;
808 if (p >= n)
809 p = 0;
810 }
811
812 /* Drop read data */
813 pa_sink_input_drop(i, result->length);
814
815 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
816
817 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
818 void *ostate = NULL;
819 pa_source_output *o;
820 pa_memchunk c;
821
822 if (m && m->chunk.memblock) {
823 c = m->chunk;
824 pa_memblock_ref(c.memblock);
825 pa_assert(result->length <= c.length);
826 c.length = result->length;
827
828 pa_memchunk_make_writable(&c, 0);
829 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
830 } else {
831 c = s->silence;
832 pa_memblock_ref(c.memblock);
833 pa_assert(result->length <= c.length);
834 c.length = result->length;
835 }
836
837 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
838 pa_source_output_assert_ref(o);
839 pa_assert(o->direct_on_input == i);
840 pa_source_post_direct(s->monitor_source, o, &c);
841 }
842
843 pa_memblock_unref(c.memblock);
844 }
845 }
846
847 if (m) {
848 if (m->chunk.memblock)
849 pa_memblock_unref(m->chunk.memblock);
850 pa_memchunk_reset(&m->chunk);
851
852 pa_sink_input_unref(m->userdata);
853 m->userdata = NULL;
854
855 n_unreffed += 1;
856 }
857 }
858
859 /* Now drop references to entries that are included in the
860 * pa_mix_info array but don't exist anymore */
861
862 if (n_unreffed < n) {
863 for (; n > 0; info++, n--) {
864 if (info->userdata)
865 pa_sink_input_unref(info->userdata);
866 if (info->chunk.memblock)
867 pa_memblock_unref(info->chunk.memblock);
868 }
869 }
870
871 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
872 pa_source_post(s->monitor_source, result);
873 }
874
875 /* Called from IO thread context */
876 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
877 pa_mix_info info[MAX_MIX_CHANNELS];
878 unsigned n;
879 size_t block_size_max;
880
881 pa_sink_assert_ref(s);
882 pa_sink_assert_io_context(s);
883 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
884 pa_assert(pa_frame_aligned(length, &s->sample_spec));
885 pa_assert(result);
886
887 pa_sink_ref(s);
888
889 pa_assert(!s->thread_info.rewind_requested);
890 pa_assert(s->thread_info.rewind_nbytes == 0);
891
892 if (s->thread_info.state == PA_SINK_SUSPENDED) {
893 result->memblock = pa_memblock_ref(s->silence.memblock);
894 result->index = s->silence.index;
895 result->length = PA_MIN(s->silence.length, length);
896 pa_sink_unref(s);
    return;
897 }
898
899 if (length <= 0)
900 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
901
902 block_size_max = pa_mempool_block_size_max(s->core->mempool);
903 if (length > block_size_max)
904 length = pa_frame_align(block_size_max, &s->sample_spec);
905
906 pa_assert(length > 0);
907
908 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
909
910 if (n == 0) {
911
912 *result = s->silence;
913 pa_memblock_ref(result->memblock);
914
915 if (result->length > length)
916 result->length = length;
917
918 } else if (n == 1) {
919 pa_cvolume volume;
920
921 *result = info[0].chunk;
922 pa_memblock_ref(result->memblock);
923
924 if (result->length > length)
925 result->length = length;
926
927 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
928
929 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
930 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
931 pa_memblock_unref(result->memblock);
932 pa_silence_memchunk_get(&s->core->silence_cache,
933 s->core->mempool,
934 result,
935 &s->sample_spec,
936 result->length);
937 } else {
938 pa_memchunk_make_writable(result, 0);
939 pa_volume_memchunk(result, &s->sample_spec, &volume);
940 }
941 }
942 } else {
943 void *ptr;
944 result->memblock = pa_memblock_new(s->core->mempool, length);
945
946 ptr = pa_memblock_acquire(result->memblock);
947 result->length = pa_mix(info, n,
948 ptr, length,
949 &s->sample_spec,
950 &s->thread_info.soft_volume,
951 s->thread_info.soft_muted);
952 pa_memblock_release(result->memblock);
953
954 result->index = 0;
955 }
956
957 inputs_drop(s, info, n, result);
958
959 pa_sink_unref(s);
960 }
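
/* Editor's note: a minimal, hedged example of consuming pa_sink_render() from
 * a sink's IO thread and handing the data to the device; "u", u->buffer_size
 * and write_to_device() are placeholders for module-specific code:
 *
 *     pa_memchunk chunk;
 *     void *p;
 *
 *     pa_sink_render(u->sink, u->buffer_size, &chunk);
 *
 *     p = pa_memblock_acquire(chunk.memblock);
 *     write_to_device((const uint8_t*) p + chunk.index, chunk.length);
 *     pa_memblock_release(chunk.memblock);
 *     pa_memblock_unref(chunk.memblock);
 */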
961
962 /* Called from IO thread context */
963 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
964 pa_mix_info info[MAX_MIX_CHANNELS];
965 unsigned n;
966 size_t length, block_size_max;
967
968 pa_sink_assert_ref(s);
969 pa_sink_assert_io_context(s);
970 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
971 pa_assert(target);
972 pa_assert(target->memblock);
973 pa_assert(target->length > 0);
974 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
975
976 pa_sink_ref(s);
977
978 pa_assert(!s->thread_info.rewind_requested);
979 pa_assert(s->thread_info.rewind_nbytes == 0);
980
981 if (s->thread_info.state == PA_SINK_SUSPENDED) {
982 pa_silence_memchunk(target, &s->sample_spec);
983 pa_sink_unref(s);
    return;
984 }
985
986 length = target->length;
987 block_size_max = pa_mempool_block_size_max(s->core->mempool);
988 if (length > block_size_max)
989 length = pa_frame_align(block_size_max, &s->sample_spec);
990
991 pa_assert(length > 0);
992
993 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
994
995 if (n == 0) {
996 if (target->length > length)
997 target->length = length;
998
999 pa_silence_memchunk(target, &s->sample_spec);
1000 } else if (n == 1) {
1001 pa_cvolume volume;
1002
1003 if (target->length > length)
1004 target->length = length;
1005
1006 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1007
1008 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1009 pa_silence_memchunk(target, &s->sample_spec);
1010 else {
1011 pa_memchunk vchunk;
1012
1013 vchunk = info[0].chunk;
1014 pa_memblock_ref(vchunk.memblock);
1015
1016 if (vchunk.length > length)
1017 vchunk.length = length;
1018
1019 if (!pa_cvolume_is_norm(&volume)) {
1020 pa_memchunk_make_writable(&vchunk, 0);
1021 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1022 }
1023
1024 pa_memchunk_memcpy(target, &vchunk);
1025 pa_memblock_unref(vchunk.memblock);
1026 }
1027
1028 } else {
1029 void *ptr;
1030
1031 ptr = pa_memblock_acquire(target->memblock);
1032
1033 target->length = pa_mix(info, n,
1034 (uint8_t*) ptr + target->index, length,
1035 &s->sample_spec,
1036 &s->thread_info.soft_volume,
1037 s->thread_info.soft_muted);
1038
1039 pa_memblock_release(target->memblock);
1040 }
1041
1042 inputs_drop(s, info, n, target);
1043
1044 pa_sink_unref(s);
1045 }
1046
1047 /* Called from IO thread context */
1048 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1049 pa_memchunk chunk;
1050 size_t l, d;
1051
1052 pa_sink_assert_ref(s);
1053 pa_sink_assert_io_context(s);
1054 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1055 pa_assert(target);
1056 pa_assert(target->memblock);
1057 pa_assert(target->length > 0);
1058 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1059
1060 pa_sink_ref(s);
1061
1062 pa_assert(!s->thread_info.rewind_requested);
1063 pa_assert(s->thread_info.rewind_nbytes == 0);
1064
1065 l = target->length;
1066 d = 0;
1067 while (l > 0) {
1068 chunk = *target;
1069 chunk.index += d;
1070 chunk.length -= d;
1071
1072 pa_sink_render_into(s, &chunk);
1073
1074 d += chunk.length;
1075 l -= chunk.length;
1076 }
1077
1078 pa_sink_unref(s);
1079 }
1080
1081 /* Called from IO thread context */
1082 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1083 pa_mix_info info[MAX_MIX_CHANNELS];
1084 size_t length1st = length;
1085 unsigned n;
1086
1087 pa_sink_assert_ref(s);
1088 pa_sink_assert_io_context(s);
1089 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1090 pa_assert(length > 0);
1091 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1092 pa_assert(result);
1093
1094 pa_sink_ref(s);
1095
1096 pa_assert(!s->thread_info.rewind_requested);
1097 pa_assert(s->thread_info.rewind_nbytes == 0);
1098
1099 pa_assert(length > 0);
1100
1101 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
1102
1103 if (n == 0) {
1104 pa_silence_memchunk_get(&s->core->silence_cache,
1105 s->core->mempool,
1106 result,
1107 &s->sample_spec,
1108 length1st);
1109 } else if (n == 1) {
1110 pa_cvolume volume;
1111
1112 *result = info[0].chunk;
1113 pa_memblock_ref(result->memblock);
1114
1115 if (result->length > length)
1116 result->length = length;
1117
1118 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1119
1120 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1121 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1122 pa_memblock_unref(result->memblock);
1123 pa_silence_memchunk_get(&s->core->silence_cache,
1124 s->core->mempool,
1125 result,
1126 &s->sample_spec,
1127 result->length);
1128 } else {
1129 pa_memchunk_make_writable(result, length);
1130 pa_volume_memchunk(result, &s->sample_spec, &volume);
1131 }
1132 }
1133 } else {
1134 void *ptr;
1135
1136 result->index = 0;
1137 result->memblock = pa_memblock_new(s->core->mempool, length);
1138
1139 ptr = pa_memblock_acquire(result->memblock);
1140
1141 result->length = pa_mix(info, n,
1142 (uint8_t*) ptr + result->index, length1st,
1143 &s->sample_spec,
1144 &s->thread_info.soft_volume,
1145 s->thread_info.soft_muted);
1146
1147 pa_memblock_release(result->memblock);
1148 }
1149
1150 inputs_drop(s, info, n, result);
1151
1152 if (result->length < length) {
1153 pa_memchunk chunk;
1154 size_t l, d;
1155 pa_memchunk_make_writable(result, length);
1156
1157 l = length - result->length;
1158 d = result->index + result->length;
1159 while (l > 0) {
1160 chunk = *result;
1161 chunk.index = d;
1162 chunk.length = l;
1163
1164 pa_sink_render_into(s, &chunk);
1165
1166 d += chunk.length;
1167 l -= chunk.length;
1168 }
1169 result->length = length;
1170 }
1171
1172 pa_sink_unref(s);
1173 }
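
/* Editor's note: a brief comparison of the render entry points above, plus a
 * hedged usage fragment. pa_sink_render() allocates a chunk of at most the
 * requested length, pa_sink_render_into() fills (part of) a caller-owned
 * chunk, and pa_sink_render_into_full()/pa_sink_render_full() loop until the
 * caller's buffer or the requested length is filled completely. For a driver
 * with a fixed-size hardware period ("u" and u->period_bytes are
 * placeholders):
 *
 *     pa_memchunk chunk;
 *
 *     chunk.memblock = pa_memblock_new(u->core->mempool, u->period_bytes);
 *     chunk.index = 0;
 *     chunk.length = u->period_bytes;
 *
 *     pa_sink_render_into_full(u->sink, &chunk);
 *     // ... copy the chunk to the hardware ...
 *     pa_memblock_unref(chunk.memblock);
 */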
1174
1175 /* Called from main thread */
1176 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1177 pa_usec_t usec = 0;
1178
1179 pa_sink_assert_ref(s);
1180 pa_assert_ctl_context();
1181 pa_assert(PA_SINK_IS_LINKED(s->state));
1182
1183 /* The returned value is supposed to be in the time domain of the sound card! */
1184
1185 if (s->state == PA_SINK_SUSPENDED)
1186 return 0;
1187
1188 if (!(s->flags & PA_SINK_LATENCY))
1189 return 0;
1190
1191 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1192
1193 return usec;
1194 }
1195
1196 /* Called from IO thread */
1197 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1198 pa_usec_t usec = 0;
1199 pa_msgobject *o;
1200
1201 pa_sink_assert_ref(s);
1202 pa_sink_assert_io_context(s);
1203 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1204
1205 /* The returned value is supposed to be in the time domain of the sound card! */
1206
1207 if (s->thread_info.state == PA_SINK_SUSPENDED)
1208 return 0;
1209
1210 if (!(s->flags & PA_SINK_LATENCY))
1211 return 0;
1212
1213 o = PA_MSGOBJECT(s);
1214
1215 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1216
1217 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1218 return -1;
1219
1220 return usec;
1221 }
1222
1223 /* Called from main context */
1224 static void compute_reference_ratios(pa_sink *s) {
1225 uint32_t idx;
1226 pa_sink_input *i;
1227
1228 pa_sink_assert_ref(s);
1229 pa_assert_ctl_context();
1230 pa_assert(PA_SINK_IS_LINKED(s->state));
1231 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1232
1233 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1234 unsigned c;
1235 pa_cvolume remapped;
1236
1237 /*
1238 * Calculates the reference ratio from the sink's reference
1239 * volume. This basically calculates:
1240 *
1241 * i->reference_ratio = i->volume / s->reference_volume
1242 */
1243
1244 remapped = s->reference_volume;
1245 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1246
1247 i->reference_ratio.channels = i->sample_spec.channels;
1248
1249 for (c = 0; c < i->sample_spec.channels; c++) {
1250
1251 /* We don't update when the sink volume is 0 anyway */
1252 if (remapped.values[c] <= PA_VOLUME_MUTED)
1253 continue;
1254
1255 /* Don't update the reference ratio unless necessary */
1256 if (pa_sw_volume_multiply(
1257 i->reference_ratio.values[c],
1258 remapped.values[c]) == i->volume.values[c])
1259 continue;
1260
1261 i->reference_ratio.values[c] = pa_sw_volume_divide(
1262 i->volume.values[c],
1263 remapped.values[c]);
1264 }
1265 }
1266 }
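
/* Editor's note: a small worked example of the ratio above. If the sink's
 * reference volume is PA_VOLUME_NORM on some channel, pa_sw_volume_divide()
 * returns the stream volume unchanged, so reference_ratio == i->volume for
 * that channel. When the sink volume later changes,
 * propagate_reference_volume() recomputes i->volume as
 * s->reference_volume * i->reference_ratio, so every stream keeps its level
 * relative to the sink. */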
1267
1268 /* Called from main context */
1269 static void compute_real_ratios(pa_sink *s) {
1270 pa_sink_input *i;
1271 uint32_t idx;
1272
1273 pa_sink_assert_ref(s);
1274 pa_assert_ctl_context();
1275 pa_assert(PA_SINK_IS_LINKED(s->state));
1276 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1277
1278 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1279 unsigned c;
1280 pa_cvolume remapped;
1281
1282 /*
1283 * This basically calculates:
1284 *
1285 * i->real_ratio := i->volume / s->real_volume
1286 * i->soft_volume := i->real_ratio * i->volume_factor
1287 */
1288
1289 remapped = s->real_volume;
1290 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1291
1292 i->real_ratio.channels = i->sample_spec.channels;
1293 i->soft_volume.channels = i->sample_spec.channels;
1294
1295 for (c = 0; c < i->sample_spec.channels; c++) {
1296
1297 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1298 /* We leave i->real_ratio untouched */
1299 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1300 continue;
1301 }
1302
1303 /* Don't lose accuracy unless necessary */
1304 if (pa_sw_volume_multiply(
1305 i->real_ratio.values[c],
1306 remapped.values[c]) != i->volume.values[c])
1307
1308 i->real_ratio.values[c] = pa_sw_volume_divide(
1309 i->volume.values[c],
1310 remapped.values[c]);
1311
1312 i->soft_volume.values[c] = pa_sw_volume_multiply(
1313 i->real_ratio.values[c],
1314 i->volume_factor.values[c]);
1315 }
1316
1317 /* We don't copy the soft_volume to the thread_info data
1318 * here. That must be done by the caller */
1319 }
1320 }
1321
1322 /* Called from main thread */
1323 static void compute_real_volume(pa_sink *s) {
1324 pa_sink_input *i;
1325 uint32_t idx;
1326
1327 pa_sink_assert_ref(s);
1328 pa_assert_ctl_context();
1329 pa_assert(PA_SINK_IS_LINKED(s->state));
1330 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1331
1332 /* This determines the maximum volume of all streams and sets
1333 * s->real_volume accordingly. */
1334
1335 if (pa_idxset_isempty(s->inputs)) {
1336 /* In the special case that we have no sink input we leave the
1337 * volume unmodified. */
1338 s->real_volume = s->reference_volume;
1339 return;
1340 }
1341
1342 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1343
1344 /* First let's determine the new maximum volume of all inputs
1345 * connected to this sink */
1346 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1347 pa_cvolume remapped;
1348
1349 remapped = i->volume;
1350 pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
1351 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1352 }
1353
1354 /* Then, let's update the real ratios/soft volumes of all inputs
1355 * connected to this sink */
1356 compute_real_ratios(s);
1357 }
1358
1359 /* Called from main thread */
1360 static void propagate_reference_volume(pa_sink *s) {
1361 pa_sink_input *i;
1362 uint32_t idx;
1363
1364 pa_sink_assert_ref(s);
1365 pa_assert_ctl_context();
1366 pa_assert(PA_SINK_IS_LINKED(s->state));
1367 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1368
1369 /* This is called whenever the sink volume changes in a way that
1370 * is not caused by a sink input volume change. We need to fix up
1371 * the sink input volumes accordingly */
1372
1373 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1374 pa_cvolume old_volume, remapped;
1375
1376 old_volume = i->volume;
1377
1378 /* This basically calculates:
1379 *
1380 * i->volume := s->reference_volume * i->reference_ratio */
1381
1382 remapped = s->reference_volume;
1383 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1384 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1385
1386 /* The volume changed, let's tell people so */
1387 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1388
1389 if (i->volume_changed)
1390 i->volume_changed(i);
1391
1392 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1393 }
1394 }
1395 }
1396
1397 /* Called from main thread */
1398 void pa_sink_set_volume(
1399 pa_sink *s,
1400 const pa_cvolume *volume,
1401 pa_bool_t sendmsg,
1402 pa_bool_t save) {
1403
1404 pa_cvolume old_reference_volume;
1405 pa_bool_t reference_changed;
1406
1407 pa_sink_assert_ref(s);
1408 pa_assert_ctl_context();
1409 pa_assert(PA_SINK_IS_LINKED(s->state));
1410 pa_assert(!volume || pa_cvolume_valid(volume));
1411 pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1412 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1413
1414 /* As a special exception we accept mono volumes on all sinks --
1415 * even on those with more complex channel maps */
1416
1417 /* If volume is NULL we synchronize the sink's real and reference
1418 * volumes with the stream volumes. If it is not NULL we update
1419 * the reference_volume with it. */
1420
1421 old_reference_volume = s->reference_volume;
1422
1423 if (volume) {
1424
1425 if (pa_cvolume_compatible(volume, &s->sample_spec))
1426 s->reference_volume = *volume;
1427 else
1428 pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));
1429
1430 if (s->flags & PA_SINK_FLAT_VOLUME) {
1431 /* OK, propagate this volume change back to the inputs */
1432 propagate_reference_volume(s);
1433
1434 /* And now recalculate the real volume */
1435 compute_real_volume(s);
1436 } else
1437 s->real_volume = s->reference_volume;
1438
1439 } else {
1440 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1441
1442 /* Ok, let's determine the new real volume */
1443 compute_real_volume(s);
1444
1445 /* Let's 'push' the reference volume if necessary */
1446 pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1447
1448 /* We need to fix the reference ratios of all streams now that
1449 * we changed the reference volume */
1450 compute_reference_ratios(s);
1451 }
1452
1453 reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
1454 s->save_volume = (!reference_changed && s->save_volume) || save;
1455
1456 if (s->set_volume) {
1457 /* If we have a function set_volume(), then we do not apply a
1458 * soft volume by default. However, set_volume() is free to
1459 * apply one to s->soft_volume */
1460
1461 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1462 s->set_volume(s);
1463
1464 } else
1465 /* If we have no function set_volume(), then the soft volume
1466 * becomes the virtual volume */
1467 s->soft_volume = s->real_volume;
1468
1469 /* This tells the sink that soft and/or virtual volume changed */
1470 if (sendmsg)
1471 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1472
1473 if (reference_changed)
1474 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1475 }
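
/* Editor's note: an illustrative caller-side example of the mono-volume
 * exception documented above: a single-channel pa_cvolume is accepted for any
 * sink and is applied through pa_cvolume_scale(), which preserves the existing
 * channel balance:
 *
 *     pa_cvolume v;
 *
 *     pa_cvolume_set(&v, 1, PA_VOLUME_NORM / 2);
 *     pa_sink_set_volume(s, &v, TRUE, TRUE);
 */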
1476
1477 /* Called from main thread. Only to be called by sink implementor */
1478 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1479 pa_sink_assert_ref(s);
1480 pa_assert_ctl_context();
1481
1482 if (!volume)
1483 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1484 else
1485 s->soft_volume = *volume;
1486
1487 if (PA_SINK_IS_LINKED(s->state))
1488 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1489 else
1490 s->thread_info.soft_volume = s->soft_volume;
1491 }
1492
1493 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
1494 pa_sink_input *i;
1495 uint32_t idx;
1496 pa_cvolume old_reference_volume;
1497
1498 pa_sink_assert_ref(s);
1499 pa_assert_ctl_context();
1500 pa_assert(PA_SINK_IS_LINKED(s->state));
1501
1502 /* This is called when the hardware's real volume changes due to
1503 * some external event. We copy the real volume into our
1504 * reference volume and then rebuild the stream volumes based on
1505 * i->real_ratio which should stay fixed. */
1506
1507 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1508 return;
1509
1510 old_reference_volume = s->reference_volume;
1511
1512 /* 1. Make the real volume the reference volume */
1513 s->reference_volume = s->real_volume;
1514
1515 if (s->flags & PA_SINK_FLAT_VOLUME) {
1516
1517 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1518 pa_cvolume old_volume, remapped;
1519
1520 old_volume = i->volume;
1521
1522 /* 2. Since the sink's reference and real volumes are equal
1523 * now our ratios should be too. */
1524 i->reference_ratio = i->real_ratio;
1525
1526 /* 3. Recalculate the new stream reference volume based on the
1527 * reference ratio and the sink's reference volume.
1528 *
1529 * This basically calculates:
1530 *
1531 * i->volume = s->reference_volume * i->reference_ratio
1532 *
1533 * This is identical to propagate_reference_volume() */
1534 remapped = s->reference_volume;
1535 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1536 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1537
1538 /* Notify if something changed */
1539 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1540
1541 if (i->volume_changed)
1542 i->volume_changed(i);
1543
1544 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1545 }
1546 }
1547 }
1548
1549 /* Something got changed in the hardware. It probably makes sense
1550 * to save changed hw settings given that hw volume changes not
1551 * triggered by PA are almost certainly done by the user. */
1552 s->save_volume = TRUE;
1553
1554 if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1555 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1556 }
1557
1558 /* Called from main thread */
1559 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1560 pa_sink_assert_ref(s);
1561 pa_assert_ctl_context();
1562 pa_assert(PA_SINK_IS_LINKED(s->state));
1563
1564 if (s->refresh_volume || force_refresh) {
1565 struct pa_cvolume old_real_volume;
1566
1567 old_real_volume = s->real_volume;
1568
1569 if (s->get_volume)
1570 s->get_volume(s);
1571
1572 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1573
1574 propagate_real_volume(s, &old_real_volume);
1575 }
1576
1577 return &s->reference_volume;
1578 }
1579
1580 /* Called from main thread */
1581 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1582 pa_cvolume old_real_volume;
1583
1584 pa_sink_assert_ref(s);
1585 pa_assert_ctl_context();
1586 pa_assert(PA_SINK_IS_LINKED(s->state));
1587
1588 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1589
1590 old_real_volume = s->real_volume;
1591 s->real_volume = *new_real_volume;
1592
1593 propagate_real_volume(s, &old_real_volume);
1594 }
1595
1596 /* Called from main thread */
1597 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1598 pa_bool_t old_muted;
1599
1600 pa_sink_assert_ref(s);
1601 pa_assert_ctl_context();
1602 pa_assert(PA_SINK_IS_LINKED(s->state));
1603
1604 old_muted = s->muted;
1605 s->muted = mute;
1606 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1607
1608 if (s->set_mute)
1609 s->set_mute(s);
1610
1611 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1612
1613 if (old_muted != s->muted)
1614 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1615 }
1616
1617 /* Called from main thread */
1618 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1619
1620 pa_sink_assert_ref(s);
1621 pa_assert_ctl_context();
1622 pa_assert(PA_SINK_IS_LINKED(s->state));
1623
1624 if (s->refresh_muted || force_refresh) {
1625 pa_bool_t old_muted = s->muted;
1626
1627 if (s->get_mute)
1628 s->get_mute(s);
1629
1630 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1631
1632 if (old_muted != s->muted) {
1633 s->save_muted = TRUE;
1634
1635 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1636
1637 /* Make sure the soft mute status stays in sync */
1638 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1639 }
1640 }
1641
1642 return s->muted;
1643 }
1644
1645 /* Called from main thread */
1646 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1647 pa_sink_assert_ref(s);
1648 pa_assert_ctl_context();
1649 pa_assert(PA_SINK_IS_LINKED(s->state));
1650
1651 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1652
1653 if (s->muted == new_muted)
1654 return;
1655
1656 s->muted = new_muted;
1657 s->save_muted = TRUE;
1658
1659 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1660 }
1661
1662 /* Called from main thread */
1663 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1664 pa_sink_assert_ref(s);
1665 pa_assert_ctl_context();
1666
1667 if (p)
1668 pa_proplist_update(s->proplist, mode, p);
1669
1670 if (PA_SINK_IS_LINKED(s->state)) {
1671 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1672 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1673 }
1674
1675 return TRUE;
1676 }
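
/* Editor's note: an illustrative example of updating sink properties at
 * runtime; the property value chosen here is just an example:
 *
 *     pa_proplist *p = pa_proplist_new();
 *
 *     pa_proplist_sets(p, PA_PROP_DEVICE_ICON_NAME, "audio-card");
 *     pa_sink_update_proplist(s, PA_UPDATE_MERGE, p);
 *     pa_proplist_free(p);
 */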
1677
1678 /* Called from main thread */
1679 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1680 void pa_sink_set_description(pa_sink *s, const char *description) {
1681 const char *old;
1682 pa_sink_assert_ref(s);
1683 pa_assert_ctl_context();
1684
1685 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1686 return;
1687
1688 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1689
1690 if (old && description && pa_streq(old, description))
1691 return;
1692
1693 if (description)
1694 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1695 else
1696 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1697
1698 if (s->monitor_source) {
1699 char *n;
1700
1701 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1702 pa_source_set_description(s->monitor_source, n);
1703 pa_xfree(n);
1704 }
1705
1706 if (PA_SINK_IS_LINKED(s->state)) {
1707 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1708 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1709 }
1710 }
1711
1712 /* Called from main thread */
1713 unsigned pa_sink_linked_by(pa_sink *s) {
1714 unsigned ret;
1715
1716 pa_sink_assert_ref(s);
1717 pa_assert_ctl_context();
1718 pa_assert(PA_SINK_IS_LINKED(s->state));
1719
1720 ret = pa_idxset_size(s->inputs);
1721
1722 /* We add in the number of streams connected to us here. Please
1723 * note the asymmetry to pa_sink_used_by()! */
1724
1725 if (s->monitor_source)
1726 ret += pa_source_linked_by(s->monitor_source);
1727
1728 return ret;
1729 }
1730
1731 /* Called from main thread */
1732 unsigned pa_sink_used_by(pa_sink *s) {
1733 unsigned ret;
1734
1735 pa_sink_assert_ref(s);
1736 pa_assert_ctl_context();
1737 pa_assert(PA_SINK_IS_LINKED(s->state));
1738
1739 ret = pa_idxset_size(s->inputs);
1740 pa_assert(ret >= s->n_corked);
1741
1742 /* Streams connected to our monitor source do not matter for
1743 * pa_sink_used_by()! */
1744
1745 return ret - s->n_corked;
1746 }
1747
1748 /* Called from main thread */
1749 unsigned pa_sink_check_suspend(pa_sink *s) {
1750 unsigned ret;
1751 pa_sink_input *i;
1752 uint32_t idx;
1753
1754 pa_sink_assert_ref(s);
1755 pa_assert_ctl_context();
1756
1757 if (!PA_SINK_IS_LINKED(s->state))
1758 return 0;
1759
1760 ret = 0;
1761
1762 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1763 pa_sink_input_state_t st;
1764
1765 st = pa_sink_input_get_state(i);
1766 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1767
1768 if (st == PA_SINK_INPUT_CORKED)
1769 continue;
1770
1771 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1772 continue;
1773
1774 ret ++;
1775 }
1776
1777 if (s->monitor_source)
1778 ret += pa_source_check_suspend(s->monitor_source);
1779
1780 return ret;
1781 }
1782
1783 /* Called from the IO thread */
1784 static void sync_input_volumes_within_thread(pa_sink *s) {
1785 pa_sink_input *i;
1786 void *state = NULL;
1787
1788 pa_sink_assert_ref(s);
1789 pa_sink_assert_io_context(s);
1790
1791 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1792 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1793 continue;
1794
1795 i->thread_info.soft_volume = i->soft_volume;
1796 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1797 }
1798 }
1799
1800 /* Called from IO thread, except when it is not */
1801 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1802 pa_sink *s = PA_SINK(o);
1803 pa_sink_assert_ref(s);
1804
1805 switch ((pa_sink_message_t) code) {
1806
1807 case PA_SINK_MESSAGE_ADD_INPUT: {
1808 pa_sink_input *i = PA_SINK_INPUT(userdata);
1809
1810 /* If you change anything here, make sure to change the
1811 * sink input handling a few lines down at
1812 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1813
1814 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1815
1816 /* Since the caller sleeps in pa_sink_input_put(), we can
1817 * safely access data outside of thread_info even though
1818 * it is mutable */
1819
1820 if ((i->thread_info.sync_prev = i->sync_prev)) {
1821 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1822 pa_assert(i->sync_prev->sync_next == i);
1823 i->thread_info.sync_prev->thread_info.sync_next = i;
1824 }
1825
1826 if ((i->thread_info.sync_next = i->sync_next)) {
1827 pa_assert(i->sink == i->thread_info.sync_next->sink);
1828 pa_assert(i->sync_next->sync_prev == i);
1829 i->thread_info.sync_next->thread_info.sync_prev = i;
1830 }
1831
1832 pa_assert(!i->thread_info.attached);
1833 i->thread_info.attached = TRUE;
1834
1835 if (i->attach)
1836 i->attach(i);
1837
1838 pa_sink_input_set_state_within_thread(i, i->state);
1839
1840 /* The requested latency of the sink input needs to be
1841 * fixed up and then configured on the sink */
1842
1843 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1844 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1845
1846 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1847 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1848
1849 /* We don't rewind here automatically. This is left to the
1850 * sink input implementor because some sink inputs need a
1851 * slow start, i.e. need some time to buffer client
1852 * samples before beginning streaming. */
1853
1854 /* In flat volume mode we need to update the volume as
1855 * well */
1856 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1857 }
1858
1859 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1860 pa_sink_input *i = PA_SINK_INPUT(userdata);
1861
1862 /* If you change anything here, make sure to change the
1863 * sink input handling a few lines down at
1864 * PA_SINK_MESSAGE_START_MOVE, too. */
1865
1866 if (i->detach)
1867 i->detach(i);
1868
1869 pa_sink_input_set_state_within_thread(i, i->state);
1870
1871 pa_assert(i->thread_info.attached);
1872 i->thread_info.attached = FALSE;
1873
1874 /* Since the caller sleeps in pa_sink_input_unlink(),
1875 * we can safely access data outside of thread_info even
1876 * though it is mutable */
1877
1878 pa_assert(!i->sync_prev);
1879 pa_assert(!i->sync_next);
1880
1881 if (i->thread_info.sync_prev) {
1882 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1883 i->thread_info.sync_prev = NULL;
1884 }
1885
1886 if (i->thread_info.sync_next) {
1887 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1888 i->thread_info.sync_next = NULL;
1889 }
1890
1891 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1892 pa_sink_input_unref(i);
1893
1894 pa_sink_invalidate_requested_latency(s, TRUE);
1895 pa_sink_request_rewind(s, (size_t) -1);
1896
1897 /* In flat volume mode we need to update the volume as
1898 * well */
1899 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1900 }
1901
1902 case PA_SINK_MESSAGE_START_MOVE: {
1903 pa_sink_input *i = PA_SINK_INPUT(userdata);
1904
1905 /* We don't support moving synchronized streams. */
1906 pa_assert(!i->sync_prev);
1907 pa_assert(!i->sync_next);
1908 pa_assert(!i->thread_info.sync_next);
1909 pa_assert(!i->thread_info.sync_prev);
1910
1911 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1912 pa_usec_t usec = 0;
1913 size_t sink_nbytes, total_nbytes;
1914
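                /* Everything rendered for this stream but not yet played --
                 * the sink latency plus whatever is still queued in the
                 * input's render queue -- is rewound and flagged for
                 * rewriting, presumably so it can be produced again on the
                 * sink the stream is moving to. */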
1915 /* Get the latency of the sink */
1916 usec = pa_sink_get_latency_within_thread(s);
1917 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1918 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1919
1920 if (total_nbytes > 0) {
1921 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1922 i->thread_info.rewrite_flush = TRUE;
1923 pa_sink_input_process_rewind(i, sink_nbytes);
1924 }
1925 }
1926
1927 if (i->detach)
1928 i->detach(i);
1929
1930 pa_assert(i->thread_info.attached);
1931 i->thread_info.attached = FALSE;
1932
1933             /* Let's remove the sink input ... */
1934 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1935 pa_sink_input_unref(i);
1936
1937 pa_sink_invalidate_requested_latency(s, TRUE);
1938
1939 pa_log_debug("Requesting rewind due to started move");
1940 pa_sink_request_rewind(s, (size_t) -1);
1941
1942 /* In flat volume mode we need to update the volume as
1943 * well */
1944 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1945 }
1946
1947 case PA_SINK_MESSAGE_FINISH_MOVE: {
1948 pa_sink_input *i = PA_SINK_INPUT(userdata);
1949
1950 /* We don't support moving synchronized streams. */
1951 pa_assert(!i->sync_prev);
1952 pa_assert(!i->sync_next);
1953 pa_assert(!i->thread_info.sync_next);
1954 pa_assert(!i->thread_info.sync_prev);
1955
1956 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1957
1958 pa_assert(!i->thread_info.attached);
1959 i->thread_info.attached = TRUE;
1960
1961 if (i->attach)
1962 i->attach(i);
1963
1964 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1965 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1966
1967 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1968 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1969
1970 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1971 pa_usec_t usec = 0;
1972 size_t nbytes;
1973
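                /* Drop one sink latency's worth of data from the input and
                 * request a rewind of the same size -- presumably to keep the
                 * freshly attached stream aligned with what this sink already
                 * has queued up. */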
1974 /* Get the latency of the sink */
1975 usec = pa_sink_get_latency_within_thread(s);
1976 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1977
1978 if (nbytes > 0)
1979 pa_sink_input_drop(i, nbytes);
1980
1981 pa_log_debug("Requesting rewind due to finished move");
1982 pa_sink_request_rewind(s, nbytes);
1983 }
1984
1985 /* In flat volume mode we need to update the volume as
1986 * well */
1987 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1988 }
1989
1990 case PA_SINK_MESSAGE_SET_VOLUME:
1991
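            /* Copy the new soft volume into thread_info and rewind as far as
             * possible, so audio that has already been rendered is rendered
             * again with the new volume applied. */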
1992 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1993 s->thread_info.soft_volume = s->soft_volume;
1994 pa_sink_request_rewind(s, (size_t) -1);
1995 }
1996
1997 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1998 return 0;
1999
2000 /* Fall through ... */
2001
2002 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2003 sync_input_volumes_within_thread(s);
2004 return 0;
2005
2006 case PA_SINK_MESSAGE_GET_VOLUME:
2007 return 0;
2008
2009 case PA_SINK_MESSAGE_SET_MUTE:
2010
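            /* Same idea as SET_VOLUME: mirror the mute flag into thread_info
             * and rewind so the change also affects already-rendered data. */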
2011 if (s->thread_info.soft_muted != s->muted) {
2012 s->thread_info.soft_muted = s->muted;
2013 pa_sink_request_rewind(s, (size_t) -1);
2014 }
2015
2016 return 0;
2017
2018 case PA_SINK_MESSAGE_GET_MUTE:
2019 return 0;
2020
2021 case PA_SINK_MESSAGE_SET_STATE: {
2022
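            /* Detect transitions into or out of PA_SINK_SUSPENDED; only those
             * are forwarded to the inputs via suspend_within_thread(). Pending
             * rewinds are dropped whenever the sink becomes suspended. */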
2023 pa_bool_t suspend_change =
2024 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2025 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2026
2027 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2028
2029 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2030 s->thread_info.rewind_nbytes = 0;
2031 s->thread_info.rewind_requested = FALSE;
2032 }
2033
2034 if (suspend_change) {
2035 pa_sink_input *i;
2036 void *state = NULL;
2037
2038 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2039 if (i->suspend_within_thread)
2040 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2041 }
2042
2043 return 0;
2044 }
2045
2046 case PA_SINK_MESSAGE_DETACH:
2047
2048 /* Detach all streams */
2049 pa_sink_detach_within_thread(s);
2050 return 0;
2051
2052 case PA_SINK_MESSAGE_ATTACH:
2053
2054 /* Reattach all streams */
2055 pa_sink_attach_within_thread(s);
2056 return 0;
2057
2058 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2059
2060 pa_usec_t *usec = userdata;
2061 *usec = pa_sink_get_requested_latency_within_thread(s);
2062
2063 /* Yes, that's right, the IO thread will see -1 when no
2064 * explicit requested latency is configured, the main
2065 * thread will see max_latency */
2066 if (*usec == (pa_usec_t) -1)
2067 *usec = s->thread_info.max_latency;
2068
2069 return 0;
2070 }
2071
2072 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2073 pa_usec_t *r = userdata;
2074
2075 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2076
2077 return 0;
2078 }
2079
2080 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2081 pa_usec_t *r = userdata;
2082
2083 r[0] = s->thread_info.min_latency;
2084 r[1] = s->thread_info.max_latency;
2085
2086 return 0;
2087 }
2088
2089 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2090
2091 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2092 return 0;
2093
2094 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2095
2096 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2097 return 0;
2098
2099 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2100
2101 *((size_t*) userdata) = s->thread_info.max_rewind;
2102 return 0;
2103
2104 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2105
2106 *((size_t*) userdata) = s->thread_info.max_request;
2107 return 0;
2108
2109 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2110
2111 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2112 return 0;
2113
2114 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2115
2116 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2117 return 0;
2118
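        /* PA_SINK_MESSAGE_GET_LATENCY is presumably answered by the sink
         * implementation's own process_msg() before delegating here; anything
         * that falls through is reported as a failure below. */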
2119 case PA_SINK_MESSAGE_GET_LATENCY:
2120 case PA_SINK_MESSAGE_MAX:
2121 ;
2122 }
2123
2124 return -1;
2125 }
2126
2127 /* Called from main thread */
2128 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2129 pa_sink *sink;
2130 uint32_t idx;
2131 int ret = 0;
2132
2133 pa_core_assert_ref(c);
2134 pa_assert_ctl_context();
2135 pa_assert(cause != 0);
2136
2137 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2138 int r;
2139
2140 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2141 ret = r;
2142 }
2143
2144 return ret;
2145 }
2146
2147 /* Called from main thread */
2148 void pa_sink_detach(pa_sink *s) {
2149 pa_sink_assert_ref(s);
2150 pa_assert_ctl_context();
2151 pa_assert(PA_SINK_IS_LINKED(s->state));
2152
2153 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2154 }
2155
2156 /* Called from main thread */
2157 void pa_sink_attach(pa_sink *s) {
2158 pa_sink_assert_ref(s);
2159 pa_assert_ctl_context();
2160 pa_assert(PA_SINK_IS_LINKED(s->state));
2161
2162 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2163 }
2164
2165 /* Called from IO thread */
2166 void pa_sink_detach_within_thread(pa_sink *s) {
2167 pa_sink_input *i;
2168 void *state = NULL;
2169
2170 pa_sink_assert_ref(s);
2171 pa_sink_assert_io_context(s);
2172 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2173
2174 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2175 if (i->detach)
2176 i->detach(i);
2177
2178 if (s->monitor_source)
2179 pa_source_detach_within_thread(s->monitor_source);
2180 }
2181
2182 /* Called from IO thread */
2183 void pa_sink_attach_within_thread(pa_sink *s) {
2184 pa_sink_input *i;
2185 void *state = NULL;
2186
2187 pa_sink_assert_ref(s);
2188 pa_sink_assert_io_context(s);
2189 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2190
2191 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2192 if (i->attach)
2193 i->attach(i);
2194
2195 if (s->monitor_source)
2196 pa_source_attach_within_thread(s->monitor_source);
2197 }
2198
2199 /* Called from IO thread */
2200 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
2201 pa_sink_assert_ref(s);
2202 pa_sink_assert_io_context(s);
2203 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2204
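    /* A request of (size_t) -1 means "rewind as much as possible". Everything
     * is clamped to max_rewind, and a new request is ignored if an equal or
     * larger rewind is already pending. Suspended sinks never rewind. */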
2205 if (s->thread_info.state == PA_SINK_SUSPENDED)
2206 return;
2207
2208 if (nbytes == (size_t) -1)
2209 nbytes = s->thread_info.max_rewind;
2210
2211 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2212
2213 if (s->thread_info.rewind_requested &&
2214 nbytes <= s->thread_info.rewind_nbytes)
2215 return;
2216
2217 s->thread_info.rewind_nbytes = nbytes;
2218 s->thread_info.rewind_requested = TRUE;
2219
2220 if (s->request_rewind)
2221 s->request_rewind(s);
2222 }
2223
2224 /* Called from IO thread */
2225 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2226 pa_usec_t result = (pa_usec_t) -1;
2227 pa_sink_input *i;
2228 void *state = NULL;
2229 pa_usec_t monitor_latency;
2230
2231 pa_sink_assert_ref(s);
2232 pa_sink_assert_io_context(s);
2233
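    /* Fixed-latency sinks simply report their fixed latency, clamped to the
     * configured range. Otherwise the result is the smallest latency requested
     * by any input or by the monitor source, clamped to the range and cached
     * once the sink is linked. */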
2234 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2235 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2236
2237 if (s->thread_info.requested_latency_valid)
2238 return s->thread_info.requested_latency;
2239
2240 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2241 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2242 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2243 result = i->thread_info.requested_sink_latency;
2244
2245 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2246
2247 if (monitor_latency != (pa_usec_t) -1 &&
2248 (result == (pa_usec_t) -1 || result > monitor_latency))
2249 result = monitor_latency;
2250
2251 if (result != (pa_usec_t) -1)
2252 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2253
2254 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2255 /* Only cache if properly initialized */
2256 s->thread_info.requested_latency = result;
2257 s->thread_info.requested_latency_valid = TRUE;
2258 }
2259
2260 return result;
2261 }
2262
2263 /* Called from main thread */
2264 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2265 pa_usec_t usec = 0;
2266
2267 pa_sink_assert_ref(s);
2268 pa_assert_ctl_context();
2269 pa_assert(PA_SINK_IS_LINKED(s->state));
2270
2271 if (s->state == PA_SINK_SUSPENDED)
2272 return 0;
2273
2274 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2275 return usec;
2276 }
2277
2278 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2279 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2280 pa_sink_input *i;
2281 void *state = NULL;
2282
2283 pa_sink_assert_ref(s);
2284 pa_sink_assert_io_context(s);
2285
2286 if (max_rewind == s->thread_info.max_rewind)
2287 return;
2288
2289 s->thread_info.max_rewind = max_rewind;
2290
2291 if (PA_SINK_IS_LINKED(s->thread_info.state))
2292 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2293 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2294
2295 if (s->monitor_source)
2296 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2297 }
2298
2299 /* Called from main thread */
2300 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2301 pa_sink_assert_ref(s);
2302 pa_assert_ctl_context();
2303
2304 if (PA_SINK_IS_LINKED(s->state))
2305 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2306 else
2307 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2308 }
2309
2310 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2311 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2312 void *state = NULL;
2313
2314 pa_sink_assert_ref(s);
2315 pa_sink_assert_io_context(s);
2316
2317 if (max_request == s->thread_info.max_request)
2318 return;
2319
2320 s->thread_info.max_request = max_request;
2321
2322 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2323 pa_sink_input *i;
2324
2325 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2326 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2327 }
2328 }
2329
2330 /* Called from main thread */
2331 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2332 pa_sink_assert_ref(s);
2333 pa_assert_ctl_context();
2334
2335 if (PA_SINK_IS_LINKED(s->state))
2336 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2337 else
2338 pa_sink_set_max_request_within_thread(s, max_request);
2339 }
2340
2341 /* Called from IO thread */
2342 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2343 pa_sink_input *i;
2344 void *state = NULL;
2345
2346 pa_sink_assert_ref(s);
2347 pa_sink_assert_io_context(s);
2348
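    /* Only sinks with dynamic latency keep a cached value to invalidate. For
     * fixed-latency sinks a change that concerns dynamic latency only
     * (dynamic == TRUE) is irrelevant, so we return early; everything else is
     * still propagated to the implementor and the inputs below. */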
2349 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2350 s->thread_info.requested_latency_valid = FALSE;
2351 else if (dynamic)
2352 return;
2353
2354 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2355
2356 if (s->update_requested_latency)
2357 s->update_requested_latency(s);
2358
2359 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2360 if (i->update_sink_requested_latency)
2361 i->update_sink_requested_latency(i);
2362 }
2363 }
2364
2365 /* Called from main thread */
2366 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2367 pa_sink_assert_ref(s);
2368 pa_assert_ctl_context();
2369
2370 /* min_latency == 0: no limit
2371 * min_latency anything else: specified limit
2372 *
2373 * Similar for max_latency */
2374
2375 if (min_latency < ABSOLUTE_MIN_LATENCY)
2376 min_latency = ABSOLUTE_MIN_LATENCY;
2377
2378 if (max_latency <= 0 ||
2379 max_latency > ABSOLUTE_MAX_LATENCY)
2380 max_latency = ABSOLUTE_MAX_LATENCY;
2381
2382 pa_assert(min_latency <= max_latency);
2383
2384 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2385 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2386 max_latency == ABSOLUTE_MAX_LATENCY) ||
2387 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2388
2389 if (PA_SINK_IS_LINKED(s->state)) {
2390 pa_usec_t r[2];
2391
2392 r[0] = min_latency;
2393 r[1] = max_latency;
2394
2395 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2396 } else
2397 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2398 }
2399
2400 /* Called from main thread */
2401 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2402 pa_sink_assert_ref(s);
2403 pa_assert_ctl_context();
2404 pa_assert(min_latency);
2405 pa_assert(max_latency);
2406
2407 if (PA_SINK_IS_LINKED(s->state)) {
2408 pa_usec_t r[2] = { 0, 0 };
2409
2410 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2411
2412 *min_latency = r[0];
2413 *max_latency = r[1];
2414 } else {
2415 *min_latency = s->thread_info.min_latency;
2416 *max_latency = s->thread_info.max_latency;
2417 }
2418 }
2419
2420 /* Called from IO thread */
2421 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2422 pa_sink_assert_ref(s);
2423 pa_sink_assert_io_context(s);
2424
2425 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2426 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2427 pa_assert(min_latency <= max_latency);
2428
2429 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2430 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2431 max_latency == ABSOLUTE_MAX_LATENCY) ||
2432 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2433
2434 if (s->thread_info.min_latency == min_latency &&
2435 s->thread_info.max_latency == max_latency)
2436 return;
2437
2438 s->thread_info.min_latency = min_latency;
2439 s->thread_info.max_latency = max_latency;
2440
2441 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2442 pa_sink_input *i;
2443 void *state = NULL;
2444
2445 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2446 if (i->update_sink_latency_range)
2447 i->update_sink_latency_range(i);
2448 }
2449
2450 pa_sink_invalidate_requested_latency(s, FALSE);
2451
2452 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2453 }
2454
2455 /* Called from main thread */
2456 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2457 pa_sink_assert_ref(s);
2458 pa_assert_ctl_context();
2459
2460 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2461 pa_assert(latency == 0);
2462 return;
2463 }
2464
2465 if (latency < ABSOLUTE_MIN_LATENCY)
2466 latency = ABSOLUTE_MIN_LATENCY;
2467
2468 if (latency > ABSOLUTE_MAX_LATENCY)
2469 latency = ABSOLUTE_MAX_LATENCY;
2470
2471 if (PA_SINK_IS_LINKED(s->state))
2472 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2473 else
2474 s->thread_info.fixed_latency = latency;
2475
2476 pa_source_set_fixed_latency(s->monitor_source, latency);
2477 }
2478
2479 /* Called from main thread */
2480 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2481 pa_usec_t latency;
2482
2483 pa_sink_assert_ref(s);
2484 pa_assert_ctl_context();
2485
2486 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2487 return 0;
2488
2489 if (PA_SINK_IS_LINKED(s->state))
2490 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2491 else
2492 latency = s->thread_info.fixed_latency;
2493
2494 return latency;
2495 }
2496
2497 /* Called from IO thread */
2498 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2499 pa_sink_assert_ref(s);
2500 pa_sink_assert_io_context(s);
2501
2502 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2503 pa_assert(latency == 0);
2504 return;
2505 }
2506
2507 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2508 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2509
2510 if (s->thread_info.fixed_latency == latency)
2511 return;
2512
2513 s->thread_info.fixed_latency = latency;
2514
2515 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2516 pa_sink_input *i;
2517 void *state = NULL;
2518
2519 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2520 if (i->update_sink_fixed_latency)
2521 i->update_sink_fixed_latency(i);
2522 }
2523
2524 pa_sink_invalidate_requested_latency(s, FALSE);
2525
2526 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2527 }
2528
2529 /* Called from main context */
2530 size_t pa_sink_get_max_rewind(pa_sink *s) {
2531 size_t r;
2532 pa_sink_assert_ref(s);
2533 pa_assert_ctl_context();
2534
2535 if (!PA_SINK_IS_LINKED(s->state))
2536 return s->thread_info.max_rewind;
2537
2538 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2539
2540 return r;
2541 }
2542
2543 /* Called from main context */
2544 size_t pa_sink_get_max_request(pa_sink *s) {
2545 size_t r;
2546 pa_sink_assert_ref(s);
2547 pa_assert_ctl_context();
2548
2549 if (!PA_SINK_IS_LINKED(s->state))
2550 return s->thread_info.max_request;
2551
2552 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2553
2554 return r;
2555 }
2556
2557 /* Called from main context */
2558 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2559 pa_device_port *port;
2560
2561 pa_sink_assert_ref(s);
2562 pa_assert_ctl_context();
2563
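    /* The save flag marks the port selection as one that should be remembered,
     * presumably by one of the restore modules. If the requested port is
     * already active we merely merge that flag; otherwise the implementor's
     * set_port() callback performs the actual switch. */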
2564 if (!s->set_port) {
2565 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2566 return -PA_ERR_NOTIMPLEMENTED;
2567 }
2568
2569 if (!s->ports)
2570 return -PA_ERR_NOENTITY;
2571
2572 if (!(port = pa_hashmap_get(s->ports, name)))
2573 return -PA_ERR_NOENTITY;
2574
2575 if (s->active_port == port) {
2576 s->save_port = s->save_port || save;
2577 return 0;
2578 }
2579
2580 if ((s->set_port(s, port)) < 0)
2581 return -PA_ERR_NOENTITY;
2582
2583 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2584
2585 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2586
2587 s->active_port = port;
2588 s->save_port = save;
2589
2590 return 0;
2591 }
2592
2593 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2594 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2595
2596 pa_assert(p);
2597
2598 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2599 return TRUE;
2600
2601 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2602
2603 if (pa_streq(ff, "microphone"))
2604 t = "audio-input-microphone";
2605 else if (pa_streq(ff, "webcam"))
2606 t = "camera-web";
2607 else if (pa_streq(ff, "computer"))
2608 t = "computer";
2609 else if (pa_streq(ff, "handset"))
2610 t = "phone";
2611 else if (pa_streq(ff, "portable"))
2612 t = "multimedia-player";
2613 else if (pa_streq(ff, "tv"))
2614 t = "video-display";
2615
2616                 /*
2617                  * The following icons are not part of the icon naming
2618                  * spec; see the discussion at
2619                  *
2620                  * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2621                  */
2622 else if (pa_streq(ff, "headset"))
2623 t = "audio-headset";
2624 else if (pa_streq(ff, "headphone"))
2625 t = "audio-headphones";
2626 else if (pa_streq(ff, "speaker"))
2627 t = "audio-speakers";
2628 else if (pa_streq(ff, "hands-free"))
2629 t = "audio-handsfree";
2630 }
2631
2632 if (!t)
2633 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2634 if (pa_streq(c, "modem"))
2635 t = "modem";
2636
2637 if (!t) {
2638 if (is_sink)
2639 t = "audio-card";
2640 else
2641 t = "audio-input-microphone";
2642 }
2643
2644 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2645 if (strstr(profile, "analog"))
2646 s = "-analog";
2647 else if (strstr(profile, "iec958"))
2648 s = "-iec958";
2649 else if (strstr(profile, "hdmi"))
2650 s = "-hdmi";
2651 }
2652
2653 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2654
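    /* The icon name is composed of the base type, an optional profile suffix
     * and an optional bus suffix, e.g. "audio-card-analog-pci". */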
2655 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2656
2657 return TRUE;
2658 }
2659
2660 pa_bool_t pa_device_init_description(pa_proplist *p) {
2661 const char *s, *d = NULL, *k;
2662 pa_assert(p);
2663
2664 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2665 return TRUE;
2666
2667 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2668 if (pa_streq(s, "internal"))
2669 d = _("Internal Audio");
2670
2671 if (!d)
2672 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2673 if (pa_streq(s, "modem"))
2674 d = _("Modem");
2675
2676 if (!d)
2677 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2678
2679 if (!d)
2680 return FALSE;
2681
2682 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2683
2684 if (d && k)
2685 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2686 else if (d)
2687 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2688
2689 return TRUE;
2690 }
2691
2692 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2693 const char *s;
2694 pa_assert(p);
2695
2696 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2697 return TRUE;
2698
2699 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2700 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2701 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2702 return TRUE;
2703 }
2704
2705 return FALSE;
2706 }
2707
2708 unsigned pa_device_init_priority(pa_proplist *p) {
2709 const char *s;
2710 unsigned priority = 0;
2711
2712 pa_assert(p);
2713
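    /* The priority is additive: device class weighs the most ("sound" scores
     * highest, modems lowest), then form factor (internal > speaker >
     * headphone), then bus (pci > usb > bluetooth), and the profile
     * (analog > iec958) acts as a final tie breaker. */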
2714 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2715
2716 if (pa_streq(s, "sound"))
2717 priority += 9000;
2718 else if (!pa_streq(s, "modem"))
2719 priority += 1000;
2720 }
2721
2722 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2723
2724 if (pa_streq(s, "internal"))
2725 priority += 900;
2726 else if (pa_streq(s, "speaker"))
2727 priority += 500;
2728 else if (pa_streq(s, "headphone"))
2729 priority += 400;
2730 }
2731
2732 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2733
2734 if (pa_streq(s, "pci"))
2735 priority += 50;
2736 else if (pa_streq(s, "usb"))
2737 priority += 40;
2738 else if (pa_streq(s, "bluetooth"))
2739 priority += 30;
2740 }
2741
2742 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2743
2744 if (pa_startswith(s, "analog-"))
2745 priority += 9;
2746 else if (pa_startswith(s, "iec958-"))
2747 priority += 8;
2748 }
2749
2750 return priority;
2751 }