]> code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
daemon-conf: Add sync volume parameters to daemon-conf
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37 #include <pulse/rtclock.h>
38
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/core-subscribe.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/play-memblockq.h>
47 #include <pulsecore/flist.h>
48
49 #include "sink.h"
50
/* Upper bound on the number of streams mixed in a single render pass */
#define MAX_MIX_CHANNELS 32
/* Default length used when rendering with no explicit request size */
#define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
/* Hard limits for the configurable latency range, in microseconds */
#define ABSOLUTE_MIN_LATENCY (500)
#define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Fixed latency assigned to sinks without PA_SINK_DYNAMIC_LATENCY */
#define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
56
57 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
58
/* One queued hardware volume change, to be applied at a given time by
 * the deferred ("sync") volume machinery (see the volume_changes list
 * in thread_info). */
struct pa_sink_volume_change {
    pa_usec_t at;         /* when to apply this change */
    pa_cvolume hw_volume; /* hardware volume to apply */

    PA_LLIST_FIELDS(pa_sink_volume_change);
};
65
/* Payload of the SET_PORT message handed to the IO thread: the port to
 * activate and a slot for the handler's result code. */
struct sink_message_set_port {
    pa_device_port *port; /* port to activate */
    int ret;              /* filled in by the message handler */
};
70
71 static void sink_free(pa_object *s);
72
73 static void pa_sink_volume_change_push(pa_sink *s);
74 static void pa_sink_volume_change_flush(pa_sink *s);
75 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
76
77 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
78 pa_assert(data);
79
80 pa_zero(*data);
81 data->proplist = pa_proplist_new();
82
83 return data;
84 }
85
86 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
87 pa_assert(data);
88
89 pa_xfree(data->name);
90 data->name = pa_xstrdup(name);
91 }
92
93 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
94 pa_assert(data);
95
96 if ((data->sample_spec_is_set = !!spec))
97 data->sample_spec = *spec;
98 }
99
100 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
101 pa_assert(data);
102
103 if ((data->channel_map_is_set = !!map))
104 data->channel_map = *map;
105 }
106
107 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
108 pa_assert(data);
109
110 if ((data->volume_is_set = !!volume))
111 data->volume = *volume;
112 }
113
114 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
115 pa_assert(data);
116
117 data->muted_is_set = TRUE;
118 data->muted = !!mute;
119 }
120
121 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
122 pa_assert(data);
123
124 pa_xfree(data->active_port);
125 data->active_port = pa_xstrdup(port);
126 }
127
128 void pa_sink_new_data_done(pa_sink_new_data *data) {
129 pa_assert(data);
130
131 pa_proplist_free(data->proplist);
132
133 if (data->ports) {
134 pa_device_port *p;
135
136 while ((p = pa_hashmap_steal_first(data->ports)))
137 pa_device_port_free(p);
138
139 pa_hashmap_free(data->ports, NULL, NULL);
140 }
141
142 pa_xfree(data->name);
143 pa_xfree(data->active_port);
144 }
145
146 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
147 pa_device_port *p;
148
149 pa_assert(name);
150
151 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
152 p->name = pa_xstrdup(name);
153 p->description = pa_xstrdup(description);
154
155 p->priority = 0;
156
157 return p;
158 }
159
160 void pa_device_port_free(pa_device_port *p) {
161 pa_assert(p);
162
163 pa_xfree(p->name);
164 pa_xfree(p->description);
165 pa_xfree(p);
166 }
167
168 /* Called from main context */
169 static void reset_callbacks(pa_sink *s) {
170 pa_assert(s);
171
172 s->set_state = NULL;
173 s->get_volume = NULL;
174 s->set_volume = NULL;
175 s->get_mute = NULL;
176 s->set_mute = NULL;
177 s->request_rewind = NULL;
178 s->update_requested_latency = NULL;
179 s->set_port = NULL;
180 }
181
/* Called from main context */
/* Allocate and partially initialize a new sink from the parameters in
 * 'data'. Registers the name, fires the SINK_NEW and SINK_FIXATE
 * hooks, validates/fills in sample spec, channel map, volume and mute,
 * and creates the matching ".monitor" source. The sink is left in
 * PA_SINK_INIT state; the implementor must still set the asyncmsgq and
 * callbacks and then call pa_sink_put(). Returns NULL on failure. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the sink name; this may fail if the name is taken and
     * namereg_fail was requested */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Give modules a chance to veto or adjust the new sink */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map from the channel count if none was
     * given explicitly */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, then fill in standard
     * device properties */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust the (now complete) parameters */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Use the explicitly requested port if it exists... */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise fall back to the highest-priority port */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    /* Dynamic-latency sinks report 0 here; others get the default */
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    /* Deferred ("sync") volume bookkeeping, configured from daemon.conf */
    PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Create the companion monitor source, mirroring the sink's spec,
     * map and latency-related flags */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
384
/* Called from main context */
/* Transition the sink to a new state. The implementor's set_state()
 * callback is consulted first, then the IO thread is informed via a
 * synchronous SET_STATE message; if the IO thread rejects the change
 * the implementor callback is rolled back. On success, hooks and
 * subscribers are notified, and on suspend/resume transitions every
 * input (and the monitor source) is told as well. Returns 0 on
 * success, negative on error. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* TRUE iff this transition crosses the suspended/opened boundary */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* The IO thread refused: undo the implementor's change */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
442
/* Called from main context */
/* Finish initializing a sink created with pa_sink_new() and make it
 * live: derive the volume-related flags, propagate volume/mute state
 * into thread_info, sanity-check the implementor's flag/callback
 * combinations, move the sink to IDLE, publish the monitor source and
 * announce the new sink via subscriptions and the SINK_PUT hook. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Without hardware volume control, volume is applied in software
     * and hence can be expressed in dB */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    /* We assume that if the sink implementor changed the default
     * volume he did so in real_volume, because that is the usual
     * place where he is supposed to place his changes. */
    s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    /* Verify the implementor set up a consistent combination of flags,
     * callbacks and latency values */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
    pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || (s->flags & PA_SINK_HW_VOLUME_CTRL));
    pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);

    /* The monitor source must mirror the sink's latency configuration */
    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
494
/* Called from main context */
/* Detach the sink from the core: unregister its name, remove it from
 * the core/card index sets, kill all remaining inputs, drop into the
 * UNLINKED state, clear the implementor callbacks and unlink the
 * monitor source. Fires UNLINK/UNLINK_POST hooks when the sink was
 * actually linked. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every remaining input; the j check guards against an input
     * that survives pa_sink_input_kill(), which would loop forever */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
544
/* Called from main context */
/* Destructor invoked when the last reference to the sink is dropped
 * (installed as parent.parent.free in pa_sink_new()). Unlinks the sink
 * if still linked, then releases the monitor source, input sets,
 * silence block, strings, property list and ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* Drop the references the IO-thread hashmap still holds */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
591
592 /* Called from main context, and not while the IO thread is active, please */
593 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
594 pa_sink_assert_ref(s);
595 pa_assert_ctl_context();
596
597 s->asyncmsgq = q;
598
599 if (s->monitor_source)
600 pa_source_set_asyncmsgq(s->monitor_source, q);
601 }
602
603 /* Called from main context, and not while the IO thread is active, please */
604 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
605 pa_sink_assert_ref(s);
606 pa_assert_ctl_context();
607
608 if (mask == 0)
609 return;
610
611 /* For now, allow only a minimal set of flags to be changed. */
612 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
613
614 s->flags = (s->flags & ~mask) | (value & mask);
615
616 pa_source_update_flags(s->monitor_source,
617 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
618 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
619 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
620 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
621 }
622
623 /* Called from IO context, or before _put() from main context */
624 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
625 pa_sink_assert_ref(s);
626 pa_sink_assert_io_context(s);
627
628 s->thread_info.rtpoll = p;
629
630 if (s->monitor_source)
631 pa_source_set_rtpoll(s->monitor_source, p);
632 }
633
634 /* Called from main context */
635 int pa_sink_update_status(pa_sink*s) {
636 pa_sink_assert_ref(s);
637 pa_assert_ctl_context();
638 pa_assert(PA_SINK_IS_LINKED(s->state));
639
640 if (s->state == PA_SINK_SUSPENDED)
641 return 0;
642
643 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
644 }
645
646 /* Called from main context */
647 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
651 pa_assert(cause != 0);
652
653 if (suspend) {
654 s->suspend_cause |= cause;
655 s->monitor_source->suspend_cause |= cause;
656 } else {
657 s->suspend_cause &= ~cause;
658 s->monitor_source->suspend_cause &= ~cause;
659 }
660
661 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
662 return 0;
663
664 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
665
666 if (s->suspend_cause)
667 return sink_set_state(s, PA_SINK_SUSPENDED);
668 else
669 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
670 }
671
/* Called from main context */
/* Begin moving all inputs away from this sink. Every input that
 * accepts the move (pa_sink_input_start_move() succeeds) is pushed,
 * with a reference held, onto the returned queue; the queue is
 * allocated here if q is NULL. Pass the queue to
 * pa_sink_move_all_finish() or pa_sink_move_all_fail() later. */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next input before acting on the current one, since
     * starting a move detaches the input from s->inputs */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
697
698 /* Called from main context */
699 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
700 pa_sink_input *i;
701
702 pa_sink_assert_ref(s);
703 pa_assert_ctl_context();
704 pa_assert(PA_SINK_IS_LINKED(s->state));
705 pa_assert(q);
706
707 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
708 if (pa_sink_input_finish_move(i, s, save) < 0)
709 pa_sink_input_fail_move(i);
710
711 pa_sink_input_unref(i);
712 }
713
714 pa_queue_free(q, NULL, NULL);
715 }
716
717 /* Called from main context */
718 void pa_sink_move_all_fail(pa_queue *q) {
719 pa_sink_input *i;
720
721 pa_assert_ctl_context();
722 pa_assert(q);
723
724 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
725 pa_sink_input_fail_move(i);
726 pa_sink_input_unref(i);
727 }
728
729 pa_queue_free(q, NULL, NULL);
730 }
731
732 /* Called from IO thread context */
733 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
734 pa_sink_input *i;
735 void *state = NULL;
736
737 pa_sink_assert_ref(s);
738 pa_sink_assert_io_context(s);
739 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
740
741 /* If nobody requested this and this is actually no real rewind
742 * then we can short cut this. Please note that this means that
743 * not all rewind requests triggered upstream will always be
744 * translated in actual requests! */
745 if (!s->thread_info.rewind_requested && nbytes <= 0)
746 return;
747
748 s->thread_info.rewind_nbytes = 0;
749 s->thread_info.rewind_requested = FALSE;
750
751 if (s->thread_info.state == PA_SINK_SUSPENDED)
752 return;
753
754 if (nbytes > 0)
755 pa_log_debug("Processing rewind...");
756
757 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
758 pa_sink_input_assert_ref(i);
759 pa_sink_input_process_rewind(i, nbytes);
760 }
761
762 if (nbytes > 0) {
763 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
764 pa_source_process_rewind(s->monitor_source, nbytes);
765 if (s->flags & PA_SINK_SYNC_VOLUME)
766 pa_sink_volume_change_rewind(s, nbytes);
767 }
768 }
769
/* Called from IO thread context */
/* Peek up to *length bytes from each input (at most maxinfo of them)
 * into the info[] array. Chunks that are pure silence are released and
 * skipped, but still bound the common mix length. On return, *length
 * is shrunk to the shortest chunk seen (if any was non-empty) and the
 * number of filled entries is returned; each filled entry holds a
 * reference to its input (in userdata) and to its memblock, to be
 * released later by inputs_drop(). */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Clamp the common mix length to the shortest chunk */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silence contributes nothing to the mix; drop it early */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        /* Keep the input alive until inputs_drop() releases it */
        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
809
/* Called from IO thread context */
/* Consume 'result->length' bytes from every input after mixing:
 * advance each input, feed per-input audio (volume-adjusted) to any
 * direct outputs via the monitor source, post the mixed result to the
 * monitor source, and release the input references and memblocks that
 * fill_mix_info() acquired in info[0..n-1]. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* p persists across iterations, so when input order is stable
         * this finds the match on the first probe */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                /* Direct outputs get this input's own data (with its
                 * volume applied), or silence if the input had none */
                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
906
/* Called from IO thread context */
/* Render up to 'length' bytes of mixed audio into *result. A suspended
 * sink yields its cached silence block. With no inputs the result is
 * silence; with exactly one input the chunk is passed through (with
 * soft volume/mute applied); otherwise the inputs are mixed into a
 * fresh memblock. result->length may be shorter than requested. */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);
        return;
    }

    pa_sink_ref(s);

    /* length is a size_t, so this only matches length == 0: pick a
     * default render size */
    if (length <= 0)
        length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);

    /* Never render more than fits in one mempool block */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {

        /* No inputs: hand out (a slice of) the silence block */
        *result = s->silence;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

    } else if (n == 1) {
        pa_cvolume volume;

        /* Single input: pass its chunk through, applying soft
         * volume/mute without a full mix */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
            pa_memblock_unref(result->memblock);
            pa_silence_memchunk_get(&s->core->silence_cache,
                                    s->core->mempool,
                                    result,
                                    &s->sample_spec,
                                    result->length);
        } else if (!pa_cvolume_is_norm(&volume)) {
            pa_memchunk_make_writable(result, 0);
            pa_volume_memchunk(result, &s->sample_spec, &volume);
        }
    } else {
        void *ptr;

        /* Multiple inputs: mix them into a freshly allocated block */
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                ptr, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);
        pa_memblock_release(result->memblock);

        result->index = 0;
    }

    inputs_drop(s, info, n, result);

    pa_sink_unref(s);
}
991
/* Called from IO thread context */
/* Render mixed audio directly into the caller-supplied chunk *target,
 * writing at target->index. Like pa_sink_render() this may shorten
 * target->length if less data is available; use
 * pa_sink_render_into_full() to fill the chunk completely. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    /* Never render more than fits in one mempool block */
    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No inputs: fill the target with silence */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        pa_cvolume volume;

        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            /* Copy the single input's data into the target, applying
             * the combined volume first if it is not 0dB */
            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        void *ptr;

        /* Multiple inputs: mix straight into the target block */
        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1076
/* Called from IO thread context */
/* Fill the caller-supplied chunk *target completely, calling
 * pa_sink_render_into() repeatedly (it may deliver short renders)
 * until every byte of target->length has been written. */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
    pa_memchunk chunk;
    size_t l, d;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    /* l counts bytes still missing, d the offset already filled */
    l = target->length;
    d = 0;
    while (l > 0) {
        chunk = *target;
        chunk.index += d;
        chunk.length -= d;

        pa_sink_render_into(s, &chunk);

        d += chunk.length;
        l -= chunk.length;
    }

    pa_sink_unref(s);
}
1115
1116 /* Called from IO thread context */
1117 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1118 pa_sink_assert_ref(s);
1119 pa_sink_assert_io_context(s);
1120 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1121 pa_assert(length > 0);
1122 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1123 pa_assert(result);
1124
1125 pa_assert(!s->thread_info.rewind_requested);
1126 pa_assert(s->thread_info.rewind_nbytes == 0);
1127
1128 pa_sink_ref(s);
1129
1130 pa_sink_render(s, length, result);
1131
1132 if (result->length < length) {
1133 pa_memchunk chunk;
1134
1135 pa_memchunk_make_writable(result, length);
1136
1137 chunk.memblock = result->memblock;
1138 chunk.index = result->index + result->length;
1139 chunk.length = length - result->length;
1140
1141 pa_sink_render_into_full(s, &chunk);
1142
1143 result->length = length;
1144 }
1145
1146 pa_sink_unref(s);
1147 }
1148
1149 /* Called from main thread */
1150 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1151 pa_usec_t usec = 0;
1152
1153 pa_sink_assert_ref(s);
1154 pa_assert_ctl_context();
1155 pa_assert(PA_SINK_IS_LINKED(s->state));
1156
1157 /* The returned value is supposed to be in the time domain of the sound card! */
1158
1159 if (s->state == PA_SINK_SUSPENDED)
1160 return 0;
1161
1162 if (!(s->flags & PA_SINK_LATENCY))
1163 return 0;
1164
1165 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1166
1167 return usec;
1168 }
1169
1170 /* Called from IO thread */
1171 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1172 pa_usec_t usec = 0;
1173 pa_msgobject *o;
1174
1175 pa_sink_assert_ref(s);
1176 pa_sink_assert_io_context(s);
1177 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1178
1179 /* The returned value is supposed to be in the time domain of the sound card! */
1180
1181 if (s->thread_info.state == PA_SINK_SUSPENDED)
1182 return 0;
1183
1184 if (!(s->flags & PA_SINK_LATENCY))
1185 return 0;
1186
1187 o = PA_MSGOBJECT(s);
1188
1189 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1190
1191 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1192 return -1;
1193
1194 return usec;
1195 }
1196
/* Remap *v (defined over channel map 'from') onto channel map 'to',
 * trying to preserve as much of the original per-channel information
 * as possible. 'template' is the current sink volume over 'to' and
 * is reused whenever it is a valid remapping of *v.
 *
 * Returns v on success, or NULL if either cvolume is incompatible
 * with its channel map. */
static pa_cvolume* cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);

    pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(v, from), NULL);
    pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(template, to), NULL);

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from sink input to sink volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the sink to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the sink. */

    /* Identical maps: nothing needs remapping at all */
    if (pa_channel_map_equal(from, to))
        return v;

    /* If remapping the template back onto 'from' reproduces *v, the
     * template already expresses *v exactly -- reuse it */
    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Otherwise fall back to a flat all-channel volume at the
     * maximum of *v */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
1236
/* Called from main context */
/* Recompute i->reference_ratio (i->volume / s->reference_volume) for
 * every input attached to this sink. Only used in flat volume
 * mode. */
static void compute_reference_ratios(pa_sink *s) {
    uint32_t idx;
    pa_sink_input *i;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * Calculates the reference volume from the sink's reference
         * volume. This basically calculates:
         *
         * i->reference_ratio = i->volume / s->reference_volume
         */

        /* Bring the sink's reference volume into the input's channel map */
        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->reference_ratio.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            /* We don't update when the sink volume is 0 anyway */
            if (remapped.values[c] <= PA_VOLUME_MUTED)
                continue;

            /* Don't update the reference ratio unless necessary,
             * i.e. unless ratio * sink volume no longer reproduces
             * the input volume -- this avoids accumulating rounding
             * error from repeated divide/multiply round trips */
            if (pa_sw_volume_multiply(
                        i->reference_ratio.values[c],
                        remapped.values[c]) == i->volume.values[c])
                continue;

            i->reference_ratio.values[c] = pa_sw_volume_divide(
                    i->volume.values[c],
                    remapped.values[c]);
        }
    }
}
1281
/* Called from main context */
/* Recompute, for every input attached to this sink:
 *
 *   i->real_ratio  := i->volume / s->real_volume
 *   i->soft_volume := i->real_ratio * i->volume_factor
 *
 * Only used in flat volume mode. The thread_info copy of
 * soft_volume is NOT updated here. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Bring the sink's real volume into the input's channel map */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary: only re-derive
             * the ratio if it no longer reproduces i->volume */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1335
1336 /* Called from main thread */
1337 static void compute_real_volume(pa_sink *s) {
1338 pa_sink_input *i;
1339 uint32_t idx;
1340
1341 pa_sink_assert_ref(s);
1342 pa_assert_ctl_context();
1343 pa_assert(PA_SINK_IS_LINKED(s->state));
1344 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1345
1346 /* This determines the maximum volume of all streams and sets
1347 * s->real_volume accordingly. */
1348
1349 if (pa_idxset_isempty(s->inputs)) {
1350 /* In the special case that we have no sink input we leave the
1351 * volume unmodified. */
1352 s->real_volume = s->reference_volume;
1353 return;
1354 }
1355
1356 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1357
1358 /* First let's determine the new maximum volume of all inputs
1359 * connected to this sink */
1360 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1361 pa_cvolume remapped;
1362
1363 remapped = i->volume;
1364 cvolume_remap_minimal_impact(&remapped, &s->real_volume, &i->channel_map, &s->channel_map);
1365 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1366 }
1367
1368 /* Then, let's update the real ratios/soft volumes of all inputs
1369 * connected to this sink */
1370 compute_real_ratios(s);
1371 }
1372
1373 /* Called from main thread */
1374 static void propagate_reference_volume(pa_sink *s) {
1375 pa_sink_input *i;
1376 uint32_t idx;
1377
1378 pa_sink_assert_ref(s);
1379 pa_assert_ctl_context();
1380 pa_assert(PA_SINK_IS_LINKED(s->state));
1381 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1382
1383 /* This is called whenever the sink volume changes that is not
1384 * caused by a sink input volume change. We need to fix up the
1385 * sink input volumes accordingly */
1386
1387 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1388 pa_cvolume old_volume, remapped;
1389
1390 old_volume = i->volume;
1391
1392 /* This basically calculates:
1393 *
1394 * i->volume := s->reference_volume * i->reference_ratio */
1395
1396 remapped = s->reference_volume;
1397 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1398 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1399
1400 /* The volume changed, let's tell people so */
1401 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1402
1403 if (i->volume_changed)
1404 i->volume_changed(i);
1405
1406 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1407 }
1408 }
1409 }
1410
/* Called from main thread */
/* Set the sink volume.
 *
 * volume   -- the new reference volume. May be a mono volume (then
 *             applied across all channels) or NULL; NULL is only
 *             valid in flat volume mode and means "re-derive the
 *             sink volume from the stream volumes".
 * send_msg -- if TRUE, notify the IO thread of the new soft/real
 *             volume even when no implementor callback runs here.
 * save     -- if TRUE, mark the resulting volume as worth saving. */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume old_reference_volume;
    pa_bool_t reference_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH input is connected */
    if (s->flags & PA_SINK_PASSTHROUGH) {
        pa_sink_input *alt_i;
        uint32_t idx;

        /* one and only one PASSTHROUGH input can possibly be connected */
        if (pa_idxset_size(s->inputs) == 1) {

            alt_i = pa_idxset_first(s->inputs, &idx);

            if (alt_i->flags & PA_SINK_INPUT_PASSTHROUGH) {
                /* FIXME: Need to notify client that volume control is disabled */
                pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
                return;
            }
        }
    }

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    old_reference_volume = s->reference_volume;

    if (volume) {

        if (pa_cvolume_compatible(volume, &s->sample_spec))
            s->reference_volume = *volume;
        else
            /* Mono volume: scale the existing reference volume so
             * its maximum matches the requested value */
            pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));

        if (s->flags & PA_SINK_FLAT_VOLUME) {
            /* OK, propagate this volume change back to the inputs */
            propagate_reference_volume(s);

            /* And now recalculate the real volume */
            compute_real_volume(s);
        } else
            s->real_volume = s->reference_volume;

    } else {
        pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

        /* Ok, let's determine the new real volume */
        compute_real_volume(s);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);

        /* We need to fix the reference ratios of all streams now that
         * we changed the reference volume */
        compute_reference_ratios(s);
    }

    reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
    s->save_volume = (!reference_changed && s->save_volume) || save;

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        if (!(s->flags & PA_SINK_SYNC_VOLUME))
            s->set_volume(s);
        else
            /* With sync volume, set_volume() must run in the IO
             * thread; the SET_VOLUME_SYNCED message below triggers
             * it there */
            send_msg = TRUE;

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->real_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL) == 0);

    if (reference_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1511
1512 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1513 * Only to be called by sink implementor */
1514 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1515 pa_sink_assert_ref(s);
1516 if (s->flags & PA_SINK_SYNC_VOLUME)
1517 pa_sink_assert_io_context(s);
1518 else
1519 pa_assert_ctl_context();
1520
1521 if (!volume)
1522 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1523 else
1524 s->soft_volume = *volume;
1525
1526 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_SYNC_VOLUME))
1527 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1528 else
1529 s->thread_info.soft_volume = s->soft_volume;
1530 }
1531
/* Propagate a hardware ("real") volume change back into the sink's
 * reference volume and, in flat volume mode, into the individual
 * stream volumes. old_real_volume is the value before the change;
 * pass NULL to force propagation. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;
    pa_cvolume old_reference_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    /* Nothing changed -- nothing to do */
    if (old_real_volume && pa_cvolume_equal(old_real_volume, &s->real_volume))
        return;

    old_reference_volume = s->reference_volume;

    /* 1. Make the real volume the reference volume */
    s->reference_volume = s->real_volume;

    if (s->flags & PA_SINK_FLAT_VOLUME) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume, remapped;

            old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            remapped = s->reference_volume;
            pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    s->save_volume = TRUE;

    if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1596
1597 /* Called from io thread */
1598 void pa_sink_update_volume_and_mute(pa_sink *s) {
1599 pa_assert(s);
1600 pa_sink_assert_io_context(s);
1601
1602 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1603 }
1604
1605 /* Called from main thread */
1606 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1607 pa_sink_assert_ref(s);
1608 pa_assert_ctl_context();
1609 pa_assert(PA_SINK_IS_LINKED(s->state));
1610
1611 if (s->refresh_volume || force_refresh) {
1612 struct pa_cvolume old_real_volume;
1613
1614 old_real_volume = s->real_volume;
1615
1616 if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume)
1617 s->get_volume(s);
1618
1619 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1620
1621 propagate_real_volume(s, &old_real_volume);
1622 }
1623
1624 return &s->reference_volume;
1625 }
1626
1627 /* Called from main thread */
1628 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1629 pa_cvolume old_real_volume;
1630
1631 pa_sink_assert_ref(s);
1632 pa_assert_ctl_context();
1633 pa_assert(PA_SINK_IS_LINKED(s->state));
1634
1635 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1636
1637 old_real_volume = s->real_volume;
1638 s->real_volume = *new_real_volume;
1639
1640 propagate_real_volume(s, &old_real_volume);
1641 }
1642
1643 /* Called from main thread */
1644 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1645 pa_bool_t old_muted;
1646
1647 pa_sink_assert_ref(s);
1648 pa_assert_ctl_context();
1649 pa_assert(PA_SINK_IS_LINKED(s->state));
1650
1651 old_muted = s->muted;
1652 s->muted = mute;
1653 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1654
1655 if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->set_mute)
1656 s->set_mute(s);
1657
1658 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1659
1660 if (old_muted != s->muted)
1661 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1662 }
1663
1664 /* Called from main thread */
1665 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1666
1667 pa_sink_assert_ref(s);
1668 pa_assert_ctl_context();
1669 pa_assert(PA_SINK_IS_LINKED(s->state));
1670
1671 if (s->refresh_muted || force_refresh) {
1672 pa_bool_t old_muted = s->muted;
1673
1674 if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_mute)
1675 s->get_mute(s);
1676
1677 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1678
1679 if (old_muted != s->muted) {
1680 s->save_muted = TRUE;
1681
1682 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1683
1684 /* Make sure the soft mute status stays in sync */
1685 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1686 }
1687 }
1688
1689 return s->muted;
1690 }
1691
1692 /* Called from main thread */
1693 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1694 pa_sink_assert_ref(s);
1695 pa_assert_ctl_context();
1696 pa_assert(PA_SINK_IS_LINKED(s->state));
1697
1698 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1699
1700 if (s->muted == new_muted)
1701 return;
1702
1703 s->muted = new_muted;
1704 s->save_muted = TRUE;
1705
1706 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1707 }
1708
1709 /* Called from main thread */
1710 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1711 pa_sink_assert_ref(s);
1712 pa_assert_ctl_context();
1713
1714 if (p)
1715 pa_proplist_update(s->proplist, mode, p);
1716
1717 if (PA_SINK_IS_LINKED(s->state)) {
1718 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1719 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1720 }
1721
1722 return TRUE;
1723 }
1724
1725 /* Called from main thread */
1726 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1727 void pa_sink_set_description(pa_sink *s, const char *description) {
1728 const char *old;
1729 pa_sink_assert_ref(s);
1730 pa_assert_ctl_context();
1731
1732 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1733 return;
1734
1735 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1736
1737 if (old && description && pa_streq(old, description))
1738 return;
1739
1740 if (description)
1741 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1742 else
1743 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1744
1745 if (s->monitor_source) {
1746 char *n;
1747
1748 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1749 pa_source_set_description(s->monitor_source, n);
1750 pa_xfree(n);
1751 }
1752
1753 if (PA_SINK_IS_LINKED(s->state)) {
1754 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1755 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1756 }
1757 }
1758
1759 /* Called from main thread */
1760 unsigned pa_sink_linked_by(pa_sink *s) {
1761 unsigned ret;
1762
1763 pa_sink_assert_ref(s);
1764 pa_assert_ctl_context();
1765 pa_assert(PA_SINK_IS_LINKED(s->state));
1766
1767 ret = pa_idxset_size(s->inputs);
1768
1769 /* We add in the number of streams connected to us here. Please
1770 * note the asymmmetry to pa_sink_used_by()! */
1771
1772 if (s->monitor_source)
1773 ret += pa_source_linked_by(s->monitor_source);
1774
1775 return ret;
1776 }
1777
1778 /* Called from main thread */
1779 unsigned pa_sink_used_by(pa_sink *s) {
1780 unsigned ret;
1781
1782 pa_sink_assert_ref(s);
1783 pa_assert_ctl_context();
1784 pa_assert(PA_SINK_IS_LINKED(s->state));
1785
1786 ret = pa_idxset_size(s->inputs);
1787 pa_assert(ret >= s->n_corked);
1788
1789 /* Streams connected to our monitor source do not matter for
1790 * pa_sink_used_by()!.*/
1791
1792 return ret - s->n_corked;
1793 }
1794
1795 /* Called from main thread */
1796 unsigned pa_sink_check_suspend(pa_sink *s) {
1797 unsigned ret;
1798 pa_sink_input *i;
1799 uint32_t idx;
1800
1801 pa_sink_assert_ref(s);
1802 pa_assert_ctl_context();
1803
1804 if (!PA_SINK_IS_LINKED(s->state))
1805 return 0;
1806
1807 ret = 0;
1808
1809 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1810 pa_sink_input_state_t st;
1811
1812 st = pa_sink_input_get_state(i);
1813
1814 /* We do not assert here. It is perfectly valid for a sink input to
1815 * be in the INIT state (i.e. created, marked done but not yet put)
1816 * and we should not care if it's unlinked as it won't contribute
1817 * towarards our busy status.
1818 */
1819 if (!PA_SINK_INPUT_IS_LINKED(st))
1820 continue;
1821
1822 if (st == PA_SINK_INPUT_CORKED)
1823 continue;
1824
1825 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1826 continue;
1827
1828 ret ++;
1829 }
1830
1831 if (s->monitor_source)
1832 ret += pa_source_check_suspend(s->monitor_source);
1833
1834 return ret;
1835 }
1836
/* Called from the IO thread */
/* Copy each attached input's main-thread soft volume into its
 * thread_info copy and request a rewind so the change takes effect
 * on audio that has already been rendered. */
static void sync_input_volumes_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        /* If a volume ramp is pending on this input, stash the new
         * volume to be applied after the ramp instead of applying it
         * immediately */
        if (pa_atomic_load(&i->before_ramping_v))
            i->thread_info.future_soft_volume = i->soft_volume;

        /* Already in sync -- nothing to do for this input */
        if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
            continue;

        if (!pa_atomic_load(&i->before_ramping_v))
            i->thread_info.soft_volume = i->soft_volume;
        /* Rewind so the new volume becomes audible as soon as possible */
        pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
    }
}
1857
1858 /* Called from IO thread, except when it is not */
1859 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1860 pa_sink *s = PA_SINK(o);
1861 pa_sink_assert_ref(s);
1862
1863 switch ((pa_sink_message_t) code) {
1864
1865 case PA_SINK_MESSAGE_ADD_INPUT: {
1866 pa_sink_input *i = PA_SINK_INPUT(userdata);
1867
1868 /* If you change anything here, make sure to change the
1869 * sink input handling a few lines down at
1870 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1871
1872 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1873
1874 /* Since the caller sleeps in pa_sink_input_put(), we can
1875 * safely access data outside of thread_info even though
1876 * it is mutable */
1877
1878 if ((i->thread_info.sync_prev = i->sync_prev)) {
1879 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1880 pa_assert(i->sync_prev->sync_next == i);
1881 i->thread_info.sync_prev->thread_info.sync_next = i;
1882 }
1883
1884 if ((i->thread_info.sync_next = i->sync_next)) {
1885 pa_assert(i->sink == i->thread_info.sync_next->sink);
1886 pa_assert(i->sync_next->sync_prev == i);
1887 i->thread_info.sync_next->thread_info.sync_prev = i;
1888 }
1889
1890 pa_assert(!i->thread_info.attached);
1891 i->thread_info.attached = TRUE;
1892
1893 if (i->attach)
1894 i->attach(i);
1895
1896 pa_sink_input_set_state_within_thread(i, i->state);
1897
1898 /* The requested latency of the sink input needs to be
1899 * fixed up and then configured on the sink */
1900
1901 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1902 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1903
1904 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1905 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1906
1907 /* We don't rewind here automatically. This is left to the
1908 * sink input implementor because some sink inputs need a
1909 * slow start, i.e. need some time to buffer client
1910 * samples before beginning streaming. */
1911
1912 /* In flat volume mode we need to update the volume as
1913 * well */
1914 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1915 }
1916
1917 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1918 pa_sink_input *i = PA_SINK_INPUT(userdata);
1919
1920 /* If you change anything here, make sure to change the
1921 * sink input handling a few lines down at
1922 * PA_SINK_MESSAGE_PREPAPRE_MOVE, too. */
1923
1924 if (i->detach)
1925 i->detach(i);
1926
1927 pa_sink_input_set_state_within_thread(i, i->state);
1928
1929 pa_assert(i->thread_info.attached);
1930 i->thread_info.attached = FALSE;
1931
1932 /* Since the caller sleeps in pa_sink_input_unlink(),
1933 * we can safely access data outside of thread_info even
1934 * though it is mutable */
1935
1936 pa_assert(!i->sync_prev);
1937 pa_assert(!i->sync_next);
1938
1939 if (i->thread_info.sync_prev) {
1940 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1941 i->thread_info.sync_prev = NULL;
1942 }
1943
1944 if (i->thread_info.sync_next) {
1945 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1946 i->thread_info.sync_next = NULL;
1947 }
1948
1949 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1950 pa_sink_input_unref(i);
1951
1952 pa_sink_invalidate_requested_latency(s, TRUE);
1953 pa_sink_request_rewind(s, (size_t) -1);
1954
1955 /* In flat volume mode we need to update the volume as
1956 * well */
1957 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1958 }
1959
1960 case PA_SINK_MESSAGE_START_MOVE: {
1961 pa_sink_input *i = PA_SINK_INPUT(userdata);
1962
1963 /* We don't support moving synchronized streams. */
1964 pa_assert(!i->sync_prev);
1965 pa_assert(!i->sync_next);
1966 pa_assert(!i->thread_info.sync_next);
1967 pa_assert(!i->thread_info.sync_prev);
1968
1969 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1970 pa_usec_t usec = 0;
1971 size_t sink_nbytes, total_nbytes;
1972
1973 /* Get the latency of the sink */
1974 usec = pa_sink_get_latency_within_thread(s);
1975 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1976 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1977
1978 if (total_nbytes > 0) {
1979 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1980 i->thread_info.rewrite_flush = TRUE;
1981 pa_sink_input_process_rewind(i, sink_nbytes);
1982 }
1983 }
1984
1985 if (i->detach)
1986 i->detach(i);
1987
1988 pa_assert(i->thread_info.attached);
1989 i->thread_info.attached = FALSE;
1990
1991 /* Let's remove the sink input ...*/
1992 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1993 pa_sink_input_unref(i);
1994
1995 pa_sink_invalidate_requested_latency(s, TRUE);
1996
1997 pa_log_debug("Requesting rewind due to started move");
1998 pa_sink_request_rewind(s, (size_t) -1);
1999
2000 /* In flat volume mode we need to update the volume as
2001 * well */
2002 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2003 }
2004
2005 case PA_SINK_MESSAGE_FINISH_MOVE: {
2006 pa_sink_input *i = PA_SINK_INPUT(userdata);
2007
2008 /* We don't support moving synchronized streams. */
2009 pa_assert(!i->sync_prev);
2010 pa_assert(!i->sync_next);
2011 pa_assert(!i->thread_info.sync_next);
2012 pa_assert(!i->thread_info.sync_prev);
2013
2014 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2015
2016 pa_assert(!i->thread_info.attached);
2017 i->thread_info.attached = TRUE;
2018
2019 if (i->attach)
2020 i->attach(i);
2021
2022 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2023 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2024
2025 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2026 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2027
2028 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2029 pa_usec_t usec = 0;
2030 size_t nbytes;
2031
2032 /* Get the latency of the sink */
2033 usec = pa_sink_get_latency_within_thread(s);
2034 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2035
2036 if (nbytes > 0)
2037 pa_sink_input_drop(i, nbytes);
2038
2039 pa_log_debug("Requesting rewind due to finished move");
2040 pa_sink_request_rewind(s, nbytes);
2041 }
2042
2043 /* In flat volume mode we need to update the volume as
2044 * well */
2045 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2046 }
2047
2048 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2049
2050 if (s->flags & PA_SINK_SYNC_VOLUME) {
2051 s->set_volume(s);
2052 pa_sink_volume_change_push(s);
2053 }
2054 /* Fall through ... */
2055
2056 case PA_SINK_MESSAGE_SET_VOLUME:
2057
2058 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2059 s->thread_info.soft_volume = s->soft_volume;
2060 pa_sink_request_rewind(s, (size_t) -1);
2061 }
2062
2063 if (!(s->flags & PA_SINK_FLAT_VOLUME))
2064 return 0;
2065
2066 /* Fall through ... */
2067
2068 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2069 sync_input_volumes_within_thread(s);
2070 return 0;
2071
2072 case PA_SINK_MESSAGE_GET_VOLUME:
2073
2074 if ((s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume) {
2075 s->get_volume(s);
2076 pa_sink_volume_change_flush(s);
2077 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2078 }
2079
2080 /* In case sink implementor reset SW volume. */
2081 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2082 s->thread_info.soft_volume = s->soft_volume;
2083 pa_sink_request_rewind(s, (size_t) -1);
2084 }
2085
2086 return 0;
2087
2088 case PA_SINK_MESSAGE_SET_MUTE:
2089
2090 if (s->thread_info.soft_muted != s->muted) {
2091 s->thread_info.soft_muted = s->muted;
2092 pa_sink_request_rewind(s, (size_t) -1);
2093 }
2094
2095 if (s->flags & PA_SINK_SYNC_VOLUME && s->set_mute)
2096 s->set_mute(s);
2097
2098 return 0;
2099
2100 case PA_SINK_MESSAGE_GET_MUTE:
2101
2102 if (s->flags & PA_SINK_SYNC_VOLUME && s->get_mute)
2103 s->get_mute(s);
2104
2105 return 0;
2106
2107 case PA_SINK_MESSAGE_SET_STATE: {
2108
2109 pa_bool_t suspend_change =
2110 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2111 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2112
2113 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2114
2115 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2116 s->thread_info.rewind_nbytes = 0;
2117 s->thread_info.rewind_requested = FALSE;
2118 }
2119
2120 if (suspend_change) {
2121 pa_sink_input *i;
2122 void *state = NULL;
2123
2124 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2125 if (i->suspend_within_thread)
2126 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2127 }
2128
2129 return 0;
2130 }
2131
2132 case PA_SINK_MESSAGE_DETACH:
2133
2134 /* Detach all streams */
2135 pa_sink_detach_within_thread(s);
2136 return 0;
2137
2138 case PA_SINK_MESSAGE_ATTACH:
2139
2140 /* Reattach all streams */
2141 pa_sink_attach_within_thread(s);
2142 return 0;
2143
2144 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2145
2146 pa_usec_t *usec = userdata;
2147 *usec = pa_sink_get_requested_latency_within_thread(s);
2148
2149 /* Yes, that's right, the IO thread will see -1 when no
2150 * explicit requested latency is configured, the main
2151 * thread will see max_latency */
2152 if (*usec == (pa_usec_t) -1)
2153 *usec = s->thread_info.max_latency;
2154
2155 return 0;
2156 }
2157
2158 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2159 pa_usec_t *r = userdata;
2160
2161 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2162
2163 return 0;
2164 }
2165
2166 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2167 pa_usec_t *r = userdata;
2168
2169 r[0] = s->thread_info.min_latency;
2170 r[1] = s->thread_info.max_latency;
2171
2172 return 0;
2173 }
2174
2175 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2176
2177 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2178 return 0;
2179
2180 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2181
2182 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2183 return 0;
2184
2185 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2186
2187 *((size_t*) userdata) = s->thread_info.max_rewind;
2188 return 0;
2189
2190 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2191
2192 *((size_t*) userdata) = s->thread_info.max_request;
2193 return 0;
2194
2195 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2196
2197 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2198 return 0;
2199
2200 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2201
2202 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2203 return 0;
2204
2205 case PA_SINK_MESSAGE_SET_PORT:
2206
2207 pa_assert(userdata);
2208 if (s->set_port) {
2209 struct sink_message_set_port *msg_data = userdata;
2210 msg_data->ret = s->set_port(s, msg_data->port);
2211 }
2212 return 0;
2213
2214 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2215 /* This message is sent from IO-thread and handled in main thread. */
2216 pa_assert_ctl_context();
2217
2218 pa_sink_get_volume(s, TRUE);
2219 pa_sink_get_mute(s, TRUE);
2220 return 0;
2221
2222 case PA_SINK_MESSAGE_GET_LATENCY:
2223 case PA_SINK_MESSAGE_MAX:
2224 ;
2225 }
2226
2227 return -1;
2228 }
2229
2230 /* Called from main thread */
2231 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2232 pa_sink *sink;
2233 uint32_t idx;
2234 int ret = 0;
2235
2236 pa_core_assert_ref(c);
2237 pa_assert_ctl_context();
2238 pa_assert(cause != 0);
2239
2240 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2241 int r;
2242
2243 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2244 ret = r;
2245 }
2246
2247 return ret;
2248 }
2249
/* Called from main thread */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Synchronously ask the IO thread to detach all streams of this sink */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2258
/* Called from main thread */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Synchronously ask the IO thread to reattach all streams of this sink */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2267
/* Called from IO thread */
void pa_sink_detach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* Notify every input that implements the optional detach() callback */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->detach)
            i->detach(i);

    /* The monitor source follows its sink */
    if (s->monitor_source)
        pa_source_detach_within_thread(s->monitor_source);
}
2284
/* Called from IO thread */
void pa_sink_attach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* Notify every input that implements the optional attach() callback */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->attach)
            i->attach(i);

    /* The monitor source follows its sink */
    if (s->monitor_source)
        pa_source_attach_within_thread(s->monitor_source);
}
2301
2302 /* Called from IO thread */
2303 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2304 pa_sink_assert_ref(s);
2305 pa_sink_assert_io_context(s);
2306 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2307
2308 if (s->thread_info.state == PA_SINK_SUSPENDED)
2309 return;
2310
2311 if (nbytes == (size_t) -1)
2312 nbytes = s->thread_info.max_rewind;
2313
2314 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2315
2316 if (s->thread_info.rewind_requested &&
2317 nbytes <= s->thread_info.rewind_nbytes)
2318 return;
2319
2320 s->thread_info.rewind_nbytes = nbytes;
2321 s->thread_info.rewind_requested = TRUE;
2322
2323 if (s->request_rewind)
2324 s->request_rewind(s);
2325 }
2326
/* Called from IO thread */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Without dynamic latency support all we can offer is the fixed
     * latency, clamped into the configured range */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Return the cached value if it is still valid */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* The effective requested latency is the minimum over all inputs
     * that actually requested something... */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ...and the monitor source's request */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    /* (pa_usec_t) -1 here means nobody requested any specific latency */
    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2365
/* Called from main thread */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* A suspended sink has no meaningful latency requirement */
    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    /* The authoritative value lives in the IO thread; fetch it synchronously */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
    return usec;
}
2380
2381 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2382 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2383 pa_sink_input *i;
2384 void *state = NULL;
2385
2386 pa_sink_assert_ref(s);
2387 pa_sink_assert_io_context(s);
2388
2389 if (max_rewind == s->thread_info.max_rewind)
2390 return;
2391
2392 s->thread_info.max_rewind = max_rewind;
2393
2394 if (PA_SINK_IS_LINKED(s->thread_info.state))
2395 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2396 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2397
2398 if (s->monitor_source)
2399 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2400 }
2401
/* Called from main thread */
void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Once linked the value is owned by the IO thread, so route the
     * update through the asyncmsgq; before that we may set it directly */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
    else
        pa_sink_set_max_rewind_within_thread(s, max_rewind);
}
2412
2413 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2414 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2415 void *state = NULL;
2416
2417 pa_sink_assert_ref(s);
2418 pa_sink_assert_io_context(s);
2419
2420 if (max_request == s->thread_info.max_request)
2421 return;
2422
2423 s->thread_info.max_request = max_request;
2424
2425 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2426 pa_sink_input *i;
2427
2428 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2429 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2430 }
2431 }
2432
/* Called from main thread */
void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Once linked the value is owned by the IO thread, so route the
     * update through the asyncmsgq; before that we may set it directly */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
    else
        pa_sink_set_max_request_within_thread(s, max_request);
}
2443
/* Called from IO thread */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Drop the cached requested latency if we support dynamic latency.
     * If 'dynamic' is set the caller is only interested in dynamic
     * latency changes, so for fixed-latency sinks there is nothing
     * further to do */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        if (s->update_requested_latency)
            s->update_requested_latency(s);

        /* Give every input a chance to recalculate as well */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2467
2468 /* Called from main thread */
2469 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2470 pa_sink_assert_ref(s);
2471 pa_assert_ctl_context();
2472
2473 /* min_latency == 0: no limit
2474 * min_latency anything else: specified limit
2475 *
2476 * Similar for max_latency */
2477
2478 if (min_latency < ABSOLUTE_MIN_LATENCY)
2479 min_latency = ABSOLUTE_MIN_LATENCY;
2480
2481 if (max_latency <= 0 ||
2482 max_latency > ABSOLUTE_MAX_LATENCY)
2483 max_latency = ABSOLUTE_MAX_LATENCY;
2484
2485 pa_assert(min_latency <= max_latency);
2486
2487 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2488 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2489 max_latency == ABSOLUTE_MAX_LATENCY) ||
2490 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2491
2492 if (PA_SINK_IS_LINKED(s->state)) {
2493 pa_usec_t r[2];
2494
2495 r[0] = min_latency;
2496 r[1] = max_latency;
2497
2498 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2499 } else
2500 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2501 }
2502
/* Called from main thread */
void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(min_latency);
    pa_assert(max_latency);

    /* Once linked the IO thread owns the values, so fetch them via the
     * asyncmsgq; before that we may read them directly */
    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2] = { 0, 0 };

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);

        *min_latency = r[0];
        *max_latency = r[1];
    } else {
        *min_latency = s->thread_info.min_latency;
        *max_latency = s->thread_info.max_latency;
    }
}
2522
/* Called from IO thread */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* Nothing to do if the range is unchanged */
    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        /* Let the inputs adapt to the new range */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency may now be outside the new range */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* Keep the monitor source in sync */
    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2557
/* Called from main thread */
void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* A fixed latency makes no sense for sinks with dynamic latency */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    /* Clamp into the globally allowed range */
    if (latency < ABSOLUTE_MIN_LATENCY)
        latency = ABSOLUTE_MIN_LATENCY;

    if (latency > ABSOLUTE_MAX_LATENCY)
        latency = ABSOLUTE_MAX_LATENCY;

    /* Once linked the value is owned by the IO thread */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
    else
        s->thread_info.fixed_latency = latency;

    pa_source_set_fixed_latency(s->monitor_source, latency);
}
2581
2582 /* Called from main thread */
2583 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2584 pa_usec_t latency;
2585
2586 pa_sink_assert_ref(s);
2587 pa_assert_ctl_context();
2588
2589 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2590 return 0;
2591
2592 if (PA_SINK_IS_LINKED(s->state))
2593 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2594 else
2595 latency = s->thread_info.fixed_latency;
2596
2597 return latency;
2598 }
2599
/* Called from IO thread */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* A fixed latency makes no sense for sinks with dynamic latency */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    /* Nothing to do if the value is unchanged */
    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        /* Let the inputs adapt */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* The cached requested latency is derived from this value */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* Keep the monitor source in sync */
    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
2631
/* Called from main context */
size_t pa_sink_get_max_rewind(pa_sink *s) {
    size_t r;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Before we are linked the IO thread isn't running yet, so we may
     * read the value directly */
    if (!PA_SINK_IS_LINKED(s->state))
        return s->thread_info.max_rewind;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);

    return r;
}
2645
/* Called from main context */
size_t pa_sink_get_max_request(pa_sink *s) {
    size_t r;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Before we are linked the IO thread isn't running yet, so we may
     * read the value directly */
    if (!PA_SINK_IS_LINKED(s->state))
        return s->thread_info.max_request;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);

    return r;
}
2659
/* Called from main context */
int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
    pa_device_port *port;
    int ret;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (!s->set_port) {
        pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
        return -PA_ERR_NOTIMPLEMENTED;
    }

    if (!s->ports)
        return -PA_ERR_NOENTITY;

    if (!(port = pa_hashmap_get(s->ports, name)))
        return -PA_ERR_NOENTITY;

    /* Re-selecting the active port only updates the save flag */
    if (s->active_port == port) {
        s->save_port = s->save_port || save;
        return 0;
    }

    if (s->flags & PA_SINK_SYNC_VOLUME) {
        /* With deferred (timer-based) volume the port switch must happen
         * in the IO thread; send the request there synchronously */
        struct sink_message_set_port msg = { .port = port, .ret = 0 };
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
        ret = msg.ret;
    }
    else
        ret = s->set_port(s, port);

    if (ret < 0)
        return -PA_ERR_NOENTITY;

    /* Tell subscribed clients that the sink changed */
    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

    pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);

    s->active_port = port;
    s->save_port = save;

    return 0;
}
2703
2704 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2705 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2706
2707 pa_assert(p);
2708
2709 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2710 return TRUE;
2711
2712 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2713
2714 if (pa_streq(ff, "microphone"))
2715 t = "audio-input-microphone";
2716 else if (pa_streq(ff, "webcam"))
2717 t = "camera-web";
2718 else if (pa_streq(ff, "computer"))
2719 t = "computer";
2720 else if (pa_streq(ff, "handset"))
2721 t = "phone";
2722 else if (pa_streq(ff, "portable"))
2723 t = "multimedia-player";
2724 else if (pa_streq(ff, "tv"))
2725 t = "video-display";
2726
2727 /*
2728 * The following icons are not part of the icon naming spec,
2729 * because Rodney Dawes sucks as the maintainer of that spec.
2730 *
2731 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2732 */
2733 else if (pa_streq(ff, "headset"))
2734 t = "audio-headset";
2735 else if (pa_streq(ff, "headphone"))
2736 t = "audio-headphones";
2737 else if (pa_streq(ff, "speaker"))
2738 t = "audio-speakers";
2739 else if (pa_streq(ff, "hands-free"))
2740 t = "audio-handsfree";
2741 }
2742
2743 if (!t)
2744 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2745 if (pa_streq(c, "modem"))
2746 t = "modem";
2747
2748 if (!t) {
2749 if (is_sink)
2750 t = "audio-card";
2751 else
2752 t = "audio-input-microphone";
2753 }
2754
2755 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2756 if (strstr(profile, "analog"))
2757 s = "-analog";
2758 else if (strstr(profile, "iec958"))
2759 s = "-iec958";
2760 else if (strstr(profile, "hdmi"))
2761 s = "-hdmi";
2762 }
2763
2764 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2765
2766 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2767
2768 return TRUE;
2769 }
2770
2771 pa_bool_t pa_device_init_description(pa_proplist *p) {
2772 const char *s, *d = NULL, *k;
2773 pa_assert(p);
2774
2775 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2776 return TRUE;
2777
2778 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2779 if (pa_streq(s, "internal"))
2780 d = _("Internal Audio");
2781
2782 if (!d)
2783 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2784 if (pa_streq(s, "modem"))
2785 d = _("Modem");
2786
2787 if (!d)
2788 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2789
2790 if (!d)
2791 return FALSE;
2792
2793 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2794
2795 if (d && k)
2796 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2797 else if (d)
2798 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2799
2800 return TRUE;
2801 }
2802
2803 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2804 const char *s;
2805 pa_assert(p);
2806
2807 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2808 return TRUE;
2809
2810 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2811 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2812 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2813 return TRUE;
2814 }
2815
2816 return FALSE;
2817 }
2818
2819 unsigned pa_device_init_priority(pa_proplist *p) {
2820 const char *s;
2821 unsigned priority = 0;
2822
2823 pa_assert(p);
2824
2825 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2826
2827 if (pa_streq(s, "sound"))
2828 priority += 9000;
2829 else if (!pa_streq(s, "modem"))
2830 priority += 1000;
2831 }
2832
2833 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2834
2835 if (pa_streq(s, "internal"))
2836 priority += 900;
2837 else if (pa_streq(s, "speaker"))
2838 priority += 500;
2839 else if (pa_streq(s, "headphone"))
2840 priority += 400;
2841 }
2842
2843 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2844
2845 if (pa_streq(s, "pci"))
2846 priority += 50;
2847 else if (pa_streq(s, "usb"))
2848 priority += 40;
2849 else if (pa_streq(s, "bluetooth"))
2850 priority += 30;
2851 }
2852
2853 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2854
2855 if (pa_startswith(s, "analog-"))
2856 priority += 9;
2857 else if (pa_startswith(s, "iec958-"))
2858 priority += 8;
2859 }
2860
2861 return priority;
2862 }
2863
2864 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
2865
/* Called from the IO thread. */
/* Allocates (or recycles from the lock-free free list, to avoid
 * malloc() in the IO thread) a volume change entry, initialized to
 * "now" with a reset hw_volume. */
static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
    pa_sink_volume_change *c;
    if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
        c = pa_xnew(pa_sink_volume_change, 1);

    PA_LLIST_INIT(pa_sink_volume_change, c);
    c->at = 0;
    pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
    return c;
}
2877
/* Called from the IO thread. */
/* Returns a volume change entry to the free list; only frees it for
 * real if the free list is full. */
static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
    pa_assert(c);
    if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
        pa_xfree(c);
}
2884
2885 /* Called from the IO thread. */
2886 void pa_sink_volume_change_push(pa_sink *s) {
2887 pa_sink_volume_change *c = NULL;
2888 pa_sink_volume_change *nc = NULL;
2889 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2890
2891 const char *direction = NULL;
2892
2893 pa_assert(s);
2894 nc = pa_sink_volume_change_new(s);
2895
2896 /* NOTE: There is already more different volumes in pa_sink that I can remember.
2897 * Adding one more volume for HW would get us rid of this, but I am trying
2898 * to survive with the ones we already have. */
2899 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2900
2901 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2902 pa_log_debug("Volume not changing");
2903 pa_sink_volume_change_free(nc);
2904 return;
2905 }
2906
2907 /* Get the latency of the sink */
2908 if (PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &nc->at, 0, NULL) < 0)
2909 nc->at = 0;
2910
2911 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2912
2913 if (s->thread_info.volume_changes_tail) {
2914 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2915 /* If volume is going up let's do it a bit late. If it is going
2916 * down let's do it a bit early. */
2917 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2918 if (nc->at + safety_margin > c->at) {
2919 nc->at += safety_margin;
2920 direction = "up";
2921 break;
2922 }
2923 }
2924 else if (nc->at - safety_margin > c->at) {
2925 nc->at -= safety_margin;
2926 direction = "down";
2927 break;
2928 }
2929 }
2930 }
2931
2932 if (c == NULL) {
2933 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2934 nc->at += safety_margin;
2935 direction = "up";
2936 } else {
2937 nc->at -= safety_margin;
2938 direction = "down";
2939 }
2940 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
2941 }
2942 else {
2943 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
2944 }
2945
2946 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), nc->at);
2947
2948 /* We can ignore volume events that came earlier but should happen later than this. */
2949 PA_LLIST_FOREACH(c, nc->next) {
2950 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), c->at);
2951 pa_sink_volume_change_free(c);
2952 }
2953 nc->next = NULL;
2954 s->thread_info.volume_changes_tail = nc;
2955 }
2956
2957 /* Called from the IO thread. */
2958 static void pa_sink_volume_change_flush(pa_sink *s) {
2959 pa_sink_volume_change *c = s->thread_info.volume_changes;
2960 pa_assert(s);
2961 s->thread_info.volume_changes = NULL;
2962 s->thread_info.volume_changes_tail = NULL;
2963 while (c) {
2964 pa_sink_volume_change *next = c->next;
2965 pa_sink_volume_change_free(c);
2966 c = next;
2967 }
2968 }
2969
2970 /* Called from the IO thread. */
2971 pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
2972 pa_usec_t now = pa_rtclock_now();
2973 pa_bool_t ret = FALSE;
2974
2975 pa_assert(s);
2976 pa_assert(s->write_volume);
2977
2978 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2979 pa_sink_volume_change *c = s->thread_info.volume_changes;
2980 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
2981 pa_log_debug("Volume change to %d at %llu was written %llu usec late", pa_cvolume_avg(&c->hw_volume), c->at, now - c->at);
2982 ret = TRUE;
2983 s->thread_info.current_hw_volume = c->hw_volume;
2984 pa_sink_volume_change_free(c);
2985 }
2986
2987 if (s->write_volume && ret)
2988 s->write_volume(s);
2989
2990 if (s->thread_info.volume_changes) {
2991 if (usec_to_next)
2992 *usec_to_next = s->thread_info.volume_changes->at - now;
2993 if (pa_log_ratelimit())
2994 pa_log_debug("Next volume change in %lld usec", s->thread_info.volume_changes->at - now);
2995 }
2996 else {
2997 if (usec_to_next)
2998 *usec_to_next = 0;
2999 s->thread_info.volume_changes_tail = NULL;
3000 }
3001 return ret;
3002 }
3003
/* Called from the IO thread. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit;

    /* Get the latency of the sink */
    if (PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &limit, 0, NULL) < 0)
        limit = 0;

    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        /* Shift events beyond the limit earlier by the rewound amount,
         * but never to before the limit itself */
        if (c->at > limit) {
            c->at -= rewound;
            if (c->at < limit)
                c->at = limit;
        }
    }
}