#include <pulsecore/namereg.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/mix.h>
#include <pulsecore/core-subscribe.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
pa_zero(*data);
data->proplist = pa_proplist_new();
+ data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
return data;
}
/* Record an alternate sample rate in the new-data struct and mark it as
 * explicitly set, so that sink creation knows the value did not come from
 * a default. */
void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
    pa_assert(data);

    data->alternate_sample_rate_is_set = true;
    data->alternate_sample_rate = alternate_sample_rate;
}
data->volume = *volume;
}
-void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
+void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
pa_assert(data);
- data->muted_is_set = TRUE;
+ data->muted_is_set = true;
data->muted = !!mute;
}
pa_proplist_free(data->proplist);
if (data->ports)
- pa_device_port_hashmap_free(data->ports);
+ pa_hashmap_free(data->ports);
pa_xfree(data->name);
pa_xfree(data->active_port);
}
-
/* Called from main context */
static void reset_callbacks(pa_sink *s) {
pa_assert(s);
if (!data->volume_is_set) {
pa_cvolume_reset(&data->volume, data->sample_spec.channels);
- data->save_volume = FALSE;
+ data->save_volume = false;
}
pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
if (!data->muted_is_set)
- data->muted = FALSE;
+ data->muted = false;
if (data->card)
pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
pa_device_init_description(data->proplist);
- pa_device_init_icon(data->proplist, TRUE);
+ pa_device_init_icon(data->proplist, true);
pa_device_init_intended_roles(data->proplist);
if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
s->state = PA_SINK_INIT;
s->flags = flags;
s->priority = 0;
- s->suspend_cause = 0;
- pa_sink_set_mixer_dirty(s, FALSE);
+ s->suspend_cause = data->suspend_cause;
+ pa_sink_set_mixer_dirty(s, false);
s->name = pa_xstrdup(name);
s->proplist = pa_proplist_copy(data->proplist);
s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
s->base_volume = PA_VOLUME_NORM;
s->n_volume_steps = PA_VOLUME_NORM+1;
s->muted = data->muted;
- s->refresh_volume = s->refresh_muted = FALSE;
+ s->refresh_volume = s->refresh_muted = false;
reset_callbacks(s);
s->userdata = NULL;
data->ports = NULL;
s->active_port = NULL;
- s->save_port = FALSE;
+ s->save_port = false;
- if (data->active_port && s->ports)
+ if (data->active_port)
if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
s->save_port = data->save_port;
- if (!s->active_port && s->ports) {
+ if (!s->active_port) {
void *state;
pa_device_port *p;
- PA_HASHMAP_FOREACH(p, s->ports, state)
+ PA_HASHMAP_FOREACH(p, s->ports, state) {
+ if (p->available == PA_AVAILABLE_NO)
+ continue;
+
if (!s->active_port || p->priority > s->active_port->priority)
s->active_port = p;
+ }
+ if (!s->active_port) {
+ PA_HASHMAP_FOREACH(p, s->ports, state)
+ if (!s->active_port || p->priority > s->active_port->priority)
+ s->active_port = p;
+ }
}
+ if (s->active_port)
+ s->latency_offset = s->active_port->latency_offset;
+ else
+ s->latency_offset = 0;
+
s->save_volume = data->save_volume;
s->save_muted = data->save_muted;
0);
s->thread_info.rtpoll = NULL;
- s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
+ s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
+ (pa_free_cb_t) pa_sink_input_unref);
s->thread_info.soft_volume = s->soft_volume;
s->thread_info.soft_muted = s->muted;
s->thread_info.state = s->state;
s->thread_info.rewind_nbytes = 0;
- s->thread_info.rewind_requested = FALSE;
+ s->thread_info.rewind_requested = false;
s->thread_info.max_rewind = 0;
s->thread_info.max_request = 0;
- s->thread_info.requested_latency_valid = FALSE;
+ s->thread_info.requested_latency_valid = false;
s->thread_info.requested_latency = 0;
s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
+ s->thread_info.latency_offset = s->latency_offset;
/* FIXME: This should probably be moved to pa_sink_put() */
pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
/* Called from main context */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
int ret;
- pa_bool_t suspend_change;
+ bool suspend_change;
pa_sink_state_t original_state;
pa_assert(s);
pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
-static void enable_flat_volume(pa_sink *s, pa_bool_t enable) {
+static void enable_flat_volume(pa_sink *s, bool enable) {
pa_sink_flags_t flags;
pa_assert(s);
pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
-void pa_sink_enable_decibel_volume(pa_sink *s, pa_bool_t enable) {
+void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
pa_sink_flags_t flags;
pa_assert(s);
if (enable) {
s->flags |= PA_SINK_DECIBEL_VOLUME;
- enable_flat_volume(s, TRUE);
+ enable_flat_volume(s, true);
} else {
s->flags &= ~PA_SINK_DECIBEL_VOLUME;
- enable_flat_volume(s, FALSE);
+ enable_flat_volume(s, false);
}
/* If the flags have changed after init, let any clients know via a change event */
*
* Note: This flag can also change over the life time of the sink. */
if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
- pa_sink_enable_decibel_volume(s, TRUE);
+ pa_sink_enable_decibel_volume(s, true);
/* If the sink implementor support DB volumes by itself, we should always
* try and enable flat volumes too */
if ((s->flags & PA_SINK_DECIBEL_VOLUME))
- enable_flat_volume(s, TRUE);
+ enable_flat_volume(s, true);
if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
pa_sink *root_sink = pa_sink_get_master(s);
pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
- pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
+ if (s->suspend_cause)
+ pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
+ else
+ pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
pa_source_put(s->monitor_source);
/* Called from main context */
void pa_sink_unlink(pa_sink* s) {
- pa_bool_t linked;
+ bool linked;
pa_sink_input *i, *j = NULL;
pa_assert(s);
/* Called from main context */
static void sink_free(pa_object *o) {
pa_sink *s = PA_SINK(o);
- pa_sink_input *i;
pa_assert(s);
pa_assert_ctl_context();
s->monitor_source = NULL;
}
- pa_idxset_free(s->inputs, NULL, NULL);
-
- while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
- pa_sink_input_unref(i);
-
- pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
+ pa_idxset_free(s->inputs, NULL);
+ pa_hashmap_free(s->thread_info.inputs);
if (s->silence.memblock)
pa_memblock_unref(s->silence.memblock);
pa_proplist_free(s->proplist);
if (s->ports)
- pa_device_port_hashmap_free(s->ports);
+ pa_hashmap_free(s->ports);
pa_xfree(s);
}
/* Called from main context, and not while the IO thread is active, please */
void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
    pa_sink_flags_t old_flags;
    pa_sink_input *input;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* For now, allow only a minimal set of flags to be changed. */
    pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);

    old_flags = s->flags;
    s->flags = (s->flags & ~mask) | (value & mask);

    /* Nothing changed, so skip notifications entirely. */
    if (s->flags == old_flags)
        return;

    if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
        pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");

    if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
        pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
                     s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);

    /* Mirror the latency flags onto the monitor source, translating sink
     * flags into the corresponding source flags. NOTE: the value argument
     * previously mapped to PA_SINK_DYNAMIC_LATENCY by mistake; it must be
     * PA_SOURCE_DYNAMIC_LATENCY to match the mask argument. */
    if (s->monitor_source)
        pa_source_update_flags(s->monitor_source,
                               ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                               ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
                               ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                               ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    /* Propagate the change to any filter sinks stacked on top of this sink. */
    PA_IDXSET_FOREACH(input, s->inputs, idx) {
        if (input->origin_sink)
            pa_sink_update_flags(input->origin_sink, mask, value);
    }
}
/* Called from IO context, or before _put() from main context */
}
/* Called from any context - must be threadsafe */
-void pa_sink_set_mixer_dirty(pa_sink *s, pa_bool_t is_dirty)
-{
+void pa_sink_set_mixer_dirty(pa_sink *s, bool is_dirty) {
pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
}
/* Called from main context */
-int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
+int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
pa_sink_assert_ref(s);
pa_assert_ctl_context();
pa_assert(PA_SINK_IS_LINKED(s->state));
if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
/* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
it'll be handled just fine. */
- pa_sink_set_mixer_dirty(s, FALSE);
+ pa_sink_set_mixer_dirty(s, false);
pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
if (s->active_port && s->set_port) {
if (s->flags & PA_SINK_DEFERRED_VOLUME) {
}
/* Called from main context */
-void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
+void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
pa_sink_input *i;
pa_sink_assert_ref(s);
pa_queue_free(q, NULL);
}
+ /* Called from IO thread context */
+size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
+ pa_sink_input *i;
+ void *state = NULL;
+ size_t result = 0;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
+ size_t uf = i->thread_info.underrun_for_sink;
+ if (uf == 0)
+ continue;
+ if (uf >= left_to_play) {
+ if (pa_sink_input_process_underrun(i))
+ continue;
+ }
+ else if (uf > result)
+ result = uf;
+ }
+
+ if (result > 0)
+ pa_log_debug("Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", (long) result, (long) left_to_play - result);
+ return left_to_play - result;
+}
+
/* Called from IO thread context */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
pa_sink_input *i;
return;
s->thread_info.rewind_nbytes = 0;
- s->thread_info.rewind_requested = FALSE;
-
- if (s->thread_info.state == PA_SINK_SUSPENDED)
- return;
+ s->thread_info.rewind_requested = false;
if (nbytes > 0) {
pa_log_debug("Processing rewind...");
}
if (m) {
- if (m->chunk.memblock)
+ if (m->chunk.memblock) {
pa_memblock_unref(m->chunk.memblock);
pa_memchunk_reset(&m->chunk);
+ }
pa_sink_input_unref(m->userdata);
m->userdata = NULL;
}
/* Called from main thread */
-pa_bool_t pa_sink_update_rate(pa_sink *s, uint32_t rate, pa_bool_t passthrough)
-{
- if (s->update_rate) {
- uint32_t desired_rate = rate;
- uint32_t default_rate = s->default_sample_rate;
- uint32_t alternate_rate = s->alternate_sample_rate;
- uint32_t idx;
- pa_sink_input *i;
- pa_bool_t use_alternate = FALSE;
+int pa_sink_update_rate(pa_sink *s, uint32_t rate, bool passthrough) {
+ int ret = -1;
+ uint32_t desired_rate = rate;
+ uint32_t default_rate = s->default_sample_rate;
+ uint32_t alternate_rate = s->alternate_sample_rate;
+ uint32_t idx;
+ pa_sink_input *i;
+ bool use_alternate = false;
- if (PA_UNLIKELY(default_rate == alternate_rate)) {
- pa_log_warn("Default and alternate sample rates are the same.");
- return FALSE;
- }
+ if (rate == s->sample_spec.rate)
+ return 0;
- if (PA_SINK_IS_RUNNING(s->state)) {
- pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
- s->sample_spec.rate);
- return FALSE;
- }
+ if (!s->update_rate)
+ return -1;
- if (s->monitor_source) {
- if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == TRUE) {
- pa_log_info("Cannot update rate, monitor source is RUNNING");
- return FALSE;
- }
+ if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough)) {
+ pa_log_debug("Default and alternate sample rates are the same.");
+ return -1;
+ }
+
+ if (PA_SINK_IS_RUNNING(s->state)) {
+ pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
+ s->sample_spec.rate);
+ return -1;
+ }
+
+ if (s->monitor_source) {
+ if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
+ pa_log_info("Cannot update rate, monitor source is RUNNING");
+ return -1;
}
+ }
- if (PA_UNLIKELY (desired_rate < 8000 ||
- desired_rate > PA_RATE_MAX))
- return FALSE;
-
- if (!passthrough) {
- pa_assert(default_rate % 4000 || default_rate % 11025);
- pa_assert(alternate_rate % 4000 || alternate_rate % 11025);
-
- if (default_rate % 4000) {
- /* default is a 11025 multiple */
- if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
- use_alternate=TRUE;
- } else {
- /* default is 4000 multiple */
- if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
- use_alternate=TRUE;
- }
+ if (PA_UNLIKELY(!pa_sample_rate_valid(desired_rate)))
+ return -1;
- if (use_alternate)
- desired_rate = alternate_rate;
- else
- desired_rate = default_rate;
+ if (!passthrough) {
+ pa_assert((default_rate % 4000 == 0) || (default_rate % 11025 == 0));
+ pa_assert((alternate_rate % 4000 == 0) || (alternate_rate % 11025 == 0));
+
+ if (default_rate % 11025 == 0) {
+ if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
+ use_alternate=true;
} else {
- desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
+ /* default is 4000 multiple */
+ if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
+ use_alternate=true;
}
- if (!passthrough && pa_sink_used_by(s) > 0)
- return FALSE;
+ if (use_alternate)
+ desired_rate = alternate_rate;
+ else
+ desired_rate = default_rate;
+ } else {
+ desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
+ }
+
+ if (desired_rate == s->sample_spec.rate)
+ return -1;
- pa_sink_suspend(s, TRUE, PA_SUSPEND_IDLE); /* needed before rate update, will be resumed automatically */
+ if (!passthrough && pa_sink_used_by(s) > 0)
+ return -1;
- if (s->update_rate(s, desired_rate) == TRUE) {
- /* update monitor source as well */
- if (s->monitor_source && !passthrough)
- pa_source_update_rate(s->monitor_source, desired_rate, FALSE);
- pa_log_info("Changed sampling rate successfully");
+ pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
+ pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
- PA_IDXSET_FOREACH(i, s->inputs, idx) {
- if (i->state == PA_SINK_INPUT_CORKED)
- pa_sink_input_update_rate(i);
- }
+ if (s->update_rate(s, desired_rate) >= 0) {
+ /* update monitor source as well */
+ if (s->monitor_source && !passthrough)
+ pa_source_update_rate(s->monitor_source, desired_rate, false);
+ pa_log_info("Changed sampling rate successfully");
- return TRUE;
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ if (i->state == PA_SINK_INPUT_CORKED)
+ pa_sink_input_update_rate(i);
}
+
+ ret = 0;
}
- return FALSE;
+
+ pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
+
+ return ret;
}
/* Called from main thread */
pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
+ /* usec is unsigned, so check that the offset can be added to usec without
+ * underflowing. */
+ if (-s->latency_offset <= (int64_t) usec)
+ usec += s->latency_offset;
+ else
+ usec = 0;
+
return usec;
}
if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
return -1;
+ /* usec is unsigned, so check that the offset can be added to usec without
+ * underflowing. */
+ if (-s->thread_info.latency_offset <= (int64_t) usec)
+ usec += s->thread_info.latency_offset;
+ else
+ usec = 0;
+
return usec;
}
* When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
* set. Instead, flat volume mode is detected by checking whether the root sink
* has the flag set. */
-pa_bool_t pa_sink_flat_volume_enabled(pa_sink *s) {
+bool pa_sink_flat_volume_enabled(pa_sink *s) {
pa_sink_assert_ref(s);
s = pa_sink_get_master(s);
if (PA_LIKELY(s))
return (s->flags & PA_SINK_FLAT_VOLUME);
else
- return FALSE;
+ return false;
}
/* Called from the main thread (and also from the IO thread while the main
}
/* Called from main context */
-pa_bool_t pa_sink_is_passthrough(pa_sink *s) {
+bool pa_sink_is_passthrough(pa_sink *s) {
pa_sink_input *alt_i;
uint32_t idx;
alt_i = pa_idxset_first(s->inputs, &idx);
if (pa_sink_input_is_passthrough(alt_i))
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
/* Called from main context */
pa_cvolume volume;
/* disable the monitor in passthrough mode */
- if (s->monitor_source)
- pa_source_suspend(s->monitor_source, TRUE, PA_SUSPEND_PASSTHROUGH);
+ if (s->monitor_source) {
+ pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
+ pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
+ }
/* set the volume to NORM */
- s->saved_volume = *pa_sink_get_volume(s, TRUE);
+ s->saved_volume = *pa_sink_get_volume(s, true);
s->saved_save_volume = s->save_volume;
pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
- pa_sink_set_volume(s, &volume, TRUE, FALSE);
+ pa_sink_set_volume(s, &volume, true, false);
}
/* Called from main context */
void pa_sink_leave_passthrough(pa_sink *s) {
/* Unsuspend monitor */
- if (s->monitor_source)
- pa_source_suspend(s->monitor_source, FALSE, PA_SUSPEND_PASSTHROUGH);
+ if (s->monitor_source) {
+ pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
+ pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
+ }
/* Restore sink volume to what it was before we entered passthrough mode */
- pa_sink_set_volume(s, &s->saved_volume, TRUE, s->saved_save_volume);
+ pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
pa_cvolume_init(&s->saved_volume);
- s->saved_save_volume = FALSE;
+ s->saved_save_volume = false;
}
/* Called from main context. */
/* Called from main thread. Only called for the root sink in volume sharing
* cases, except for internal recursive calls. */
-static pa_bool_t has_inputs(pa_sink *s) {
+static bool has_inputs(pa_sink *s) {
pa_sink_input *i;
uint32_t idx;
PA_IDXSET_FOREACH(i, s->inputs, idx) {
if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
/* Called from main thread. Only called for the root sink in volume sharing
/* Called from main thread. Only called for the root sink in volume sharing
* cases, except for internal recursive calls. The return value indicates
* whether any reference volume actually changed. */
-static pa_bool_t update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
+static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
pa_cvolume volume;
- pa_bool_t reference_volume_changed;
+ bool reference_volume_changed;
pa_sink_input *i;
uint32_t idx;
* intermediate sink that didn't change its volume. This theoretical
* possibility is the reason why we have that !(s->flags &
* PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
- * notice even if we returned here FALSE always if
- * reference_volume_changed is FALSE. */
- return FALSE;
+ * notice even if we returned here false always if
+ * reference_volume_changed is false. */
+ return false;
PA_IDXSET_FOREACH(i, s->inputs, idx) {
if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
- update_reference_volume(i->origin_sink, v, channel_map, FALSE);
+ update_reference_volume(i->origin_sink, v, channel_map, false);
}
- return TRUE;
+ return true;
}
/* Called from main thread */
void pa_sink_set_volume(
pa_sink *s,
const pa_cvolume *volume,
- pa_bool_t send_msg,
- pa_bool_t save) {
+ bool send_msg,
+ bool save) {
pa_cvolume new_reference_volume;
pa_sink *root_sink;
return;
/* 1. Make the real volume the reference volume */
- update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
+ update_reference_volume(s, &s->real_volume, &s->channel_map, true);
}
if (pa_sink_flat_volume_enabled(s)) {
* to save changed hw settings given that hw volume changes not
* triggered by PA are almost certainly done by the user. */
if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
- s->save_volume = TRUE;
+ s->save_volume = true;
}
/* Called from io thread */
}
/* Called from main thread */
-const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
+const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
pa_sink_assert_ref(s);
pa_assert_ctl_context();
pa_assert(PA_SINK_IS_LINKED(s->state));
}
/* Called from main thread */
-void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
- pa_bool_t old_muted;
+void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
+ bool old_muted;
pa_sink_assert_ref(s);
pa_assert_ctl_context();
}
/* Called from main thread */
-pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
+bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
pa_sink_assert_ref(s);
pa_assert_ctl_context();
pa_assert(PA_SINK_IS_LINKED(s->state));
if (s->refresh_muted || force_refresh) {
- pa_bool_t old_muted = s->muted;
+ bool old_muted = s->muted;
if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_mute)
s->get_mute(s);
pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
if (old_muted != s->muted) {
- s->save_muted = TRUE;
+ s->save_muted = true;
pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
/* Called from main thread */
-void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
+void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
pa_sink_assert_ref(s);
pa_assert_ctl_context();
pa_assert(PA_SINK_IS_LINKED(s->state));
return;
s->muted = new_muted;
- s->save_muted = TRUE;
+ s->save_muted = true;
pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
/* Called from main thread */
-pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
+bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
pa_sink_assert_ref(s);
pa_assert_ctl_context();
pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
- return TRUE;
+ return true;
}
/* Called from main thread */
continue;
i->thread_info.soft_volume = i->soft_volume;
- pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
+ pa_sink_input_request_rewind(i, 0, true, false, false);
}
}
}
pa_assert(!i->thread_info.attached);
- i->thread_info.attached = TRUE;
+ i->thread_info.attached = true;
if (i->attach)
i->attach(i);
pa_sink_input_set_state_within_thread(i, i->state);
- /* The requested latency of the sink input needs to be
- * fixed up and then configured on the sink */
+ /* The requested latency of the sink input needs to be fixed up and
+ * then configured on the sink. If this causes the sink latency to
+ * go down, the sink implementor is responsible for doing a rewind
+ * in the update_requested_latency() callback to ensure that the
+ * sink buffer doesn't contain more data than what the new latency
+ * allows.
+ *
+ * XXX: Does it really make sense to push this responsibility to
+ * the sink implementors? Wouldn't it be better to do it once in
+ * the core than many times in the modules? */
if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
/* We don't rewind here automatically. This is left to the
* sink input implementor because some sink inputs need a
* slow start, i.e. need some time to buffer client
- * samples before beginning streaming. */
-
- /* FIXME: Actually rewinding should be requested before
- * updating the sink requested latency, because updating
- * the requested latency updates also max_rewind of the
- * sink. Now consider this: a sink has a 10 s buffer and
- * nobody has requested anything less. Then a new stream
- * appears while the sink buffer is full. The new stream
- * requests e.g. 100 ms latency. That request is forwarded
- * to the sink, so now max_rewind is 100 ms. When a rewind
- * is requested, the sink will only rewind 100 ms, and the
- * new stream will have to wait about 10 seconds before it
- * becomes audible. */
+ * samples before beginning streaming.
+ *
+ * XXX: Does it really make sense to push this functionality to
+ * the sink implementors? Wouldn't it be better to do it once in
+ * the core than many times in the modules? */
/* In flat volume mode we need to update the volume as
* well */
pa_sink_input_set_state_within_thread(i, i->state);
pa_assert(i->thread_info.attached);
- i->thread_info.attached = FALSE;
+ i->thread_info.attached = false;
/* Since the caller sleeps in pa_sink_input_unlink(),
* we can safely access data outside of thread_info even
if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
pa_sink_input_unref(i);
- pa_sink_invalidate_requested_latency(s, TRUE);
+ pa_sink_invalidate_requested_latency(s, true);
pa_sink_request_rewind(s, (size_t) -1);
/* In flat volume mode we need to update the volume as
if (total_nbytes > 0) {
i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
- i->thread_info.rewrite_flush = TRUE;
+ i->thread_info.rewrite_flush = true;
pa_sink_input_process_rewind(i, sink_nbytes);
}
}
i->detach(i);
pa_assert(i->thread_info.attached);
- i->thread_info.attached = FALSE;
+ i->thread_info.attached = false;
/* Let's remove the sink input ...*/
if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
pa_sink_input_unref(i);
- pa_sink_invalidate_requested_latency(s, TRUE);
+ pa_sink_invalidate_requested_latency(s, true);
pa_log_debug("Requesting rewind due to started move");
pa_sink_request_rewind(s, (size_t) -1);
pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
pa_assert(!i->thread_info.attached);
- i->thread_info.attached = TRUE;
+ i->thread_info.attached = true;
if (i->attach)
i->attach(i);
case PA_SINK_MESSAGE_SET_STATE: {
- pa_bool_t suspend_change =
+ bool suspend_change =
(s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
(PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
if (s->thread_info.state == PA_SINK_SUSPENDED) {
s->thread_info.rewind_nbytes = 0;
- s->thread_info.rewind_requested = FALSE;
+ s->thread_info.rewind_requested = false;
}
if (suspend_change) {
if (!PA_SINK_IS_LINKED(s->state))
return 0;
- pa_sink_get_volume(s, TRUE);
- pa_sink_get_mute(s, TRUE);
+ pa_sink_get_volume(s, true);
+ pa_sink_get_mute(s, true);
+ return 0;
+
+ case PA_SINK_MESSAGE_SET_LATENCY_OFFSET:
+ s->thread_info.latency_offset = offset;
return 0;
case PA_SINK_MESSAGE_GET_LATENCY:
}
/* Called from main thread */
-int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
+int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
pa_sink *sink;
uint32_t idx;
int ret = 0;
pa_sink_assert_io_context(s);
pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
- if (s->thread_info.state == PA_SINK_SUSPENDED)
- return;
-
if (nbytes == (size_t) -1)
nbytes = s->thread_info.max_rewind;
return;
s->thread_info.rewind_nbytes = nbytes;
- s->thread_info.rewind_requested = TRUE;
+ s->thread_info.rewind_requested = true;
if (s->request_rewind)
s->request_rewind(s);
if (PA_SINK_IS_LINKED(s->thread_info.state)) {
/* Only cache if properly initialized */
s->thread_info.requested_latency = result;
- s->thread_info.requested_latency_valid = TRUE;
+ s->thread_info.requested_latency_valid = true;
}
return result;
}
/* Called from IO thread */
-void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
+void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
pa_sink_input *i;
void *state = NULL;
pa_sink_assert_io_context(s);
if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
- s->thread_info.requested_latency_valid = FALSE;
+ s->thread_info.requested_latency_valid = false;
else if (dynamic)
return;
i->update_sink_latency_range(i);
}
- pa_sink_invalidate_requested_latency(s, FALSE);
+ pa_sink_invalidate_requested_latency(s, false);
pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
pa_assert(latency == 0);
+ s->thread_info.fixed_latency = 0;
+
+ if (s->monitor_source)
+ pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
+
return;
}
i->update_sink_fixed_latency(i);
}
- pa_sink_invalidate_requested_latency(s, FALSE);
+ pa_sink_invalidate_requested_latency(s, false);
pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
+/* Called from main context */
+void pa_sink_set_latency_offset(pa_sink *s, int64_t offset) {
+ pa_sink_assert_ref(s);
+
+ s->latency_offset = offset;
+
+ if (PA_SINK_IS_LINKED(s->state))
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
+ else
+ s->thread_info.latency_offset = offset;
+}
+
/* Called from main context */
size_t pa_sink_get_max_rewind(pa_sink *s) {
size_t r;
}
/* Called from main context */
-int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
+int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
pa_device_port *port;
int ret;
return -PA_ERR_NOTIMPLEMENTED;
}
- if (!s->ports || !name)
+ if (!name)
return -PA_ERR_NOENTITY;
if (!(port = pa_hashmap_get(s->ports, name)))
s->active_port = port;
s->save_port = save;
+ pa_sink_set_latency_offset(s, s->active_port->latency_offset);
+
pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
return 0;
}
-pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
+bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
pa_assert(p);
if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
- return TRUE;
+ return true;
if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
- return TRUE;
+ return true;
}
-pa_bool_t pa_device_init_description(pa_proplist *p) {
+bool pa_device_init_description(pa_proplist *p) {
const char *s, *d = NULL, *k;
pa_assert(p);
if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
- return TRUE;
+ return true;
if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
if (pa_streq(s, "internal"))
d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
if (!d)
- return FALSE;
+ return false;
k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
else if (d)
pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
- return TRUE;
+ return true;
}
-pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
+bool pa_device_init_intended_roles(pa_proplist *p) {
const char *s;
pa_assert(p);
if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
- return TRUE;
+ return true;
if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
|| pa_streq(s, "headset")) {
pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
unsigned pa_device_init_priority(pa_proplist *p) {
}
/* Called from the IO thread. */
-pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
+bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
pa_usec_t now;
- pa_bool_t ret = FALSE;
+ bool ret = false;
pa_assert(s);
PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
pa_log_debug("Volume change to %d at %llu was written %llu usec late",
pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
- ret = TRUE;
+ ret = true;
s->thread_info.current_hw_volume = c->hw_volume;
pa_sink_volume_change_free(c);
}
/* Called from the main thread */
/* Allows an external source to set what formats a sink supports if the sink
* permits this. The function makes a copy of the formats on success. */
-pa_bool_t pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
+bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
pa_assert(s);
pa_assert(formats);
return s->set_formats(s, formats);
else
/* Sink doesn't support setting this -- bail out */
- return FALSE;
+ return false;
}
/* Called from the main thread */
/* Checks if the sink can accept this format */
-pa_bool_t pa_sink_check_format(pa_sink *s, pa_format_info *f)
-{
+bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
pa_idxset *formats = NULL;
- pa_bool_t ret = FALSE;
+ bool ret = false;
pa_assert(s);
pa_assert(f);
PA_IDXSET_FOREACH(finfo_device, formats, i) {
if (pa_format_info_is_compatible(finfo_device, f)) {
- ret = TRUE;
+ ret = true;
break;
}
}
- pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
+ pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
}
return ret;
done:
if (sink_formats)
- pa_idxset_free(sink_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
+ pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
return out_formats;
}