1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2010 Wim Taymans <wim.taymans@gmail.com>
5
6 Based on module-virtual-sink.c
7 module-virtual-source.c
8 module-loopback.c
9
10 Copyright 2010 Intel Corporation
11 Contributor: Pierre-Louis Bossart <pierre-louis.bossart@intel.com>
12
13 PulseAudio is free software; you can redistribute it and/or modify
14 it under the terms of the GNU Lesser General Public License as published
15 by the Free Software Foundation; either version 2.1 of the License,
16 or (at your option) any later version.
17
18 PulseAudio is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public License
24 along with PulseAudio; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
26 USA.
27 ***/
28
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32
33 #include <stdio.h>
34 #include <math.h>
35
36 #include "echo-cancel.h"
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/timeval.h>
40 #include <pulse/rtclock.h>
41
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/atomic.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/namereg.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/module.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/modargs.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/sample-util.h>
54 #include <pulsecore/ltdl-helper.h>
55
56 #include "module-echo-cancel-symdef.h"
57
58 PA_MODULE_AUTHOR("Wim Taymans");
59 PA_MODULE_DESCRIPTION("Echo Cancellation");
60 PA_MODULE_VERSION(PACKAGE_VERSION);
61 PA_MODULE_LOAD_ONCE(FALSE);
62 PA_MODULE_USAGE(
63 _("source_name=<name for the source> "
64 "source_properties=<properties for the source> "
65 "source_master=<name of source to filter> "
66 "sink_name=<name for the sink> "
67 "sink_properties=<properties for the sink> "
68 "sink_master=<name of sink to filter> "
69 "adjust_time=<how often to readjust rates in s> "
70 "adjust_threshold=<how much drift to readjust after in ms> "
71 "format=<sample format> "
72 "rate=<sample rate> "
73 "channels=<number of channels> "
74 "channel_map=<channel map> "
75 "aec_method=<implementation to use> "
76 "aec_args=<parameters for the AEC engine> "
77 "save_aec=<save AEC data in /tmp> "
78 "autoloaded=<set if this module is being loaded automatically> "
79 "use_volume_sharing=<yes or no> "
80 ));
81
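/* Example module load line (illustrative only; the master device names are
 * placeholders for whatever sources/sinks exist on your system):
 *   load-module module-echo-cancel aec_method=speex source_master=<your-mic-source> sink_master=<your-speaker-sink>
 */
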
82 /* NOTE: Make sure the enum and ec_table are maintained in the correct order */
83 typedef enum {
84 PA_ECHO_CANCELLER_INVALID = -1,
85 PA_ECHO_CANCELLER_SPEEX = 0,
86 PA_ECHO_CANCELLER_ADRIAN,
87 #ifdef HAVE_WEBRTC
88 PA_ECHO_CANCELLER_WEBRTC,
89 #endif
90 } pa_echo_canceller_method_t;
91
92 #define DEFAULT_ECHO_CANCELLER "speex"
93
94 static const pa_echo_canceller ec_table[] = {
95 {
96 /* Speex */
97 .init = pa_speex_ec_init,
98 .run = pa_speex_ec_run,
99 .done = pa_speex_ec_done,
100 },
101 {
102 /* Adrian Andre's NLMS implementation */
103 .init = pa_adrian_ec_init,
104 .run = pa_adrian_ec_run,
105 .done = pa_adrian_ec_done,
106 },
107 #ifdef HAVE_WEBRTC
108 {
109 /* WebRTC's audio processing engine */
110 .init = pa_webrtc_ec_init,
111 .play = pa_webrtc_ec_play,
112 .record = pa_webrtc_ec_record,
113 .set_drift = pa_webrtc_ec_set_drift,
114 .run = pa_webrtc_ec_run,
115 .done = pa_webrtc_ec_done,
116 },
117 #endif
118 };
119
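/* Note on the table above: only the WebRTC entry provides the play(), record()
 * and set_drift() callbacks. Those are used by the drift-compensation path
 * (do_push_drift_comp() below); the other implementations are driven through
 * run() in do_push(). */
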
120 #define DEFAULT_RATE 32000
121 #define DEFAULT_CHANNELS 1
122 #define DEFAULT_ADJUST_TIME_USEC (1*PA_USEC_PER_SEC)
123 #define DEFAULT_ADJUST_TOLERANCE (5*PA_USEC_PER_MSEC)
124 #define DEFAULT_SAVE_AEC FALSE
125 #define DEFAULT_AUTOLOADED FALSE
126
127 #define MEMBLOCKQ_MAXLENGTH (16*1024*1024)
128
129 /* Can only be used in main context */
130 #define IS_ACTIVE(u) ((pa_source_get_state((u)->source) == PA_SOURCE_RUNNING) && \
131 (pa_sink_get_state((u)->sink) == PA_SINK_RUNNING))
132
133 /* This module creates a new (virtual) source and sink.
134 *
135 * The data sent to the new sink is kept in a memblockq before being
136 * forwarded to the real sink_master.
137 *
138 * Data read from source_master is matched against the saved sink data and
139 * echo canceled data is then pushed onto the new source.
140 *
141 * Both source and sink masters have their own threads to push/pull data
142 * respectively. We, however, perform all our actions in the source IO thread.
143 * To do this we send all played samples to the source IO thread where they
144 * are then pushed into the memblockq.
145 *
146 * Alignment is performed in two steps:
147 *
148 * 1) when something happens that requires quick adjustment of the alignment of
149 *    capture and playback samples, we perform a resync. This adjusts the
150 *    position in the playback memblock to the requested sample. Quick
151 *    adjustments include moving the playback samples before the capture
152 *    samples (because otherwise the echo canceler does not work) or when the
153 *    playback pointer drifts too far away.
154 *
155 * 2) periodically check the difference between capture and playback. We use a
156 *    low and high watermark for adjusting the alignment. Playback should always
157 *    be before capture and the difference should not be bigger than one frame
158 *    size. We would ideally like to resample the sink_input but most drivers
159 *    don't give enough accuracy to be able to do that right now.
160 */
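
/* In code terms (rough sketch of the flow implemented below):
 * sink_input_pop_cb() renders data from our virtual sink and posts it to the
 * source I/O thread as SOURCE_OUTPUT_MESSAGE_POST, where it lands in
 * sink_memblockq; source_output_push_cb() pushes captured data into
 * source_memblockq and, once at least one blocksize is queued, feeds both
 * queues to the canceller via do_push() or do_push_drift_comp() and posts the
 * result to the virtual source. */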
161
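/* A point-in-time view of the playback and capture side, filled in from the
 * sink and source I/O threads (see the LATENCY_SNAPSHOT messages below) and
 * turned into a drift estimate by calc_diff(). */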
162 struct snapshot {
163 pa_usec_t sink_now;
164 pa_usec_t sink_latency;
165 size_t sink_delay;
166 int64_t send_counter;
167
168 pa_usec_t source_now;
169 pa_usec_t source_latency;
170 size_t source_delay;
171 int64_t recv_counter;
172 size_t rlen;
173 size_t plen;
174 };
175
176 struct userdata {
177 pa_core *core;
178 pa_module *module;
179
180 pa_bool_t autoloaded;
181 pa_bool_t dead;
182 pa_bool_t save_aec;
183
184 pa_echo_canceller *ec;
185 uint32_t blocksize;
186
187 pa_bool_t need_realign;
188
189 /* to wakeup the source I/O thread */
190 pa_asyncmsgq *asyncmsgq;
191 pa_rtpoll_item *rtpoll_item_read, *rtpoll_item_write;
192
193 pa_source *source;
194 pa_bool_t source_auto_desc;
195 pa_source_output *source_output;
196 pa_memblockq *source_memblockq; /* echo canceler needs fixed sized chunks */
197 size_t source_skip;
198
199 pa_sink *sink;
200 pa_bool_t sink_auto_desc;
201 pa_sink_input *sink_input;
202 pa_memblockq *sink_memblockq;
203 int64_t send_counter; /* updated in sink IO thread */
204 int64_t recv_counter;
205 size_t sink_skip;
206
207 /* Bytes left over from previous iteration */
208 size_t sink_rem;
209 size_t source_rem;
210
211 pa_atomic_t request_resync;
212
213 pa_time_event *time_event;
214 pa_usec_t adjust_time;
215 int adjust_threshold;
216
217 FILE *captured_file;
218 FILE *played_file;
219 FILE *canceled_file;
220 FILE *drift_file;
221 };
222
223 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot);
224
225 static const char* const valid_modargs[] = {
226 "source_name",
227 "source_properties",
228 "source_master",
229 "sink_name",
230 "sink_properties",
231 "sink_master",
232 "adjust_time",
233 "adjust_threshold",
234 "format",
235 "rate",
236 "channels",
237 "channel_map",
238 "aec_method",
239 "aec_args",
240 "save_aec",
241 "autoloaded",
242 "use_volume_sharing",
243 NULL
244 };
245
246 enum {
247 SOURCE_OUTPUT_MESSAGE_POST = PA_SOURCE_OUTPUT_MESSAGE_MAX,
248 SOURCE_OUTPUT_MESSAGE_REWIND,
249 SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT,
250 SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME
251 };
252
253 enum {
254 SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT
255 };
256
257 static int64_t calc_diff(struct userdata *u, struct snapshot *snapshot) {
258 int64_t buffer, diff_time, buffer_latency;
259
260 /* get the number of samples between capture and playback */
261 if (snapshot->plen > snapshot->rlen)
262 buffer = snapshot->plen - snapshot->rlen;
263 else
264 buffer = 0;
265
266 buffer += snapshot->source_delay + snapshot->sink_delay;
267
268 /* add the amount of samples not yet transferred to the source context */
269 if (snapshot->recv_counter <= snapshot->send_counter)
270 buffer += (int64_t) (snapshot->send_counter - snapshot->recv_counter);
271 else
272 buffer += PA_CLIP_SUB(buffer, (int64_t) (snapshot->recv_counter - snapshot->send_counter));
273
274 /* convert to time */
275 buffer_latency = pa_bytes_to_usec(buffer, &u->source_output->sample_spec);
276
277 /* capture and playback samples are perfectly aligned when diff_time is 0 */
278 diff_time = (snapshot->sink_now + snapshot->sink_latency - buffer_latency) -
279 (snapshot->source_now - snapshot->source_latency);
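    /* Sign convention: diff_time < 0 means capture is running ahead of
     * playback (which the canceller cannot handle, see time_callback()),
     * diff_time > 0 is how far capture lags behind playback, in usec. */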
280
281 pa_log_debug("diff %lld (%lld - %lld + %lld) %lld %lld %lld %lld", (long long) diff_time,
282 (long long) snapshot->sink_latency,
283 (long long) buffer_latency, (long long) snapshot->source_latency,
284 (long long) snapshot->source_delay, (long long) snapshot->sink_delay,
285 (long long) (snapshot->send_counter - snapshot->recv_counter),
286 (long long) (snapshot->sink_now - snapshot->source_now));
287
288 return diff_time;
289 }
290
291 /* Called from main context */
292 static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
293 struct userdata *u = userdata;
294 uint32_t old_rate, base_rate, new_rate;
295 int64_t diff_time;
296 /*size_t fs*/
297 struct snapshot latency_snapshot;
298
299 pa_assert(u);
300 pa_assert(a);
301 pa_assert(u->time_event == e);
302 pa_assert_ctl_context();
303
304 if (!IS_ACTIVE(u))
305 return;
306
307 /* update our snapshots */
308 pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
309 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
310
311 /* calculate drift between capture and playback */
312 diff_time = calc_diff(u, &latency_snapshot);
313
314 /*fs = pa_frame_size(&u->source_output->sample_spec);*/
315 old_rate = u->sink_input->sample_spec.rate;
316 base_rate = u->source_output->sample_spec.rate;
317
318 if (diff_time < 0) {
319 /* recording before playback, we need to adjust quickly. The echo
320 * canceler does not work in this case. */
321 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
322 NULL, diff_time, NULL, NULL);
323 /*new_rate = base_rate - ((pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
324 new_rate = base_rate;
325 }
326 else {
327 if (diff_time > u->adjust_threshold) {
328 /* diff too big, quickly adjust */
329 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
330 NULL, diff_time, NULL, NULL);
331 }
332
333 /* recording behind playback, we need to slowly adjust the rate to match */
334 /*new_rate = base_rate + ((pa_usec_to_bytes(diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
335
336 /* assume equal samplerates for now */
337 new_rate = base_rate;
338 }
339
340 /* make sure we don't make too big adjustments because that sounds horrible */
341 if (new_rate > base_rate * 1.1 || new_rate < base_rate * 0.9)
342 new_rate = base_rate;
343
344 if (new_rate != old_rate) {
345 pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate);
346
347 pa_sink_input_set_rate(u->sink_input, new_rate);
348 }
349
350 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
351 }
352
353 /* Called from source I/O thread context */
354 static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
355 struct userdata *u = PA_SOURCE(o)->userdata;
356
357 switch (code) {
358
359 case PA_SOURCE_MESSAGE_GET_LATENCY:
360
361 /* The source is _put() before the source output is, so let's
362 * make sure we don't access it in that time. Also, the
363 * source output is first shut down, the source second. */
364 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
365 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
366 *((pa_usec_t*) data) = 0;
367 return 0;
368 }
369
370 *((pa_usec_t*) data) =
371
372 /* Get the latency of the master source */
373 pa_source_get_latency_within_thread(u->source_output->source) +
374 /* Add the latency internal to our source output on top */
375 pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec) +
376 /* and the buffering we do on the source */
377 pa_bytes_to_usec(u->blocksize, &u->source_output->source->sample_spec);
378
379 return 0;
380
381 }
382
383 return pa_source_process_msg(o, code, data, offset, chunk);
384 }
385
386 /* Called from sink I/O thread context */
387 static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
388 struct userdata *u = PA_SINK(o)->userdata;
389
390 switch (code) {
391
392 case PA_SINK_MESSAGE_GET_LATENCY:
393
394 /* The sink is _put() before the sink input is, so let's
395 * make sure we don't access it in that time. Also, the
396 * sink input is first shut down, the sink second. */
397 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
398 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
399 *((pa_usec_t*) data) = 0;
400 return 0;
401 }
402
403 *((pa_usec_t*) data) =
404
405 /* Get the latency of the master sink */
406 pa_sink_get_latency_within_thread(u->sink_input->sink) +
407
408 /* Add the latency internal to our sink input on top */
409 pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);
410
411 return 0;
412 }
413
414 return pa_sink_process_msg(o, code, data, offset, chunk);
415 }
416
417
418 /* Called from main context */
419 static int source_set_state_cb(pa_source *s, pa_source_state_t state) {
420 struct userdata *u;
421
422 pa_source_assert_ref(s);
423 pa_assert_se(u = s->userdata);
424
425 if (!PA_SOURCE_IS_LINKED(state) ||
426 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
427 return 0;
428
429 if (state == PA_SOURCE_RUNNING) {
430 /* restart timer when both sink and source are active */
431 if (IS_ACTIVE(u) && u->adjust_time)
432 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
433
434 pa_atomic_store(&u->request_resync, 1);
435 pa_source_output_cork(u->source_output, FALSE);
436 } else if (state == PA_SOURCE_SUSPENDED) {
437 pa_source_output_cork(u->source_output, TRUE);
438 }
439
440 return 0;
441 }
442
443 /* Called from main context */
444 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
445 struct userdata *u;
446
447 pa_sink_assert_ref(s);
448 pa_assert_se(u = s->userdata);
449
450 if (!PA_SINK_IS_LINKED(state) ||
451 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
452 return 0;
453
454 if (state == PA_SINK_RUNNING) {
455 /* restart timer when both sink and source are active */
456 if (IS_ACTIVE(u) && u->adjust_time)
457 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
458
459 pa_atomic_store(&u->request_resync, 1);
460 pa_sink_input_cork(u->sink_input, FALSE);
461 } else if (state == PA_SINK_SUSPENDED) {
462 pa_sink_input_cork(u->sink_input, TRUE);
463 }
464
465 return 0;
466 }
467
468 /* Called from I/O thread context */
469 static void source_update_requested_latency_cb(pa_source *s) {
470 struct userdata *u;
471
472 pa_source_assert_ref(s);
473 pa_assert_se(u = s->userdata);
474
475 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
476 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state))
477 return;
478
479 pa_log_debug("Source update requested latency");
480
481 /* Just hand this one over to the master source */
482 pa_source_output_set_requested_latency_within_thread(
483 u->source_output,
484 pa_source_get_requested_latency_within_thread(s));
485 }
486
487 /* Called from I/O thread context */
488 static void sink_update_requested_latency_cb(pa_sink *s) {
489 struct userdata *u;
490
491 pa_sink_assert_ref(s);
492 pa_assert_se(u = s->userdata);
493
494 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
495 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
496 return;
497
498 pa_log_debug("Sink update requested latency");
499
500 /* Just hand this one over to the master sink */
501 pa_sink_input_set_requested_latency_within_thread(
502 u->sink_input,
503 pa_sink_get_requested_latency_within_thread(s));
504 }
505
506 /* Called from I/O thread context */
507 static void sink_request_rewind_cb(pa_sink *s) {
508 struct userdata *u;
509
510 pa_sink_assert_ref(s);
511 pa_assert_se(u = s->userdata);
512
513 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
514 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
515 return;
516
517 pa_log_debug("Sink request rewind %lld", (long long) s->thread_info.rewind_nbytes);
518
519 /* Just hand this one over to the master sink */
520 pa_sink_input_request_rewind(u->sink_input,
521 s->thread_info.rewind_nbytes, TRUE, FALSE, FALSE);
522 }
523
524 /* Called from main context */
525 static void source_set_volume_cb(pa_source *s) {
526 struct userdata *u;
527
528 pa_source_assert_ref(s);
529 pa_assert_se(u = s->userdata);
530
531 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
532 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
533 return;
534
535 pa_source_output_set_volume(u->source_output, &s->real_volume, s->save_volume, TRUE);
536 }
537
538 /* Called from main context */
539 static void sink_set_volume_cb(pa_sink *s) {
540 struct userdata *u;
541
542 pa_sink_assert_ref(s);
543 pa_assert_se(u = s->userdata);
544
545 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
546 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
547 return;
548
549 pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, TRUE);
550 }
551
552 static void source_get_volume_cb(pa_source *s) {
553 struct userdata *u;
554 pa_cvolume v;
555
556 pa_source_assert_ref(s);
557 pa_assert_se(u = s->userdata);
558
559 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
560 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
561 return;
562
563 pa_source_output_get_volume(u->source_output, &v, TRUE);
564
565 if (pa_cvolume_equal(&s->real_volume, &v))
566 /* no change */
567 return;
568
569 s->real_volume = v;
570 pa_source_set_soft_volume(s, NULL);
571 }
572
573 /* Called from main context */
574 static void source_set_mute_cb(pa_source *s) {
575 struct userdata *u;
576
577 pa_source_assert_ref(s);
578 pa_assert_se(u = s->userdata);
579
580 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
581 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
582 return;
583
584 pa_source_output_set_mute(u->source_output, s->muted, s->save_muted);
585 }
586
587 /* Called from main context */
588 static void sink_set_mute_cb(pa_sink *s) {
589 struct userdata *u;
590
591 pa_sink_assert_ref(s);
592 pa_assert_se(u = s->userdata);
593
594 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
595 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
596 return;
597
598 pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted);
599 }
600
601 /* Called from main context */
602 static void source_get_mute_cb(pa_source *s) {
603 struct userdata *u;
604
605 pa_source_assert_ref(s);
606 pa_assert_se(u = s->userdata);
607
608 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
609 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
610 return;
611
612 pa_source_output_get_mute(u->source_output);
613 }
614
615 /* must be called from the input thread context */
616 static void apply_diff_time(struct userdata *u, int64_t diff_time) {
617 int64_t diff;
618
619 if (diff_time < 0) {
620 diff = pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec);
621
622 if (diff > 0) {
623 /* add some extra safety samples to compensate for jitter in the
624 * timings */
625 diff += 10 * pa_frame_size (&u->source_output->sample_spec);
626
627 pa_log("Playback after capture (%lld), drop sink %lld", (long long) diff_time, (long long) diff);
628
629 u->sink_skip = diff;
630 u->source_skip = 0;
631 }
632 } else if (diff_time > 0) {
633 diff = pa_usec_to_bytes(diff_time, &u->source_output->sample_spec);
634
635 if (diff > 0) {
636 pa_log("playback too far ahead (%lld), drop source %lld", (long long) diff_time, (long long) diff);
637
638 u->source_skip = diff;
639 u->sink_skip = 0;
640 }
641 }
642 }
643
644 /* must be called from the input thread */
645 static void do_resync(struct userdata *u) {
646 int64_t diff_time;
647 struct snapshot latency_snapshot;
648
649 pa_log("Doing resync");
650
651 /* update our snapshot */
652 source_output_snapshot_within_thread(u, &latency_snapshot);
653 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
654
655 /* calculate drift between capture and playback */
656 diff_time = calc_diff(u, &latency_snapshot);
657
658 /* and adjust for the drift */
659 apply_diff_time(u, diff_time);
660 }
661
662 /* 1. Calculate drift at this point, pass to canceller
663 * 2. Push out playback samples in blocksize chunks
664 * 3. Push out capture samples in blocksize chunks
665 * 4. ???
666 * 5. Profit
667 */
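/* When save_aec is set, this path also logs what it feeds the canceller to
 * u->drift_file: "d <drift>" per drift estimate, "p <bytes>" per playback
 * block and "c <bytes>" per capture block, presumably so the standalone test
 * program can replay the same sequence offline. */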
668 static void do_push_drift_comp(struct userdata *u) {
669 size_t rlen, plen;
670 pa_memchunk rchunk, pchunk, cchunk;
671 uint8_t *rdata, *pdata, *cdata;
672 float drift;
673 int unused;
674
675 rlen = pa_memblockq_get_length(u->source_memblockq);
676 plen = pa_memblockq_get_length(u->sink_memblockq);
677
678 /* Estimate snapshot drift as follows:
679 * pd: amount of playback data consumed since last time
680 * rd: amount of recorded data consumed since last time
681 *
682 * drift = (pd - rd) / rd;
683 *
684 * We calculate pd and rd as the memblockq length less the number of
685 * samples left from the last iteration (to avoid double counting
686 * those remainder samples).
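 *
 * Worked example (made-up numbers): if playback consumed pd = 4804 bytes and
 * capture consumed rd = 4800 bytes since the last iteration, then
 * drift = (4804 - 4800) / 4800 ~= +0.00083, i.e. playback ran about 0.08%
 * fast relative to capture over that interval.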
687 */
688 drift = ((float)(plen - u->sink_rem) - (rlen - u->source_rem)) / ((float)(rlen - u->source_rem));
689 u->sink_rem = plen % u->blocksize;
690 u->source_rem = rlen % u->blocksize;
691
692 /* Now let the canceller work its drift compensation magic */
693 u->ec->set_drift(u->ec, drift);
694
695 if (u->save_aec) {
696 if (u->drift_file)
697 fprintf(u->drift_file, "d %a\n", drift);
698 }
699
700 /* Send in the playback samples first */
701 while (plen >= u->blocksize) {
702 pa_memblockq_peek_fixed_size(u->sink_memblockq, u->blocksize, &pchunk);
703 pdata = pa_memblock_acquire(pchunk.memblock);
704 pdata += pchunk.index;
705
706 u->ec->play(u->ec, pdata);
707
708 if (u->save_aec) {
709 if (u->drift_file)
710 fprintf(u->drift_file, "p %d\n", u->blocksize);
711 if (u->played_file)
712 unused = fwrite(pdata, 1, u->blocksize, u->played_file);
713 }
714
715 pa_memblock_release(pchunk.memblock);
716 pa_memblockq_drop(u->sink_memblockq, u->blocksize);
717 pa_memblock_unref(pchunk.memblock);
718
719 plen -= u->blocksize;
720 }
721
722 /* And now the capture samples */
723 while (rlen >= u->blocksize) {
724 pa_memblockq_peek_fixed_size(u->source_memblockq, u->blocksize, &rchunk);
725
726 rdata = pa_memblock_acquire(rchunk.memblock);
727 rdata += rchunk.index;
728
729 cchunk.index = 0;
730 cchunk.length = u->blocksize;
731 cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
732 cdata = pa_memblock_acquire(cchunk.memblock);
733
734 u->ec->record(u->ec, rdata, cdata);
735
736 if (u->save_aec) {
737 if (u->drift_file)
738 fprintf(u->drift_file, "c %d\n", u->blocksize);
739 if (u->captured_file)
740 unused = fwrite(rdata, 1, u->blocksize, u->captured_file);
741 if (u->canceled_file)
742 unused = fwrite(cdata, 1, u->blocksize, u->canceled_file);
743 }
744
745 pa_memblock_release(cchunk.memblock);
746 pa_memblock_release(rchunk.memblock);
747
748 pa_memblock_unref(rchunk.memblock);
749
750 pa_source_post(u->source, &cchunk);
751 pa_memblock_unref(cchunk.memblock);
752
753 pa_memblockq_drop(u->source_memblockq, u->blocksize);
754 rlen -= u->blocksize;
755 }
756 }
757
758 /* This one's simpler than the drift compensation case -- we just iterate over
759 * the capture buffer, and pass the canceller blocksize bytes of playback and
760 * capture data. */
761 static void do_push(struct userdata *u) {
762 size_t rlen, plen;
763 pa_memchunk rchunk, pchunk, cchunk;
764 uint8_t *rdata, *pdata, *cdata;
765 int unused;
766
767 rlen = pa_memblockq_get_length(u->source_memblockq);
768 plen = pa_memblockq_get_length(u->sink_memblockq);
769
770 while (rlen >= u->blocksize) {
771 /* take fixed block from recorded samples */
772 pa_memblockq_peek_fixed_size(u->source_memblockq, u->blocksize, &rchunk);
773
774 if (plen > u->blocksize) {
776 /* take fixed block from played samples */
777 pa_memblockq_peek_fixed_size(u->sink_memblockq, u->blocksize, &pchunk);
778
779 rdata = pa_memblock_acquire(rchunk.memblock);
780 rdata += rchunk.index;
781 pdata = pa_memblock_acquire(pchunk.memblock);
782 pdata += pchunk.index;
783
784 cchunk.index = 0;
785 cchunk.length = u->blocksize;
786 cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
787 cdata = pa_memblock_acquire(cchunk.memblock);
788
789 if (u->save_aec) {
790 if (u->captured_file)
791 unused = fwrite(rdata, 1, u->blocksize, u->captured_file);
792 if (u->played_file)
793 unused = fwrite(pdata, 1, u->blocksize, u->played_file);
794 }
795
796 /* perform echo cancellation */
797 u->ec->run(u->ec, rdata, pdata, cdata);
798
799 if (u->save_aec) {
800 if (u->canceled_file)
801 unused = fwrite(cdata, 1, u->blocksize, u->canceled_file);
802 }
803
804 pa_memblock_release(cchunk.memblock);
805 pa_memblock_release(pchunk.memblock);
806 pa_memblock_release(rchunk.memblock);
807
808 /* drop consumed sink samples */
809 pa_memblockq_drop(u->sink_memblockq, u->blocksize);
810 pa_memblock_unref(pchunk.memblock);
811
812 pa_memblock_unref(rchunk.memblock);
813 /* the filtered samples now become the samples from our
814 * source */
815 rchunk = cchunk;
816
817 plen -= u->blocksize;
818 }
820
821 /* forward the (echo-canceled) data to the virtual source */
822 pa_source_post(u->source, &rchunk);
823 pa_memblock_unref(rchunk.memblock);
824
825 pa_memblockq_drop(u->source_memblockq, u->blocksize);
826 rlen -= u->blocksize;
827 }
828 }
829
830 /* Called from input thread context */
831 static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
832 struct userdata *u;
833 size_t rlen, plen, to_skip;
834 pa_memchunk rchunk;
835
836 pa_source_output_assert_ref(o);
837 pa_source_output_assert_io_context(o);
838 pa_assert_se(u = o->userdata);
839
840 if (!PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output))) {
841 pa_log("push when no link?");
842 return;
843 }
844
845 if (PA_UNLIKELY(u->source->thread_info.state != PA_SOURCE_RUNNING ||
846 u->sink->thread_info.state != PA_SINK_RUNNING)) {
847 pa_source_post(u->source, chunk);
848 return;
849 }
850
851 /* handle queued messages, do any message sending of our own */
852 while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0)
853 ;
854
855 pa_memblockq_push_align(u->source_memblockq, chunk);
856
857 rlen = pa_memblockq_get_length(u->source_memblockq);
858 plen = pa_memblockq_get_length(u->sink_memblockq);
859
860 /* Let's not do anything else till we have enough data to process */
861 if (rlen < u->blocksize)
862 return;
863
864 /* See if we need to drop samples in order to sync */
865 if (pa_atomic_cmpxchg (&u->request_resync, 1, 0)) {
866 do_resync(u);
867 }
868
869 /* Okay, skip cancellation for skipped source samples if needed. */
870 if (PA_UNLIKELY(u->source_skip)) {
871 /* The slightly tricky bit here is that we drop all but modulo
872 * blocksize bytes and then adjust for that last bit on the sink side.
873 * We do this because the source data is coming at a fixed rate, which
874 * means the only way to try to catch up is to drop sink samples and let
875 * the canceller cope with this. */
876 to_skip = rlen >= u->source_skip ? u->source_skip : rlen;
877 to_skip -= to_skip % u->blocksize;
878
879 if (to_skip) {
880 pa_memblockq_peek_fixed_size(u->source_memblockq, to_skip, &rchunk);
881 pa_source_post(u->source, &rchunk);
882
883 pa_memblock_unref(rchunk.memblock);
884 pa_memblockq_drop(u->source_memblockq, to_skip);
885
886 rlen -= to_skip;
887 u->source_skip -= to_skip;
888 }
889
890 if (rlen && u->source_skip % u->blocksize) {
891 u->sink_skip += u->blocksize - (u->source_skip % u->blocksize);
892 u->source_skip -= (u->source_skip % u->blocksize);
893 }
894 }
895
896 /* And for the sink, these samples have been played back already, so we can
897 * just drop them and get on with it. */
898 if (PA_UNLIKELY(u->sink_skip)) {
899 to_skip = plen >= u->sink_skip ? u->sink_skip : plen;
900
901 pa_memblockq_drop(u->sink_memblockq, to_skip);
902
903 plen -= to_skip;
904 u->sink_skip -= to_skip;
905 }
906
907 /* process and push out samples */
908 if (u->ec->params.drift_compensation)
909 do_push_drift_comp(u);
910 else
911 do_push(u);
912 }
913
914 /* Called from I/O thread context */
915 static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
916 struct userdata *u;
917
918 pa_sink_input_assert_ref(i);
919 pa_assert(chunk);
920 pa_assert_se(u = i->userdata);
921
922 if (u->sink->thread_info.rewind_requested)
923 pa_sink_process_rewind(u->sink, 0);
924
925 pa_sink_render_full(u->sink, nbytes, chunk);
926
927 if (i->thread_info.underrun_for > 0) {
928 pa_log_debug("Handling end of underrun.");
929 pa_atomic_store(&u->request_resync, 1);
930 }
931
932 /* let source thread handle the chunk. pass the sample count as well so that
933 * the source IO thread can update the right variables. */
934 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_POST,
935 NULL, 0, chunk, NULL);
936 u->send_counter += chunk->length;
937
938 return 0;
939 }
940
941 /* Called from input thread context */
942 static void source_output_process_rewind_cb(pa_source_output *o, size_t nbytes) {
943 struct userdata *u;
944
945 pa_source_output_assert_ref(o);
946 pa_source_output_assert_io_context(o);
947 pa_assert_se(u = o->userdata);
948
949 pa_source_process_rewind(u->source, nbytes);
950
951 /* go back on read side, we need to use older sink data for this */
952 pa_memblockq_rewind(u->sink_memblockq, nbytes);
953
954 /* manipulate write index */
955 pa_memblockq_seek(u->source_memblockq, -nbytes, PA_SEEK_RELATIVE, TRUE);
956
957 pa_log_debug("Source rewind (%lld) %lld", (long long) nbytes,
958 (long long) pa_memblockq_get_length (u->source_memblockq));
959 }
960
961 /* Called from I/O thread context */
962 static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
963 struct userdata *u;
964
965 pa_sink_input_assert_ref(i);
966 pa_assert_se(u = i->userdata);
967
968 pa_log_debug("Sink process rewind %lld", (long long) nbytes);
969
970 pa_sink_process_rewind(u->sink, nbytes);
971
972 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_REWIND, NULL, (int64_t) nbytes, NULL, NULL);
973 u->send_counter -= nbytes;
974 }
975
976 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot) {
977 size_t delay, rlen, plen;
978 pa_usec_t now, latency;
979
980 now = pa_rtclock_now();
981 latency = pa_source_get_latency_within_thread(u->source_output->source);
982 delay = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);
983
984 delay = (u->source_output->thread_info.resampler ? pa_resampler_request(u->source_output->thread_info.resampler, delay) : delay);
985 rlen = pa_memblockq_get_length(u->source_memblockq);
986 plen = pa_memblockq_get_length(u->sink_memblockq);
987
988 snapshot->source_now = now;
989 snapshot->source_latency = latency;
990 snapshot->source_delay = delay;
991 snapshot->recv_counter = u->recv_counter;
992 snapshot->rlen = rlen + u->sink_skip;
993 snapshot->plen = plen + u->source_skip;
994 }
995
996
997 /* Called from output thread context */
998 static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
999 struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;
1000
1001 switch (code) {
1002
1003 case SOURCE_OUTPUT_MESSAGE_POST:
1004
1005 pa_source_output_assert_io_context(u->source_output);
1006
1007 if (u->source_output->source->thread_info.state == PA_SOURCE_RUNNING)
1008 pa_memblockq_push_align(u->sink_memblockq, chunk);
1009 else
1010 pa_memblockq_flush_write(u->sink_memblockq, TRUE);
1011
1012 u->recv_counter += (int64_t) chunk->length;
1013
1014 return 0;
1015
1016 case SOURCE_OUTPUT_MESSAGE_REWIND:
1017 pa_source_output_assert_io_context(u->source_output);
1018
1019 /* manipulate write index, never go past what we have */
1020 if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state))
1021 pa_memblockq_seek(u->sink_memblockq, -offset, PA_SEEK_RELATIVE, TRUE);
1022 else
1023 pa_memblockq_flush_write(u->sink_memblockq, TRUE);
1024
1025 pa_log_debug("Sink rewind (%lld)", (long long) offset);
1026
1027 u->recv_counter -= offset;
1028
1029 return 0;
1030
1031 case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
1032 struct snapshot *snapshot = (struct snapshot *) data;
1033
1034 source_output_snapshot_within_thread(u, snapshot);
1035 return 0;
1036 }
1037
1038 case SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME:
1039 apply_diff_time(u, offset);
1040 return 0;
1041
1042 }
1043
1044 return pa_source_output_process_msg(obj, code, data, offset, chunk);
1045 }
1046
1047 static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1048 struct userdata *u = PA_SINK_INPUT(obj)->userdata;
1049
1050 switch (code) {
1051
1052 case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
1053 size_t delay;
1054 pa_usec_t now, latency;
1055 struct snapshot *snapshot = (struct snapshot *) data;
1056
1057 pa_sink_input_assert_io_context(u->sink_input);
1058
1059 now = pa_rtclock_now();
1060 latency = pa_sink_get_latency_within_thread(u->sink_input->sink);
1061 delay = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);
1062
1063 delay = (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, delay) : delay);
1064
1065 snapshot->sink_now = now;
1066 snapshot->sink_latency = latency;
1067 snapshot->sink_delay = delay;
1068 snapshot->send_counter = u->send_counter;
1069 return 0;
1070 }
1071 }
1072
1073 return pa_sink_input_process_msg(obj, code, data, offset, chunk);
1074 }
1075
1076 /* Called from I/O thread context */
1077 static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
1078 struct userdata *u;
1079
1080 pa_sink_input_assert_ref(i);
1081 pa_assert_se(u = i->userdata);
1082
1083 pa_log_debug("Sink input update max rewind %lld", (long long) nbytes);
1084
1085 pa_memblockq_set_maxrewind(u->sink_memblockq, nbytes);
1086 pa_sink_set_max_rewind_within_thread(u->sink, nbytes);
1087 }
1088
1089 /* Called from I/O thread context */
1090 static void source_output_update_max_rewind_cb(pa_source_output *o, size_t nbytes) {
1091 struct userdata *u;
1092
1093 pa_source_output_assert_ref(o);
1094 pa_assert_se(u = o->userdata);
1095
1096 pa_log_debug("Source output update max rewind %lld", (long long) nbytes);
1097
1098 pa_source_set_max_rewind_within_thread(u->source, nbytes);
1099 }
1100
1101 /* Called from I/O thread context */
1102 static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
1103 struct userdata *u;
1104
1105 pa_sink_input_assert_ref(i);
1106 pa_assert_se(u = i->userdata);
1107
1108 pa_log_debug("Sink input update max request %lld", (long long) nbytes);
1109
1110 pa_sink_set_max_request_within_thread(u->sink, nbytes);
1111 }
1112
1113 /* Called from I/O thread context */
1114 static void sink_input_update_sink_requested_latency_cb(pa_sink_input *i) {
1115 struct userdata *u;
1116 pa_usec_t latency;
1117
1118 pa_sink_input_assert_ref(i);
1119 pa_assert_se(u = i->userdata);
1120
1121 latency = pa_sink_get_requested_latency_within_thread(i->sink);
1122
1123 pa_log_debug("Sink input update requested latency %lld", (long long) latency);
1124 }
1125
1126 /* Called from I/O thread context */
1127 static void source_output_update_source_requested_latency_cb(pa_source_output *o) {
1128 struct userdata *u;
1129 pa_usec_t latency;
1130
1131 pa_source_output_assert_ref(o);
1132 pa_assert_se(u = o->userdata);
1133
1134 latency = pa_source_get_requested_latency_within_thread(o->source);
1135
1136 pa_log_debug("source output update requested latency %lld", (long long) latency);
1137 }
1138
1139 /* Called from I/O thread context */
1140 static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) {
1141 struct userdata *u;
1142
1143 pa_sink_input_assert_ref(i);
1144 pa_assert_se(u = i->userdata);
1145
1146 pa_log_debug("Sink input update latency range %lld %lld",
1147 (long long) i->sink->thread_info.min_latency,
1148 (long long) i->sink->thread_info.max_latency);
1149
1150 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1151 }
1152
1153 /* Called from I/O thread context */
1154 static void source_output_update_source_latency_range_cb(pa_source_output *o) {
1155 struct userdata *u;
1156
1157 pa_source_output_assert_ref(o);
1158 pa_assert_se(u = o->userdata);
1159
1160 pa_log_debug("Source output update latency range %lld %lld",
1161 (long long) o->source->thread_info.min_latency,
1162 (long long) o->source->thread_info.max_latency);
1163
1164 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1165 }
1166
1167 /* Called from I/O thread context */
1168 static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) {
1169 struct userdata *u;
1170
1171 pa_sink_input_assert_ref(i);
1172 pa_assert_se(u = i->userdata);
1173
1174 pa_log_debug("Sink input update fixed latency %lld",
1175 (long long) i->sink->thread_info.fixed_latency);
1176
1177 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1178 }
1179
1180 /* Called from I/O thread context */
1181 static void source_output_update_source_fixed_latency_cb(pa_source_output *o) {
1182 struct userdata *u;
1183
1184 pa_source_output_assert_ref(o);
1185 pa_assert_se(u = o->userdata);
1186
1187 pa_log_debug("Source output update fixed latency %lld",
1188 (long long) o->source->thread_info.fixed_latency);
1189
1190 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1191 }
1192
1193 /* Called from output thread context */
1194 static void source_output_attach_cb(pa_source_output *o) {
1195 struct userdata *u;
1196
1197 pa_source_output_assert_ref(o);
1198 pa_source_output_assert_io_context(o);
1199 pa_assert_se(u = o->userdata);
1200
1201 pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll);
1202 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1203 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1204 pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o));
1205
1206 pa_log_debug("Source output %d attach", o->index);
1207
1208 pa_source_attach_within_thread(u->source);
1209
1210 u->rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
1211 o->source->thread_info.rtpoll,
1212 PA_RTPOLL_LATE,
1213 u->asyncmsgq);
1214 }
1215
1216 /* Called from I/O thread context */
1217 static void sink_input_attach_cb(pa_sink_input *i) {
1218 struct userdata *u;
1219
1220 pa_sink_input_assert_ref(i);
1221 pa_assert_se(u = i->userdata);
1222
1223 pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll);
1224 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1225
1226 /* (8.1) IF YOU NEED A FIXED BLOCK SIZE ADD THE LATENCY FOR ONE
1227 * BLOCK MINUS ONE SAMPLE HERE. SEE (7) */
1228 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1229
1230 /* (8.2) IF YOU NEED A FIXED BLOCK SIZE ROUND
1231 * pa_sink_input_get_max_request(i) UP TO MULTIPLES OF IT
1232 * HERE. SEE (6) */
1233 pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i));
1234 pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i));
1235
1236 pa_log_debug("Sink input %d attach", i->index);
1237
1238 u->rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
1239 i->sink->thread_info.rtpoll,
1240 PA_RTPOLL_LATE,
1241 u->asyncmsgq);
1242
1243 pa_sink_attach_within_thread(u->sink);
1244 }
1245
1246
1247 /* Called from output thread context */
1248 static void source_output_detach_cb(pa_source_output *o) {
1249 struct userdata *u;
1250
1251 pa_source_output_assert_ref(o);
1252 pa_source_output_assert_io_context(o);
1253 pa_assert_se(u = o->userdata);
1254
1255 pa_source_detach_within_thread(u->source);
1256 pa_source_set_rtpoll(u->source, NULL);
1257
1258 pa_log_debug("Source output %d detach", o->index);
1259
1260 if (u->rtpoll_item_read) {
1261 pa_rtpoll_item_free(u->rtpoll_item_read);
1262 u->rtpoll_item_read = NULL;
1263 }
1264 }
1265
1266 /* Called from I/O thread context */
1267 static void sink_input_detach_cb(pa_sink_input *i) {
1268 struct userdata *u;
1269
1270 pa_sink_input_assert_ref(i);
1271 pa_assert_se(u = i->userdata);
1272
1273 pa_sink_detach_within_thread(u->sink);
1274
1275 pa_sink_set_rtpoll(u->sink, NULL);
1276
1277 pa_log_debug("Sink input %d detach", i->index);
1278
1279 if (u->rtpoll_item_write) {
1280 pa_rtpoll_item_free(u->rtpoll_item_write);
1281 u->rtpoll_item_write = NULL;
1282 }
1283 }
1284
1285 /* Called from output thread context */
1286 static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
1287 struct userdata *u;
1288
1289 pa_source_output_assert_ref(o);
1290 pa_source_output_assert_io_context(o);
1291 pa_assert_se(u = o->userdata);
1292
1293 pa_log_debug("Source output %d state %d", o->index, state);
1294 }
1295
1296 /* Called from IO thread context */
1297 static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
1298 struct userdata *u;
1299
1300 pa_sink_input_assert_ref(i);
1301 pa_assert_se(u = i->userdata);
1302
1303 pa_log_debug("Sink input %d state %d", i->index, state);
1304
1305 /* If we are added for the first time, ask for a rewinding so that
1306 * we are heard right-away. */
1307 if (PA_SINK_INPUT_IS_LINKED(state) &&
1308 i->thread_info.state == PA_SINK_INPUT_INIT) {
1309 pa_log_debug("Requesting rewind due to state change.");
1310 pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
1311 }
1312 }
1313
1314 /* Called from main thread */
1315 static void source_output_kill_cb(pa_source_output *o) {
1316 struct userdata *u;
1317
1318 pa_source_output_assert_ref(o);
1319 pa_assert_ctl_context();
1320 pa_assert_se(u = o->userdata);
1321
1322 u->dead = TRUE;
1323
1324 /* The order here matters! We first kill the source output, followed
1325 * by the source. That means the source callbacks must be protected
1326 * against an unconnected source output! */
1327 pa_source_output_unlink(u->source_output);
1328 pa_source_unlink(u->source);
1329
1330 pa_source_output_unref(u->source_output);
1331 u->source_output = NULL;
1332
1333 pa_source_unref(u->source);
1334 u->source = NULL;
1335
1336 pa_log_debug("Source output kill %d", o->index);
1337
1338 pa_module_unload_request(u->module, TRUE);
1339 }
1340
1341 /* Called from main context */
1342 static void sink_input_kill_cb(pa_sink_input *i) {
1343 struct userdata *u;
1344
1345 pa_sink_input_assert_ref(i);
1346 pa_assert_se(u = i->userdata);
1347
1348 u->dead = TRUE;
1349
1350 /* The order here matters! We first kill the sink input, followed
1351 * by the sink. That means the sink callbacks must be protected
1352 * against an unconnected sink input! */
1353 pa_sink_input_unlink(u->sink_input);
1354 pa_sink_unlink(u->sink);
1355
1356 pa_sink_input_unref(u->sink_input);
1357 u->sink_input = NULL;
1358
1359 pa_sink_unref(u->sink);
1360 u->sink = NULL;
1361
1362 pa_log_debug("Sink input kill %d", i->index);
1363
1364 pa_module_unload_request(u->module, TRUE);
1365 }
1366
1367 /* Called from main thread */
1368 static pa_bool_t source_output_may_move_to_cb(pa_source_output *o, pa_source *dest) {
1369 struct userdata *u;
1370
1371 pa_source_output_assert_ref(o);
1372 pa_assert_ctl_context();
1373 pa_assert_se(u = o->userdata);
1374
1375 if (u->dead)
1376 return FALSE;
1377
1378 return (u->source != dest) && (u->sink != dest->monitor_of);
1379 }
1380
1381 /* Called from main context */
1382 static pa_bool_t sink_input_may_move_to_cb(pa_sink_input *i, pa_sink *dest) {
1383 struct userdata *u;
1384
1385 pa_sink_input_assert_ref(i);
1386 pa_assert_se(u = i->userdata);
1387
1388 if (u->dead)
1389 return FALSE;
1390
1391 return u->sink != dest;
1392 }
1393
1394 /* Called from main thread */
1395 static void source_output_moving_cb(pa_source_output *o, pa_source *dest) {
1396 struct userdata *u;
1397
1398 pa_source_output_assert_ref(o);
1399 pa_assert_ctl_context();
1400 pa_assert_se(u = o->userdata);
1401
1402 if (dest) {
1403 pa_source_set_asyncmsgq(u->source, dest->asyncmsgq);
1404 pa_source_update_flags(u->source, PA_SOURCE_LATENCY|PA_SOURCE_DYNAMIC_LATENCY, dest->flags);
1405 } else
1406 pa_source_set_asyncmsgq(u->source, NULL);
1407
1408 if (u->source_auto_desc && dest) {
1409 const char *z;
1410 pa_proplist *pl;
1411
1412 pl = pa_proplist_new();
1413 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1414 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Source %s on %s",
1415 pa_proplist_gets(u->source->proplist, "device.echo-cancel.name"), z ? z : dest->name);
1416
1417 pa_source_update_proplist(u->source, PA_UPDATE_REPLACE, pl);
1418 pa_proplist_free(pl);
1419 }
1420 }
1421
1422 /* Called from main context */
1423 static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) {
1424 struct userdata *u;
1425
1426 pa_sink_input_assert_ref(i);
1427 pa_assert_se(u = i->userdata);
1428
1429 if (dest) {
1430 pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq);
1431 pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags);
1432 } else
1433 pa_sink_set_asyncmsgq(u->sink, NULL);
1434
1435 if (u->sink_auto_desc && dest) {
1436 const char *z;
1437 pa_proplist *pl;
1438
1439 pl = pa_proplist_new();
1440 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1441 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Sink %s on %s",
1442 pa_proplist_gets(u->sink->proplist, "device.echo-cancel.name"), z ? z : dest->name);
1443
1444 pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl);
1445 pa_proplist_free(pl);
1446 }
1447 }
1448
1449 /* Called from main context */
1450 static void sink_input_volume_changed_cb(pa_sink_input *i) {
1451 struct userdata *u;
1452
1453 pa_sink_input_assert_ref(i);
1454 pa_assert_se(u = i->userdata);
1455
1456 pa_sink_volume_changed(u->sink, &i->volume);
1457 }
1458
1459 /* Called from main context */
1460 static void sink_input_mute_changed_cb(pa_sink_input *i) {
1461 struct userdata *u;
1462
1463 pa_sink_input_assert_ref(i);
1464 pa_assert_se(u = i->userdata);
1465
1466 pa_sink_mute_changed(u->sink, i->muted);
1467 }
1468
1469 static pa_echo_canceller_method_t get_ec_method_from_string(const char *method) {
1470 if (pa_streq(method, "speex"))
1471 return PA_ECHO_CANCELLER_SPEEX;
1472 else if (pa_streq(method, "adrian"))
1473 return PA_ECHO_CANCELLER_ADRIAN;
1474 #ifdef HAVE_WEBRTC
1475 else if (pa_streq(method, "webrtc"))
1476 return PA_ECHO_CANCELLER_WEBRTC;
1477 #endif
1478 else
1479 return PA_ECHO_CANCELLER_INVALID;
1480 }
1481
1482 /* Common initialisation bits between module-echo-cancel and the standalone test program */
1483 static int init_common(pa_modargs *ma, struct userdata *u, pa_sample_spec *source_ss, pa_channel_map *source_map) {
1484 pa_echo_canceller_method_t ec_method;
1485
1486 if (pa_modargs_get_sample_spec_and_channel_map(ma, source_ss, source_map, PA_CHANNEL_MAP_DEFAULT) < 0) {
1487 pa_log("Invalid sample format specification or channel map");
1488 goto fail;
1489 }
1490
1491 u->ec = pa_xnew0(pa_echo_canceller, 1);
1492 if (!u->ec) {
1493 pa_log("Failed to alloc echo canceller");
1494 goto fail;
1495 }
1496
1497 if ((ec_method = get_ec_method_from_string(pa_modargs_get_value(ma, "aec_method", DEFAULT_ECHO_CANCELLER))) < 0) {
1498 pa_log("Invalid echo canceller implementation");
1499 goto fail;
1500 }
1501
1502 u->ec->init = ec_table[ec_method].init;
1503 u->ec->play = ec_table[ec_method].play;
1504 u->ec->record = ec_table[ec_method].record;
1505 u->ec->set_drift = ec_table[ec_method].set_drift;
1506 u->ec->run = ec_table[ec_method].run;
1507 u->ec->done = ec_table[ec_method].done;
1508
1509 return 0;
1510
1511 fail:
1512 return -1;
1513 }
1514
1515
1516 int pa__init(pa_module*m) {
1517 struct userdata *u;
1518 pa_sample_spec source_ss, sink_ss;
1519 pa_channel_map source_map, sink_map;
1520 pa_modargs *ma;
1521 pa_source *source_master=NULL;
1522 pa_sink *sink_master=NULL;
1523 pa_source_output_new_data source_output_data;
1524 pa_sink_input_new_data sink_input_data;
1525 pa_source_new_data source_data;
1526 pa_sink_new_data sink_data;
1527 pa_memchunk silence;
1528 uint32_t temp;
1529 pa_bool_t use_volume_sharing = TRUE;
1530
1531 pa_assert(m);
1532
1533 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1534 pa_log("Failed to parse module arguments.");
1535 goto fail;
1536 }
1537
1538 if (!(source_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source_master", NULL), PA_NAMEREG_SOURCE))) {
1539 pa_log("Master source not found");
1540 goto fail;
1541 }
1542 pa_assert(source_master);
1543
1544 if (!(sink_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink_master", NULL), PA_NAMEREG_SINK))) {
1545 pa_log("Master sink not found");
1546 goto fail;
1547 }
1548 pa_assert(sink_master);
1549
1550 if (source_master->monitor_of == sink_master) {
1551 pa_log("Can't cancel echo between a sink and its monitor");
1552 goto fail;
1553 }
1554
1555 source_ss = source_master->sample_spec;
1556 source_ss.rate = DEFAULT_RATE;
1557 source_ss.channels = DEFAULT_CHANNELS;
1558 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1559
1560 sink_ss = sink_master->sample_spec;
1561 sink_map = sink_master->channel_map;
1562
1563 if (pa_modargs_get_value_boolean(ma, "use_volume_sharing", &use_volume_sharing) < 0) {
1564 pa_log("use_volume_sharing= expects a boolean argument");
1565 goto fail;
1566 }
1567
1568 u = pa_xnew0(struct userdata, 1);
1569 if (!u) {
1570 pa_log("Failed to alloc userdata");
1571 goto fail;
1572 }
1573 u->core = m->core;
1574 u->module = m;
1575 m->userdata = u;
1576 u->dead = FALSE;
1577
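    /* adjust_time is given in seconds and adjust_threshold in milliseconds on
     * the module command line (see PA_MODULE_USAGE above); both are stored
     * internally in microseconds. */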
1578 temp = DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC;
1579 if (pa_modargs_get_value_u32(ma, "adjust_time", &temp) < 0) {
1580 pa_log("Failed to parse adjust_time value");
1581 goto fail;
1582 }
1583
1584 if (temp != DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC)
1585 u->adjust_time = temp * PA_USEC_PER_SEC;
1586 else
1587 u->adjust_time = DEFAULT_ADJUST_TIME_USEC;
1588
1589 temp = DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC;
1590 if (pa_modargs_get_value_u32(ma, "adjust_threshold", &temp) < 0) {
1591 pa_log("Failed to parse adjust_threshold value");
1592 goto fail;
1593 }
1594
1595 if (temp != DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC)
1596 u->adjust_threshold = temp * PA_USEC_PER_MSEC;
1597 else
1598 u->adjust_threshold = DEFAULT_ADJUST_TOLERANCE;
1599
1600 u->save_aec = DEFAULT_SAVE_AEC;
1601 if (pa_modargs_get_value_boolean(ma, "save_aec", &u->save_aec) < 0) {
1602 pa_log("Failed to parse save_aec value");
1603 goto fail;
1604 }
1605
1606 u->autoloaded = DEFAULT_AUTOLOADED;
1607 if (pa_modargs_get_value_boolean(ma, "autoloaded", &u->autoloaded) < 0) {
1608 pa_log("Failed to parse autoloaded value");
1609 goto fail;
1610 }
1611
1612 if (init_common(ma, u, &source_ss, &source_map))
1613 goto fail;
1614
1615 u->asyncmsgq = pa_asyncmsgq_new(0);
1616 u->need_realign = TRUE;
1617
1618 if (u->ec->init) {
1619 if (!u->ec->init(u->core, u->ec, &source_ss, &source_map, &sink_ss, &sink_map, &u->blocksize, pa_modargs_get_value(ma, "aec_args", NULL))) {
1620 pa_log("Failed to init AEC engine");
1621 goto fail;
1622 }
1623 }
1624
1625 if (u->ec->params.drift_compensation)
1626 pa_assert(u->ec->set_drift);
1627
1628 /* Create source */
1629 pa_source_new_data_init(&source_data);
1630 source_data.driver = __FILE__;
1631 source_data.module = m;
1632 if (!(source_data.name = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL))))
1633 source_data.name = pa_sprintf_malloc("%s.echo-cancel", source_master->name);
1634 pa_source_new_data_set_sample_spec(&source_data, &source_ss);
1635 pa_source_new_data_set_channel_map(&source_data, &source_map);
1636 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, source_master->name);
1637 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1638 if (!u->autoloaded)
1639 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1640 pa_proplist_sets(source_data.proplist, "device.echo-cancel.name", source_data.name);
1641
1642 if (pa_modargs_get_proplist(ma, "source_properties", source_data.proplist, PA_UPDATE_REPLACE) < 0) {
1643 pa_log("Invalid properties");
1644 pa_source_new_data_done(&source_data);
1645 goto fail;
1646 }
1647
1648 if ((u->source_auto_desc = !pa_proplist_contains(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1649 const char *z;
1650
1651 z = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1652 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Source %s on %s", source_data.name, z ? z : source_master->name);
1653 }
1654
1655 u->source = pa_source_new(m->core, &source_data, (source_master->flags & (PA_SOURCE_LATENCY | PA_SOURCE_DYNAMIC_LATENCY))
1656 | (use_volume_sharing ? PA_SOURCE_SHARE_VOLUME_WITH_MASTER : 0));
1657 pa_source_new_data_done(&source_data);
1658
1659 if (!u->source) {
1660 pa_log("Failed to create source.");
1661 goto fail;
1662 }
1663
1664 u->source->parent.process_msg = source_process_msg_cb;
1665 u->source->set_state = source_set_state_cb;
1666 u->source->update_requested_latency = source_update_requested_latency_cb;
1667 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1668 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1669 if (!use_volume_sharing) {
1670 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1671 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1672 pa_source_enable_decibel_volume(u->source, TRUE);
1673 }
1674 u->source->userdata = u;
1675
1676 pa_source_set_asyncmsgq(u->source, source_master->asyncmsgq);
1677
1678 /* Create sink */
1679 pa_sink_new_data_init(&sink_data);
1680 sink_data.driver = __FILE__;
1681 sink_data.module = m;
1682 if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL))))
1683 sink_data.name = pa_sprintf_malloc("%s.echo-cancel", sink_master->name);
1684 pa_sink_new_data_set_sample_spec(&sink_data, &sink_ss);
1685 pa_sink_new_data_set_channel_map(&sink_data, &sink_map);
1686 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, sink_master->name);
1687 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1688 if (!u->autoloaded)
1689 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1690 pa_proplist_sets(sink_data.proplist, "device.echo-cancel.name", sink_data.name);
1691
1692 if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) {
1693 pa_log("Invalid properties");
1694 pa_sink_new_data_done(&sink_data);
1695 goto fail;
1696 }
1697
1698 if ((u->sink_auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1699 const char *z;
1700
1701 z = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1702 pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Sink %s on %s", sink_data.name, z ? z : sink_master->name);
1703 }
1704
1705 u->sink = pa_sink_new(m->core, &sink_data, (sink_master->flags & (PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY))
1706 | (use_volume_sharing ? PA_SINK_SHARE_VOLUME_WITH_MASTER : 0));
1707 pa_sink_new_data_done(&sink_data);
1708
1709 if (!u->sink) {
1710 pa_log("Failed to create sink.");
1711 goto fail;
1712 }
1713
1714 u->sink->parent.process_msg = sink_process_msg_cb;
1715 u->sink->set_state = sink_set_state_cb;
1716 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1717 u->sink->request_rewind = sink_request_rewind_cb;
1718 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1719 if (!use_volume_sharing) {
1720 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1721 pa_sink_enable_decibel_volume(u->sink, TRUE);
1722 }
1723 u->sink->userdata = u;
1724
1725 pa_sink_set_asyncmsgq(u->sink, sink_master->asyncmsgq);
1726
1727 /* Create source output */
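    /* The source output taps the master source: captured (near-end) audio is
     * pushed into the canceller and the processed result ends up on the
     * virtual source created above (destination_source ties them together). */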
1728 pa_source_output_new_data_init(&source_output_data);
1729 source_output_data.driver = __FILE__;
1730 source_output_data.module = m;
1731 pa_source_output_new_data_set_source(&source_output_data, source_master, FALSE);
1732 source_output_data.destination_source = u->source;
1733 /* FIXME
1734 source_output_data.flags = PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND; */
1735
1736 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Source Stream");
1737 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1738 pa_source_output_new_data_set_sample_spec(&source_output_data, &source_ss);
1739 pa_source_output_new_data_set_channel_map(&source_output_data, &source_map);
1740
1741 pa_source_output_new(&u->source_output, m->core, &source_output_data);
1742 pa_source_output_new_data_done(&source_output_data);
1743
1744 if (!u->source_output)
1745 goto fail;
1746
1747 u->source_output->parent.process_msg = source_output_process_msg_cb;
1748 u->source_output->push = source_output_push_cb;
1749 u->source_output->process_rewind = source_output_process_rewind_cb;
1750 u->source_output->update_max_rewind = source_output_update_max_rewind_cb;
1751 u->source_output->update_source_requested_latency = source_output_update_source_requested_latency_cb;
1752 u->source_output->update_source_latency_range = source_output_update_source_latency_range_cb;
1753 u->source_output->update_source_fixed_latency = source_output_update_source_fixed_latency_cb;
1754 u->source_output->kill = source_output_kill_cb;
1755 u->source_output->attach = source_output_attach_cb;
1756 u->source_output->detach = source_output_detach_cb;
1757 u->source_output->state_change = source_output_state_change_cb;
1758 u->source_output->may_move_to = source_output_may_move_to_cb;
1759 u->source_output->moving = source_output_moving_cb;
1760 u->source_output->userdata = u;
1761
1762 u->source->output_from_master = u->source_output;
1763
1764 /* Create sink input */
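    /* The sink input forwards audio written to the virtual sink down to the
     * master sink, while a copy of this played-back (far-end) signal is kept
     * in sink_memblockq below for the canceller to subtract from the capture
     * stream. */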
1765 pa_sink_input_new_data_init(&sink_input_data);
1766 sink_input_data.driver = __FILE__;
1767 sink_input_data.module = m;
1768 pa_sink_input_new_data_set_sink(&sink_input_data, sink_master, FALSE);
1769 sink_input_data.origin_sink = u->sink;
1770 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Sink Stream");
1771 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1772 pa_sink_input_new_data_set_sample_spec(&sink_input_data, &sink_ss);
1773 pa_sink_input_new_data_set_channel_map(&sink_input_data, &sink_map);
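    /* Variable-rate stream: the periodic adjust_time handler can then nudge
     * this stream's rate to keep playback and capture in sync when the
     * canceller does not do its own drift compensation. */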
1774 sink_input_data.flags = PA_SINK_INPUT_VARIABLE_RATE;
1775
1776 pa_sink_input_new(&u->sink_input, m->core, &sink_input_data);
1777 pa_sink_input_new_data_done(&sink_input_data);
1778
1779 if (!u->sink_input)
1780 goto fail;
1781
1782 u->sink_input->parent.process_msg = sink_input_process_msg_cb;
1783 u->sink_input->pop = sink_input_pop_cb;
1784 u->sink_input->process_rewind = sink_input_process_rewind_cb;
1785 u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
1786 u->sink_input->update_max_request = sink_input_update_max_request_cb;
1787 u->sink_input->update_sink_requested_latency = sink_input_update_sink_requested_latency_cb;
1788 u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb;
1789 u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb;
1790 u->sink_input->kill = sink_input_kill_cb;
1791 u->sink_input->attach = sink_input_attach_cb;
1792 u->sink_input->detach = sink_input_detach_cb;
1793 u->sink_input->state_change = sink_input_state_change_cb;
1794 u->sink_input->may_move_to = sink_input_may_move_to_cb;
1795 u->sink_input->moving = sink_input_moving_cb;
1796 if (!use_volume_sharing)
1797 u->sink_input->volume_changed = sink_input_volume_changed_cb;
1798 u->sink_input->mute_changed = sink_input_mute_changed_cb;
1799 u->sink_input->userdata = u;
1800
1801 u->sink->input_to_master = u->sink_input;
1802
1803 pa_sink_input_get_silence(u->sink_input, &silence);
1804
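    /* These queues buffer the near-end (captured) and far-end (played) streams
     * until matching blocks are available for the canceller. The arguments
     * after the name are, roughly: start index, maximum length, target length,
     * sample spec, prebuf, minimum request, maximum rewind and the silence
     * chunk used on underruns. */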
1805 u->source_memblockq = pa_memblockq_new("module-echo-cancel source_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1806 &source_ss, 1, 1, 0, &silence);
1807 u->sink_memblockq = pa_memblockq_new("module-echo-cancel sink_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1808 &sink_ss, 1, 1, 0, &silence);
1809
1810 pa_memblock_unref(silence.memblock);
1811
1812 if (!u->source_memblockq || !u->sink_memblockq) {
1813 pa_log("Failed to create memblockq.");
1814 goto fail;
1815 }
1816
1817 if (u->adjust_time > 0 && !u->ec->params.drift_compensation)
1818 u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time, time_callback, u);
1819 else if (u->ec->params.drift_compensation) {
1820 pa_log_info("Canceller does drift compensation -- built-in compensation will be disabled");
1821 u->adjust_time = 0;
1822 /* Perform resync just once to give the canceller a leg up */
1823 pa_atomic_store(&u->request_resync, 1);
1824 }
1825
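    /* Optional debug dump: the raw captured, played and cancelled streams (and
     * the drift data, if applicable) go to /tmp so a session can be replayed
     * offline through the ECHO_CANCEL_TEST program at the bottom of this file. */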
1826 if (u->save_aec) {
1827 pa_log("Creating AEC files in /tmp");
1828 u->captured_file = fopen("/tmp/aec_rec.sw", "wb");
1829 if (u->captured_file == NULL)
1830 perror ("fopen failed");
1831 u->played_file = fopen("/tmp/aec_play.sw", "wb");
1832 if (u->played_file == NULL)
1833 perror ("fopen failed");
1834 u->canceled_file = fopen("/tmp/aec_out.sw", "wb");
1835 if (u->canceled_file == NULL)
1836 perror ("fopen failed");
1837 if (u->ec->params.drift_compensation) {
1838 u->drift_file = fopen("/tmp/aec_drift.txt", "w");
1839 if (u->drift_file == NULL)
1840 perror ("fopen failed");
1841 }
1842 }
1843
1844 pa_sink_put(u->sink);
1845 pa_source_put(u->source);
1846
1847 pa_sink_input_put(u->sink_input);
1848 pa_source_output_put(u->source_output);
1849
1850 pa_modargs_free(ma);
1851
1852 return 0;
1853
1854 fail:
1855 if (ma)
1856 pa_modargs_free(ma);
1857
1858 pa__done(m);
1859
1860 return -1;
1861 }
1862
1863 int pa__get_n_used(pa_module *m) {
1864 struct userdata *u;
1865
1866 pa_assert(m);
1867 pa_assert_se(u = m->userdata);
1868
1869 return pa_sink_linked_by(u->sink) + pa_source_linked_by(u->source);
1870 }
1871
1872 void pa__done(pa_module*m) {
1873 struct userdata *u;
1874
1875 pa_assert(m);
1876
1877 if (!(u = m->userdata))
1878 return;
1879
1880 u->dead = TRUE;
1881
1882 /* See comments in source_output_kill_cb() above regarding
1883 * destruction order! */
1884
1885 if (u->time_event)
1886 u->core->mainloop->time_free(u->time_event);
1887
1888 if (u->source_output)
1889 pa_source_output_unlink(u->source_output);
1890 if (u->sink_input)
1891 pa_sink_input_unlink(u->sink_input);
1892
1893 if (u->source)
1894 pa_source_unlink(u->source);
1895 if (u->sink)
1896 pa_sink_unlink(u->sink);
1897
1898 if (u->source_output)
1899 pa_source_output_unref(u->source_output);
1900 if (u->sink_input)
1901 pa_sink_input_unref(u->sink_input);
1902
1903 if (u->source)
1904 pa_source_unref(u->source);
1905 if (u->sink)
1906 pa_sink_unref(u->sink);
1907
1908 if (u->source_memblockq)
1909 pa_memblockq_free(u->source_memblockq);
1910 if (u->sink_memblockq)
1911 pa_memblockq_free(u->sink_memblockq);
1912
1913 if (u->ec) {
1914 if (u->ec->done)
1915 u->ec->done(u->ec);
1916
1917 pa_xfree(u->ec);
1918 }
1919
1920 if (u->asyncmsgq)
1921 pa_asyncmsgq_unref(u->asyncmsgq);
1922
1923 if (u->save_aec) {
1924 if (u->played_file)
1925 fclose(u->played_file);
1926 if (u->captured_file)
1927 fclose(u->captured_file);
1928 if (u->canceled_file)
1929 fclose(u->canceled_file);
1930 if (u->drift_file)
1931 fclose(u->drift_file);
1932 }
1933
1934 pa_xfree(u);
1935 }
1936
1937 #ifdef ECHO_CANCEL_TEST
1938 /*
1939  * Stand-alone test program for running the canceller on pre-recorded files.
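 *
 * Example invocation (the binary name and arguments here are illustrative):
 *
 *     echo-cancel-test played.sw captured.sw cancelled.sw \
 *         "aec_method=speex" "" drift.txt
 *
 * The optional drift file is a plain-text stream of the records the parser
 * below understands:
 *
 *     d <drift>   pass a new drift factor to set_drift()
 *     c <bytes>   feed <bytes> of captured data to record()
 *     p <bytes>   feed <bytes> of played data to play()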
1940 */
1941 int main(int argc, char* argv[]) {
1942 struct userdata u;
1943 pa_sample_spec source_ss, sink_ss;
1944 pa_channel_map source_map, sink_map;
1945 pa_modargs *ma = NULL;
1946 uint8_t *rdata = NULL, *pdata = NULL, *cdata = NULL;
1947 int ret = 0, unused, i;
1948 char c;
1949 float drift;
1950
1951 pa_memzero(&u, sizeof(u));
1952
1953 if (argc < 4 || argc > 7) {
1954 goto usage;
1955 }
1956
1957 u.ec = pa_xnew0(pa_echo_canceller, 1);
1958 if (!u.ec) {
1959 pa_log("Failed to alloc echo canceller");
1960 goto fail;
1961 }
1962
1963 u.captured_file = fopen(argv[2], "r");
1964 if (u.captured_file == NULL) {
1965 perror ("fopen failed");
1966 goto fail;
1967 }
1968 u.played_file = fopen(argv[1], "r");
1969 if (u.played_file == NULL) {
1970 perror ("fopen failed");
1971 goto fail;
1972 }
1973 u.canceled_file = fopen(argv[3], "wb");
1974 if (u.canceled_file == NULL) {
1975 perror ("fopen failed");
1976 goto fail;
1977 }
1978
1979 u.core = pa_xnew0(pa_core, 1);
1980 u.core->cpu_info.cpu_type = PA_CPU_X86;
1981 u.core->cpu_info.flags.x86 |= PA_CPU_X86_SSE;
1982
1983 if (!(ma = pa_modargs_new(argc > 4 ? argv[4] : NULL, valid_modargs))) {
1984 pa_log("Failed to parse module arguments.");
1985 goto fail;
1986 }
1987
1988 source_ss.format = PA_SAMPLE_S16LE;
1989 source_ss.rate = DEFAULT_RATE;
1990 source_ss.channels = DEFAULT_CHANNELS;
1991 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1992
1993 init_common(ma, &u, &source_ss, &source_map);
1994
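    /* init_common() applies the module arguments; the canceller's init() then
     * fixes the final specs and the block size that determines how much data
     * is read and written per iteration below. */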
1995 if (!u.ec->init(u.core, u.ec, &source_ss, &source_map, &sink_ss, &sink_map, &u.blocksize,
1996 (argc > 5) ? argv[5] : NULL)) {
1997 pa_log("Failed to init AEC engine");
1998 goto fail;
1999 }
2000
2001 if (u.ec->params.drift_compensation) {
2002 if (argc < 7) {
2003 pa_log("Drift compensation enabled but drift file not specified");
2004 goto fail;
2005 }
2006
2007 u.drift_file = fopen(argv[6], "r");
2008
2009 if (u.drift_file == NULL) {
2010 perror ("fopen failed");
2011 goto fail;
2012 }
2013 }
2014
2015 rdata = pa_xmalloc(u.blocksize);
2016 pdata = pa_xmalloc(u.blocksize);
2017 cdata = pa_xmalloc(u.blocksize);
2018
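    /* Two processing modes: without drift compensation, fixed blocksize chunks
     * of captured and played data are fed to run(); with drift compensation,
     * the drift file drives set_drift(), record() and play() directly,
     * mirroring what the module does at runtime. */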
2019 if (!u.ec->params.drift_compensation) {
2020 while (fread(rdata, u.blocksize, 1, u.captured_file) > 0) {
2021 if (fread(pdata, u.blocksize, 1, u.played_file) == 0) {
2022 perror("Played file ended before captured file");
2023 goto fail;
2024 }
2025
2026 u.ec->run(u.ec, rdata, pdata, cdata);
2027
2028 unused = fwrite(cdata, u.blocksize, 1, u.canceled_file);
2029 }
2030 } else {
2031 while (fscanf(u.drift_file, "%c", &c) > 0) {
2032 switch (c) {
2033 case 'd':
2034 if (fscanf(u.drift_file, "%a", &drift) != 1) {
2035 perror("Drift file incomplete");
2036 goto fail;
2037 }
2038
2039 u.ec->set_drift(u.ec, drift);
2040
2041 break;
2042
2043 case 'c':
2044 if (fscanf(u.drift_file, "%d", &i) != 1) {
2045 perror("Drift file incomplete");
2046 goto fail;
2047 }
2048
2049 if (fread(rdata, i, 1, u.captured_file) <= 0) {
2050 perror("Captured file ended prematurely");
2051 goto fail;
2052 }
2053
2054 u.ec->record(u.ec, rdata, cdata);
2055
2056 unused = fwrite(cdata, i, 1, u.canceled_file);
2057
2058 break;
2059
2060 case 'p':
2061 if (fscanf(u.drift_file, "%d", &i) != 1) {
2062 perror("Drift file incomplete");
2063 goto fail;
2064 }
2065
2066 if (fread(pdata, i, 1, u.played_file) <= 0) {
2067 perror("Played file ended prematurely");
2068 goto fail;
2069 }
2070
2071 u.ec->play(u.ec, pdata);
2072
2073 break;
2074 }
2075 }
2076
2077 if (fread(rdata, 1, 1, u.captured_file) > 0)
2078 pa_log("Not all capture data was consumed");
2079 if (fread(pdata, 1, 1, u.played_file) > 0)
2080 pa_log("Not all playback data was consumed");
2081 }
2082
2083 u.ec->done(u.ec);
2084
2085 fclose(u.captured_file);
2086 fclose(u.played_file);
2087 fclose(u.canceled_file);
2088 if (u.drift_file)
2089 fclose(u.drift_file);
2090
2091 out:
2092 pa_xfree(rdata);
2093 pa_xfree(pdata);
2094 pa_xfree(cdata);
2095
2096 pa_xfree(u.ec);
2097 pa_xfree(u.core);
2098
2099 if (ma)
2100 pa_modargs_free(ma);
2101
2102 return ret;
2103
2104 usage:
2105 pa_log("Usage: %s play_file rec_file out_file [module args] [aec_args] [drift_file]", argv[0]);
2106
2107 fail:
2108 ret = -1;
2109 goto out;
2110 }
2111 #endif /* ECHO_CANCEL_TEST */