echo-cancel: Plug in WebRTC drift compensation
pulseaudio / src/modules/echo-cancel/module-echo-cancel.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2010 Wim Taymans <wim.taymans@gmail.com>
5
6 Based on module-virtual-sink.c
7 module-virtual-source.c
8 module-loopback.c
9
10 Copyright 2010 Intel Corporation
11 Contributor: Pierre-Louis Bossart <pierre-louis.bossart@intel.com>
12
13 PulseAudio is free software; you can redistribute it and/or modify
14 it under the terms of the GNU Lesser General Public License as published
15 by the Free Software Foundation; either version 2.1 of the License,
16 or (at your option) any later version.
17
18 PulseAudio is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public License
24 along with PulseAudio; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
26 USA.
27 ***/
28
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32
33 #include <stdio.h>
34 #include <math.h>
35
36 #include "echo-cancel.h"
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/timeval.h>
40 #include <pulse/rtclock.h>
41
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/atomic.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/namereg.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/module.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/modargs.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/sample-util.h>
54 #include <pulsecore/ltdl-helper.h>
55
56 #include "module-echo-cancel-symdef.h"
57
58 PA_MODULE_AUTHOR("Wim Taymans");
59 PA_MODULE_DESCRIPTION("Echo Cancellation");
60 PA_MODULE_VERSION(PACKAGE_VERSION);
61 PA_MODULE_LOAD_ONCE(FALSE);
62 PA_MODULE_USAGE(
63 _("source_name=<name for the source> "
64 "source_properties=<properties for the source> "
65 "source_master=<name of source to filter> "
66 "sink_name=<name for the sink> "
67 "sink_properties=<properties for the sink> "
68 "sink_master=<name of sink to filter> "
69 "adjust_time=<how often to readjust rates in s> "
70 "adjust_threshold=<how much drift to readjust after in ms> "
71 "format=<sample format> "
72 "rate=<sample rate> "
73 "channels=<number of channels> "
74 "channel_map=<channel map> "
75 "aec_method=<implementation to use> "
76 "aec_args=<parameters for the AEC engine> "
77 "save_aec=<save AEC data in /tmp> "
78 "autoloaded=<set if this module is being loaded automatically> "
79 "use_volume_sharing=<yes or no> "
80 ));
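/* An illustrative way to load this module (placeholder device names, not taken
 * from this file), e.g. in default.pa or via pactl:
 *
 *   load-module module-echo-cancel aec_method=webrtc source_master=<your-mic-source> sink_master=<your-speaker-sink>
 *
 * Applications then record from and play to the newly created echo-cancel
 * source and sink. aec_method=webrtc is only available when PulseAudio is
 * built with WebRTC support (HAVE_WEBRTC). */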
81
82 /* NOTE: Make sure the enum and ec_table are maintained in the correct order */
83 typedef enum {
84 PA_ECHO_CANCELLER_INVALID = -1,
85 PA_ECHO_CANCELLER_SPEEX = 0,
86 PA_ECHO_CANCELLER_ADRIAN,
87 #ifdef HAVE_WEBRTC
88 PA_ECHO_CANCELLER_WEBRTC,
89 #endif
90 } pa_echo_canceller_method_t;
91
92 #define DEFAULT_ECHO_CANCELLER "speex"
93
94 static const pa_echo_canceller ec_table[] = {
95 {
96 /* Speex */
97 .init = pa_speex_ec_init,
98 .run = pa_speex_ec_run,
99 .done = pa_speex_ec_done,
100 },
101 {
102 /* Adrian Andre's NLMS implementation */
103 .init = pa_adrian_ec_init,
104 .run = pa_adrian_ec_run,
105 .done = pa_adrian_ec_done,
106 },
107 #ifdef HAVE_WEBRTC
108 {
109 /* WebRTC's audio processing engine */
110 .init = pa_webrtc_ec_init,
111 .play = pa_webrtc_ec_play,
112 .record = pa_webrtc_ec_record,
113 .set_drift = pa_webrtc_ec_set_drift,
114 .run = pa_webrtc_ec_run,
115 .done = pa_webrtc_ec_done,
116 },
117 #endif
118 };
119
120 #define DEFAULT_RATE 32000
121 #define DEFAULT_CHANNELS 1
122 #define DEFAULT_ADJUST_TIME_USEC (1*PA_USEC_PER_SEC)
123 #define DEFAULT_ADJUST_TOLERANCE (5*PA_USEC_PER_MSEC)
124 #define DEFAULT_SAVE_AEC FALSE
125 #define DEFAULT_AUTOLOADED FALSE
126
127 #define MEMBLOCKQ_MAXLENGTH (16*1024*1024)
128
129 /* Can only be used in main context */
130 #define IS_ACTIVE(u) ((pa_source_get_state((u)->source) == PA_SOURCE_RUNNING) && \
131 (pa_sink_get_state((u)->sink) == PA_SINK_RUNNING))
132
133 /* This module creates a new (virtual) source and sink.
134 *
135 * The data sent to the new sink is kept in a memblockq before being
136 * forwarded to the real sink_master.
137 *
138 * Data read from source_master is matched against the saved sink data and
139 * echo canceled data is then pushed onto the new source.
140 *
141 * Both source and sink masters have their own threads to push/pull data
142 * respectively. We however perform all our actions in the source IO thread.
143 * To do this we send all played samples to the source IO thread where they
144 * are then pushed into the memblockq.
145 *
146 * Alignment is performed in two steps:
147 *
148 * 1) When something happens that requires a quick adjustment of the alignment
149 * of capture and playback samples, we perform a resync. This adjusts the
150 * position in the playback memblockq to the requested sample. A quick
151 * adjustment is needed to move the playback samples before the capture
152 * samples (because otherwise the echo canceller does not work), or when the
153 * playback pointer drifts too far away.
154 *
155 * 2) Periodically check the difference between capture and playback. We use a
156 * low and a high watermark for adjusting the alignment. Playback should always
157 * be before capture and the difference should not be bigger than one frame
158 * size. We would ideally like to resample the sink_input, but most drivers
159 * don't give enough accuracy to be able to do that right now.
160 */
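/* Summarising the description above as a rough data-flow sketch (the names are
 * the objects created by this module, arrows follow the audio):
 *
 *   playback app -> echo-cancel sink -> sink_input -> master sink
 *                      (played samples are also forwarded, via the asyncmsgq,
 *                       to the source I/O thread and pushed into sink_memblockq)
 *   master source -> source_output -> AEC engine (reads sink_memblockq)
 *                 -> echo-cancel source -> recording app
 *
 * All cancellation work happens in the source I/O thread. */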
161
162 struct snapshot {
163 pa_usec_t sink_now;
164 pa_usec_t sink_latency;
165 size_t sink_delay;
166 int64_t send_counter;
167
168 pa_usec_t source_now;
169 pa_usec_t source_latency;
170 size_t source_delay;
171 int64_t recv_counter;
172 size_t rlen;
173 size_t plen;
174 };
175
176 struct userdata {
177 pa_core *core;
178 pa_module *module;
179
180 pa_bool_t autoloaded;
181 pa_bool_t dead;
182 pa_bool_t save_aec;
183
184 pa_echo_canceller *ec;
185 uint32_t blocksize;
186
187 pa_bool_t need_realign;
188
189 /* to wakeup the source I/O thread */
190 pa_asyncmsgq *asyncmsgq;
191 pa_rtpoll_item *rtpoll_item_read, *rtpoll_item_write;
192
193 pa_source *source;
194 pa_bool_t source_auto_desc;
195 pa_source_output *source_output;
196 pa_memblockq *source_memblockq; /* echo canceller needs fixed-size chunks */
197 size_t source_skip;
198
199 pa_sink *sink;
200 pa_bool_t sink_auto_desc;
201 pa_sink_input *sink_input;
202 pa_memblockq *sink_memblockq;
203 int64_t send_counter; /* updated in sink IO thread */
204 int64_t recv_counter;
205 size_t sink_skip;
206
207 /* Bytes left over from previous iteration */
208 size_t sink_rem;
209 size_t source_rem;
210
211 pa_atomic_t request_resync;
212
213 pa_time_event *time_event;
214 pa_usec_t adjust_time;
215 int adjust_threshold;
216
217 FILE *captured_file;
218 FILE *played_file;
219 FILE *canceled_file;
220 };
221
222 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot);
223
224 static const char* const valid_modargs[] = {
225 "source_name",
226 "source_properties",
227 "source_master",
228 "sink_name",
229 "sink_properties",
230 "sink_master",
231 "adjust_time",
232 "adjust_threshold",
233 "format",
234 "rate",
235 "channels",
236 "channel_map",
237 "aec_method",
238 "aec_args",
239 "save_aec",
240 "autoloaded",
241 "use_volume_sharing",
242 NULL
243 };
244
245 enum {
246 SOURCE_OUTPUT_MESSAGE_POST = PA_SOURCE_OUTPUT_MESSAGE_MAX,
247 SOURCE_OUTPUT_MESSAGE_REWIND,
248 SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT,
249 SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME
250 };
251
252 enum {
253 SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT
254 };
255
256 static int64_t calc_diff(struct userdata *u, struct snapshot *snapshot) {
257 int64_t buffer, diff_time, buffer_latency;
258
259 /* get the number of bytes between capture and playback */
260 if (snapshot->plen > snapshot->rlen)
261 buffer = snapshot->plen - snapshot->rlen;
262 else
263 buffer = 0;
264
265 buffer += snapshot->source_delay + snapshot->sink_delay;
266
267 /* add the amount of data not yet transferred to the source context */
268 if (snapshot->recv_counter <= snapshot->send_counter)
269 buffer += (int64_t) (snapshot->send_counter - snapshot->recv_counter);
270 else
271 buffer = PA_CLIP_SUB(buffer, (int64_t) (snapshot->recv_counter - snapshot->send_counter));
272
273 /* convert to time */
274 buffer_latency = pa_bytes_to_usec(buffer, &u->source_output->sample_spec);
275
276 /* capture and playback samples are perfectly aligned when diff_time is 0 */
277 diff_time = (snapshot->sink_now + snapshot->sink_latency - buffer_latency) -
278 (snapshot->source_now - snapshot->source_latency);
279
280 pa_log_debug("diff %lld (%lld - %lld + %lld) %lld %lld %lld %lld", (long long) diff_time,
281 (long long) snapshot->sink_latency,
282 (long long) buffer_latency, (long long) snapshot->source_latency,
283 (long long) snapshot->source_delay, (long long) snapshot->sink_delay,
284 (long long) (snapshot->send_counter - snapshot->recv_counter),
285 (long long) (snapshot->sink_now - snapshot->source_now));
286
287 return diff_time;
288 }
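/* How the result is used (see time_callback() below; the numbers are only
 * illustrative): a positive diff_time means playback is ahead of capture,
 * which is the desired state. With the default adjust_threshold of 5 ms, a
 * diff_time of +2 ms is simply left for the echo canceller to absorb, +8 ms
 * posts SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME to resync quickly, and any
 * negative value (capture ahead of playback) always triggers a resync. */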
289
290 /* Called from main context */
291 static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
292 struct userdata *u = userdata;
293 uint32_t old_rate, base_rate, new_rate;
294 int64_t diff_time;
295 /*size_t fs*/
296 struct snapshot latency_snapshot;
297
298 pa_assert(u);
299 pa_assert(a);
300 pa_assert(u->time_event == e);
301 pa_assert_ctl_context();
302
303 if (!IS_ACTIVE(u))
304 return;
305
306 /* update our snapshots */
307 pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
308 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
309
310 /* calculate drift between capture and playback */
311 diff_time = calc_diff(u, &latency_snapshot);
312
313 /*fs = pa_frame_size(&u->source_output->sample_spec);*/
314 old_rate = u->sink_input->sample_spec.rate;
315 base_rate = u->source_output->sample_spec.rate;
316
317 if (diff_time < 0) {
318 /* recording before playback, we need to adjust quickly. The echo
319 * canceler does not work in this case. */
320 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
321 NULL, diff_time, NULL, NULL);
322 /*new_rate = base_rate - ((pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
323 new_rate = base_rate;
324 }
325 else {
326 if (diff_time > u->adjust_threshold) {
327 /* diff too big, quickly adjust */
328 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
329 NULL, diff_time, NULL, NULL);
330 }
331
332 /* recording behind playback, we need to slowly adjust the rate to match */
333 /*new_rate = base_rate + ((pa_usec_to_bytes(diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
334
335 /* assume equal samplerates for now */
336 new_rate = base_rate;
337 }
338
339 /* make sure we don't make too big adjustments because that sounds horrible */
340 if (new_rate > base_rate * 1.1 || new_rate < base_rate * 0.9)
341 new_rate = base_rate;
342
343 if (new_rate != old_rate) {
344 pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate);
345
346 pa_sink_input_set_rate(u->sink_input, new_rate);
347 }
348
349 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
350 }
351
352 /* Called from source I/O thread context */
353 static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
354 struct userdata *u = PA_SOURCE(o)->userdata;
355
356 switch (code) {
357
358 case PA_SOURCE_MESSAGE_GET_LATENCY:
359
360 /* The source is _put() before the source output is, so let's
361 * make sure we don't access it in that time. Also, the
362 * source output is first shut down, the source second. */
363 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
364 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
365 *((pa_usec_t*) data) = 0;
366 return 0;
367 }
368
369 *((pa_usec_t*) data) =
370
371 /* Get the latency of the master source */
372 pa_source_get_latency_within_thread(u->source_output->source) +
373 /* Add the latency internal to our source output on top */
374 pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec) +
375 /* and the buffering we do on the source */
376 pa_bytes_to_usec(u->blocksize, &u->source_output->source->sample_spec);
377
378 return 0;
379
380 }
381
382 return pa_source_process_msg(o, code, data, offset, chunk);
383 }
384
385 /* Called from sink I/O thread context */
386 static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
387 struct userdata *u = PA_SINK(o)->userdata;
388
389 switch (code) {
390
391 case PA_SINK_MESSAGE_GET_LATENCY:
392
393 /* The sink is _put() before the sink input is, so let's
394 * make sure we don't access it in that time. Also, the
395 * sink input is first shut down, the sink second. */
396 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
397 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
398 *((pa_usec_t*) data) = 0;
399 return 0;
400 }
401
402 *((pa_usec_t*) data) =
403
404 /* Get the latency of the master sink */
405 pa_sink_get_latency_within_thread(u->sink_input->sink) +
406
407 /* Add the latency internal to our sink input on top */
408 pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);
409
410 return 0;
411 }
412
413 return pa_sink_process_msg(o, code, data, offset, chunk);
414 }
415
416
417 /* Called from main context */
418 static int source_set_state_cb(pa_source *s, pa_source_state_t state) {
419 struct userdata *u;
420
421 pa_source_assert_ref(s);
422 pa_assert_se(u = s->userdata);
423
424 if (!PA_SOURCE_IS_LINKED(state) ||
425 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
426 return 0;
427
428 if (state == PA_SOURCE_RUNNING) {
429 /* restart timer when both sink and source are active */
430 if (IS_ACTIVE(u) && u->adjust_time)
431 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
432
433 pa_atomic_store(&u->request_resync, 1);
434 pa_source_output_cork(u->source_output, FALSE);
435 } else if (state == PA_SOURCE_SUSPENDED) {
436 pa_source_output_cork(u->source_output, TRUE);
437 }
438
439 return 0;
440 }
441
442 /* Called from main context */
443 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
444 struct userdata *u;
445
446 pa_sink_assert_ref(s);
447 pa_assert_se(u = s->userdata);
448
449 if (!PA_SINK_IS_LINKED(state) ||
450 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
451 return 0;
452
453 if (state == PA_SINK_RUNNING) {
454 /* restart timer when both sink and source are active */
455 if (IS_ACTIVE(u) && u->adjust_time)
456 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
457
458 pa_atomic_store(&u->request_resync, 1);
459 pa_sink_input_cork(u->sink_input, FALSE);
460 } else if (state == PA_SINK_SUSPENDED) {
461 pa_sink_input_cork(u->sink_input, TRUE);
462 }
463
464 return 0;
465 }
466
467 /* Called from I/O thread context */
468 static void source_update_requested_latency_cb(pa_source *s) {
469 struct userdata *u;
470
471 pa_source_assert_ref(s);
472 pa_assert_se(u = s->userdata);
473
474 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
475 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state))
476 return;
477
478 pa_log_debug("Source update requested latency");
479
480 /* Just hand this one over to the master source */
481 pa_source_output_set_requested_latency_within_thread(
482 u->source_output,
483 pa_source_get_requested_latency_within_thread(s));
484 }
485
486 /* Called from I/O thread context */
487 static void sink_update_requested_latency_cb(pa_sink *s) {
488 struct userdata *u;
489
490 pa_sink_assert_ref(s);
491 pa_assert_se(u = s->userdata);
492
493 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
494 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
495 return;
496
497 pa_log_debug("Sink update requested latency");
498
499 /* Just hand this one over to the master sink */
500 pa_sink_input_set_requested_latency_within_thread(
501 u->sink_input,
502 pa_sink_get_requested_latency_within_thread(s));
503 }
504
505 /* Called from I/O thread context */
506 static void sink_request_rewind_cb(pa_sink *s) {
507 struct userdata *u;
508
509 pa_sink_assert_ref(s);
510 pa_assert_se(u = s->userdata);
511
512 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
513 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
514 return;
515
516 pa_log_debug("Sink request rewind %lld", (long long) s->thread_info.rewind_nbytes);
517
518 /* Just hand this one over to the master sink */
519 pa_sink_input_request_rewind(u->sink_input,
520 s->thread_info.rewind_nbytes, TRUE, FALSE, FALSE);
521 }
522
523 /* Called from main context */
524 static void source_set_volume_cb(pa_source *s) {
525 struct userdata *u;
526
527 pa_source_assert_ref(s);
528 pa_assert_se(u = s->userdata);
529
530 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
531 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
532 return;
533
534 pa_source_output_set_volume(u->source_output, &s->real_volume, s->save_volume, TRUE);
535 }
536
537 /* Called from main context */
538 static void sink_set_volume_cb(pa_sink *s) {
539 struct userdata *u;
540
541 pa_sink_assert_ref(s);
542 pa_assert_se(u = s->userdata);
543
544 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
545 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
546 return;
547
548 pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, TRUE);
549 }
550
551 static void source_get_volume_cb(pa_source *s) {
552 struct userdata *u;
553 pa_cvolume v;
554
555 pa_source_assert_ref(s);
556 pa_assert_se(u = s->userdata);
557
558 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
559 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
560 return;
561
562 pa_source_output_get_volume(u->source_output, &v, TRUE);
563
564 if (pa_cvolume_equal(&s->real_volume, &v))
565 /* no change */
566 return;
567
568 s->real_volume = v;
569 pa_source_set_soft_volume(s, NULL);
570 }
571
572 /* Called from main context */
573 static void source_set_mute_cb(pa_source *s) {
574 struct userdata *u;
575
576 pa_source_assert_ref(s);
577 pa_assert_se(u = s->userdata);
578
579 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
580 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
581 return;
582
583 pa_source_output_set_mute(u->source_output, s->muted, s->save_muted);
584 }
585
586 /* Called from main context */
587 static void sink_set_mute_cb(pa_sink *s) {
588 struct userdata *u;
589
590 pa_sink_assert_ref(s);
591 pa_assert_se(u = s->userdata);
592
593 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
594 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
595 return;
596
597 pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted);
598 }
599
600 /* Called from main context */
601 static void source_get_mute_cb(pa_source *s) {
602 struct userdata *u;
603
604 pa_source_assert_ref(s);
605 pa_assert_se(u = s->userdata);
606
607 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
608 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
609 return;
610
611 pa_source_output_get_mute(u->source_output);
612 }
613
614 /* must be called from the input thread context */
615 static void apply_diff_time(struct userdata *u, int64_t diff_time) {
616 int64_t diff;
617
618 if (diff_time < 0) {
619 diff = pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec);
620
621 if (diff > 0) {
622 /* add some extra safety samples to compensate for jitter in the
623 * timings */
624 diff += 10 * pa_frame_size (&u->source_output->sample_spec);
625
626 pa_log("Playback after capture (%lld), drop sink %lld", (long long) diff_time, (long long) diff);
627
628 u->sink_skip = diff;
629 u->source_skip = 0;
630 }
631 } else if (diff_time > 0) {
632 diff = pa_usec_to_bytes(diff_time, &u->source_output->sample_spec);
633
634 if (diff > 0) {
635 pa_log("playback too far ahead (%lld), drop source %lld", (long long) diff_time, (long long) diff);
636
637 u->source_skip = diff;
638 u->sink_skip = 0;
639 }
640 }
641 }
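/* A worked example of the conversion above (illustrative only, assuming a
 * 32 kHz mono s16 sample spec): diff_time = -10 ms converts to
 * 320 frames * 2 bytes = 640 bytes; adding the 10-frame (20 byte) safety
 * margin gives sink_skip = 660 bytes, while source_skip is reset to 0. */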
642
643 /* must be called from the input thread */
644 static void do_resync(struct userdata *u) {
645 int64_t diff_time;
646 struct snapshot latency_snapshot;
647
648 pa_log("Doing resync");
649
650 /* update our snapshot */
651 source_output_snapshot_within_thread(u, &latency_snapshot);
652 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
653
654 /* calculate drift between capture and playback */
655 diff_time = calc_diff(u, &latency_snapshot);
656
657 /* and adjust for the drift */
658 apply_diff_time(u, diff_time);
659 }
660
661 /* 1. Calculate drift at this point, pass to canceller
662 * 2. Push out playback samples in blocksize chunks
663 * 3. Push out capture samples in blocksize chunks
664 * 4. ???
665 * 5. Profit
666 */
667 static void do_push_drift_comp(struct userdata *u) {
668 size_t rlen, plen;
669 pa_memchunk rchunk, pchunk, cchunk;
670 uint8_t *rdata, *pdata, *cdata;
671 float drift;
672
673 rlen = pa_memblockq_get_length(u->source_memblockq);
674 plen = pa_memblockq_get_length(u->sink_memblockq);
675
676 /* Estimate snapshot drift as follows:
677 * pd: amount of playback data consumed since the last iteration
678 * rd: amount of capture (record) data consumed since the last iteration
679 *
680 * drift = (pd - rd) / rd;
681 *
682 * We calculate pd and rd as the memblockq length less the number of
683 * bytes left over from the last iteration (to avoid double-counting
684 * those remainder bytes).
685 */
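/* For example (illustrative numbers only): if 1280 bytes of playback and
 * 1248 bytes of capture were consumed since the last iteration, the estimated
 * drift is (1280 - 1248) / 1248 ≈ 0.026, i.e. playback consumed about 2.6%
 * more data than capture over that interval. */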
686 drift = ((float)(plen - u->sink_rem) - (rlen - u->source_rem)) / ((float)(rlen - u->source_rem));
687 u->sink_rem = plen % u->blocksize;
688 u->source_rem = rlen % u->blocksize;
689
690 /* Now let the canceller work its drift compensation magic */
691 u->ec->set_drift(u->ec, drift);
692
693 /* Send in the playback samples first */
694 while (plen >= u->blocksize) {
695 pa_memblockq_peek_fixed_size(u->sink_memblockq, u->blocksize, &pchunk);
696 pdata = pa_memblock_acquire(pchunk.memblock);
697 pdata += pchunk.index;
698
699 u->ec->play(u->ec, pdata);
700
701 pa_memblock_release(pchunk.memblock);
702 pa_memblockq_drop(u->sink_memblockq, u->blocksize);
703 pa_memblock_unref(pchunk.memblock);
704
705 plen -= u->blocksize;
706 }
707
708 /* And now the capture samples */
709 while (rlen >= u->blocksize) {
710 pa_memblockq_peek_fixed_size(u->source_memblockq, u->blocksize, &rchunk);
711
712 rdata = pa_memblock_acquire(rchunk.memblock);
713 rdata += rchunk.index;
714
715 cchunk.index = 0;
716 cchunk.length = u->blocksize;
717 cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
718 cdata = pa_memblock_acquire(cchunk.memblock);
719
720 u->ec->record(u->ec, rdata, cdata);
721
722 pa_memblock_release(cchunk.memblock);
723 pa_memblock_release(rchunk.memblock);
724
725 pa_memblock_unref(rchunk.memblock);
726
727 pa_source_post(u->source, &cchunk);
728 pa_memblock_unref(cchunk.memblock);
729
730 pa_memblockq_drop(u->source_memblockq, u->blocksize);
731 rlen -= u->blocksize;
732 }
733 }
734
735 /* This one's simpler than the drift compensation case -- we just iterate over
736 * the capture buffer, and pass the canceller blocksize bytes of playback and
737 * capture data. */
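/* Note that blocksize is whatever the canceller reported back from its init()
 * call in pa__init(). Purely as an illustration (not a property of any
 * particular engine): a 10 ms block at the default 32 kHz mono rate with s16
 * samples would be 32000 * 0.010 frames * 2 bytes = 640 bytes. */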
738 static void do_push(struct userdata *u) {
739 size_t rlen, plen;
740 pa_memchunk rchunk, pchunk, cchunk;
741 uint8_t *rdata, *pdata, *cdata;
742 int unused;
743
744 rlen = pa_memblockq_get_length(u->source_memblockq);
745 plen = pa_memblockq_get_length(u->sink_memblockq);
746
747 while (rlen >= u->blocksize) {
748 /* take fixed block from recorded samples */
749 pa_memblockq_peek_fixed_size(u->source_memblockq, u->blocksize, &rchunk);
750
751 if (plen > u->blocksize) {
753 /* take fixed block from played samples */
754 pa_memblockq_peek_fixed_size(u->sink_memblockq, u->blocksize, &pchunk);
755
756 rdata = pa_memblock_acquire(rchunk.memblock);
757 rdata += rchunk.index;
758 pdata = pa_memblock_acquire(pchunk.memblock);
759 pdata += pchunk.index;
760
761 cchunk.index = 0;
762 cchunk.length = u->blocksize;
763 cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
764 cdata = pa_memblock_acquire(cchunk.memblock);
765
766 if (u->save_aec) {
767 if (u->captured_file)
768 unused = fwrite(rdata, 1, u->blocksize, u->captured_file);
769 if (u->played_file)
770 unused = fwrite(pdata, 1, u->blocksize, u->played_file);
771 }
772
773 /* perform echo cancellation */
774 u->ec->run(u->ec, rdata, pdata, cdata);
775
776 if (u->save_aec) {
777 if (u->canceled_file)
778 unused = fwrite(cdata, 1, u->blocksize, u->canceled_file);
779 }
780
781 pa_memblock_release(cchunk.memblock);
782 pa_memblock_release(pchunk.memblock);
783 pa_memblock_release(rchunk.memblock);
784
785 /* drop consumed sink samples */
786 pa_memblockq_drop(u->sink_memblockq, u->blocksize);
787 pa_memblock_unref(pchunk.memblock);
788
789 pa_memblock_unref(rchunk.memblock);
790 /* the filtered samples now become the samples from our
791 * source */
792 rchunk = cchunk;
793
794 plen -= u->blocksize;
795 }
797
798 /* forward the (echo-canceled) data to the virtual source */
799 pa_source_post(u->source, &rchunk);
800 pa_memblock_unref(rchunk.memblock);
801
802 pa_memblockq_drop(u->source_memblockq, u->blocksize);
803 rlen -= u->blocksize;
804 }
805 }
806
807 /* Called from input thread context */
808 static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
809 struct userdata *u;
810 size_t rlen, plen, to_skip;
811 pa_memchunk rchunk;
812
813 pa_source_output_assert_ref(o);
814 pa_source_output_assert_io_context(o);
815 pa_assert_se(u = o->userdata);
816
817 if (!PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output))) {
818 pa_log("push when no link?");
819 return;
820 }
821
822 if (PA_UNLIKELY(u->source->thread_info.state != PA_SOURCE_RUNNING ||
823 u->sink->thread_info.state != PA_SINK_RUNNING)) {
824 pa_source_post(u->source, chunk);
825 return;
826 }
827
828 /* handle queued messages, do any message sending of our own */
829 while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0)
830 ;
831
832 pa_memblockq_push_align(u->source_memblockq, chunk);
833
834 rlen = pa_memblockq_get_length(u->source_memblockq);
835 plen = pa_memblockq_get_length(u->sink_memblockq);
836
837 /* Let's not do anything else till we have enough data to process */
838 if (rlen < u->blocksize)
839 return;
840
841 /* See if we need to drop samples in order to sync */
842 if (pa_atomic_cmpxchg (&u->request_resync, 1, 0)) {
843 do_resync(u);
844 }
845
846 /* Okay, skip cancellation for skipped source samples if needed. */
847 if (PA_UNLIKELY(u->source_skip)) {
848 /* The slightly tricky bit here is that we only drop whole multiples of
849 * blocksize here and adjust for the remainder on the sink side. We do this
850 * because the source data is coming in at a fixed rate, which means the
851 * only way to catch up is to drop sink samples and let the canceller
852 * cope with this. */
853 to_skip = rlen >= u->source_skip ? u->source_skip : rlen;
854 to_skip -= to_skip % u->blocksize;
855
856 if (to_skip) {
857 pa_memblockq_peek_fixed_size(u->source_memblockq, to_skip, &rchunk);
858 pa_source_post(u->source, &rchunk);
859
860 pa_memblock_unref(rchunk.memblock);
861 pa_memblockq_drop(u->source_memblockq, to_skip);
862
863 rlen -= to_skip;
864 u->source_skip -= to_skip;
865 }
866
867 if (rlen && u->source_skip % u->blocksize) {
868 u->sink_skip += u->blocksize - (u->source_skip % u->blocksize);
869 u->source_skip -= (u->source_skip % u->blocksize);
870 }
871 }
872
873 /* And for the sink, these samples have been played back already, so we can
874 * just drop them and get on with it. */
875 if (PA_UNLIKELY(u->sink_skip)) {
876 to_skip = plen >= u->sink_skip ? u->sink_skip : plen;
877
878 pa_memblockq_drop(u->sink_memblockq, to_skip);
879
880 plen -= to_skip;
881 u->sink_skip -= to_skip;
882 }
883
884 /* process and push out samples */
885 if (u->ec->params.drift_compensation)
886 do_push_drift_comp(u);
887 else
888 do_push(u);
889 }
890
891 /* Called from I/O thread context */
892 static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
893 struct userdata *u;
894
895 pa_sink_input_assert_ref(i);
896 pa_assert(chunk);
897 pa_assert_se(u = i->userdata);
898
899 if (u->sink->thread_info.rewind_requested)
900 pa_sink_process_rewind(u->sink, 0);
901
902 pa_sink_render_full(u->sink, nbytes, chunk);
903
904 if (i->thread_info.underrun_for > 0) {
905 pa_log_debug("Handling end of underrun.");
906 pa_atomic_store(&u->request_resync, 1);
907 }
908
909 /* let source thread handle the chunk. pass the sample count as well so that
910 * the source IO thread can update the right variables. */
911 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_POST,
912 NULL, 0, chunk, NULL);
913 u->send_counter += chunk->length;
914
915 return 0;
916 }
917
918 /* Called from input thread context */
919 static void source_output_process_rewind_cb(pa_source_output *o, size_t nbytes) {
920 struct userdata *u;
921
922 pa_source_output_assert_ref(o);
923 pa_source_output_assert_io_context(o);
924 pa_assert_se(u = o->userdata);
925
926 pa_source_process_rewind(u->source, nbytes);
927
928 /* go back on read side, we need to use older sink data for this */
929 pa_memblockq_rewind(u->sink_memblockq, nbytes);
930
931 /* manipulate write index */
932 pa_memblockq_seek(u->source_memblockq, -nbytes, PA_SEEK_RELATIVE, TRUE);
933
934 pa_log_debug("Source rewind (%lld) %lld", (long long) nbytes,
935 (long long) pa_memblockq_get_length (u->source_memblockq));
936 }
937
938 /* Called from I/O thread context */
939 static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
940 struct userdata *u;
941
942 pa_sink_input_assert_ref(i);
943 pa_assert_se(u = i->userdata);
944
945 pa_log_debug("Sink process rewind %lld", (long long) nbytes);
946
947 pa_sink_process_rewind(u->sink, nbytes);
948
949 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_REWIND, NULL, (int64_t) nbytes, NULL, NULL);
950 u->send_counter -= nbytes;
951 }
952
953 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot) {
954 size_t delay, rlen, plen;
955 pa_usec_t now, latency;
956
957 now = pa_rtclock_now();
958 latency = pa_source_get_latency_within_thread(u->source_output->source);
959 delay = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);
960
961 delay = (u->source_output->thread_info.resampler ? pa_resampler_request(u->source_output->thread_info.resampler, delay) : delay);
962 rlen = pa_memblockq_get_length(u->source_memblockq);
963 plen = pa_memblockq_get_length(u->sink_memblockq);
964
965 snapshot->source_now = now;
966 snapshot->source_latency = latency;
967 snapshot->source_delay = delay;
968 snapshot->recv_counter = u->recv_counter;
969 snapshot->rlen = rlen + u->sink_skip;
970 snapshot->plen = plen + u->source_skip;
971 }
972
973
974 /* Called from output thread context */
975 static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
976 struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;
977
978 switch (code) {
979
980 case SOURCE_OUTPUT_MESSAGE_POST:
981
982 pa_source_output_assert_io_context(u->source_output);
983
984 if (u->source_output->source->thread_info.state == PA_SOURCE_RUNNING)
985 pa_memblockq_push_align(u->sink_memblockq, chunk);
986 else
987 pa_memblockq_flush_write(u->sink_memblockq, TRUE);
988
989 u->recv_counter += (int64_t) chunk->length;
990
991 return 0;
992
993 case SOURCE_OUTPUT_MESSAGE_REWIND:
994 pa_source_output_assert_io_context(u->source_output);
995
996 /* manipulate write index, never go past what we have */
997 if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state))
998 pa_memblockq_seek(u->sink_memblockq, -offset, PA_SEEK_RELATIVE, TRUE);
999 else
1000 pa_memblockq_flush_write(u->sink_memblockq, TRUE);
1001
1002 pa_log_debug("Sink rewind (%lld)", (long long) offset);
1003
1004 u->recv_counter -= offset;
1005
1006 return 0;
1007
1008 case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
1009 struct snapshot *snapshot = (struct snapshot *) data;
1010
1011 source_output_snapshot_within_thread(u, snapshot);
1012 return 0;
1013 }
1014
1015 case SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME:
1016 apply_diff_time(u, offset);
1017 return 0;
1018
1019 }
1020
1021 return pa_source_output_process_msg(obj, code, data, offset, chunk);
1022 }
1023
1024 static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1025 struct userdata *u = PA_SINK_INPUT(obj)->userdata;
1026
1027 switch (code) {
1028
1029 case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
1030 size_t delay;
1031 pa_usec_t now, latency;
1032 struct snapshot *snapshot = (struct snapshot *) data;
1033
1034 pa_sink_input_assert_io_context(u->sink_input);
1035
1036 now = pa_rtclock_now();
1037 latency = pa_sink_get_latency_within_thread(u->sink_input->sink);
1038 delay = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);
1039
1040 delay = (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, delay) : delay);
1041
1042 snapshot->sink_now = now;
1043 snapshot->sink_latency = latency;
1044 snapshot->sink_delay = delay;
1045 snapshot->send_counter = u->send_counter;
1046 return 0;
1047 }
1048 }
1049
1050 return pa_sink_input_process_msg(obj, code, data, offset, chunk);
1051 }
1052
1053 /* Called from I/O thread context */
1054 static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
1055 struct userdata *u;
1056
1057 pa_sink_input_assert_ref(i);
1058 pa_assert_se(u = i->userdata);
1059
1060 pa_log_debug("Sink input update max rewind %lld", (long long) nbytes);
1061
1062 pa_memblockq_set_maxrewind(u->sink_memblockq, nbytes);
1063 pa_sink_set_max_rewind_within_thread(u->sink, nbytes);
1064 }
1065
1066 /* Called from I/O thread context */
1067 static void source_output_update_max_rewind_cb(pa_source_output *o, size_t nbytes) {
1068 struct userdata *u;
1069
1070 pa_source_output_assert_ref(o);
1071 pa_assert_se(u = o->userdata);
1072
1073 pa_log_debug("Source output update max rewind %lld", (long long) nbytes);
1074
1075 pa_source_set_max_rewind_within_thread(u->source, nbytes);
1076 }
1077
1078 /* Called from I/O thread context */
1079 static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
1080 struct userdata *u;
1081
1082 pa_sink_input_assert_ref(i);
1083 pa_assert_se(u = i->userdata);
1084
1085 pa_log_debug("Sink input update max request %lld", (long long) nbytes);
1086
1087 pa_sink_set_max_request_within_thread(u->sink, nbytes);
1088 }
1089
1090 /* Called from I/O thread context */
1091 static void sink_input_update_sink_requested_latency_cb(pa_sink_input *i) {
1092 struct userdata *u;
1093 pa_usec_t latency;
1094
1095 pa_sink_input_assert_ref(i);
1096 pa_assert_se(u = i->userdata);
1097
1098 latency = pa_sink_get_requested_latency_within_thread(i->sink);
1099
1100 pa_log_debug("Sink input update requested latency %lld", (long long) latency);
1101 }
1102
1103 /* Called from I/O thread context */
1104 static void source_output_update_source_requested_latency_cb(pa_source_output *o) {
1105 struct userdata *u;
1106 pa_usec_t latency;
1107
1108 pa_source_output_assert_ref(o);
1109 pa_assert_se(u = o->userdata);
1110
1111 latency = pa_source_get_requested_latency_within_thread(o->source);
1112
1113 pa_log_debug("source output update requested latency %lld", (long long) latency);
1114 }
1115
1116 /* Called from I/O thread context */
1117 static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) {
1118 struct userdata *u;
1119
1120 pa_sink_input_assert_ref(i);
1121 pa_assert_se(u = i->userdata);
1122
1123 pa_log_debug("Sink input update latency range %lld %lld",
1124 (long long) i->sink->thread_info.min_latency,
1125 (long long) i->sink->thread_info.max_latency);
1126
1127 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1128 }
1129
1130 /* Called from I/O thread context */
1131 static void source_output_update_source_latency_range_cb(pa_source_output *o) {
1132 struct userdata *u;
1133
1134 pa_source_output_assert_ref(o);
1135 pa_assert_se(u = o->userdata);
1136
1137 pa_log_debug("Source output update latency range %lld %lld",
1138 (long long) o->source->thread_info.min_latency,
1139 (long long) o->source->thread_info.max_latency);
1140
1141 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1142 }
1143
1144 /* Called from I/O thread context */
1145 static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) {
1146 struct userdata *u;
1147
1148 pa_sink_input_assert_ref(i);
1149 pa_assert_se(u = i->userdata);
1150
1151 pa_log_debug("Sink input update fixed latency %lld",
1152 (long long) i->sink->thread_info.fixed_latency);
1153
1154 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1155 }
1156
1157 /* Called from I/O thread context */
1158 static void source_output_update_source_fixed_latency_cb(pa_source_output *o) {
1159 struct userdata *u;
1160
1161 pa_source_output_assert_ref(o);
1162 pa_assert_se(u = o->userdata);
1163
1164 pa_log_debug("Source output update fixed latency %lld",
1165 (long long) o->source->thread_info.fixed_latency);
1166
1167 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1168 }
1169
1170 /* Called from output thread context */
1171 static void source_output_attach_cb(pa_source_output *o) {
1172 struct userdata *u;
1173
1174 pa_source_output_assert_ref(o);
1175 pa_source_output_assert_io_context(o);
1176 pa_assert_se(u = o->userdata);
1177
1178 pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll);
1179 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1180 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1181 pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o));
1182
1183 pa_log_debug("Source output %d attach", o->index);
1184
1185 pa_source_attach_within_thread(u->source);
1186
1187 u->rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
1188 o->source->thread_info.rtpoll,
1189 PA_RTPOLL_LATE,
1190 u->asyncmsgq);
1191 }
1192
1193 /* Called from I/O thread context */
1194 static void sink_input_attach_cb(pa_sink_input *i) {
1195 struct userdata *u;
1196
1197 pa_sink_input_assert_ref(i);
1198 pa_assert_se(u = i->userdata);
1199
1200 pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll);
1201 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1202
1203 /* (8.1) IF YOU NEED A FIXED BLOCK SIZE ADD THE LATENCY FOR ONE
1204 * BLOCK MINUS ONE SAMPLE HERE. SEE (7) */
1205 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1206
1207 /* (8.2) IF YOU NEED A FIXED BLOCK SIZE ROUND
1208 * pa_sink_input_get_max_request(i) UP TO MULTIPLES OF IT
1209 * HERE. SEE (6) */
1210 pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i));
1211 pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i));
1212
1213 pa_log_debug("Sink input %d attach", i->index);
1214
1215 u->rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
1216 i->sink->thread_info.rtpoll,
1217 PA_RTPOLL_LATE,
1218 u->asyncmsgq);
1219
1220 pa_sink_attach_within_thread(u->sink);
1221 }
1222
1223
1224 /* Called from output thread context */
1225 static void source_output_detach_cb(pa_source_output *o) {
1226 struct userdata *u;
1227
1228 pa_source_output_assert_ref(o);
1229 pa_source_output_assert_io_context(o);
1230 pa_assert_se(u = o->userdata);
1231
1232 pa_source_detach_within_thread(u->source);
1233 pa_source_set_rtpoll(u->source, NULL);
1234
1235 pa_log_debug("Source output %d detach", o->index);
1236
1237 if (u->rtpoll_item_read) {
1238 pa_rtpoll_item_free(u->rtpoll_item_read);
1239 u->rtpoll_item_read = NULL;
1240 }
1241 }
1242
1243 /* Called from I/O thread context */
1244 static void sink_input_detach_cb(pa_sink_input *i) {
1245 struct userdata *u;
1246
1247 pa_sink_input_assert_ref(i);
1248 pa_assert_se(u = i->userdata);
1249
1250 pa_sink_detach_within_thread(u->sink);
1251
1252 pa_sink_set_rtpoll(u->sink, NULL);
1253
1254 pa_log_debug("Sink input %d detach", i->index);
1255
1256 if (u->rtpoll_item_write) {
1257 pa_rtpoll_item_free(u->rtpoll_item_write);
1258 u->rtpoll_item_write = NULL;
1259 }
1260 }
1261
1262 /* Called from output thread context */
1263 static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
1264 struct userdata *u;
1265
1266 pa_source_output_assert_ref(o);
1267 pa_source_output_assert_io_context(o);
1268 pa_assert_se(u = o->userdata);
1269
1270 pa_log_debug("Source output %d state %d", o->index, state);
1271 }
1272
1273 /* Called from IO thread context */
1274 static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
1275 struct userdata *u;
1276
1277 pa_sink_input_assert_ref(i);
1278 pa_assert_se(u = i->userdata);
1279
1280 pa_log_debug("Sink input %d state %d", i->index, state);
1281
1282 /* If we are added for the first time, ask for a rewinding so that
1283 * we are heard right away. */
1284 if (PA_SINK_INPUT_IS_LINKED(state) &&
1285 i->thread_info.state == PA_SINK_INPUT_INIT) {
1286 pa_log_debug("Requesting rewind due to state change.");
1287 pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
1288 }
1289 }
1290
1291 /* Called from main thread */
1292 static void source_output_kill_cb(pa_source_output *o) {
1293 struct userdata *u;
1294
1295 pa_source_output_assert_ref(o);
1296 pa_assert_ctl_context();
1297 pa_assert_se(u = o->userdata);
1298
1299 u->dead = TRUE;
1300
1301 /* The order here matters! We first kill the source output, followed
1302 * by the source. That means the source callbacks must be protected
1303 * against an unconnected source output! */
1304 pa_source_output_unlink(u->source_output);
1305 pa_source_unlink(u->source);
1306
1307 pa_source_output_unref(u->source_output);
1308 u->source_output = NULL;
1309
1310 pa_source_unref(u->source);
1311 u->source = NULL;
1312
1313 pa_log_debug("Source output kill %d", o->index);
1314
1315 pa_module_unload_request(u->module, TRUE);
1316 }
1317
1318 /* Called from main context */
1319 static void sink_input_kill_cb(pa_sink_input *i) {
1320 struct userdata *u;
1321
1322 pa_sink_input_assert_ref(i);
1323 pa_assert_se(u = i->userdata);
1324
1325 u->dead = TRUE;
1326
1327 /* The order here matters! We first kill the sink input, followed
1328 * by the sink. That means the sink callbacks must be protected
1329 * against an unconnected sink input! */
1330 pa_sink_input_unlink(u->sink_input);
1331 pa_sink_unlink(u->sink);
1332
1333 pa_sink_input_unref(u->sink_input);
1334 u->sink_input = NULL;
1335
1336 pa_sink_unref(u->sink);
1337 u->sink = NULL;
1338
1339 pa_log_debug("Sink input kill %d", i->index);
1340
1341 pa_module_unload_request(u->module, TRUE);
1342 }
1343
1344 /* Called from main thread */
1345 static pa_bool_t source_output_may_move_to_cb(pa_source_output *o, pa_source *dest) {
1346 struct userdata *u;
1347
1348 pa_source_output_assert_ref(o);
1349 pa_assert_ctl_context();
1350 pa_assert_se(u = o->userdata);
1351
1352 if (u->dead)
1353 return FALSE;
1354
1355 return (u->source != dest) && (u->sink != dest->monitor_of);
1356 }
1357
1358 /* Called from main context */
1359 static pa_bool_t sink_input_may_move_to_cb(pa_sink_input *i, pa_sink *dest) {
1360 struct userdata *u;
1361
1362 pa_sink_input_assert_ref(i);
1363 pa_assert_se(u = i->userdata);
1364
1365 if (u->dead)
1366 return FALSE;
1367
1368 return u->sink != dest;
1369 }
1370
1371 /* Called from main thread */
1372 static void source_output_moving_cb(pa_source_output *o, pa_source *dest) {
1373 struct userdata *u;
1374
1375 pa_source_output_assert_ref(o);
1376 pa_assert_ctl_context();
1377 pa_assert_se(u = o->userdata);
1378
1379 if (dest) {
1380 pa_source_set_asyncmsgq(u->source, dest->asyncmsgq);
1381 pa_source_update_flags(u->source, PA_SOURCE_LATENCY|PA_SOURCE_DYNAMIC_LATENCY, dest->flags);
1382 } else
1383 pa_source_set_asyncmsgq(u->source, NULL);
1384
1385 if (u->source_auto_desc && dest) {
1386 const char *z;
1387 pa_proplist *pl;
1388
1389 pl = pa_proplist_new();
1390 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1391 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Source %s on %s",
1392 pa_proplist_gets(u->source->proplist, "device.echo-cancel.name"), z ? z : dest->name);
1393
1394 pa_source_update_proplist(u->source, PA_UPDATE_REPLACE, pl);
1395 pa_proplist_free(pl);
1396 }
1397 }
1398
1399 /* Called from main context */
1400 static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) {
1401 struct userdata *u;
1402
1403 pa_sink_input_assert_ref(i);
1404 pa_assert_se(u = i->userdata);
1405
1406 if (dest) {
1407 pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq);
1408 pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags);
1409 } else
1410 pa_sink_set_asyncmsgq(u->sink, NULL);
1411
1412 if (u->sink_auto_desc && dest) {
1413 const char *z;
1414 pa_proplist *pl;
1415
1416 pl = pa_proplist_new();
1417 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1418 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Sink %s on %s",
1419 pa_proplist_gets(u->sink->proplist, "device.echo-cancel.name"), z ? z : dest->name);
1420
1421 pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl);
1422 pa_proplist_free(pl);
1423 }
1424 }
1425
1426 /* Called from main context */
1427 static void sink_input_volume_changed_cb(pa_sink_input *i) {
1428 struct userdata *u;
1429
1430 pa_sink_input_assert_ref(i);
1431 pa_assert_se(u = i->userdata);
1432
1433 pa_sink_volume_changed(u->sink, &i->volume);
1434 }
1435
1436 /* Called from main context */
1437 static void sink_input_mute_changed_cb(pa_sink_input *i) {
1438 struct userdata *u;
1439
1440 pa_sink_input_assert_ref(i);
1441 pa_assert_se(u = i->userdata);
1442
1443 pa_sink_mute_changed(u->sink, i->muted);
1444 }
1445
1446 static pa_echo_canceller_method_t get_ec_method_from_string(const char *method) {
1447 if (pa_streq(method, "speex"))
1448 return PA_ECHO_CANCELLER_SPEEX;
1449 else if (pa_streq(method, "adrian"))
1450 return PA_ECHO_CANCELLER_ADRIAN;
1451 #ifdef HAVE_WEBRTC
1452 else if (pa_streq(method, "webrtc"))
1453 return PA_ECHO_CANCELLER_WEBRTC;
1454 #endif
1455 else
1456 return PA_ECHO_CANCELLER_INVALID;
1457 }
1458
1459 /* Common initialisation bits between module-echo-cancel and the standalone test program */
1460 static int init_common(pa_modargs *ma, struct userdata *u, pa_sample_spec *source_ss, pa_channel_map *source_map) {
1461 pa_echo_canceller_method_t ec_method;
1462
1463 if (pa_modargs_get_sample_spec_and_channel_map(ma, source_ss, source_map, PA_CHANNEL_MAP_DEFAULT) < 0) {
1464 pa_log("Invalid sample format specification or channel map");
1465 goto fail;
1466 }
1467
1468 u->ec = pa_xnew0(pa_echo_canceller, 1);
1469 if (!u->ec) {
1470 pa_log("Failed to alloc echo canceller");
1471 goto fail;
1472 }
1473
1474 if ((ec_method = get_ec_method_from_string(pa_modargs_get_value(ma, "aec_method", DEFAULT_ECHO_CANCELLER))) < 0) {
1475 pa_log("Invalid echo canceller implementation");
1476 goto fail;
1477 }
1478
1479 u->ec->init = ec_table[ec_method].init;
1480 u->ec->play = ec_table[ec_method].play;
1481 u->ec->record = ec_table[ec_method].record;
1482 u->ec->set_drift = ec_table[ec_method].set_drift;
1483 u->ec->run = ec_table[ec_method].run;
1484 u->ec->done = ec_table[ec_method].done;
1485
1486 return 0;
1487
1488 fail:
1489 return -1;
1490 }
1491
1492
1493 int pa__init(pa_module*m) {
1494 struct userdata *u;
1495 pa_sample_spec source_ss, sink_ss;
1496 pa_channel_map source_map, sink_map;
1497 pa_modargs *ma;
1498 pa_source *source_master=NULL;
1499 pa_sink *sink_master=NULL;
1500 pa_source_output_new_data source_output_data;
1501 pa_sink_input_new_data sink_input_data;
1502 pa_source_new_data source_data;
1503 pa_sink_new_data sink_data;
1504 pa_memchunk silence;
1505 uint32_t temp;
1506 pa_bool_t use_volume_sharing = TRUE;
1507
1508 pa_assert(m);
1509
1510 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1511 pa_log("Failed to parse module arguments.");
1512 goto fail;
1513 }
1514
1515 if (!(source_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source_master", NULL), PA_NAMEREG_SOURCE))) {
1516 pa_log("Master source not found");
1517 goto fail;
1518 }
1519 pa_assert(source_master);
1520
1521 if (!(sink_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink_master", NULL), PA_NAMEREG_SINK))) {
1522 pa_log("Master sink not found");
1523 goto fail;
1524 }
1525 pa_assert(sink_master);
1526
1527 if (source_master->monitor_of == sink_master) {
1528 pa_log("Can't cancel echo between a sink and its monitor");
1529 goto fail;
1530 }
1531
1532 source_ss = source_master->sample_spec;
1533 source_ss.rate = DEFAULT_RATE;
1534 source_ss.channels = DEFAULT_CHANNELS;
1535 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1536
1537 sink_ss = sink_master->sample_spec;
1538 sink_map = sink_master->channel_map;
1539
1540 if (pa_modargs_get_value_boolean(ma, "use_volume_sharing", &use_volume_sharing) < 0) {
1541 pa_log("use_volume_sharing= expects a boolean argument");
1542 goto fail;
1543 }
1544
1545 u = pa_xnew0(struct userdata, 1);
1546 if (!u) {
1547 pa_log("Failed to alloc userdata");
1548 goto fail;
1549 }
1550 u->core = m->core;
1551 u->module = m;
1552 m->userdata = u;
1553 u->dead = FALSE;
1554
1555 temp = DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC;
1556 if (pa_modargs_get_value_u32(ma, "adjust_time", &temp) < 0) {
1557 pa_log("Failed to parse adjust_time value");
1558 goto fail;
1559 }
1560
1561 if (temp != DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC)
1562 u->adjust_time = temp * PA_USEC_PER_SEC;
1563 else
1564 u->adjust_time = DEFAULT_ADJUST_TIME_USEC;
1565
1566 temp = DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC;
1567 if (pa_modargs_get_value_u32(ma, "adjust_threshold", &temp) < 0) {
1568 pa_log("Failed to parse adjust_threshold value");
1569 goto fail;
1570 }
1571
1572 if (temp != DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC)
1573 u->adjust_threshold = temp * PA_USEC_PER_MSEC;
1574 else
1575 u->adjust_threshold = DEFAULT_ADJUST_TOLERANCE;
1576
1577 u->save_aec = DEFAULT_SAVE_AEC;
1578 if (pa_modargs_get_value_boolean(ma, "save_aec", &u->save_aec) < 0) {
1579 pa_log("Failed to parse save_aec value");
1580 goto fail;
1581 }
1582
1583 u->autoloaded = DEFAULT_AUTOLOADED;
1584 if (pa_modargs_get_value_boolean(ma, "autoloaded", &u->autoloaded) < 0) {
1585 pa_log("Failed to parse autoloaded value");
1586 goto fail;
1587 }
1588
1589 if (init_common(ma, u, &source_ss, &source_map))
1590 goto fail;
1591
1592 u->asyncmsgq = pa_asyncmsgq_new(0);
1593 u->need_realign = TRUE;
1594
1595 if (u->ec->init) {
1596 if (!u->ec->init(u->core, u->ec, &source_ss, &source_map, &sink_ss, &sink_map, &u->blocksize, pa_modargs_get_value(ma, "aec_args", NULL))) {
1597 pa_log("Failed to init AEC engine");
1598 goto fail;
1599 }
1600 }
1601
1602 if (u->ec->params.drift_compensation)
1603 pa_assert(u->ec->set_drift);
1604
1605 /* Create source */
1606 pa_source_new_data_init(&source_data);
1607 source_data.driver = __FILE__;
1608 source_data.module = m;
1609 if (!(source_data.name = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL))))
1610 source_data.name = pa_sprintf_malloc("%s.echo-cancel", source_master->name);
1611 pa_source_new_data_set_sample_spec(&source_data, &source_ss);
1612 pa_source_new_data_set_channel_map(&source_data, &source_map);
1613 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, source_master->name);
1614 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1615 if (!u->autoloaded)
1616 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1617 pa_proplist_sets(source_data.proplist, "device.echo-cancel.name", source_data.name);
1618
1619 if (pa_modargs_get_proplist(ma, "source_properties", source_data.proplist, PA_UPDATE_REPLACE) < 0) {
1620 pa_log("Invalid properties");
1621 pa_source_new_data_done(&source_data);
1622 goto fail;
1623 }
1624
1625 if ((u->source_auto_desc = !pa_proplist_contains(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1626 const char *z;
1627
1628 z = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1629 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Source %s on %s", source_data.name, z ? z : source_master->name);
1630 }
1631
1632 u->source = pa_source_new(m->core, &source_data, (source_master->flags & (PA_SOURCE_LATENCY | PA_SOURCE_DYNAMIC_LATENCY))
1633 | (use_volume_sharing ? PA_SOURCE_SHARE_VOLUME_WITH_MASTER : 0));
1634 pa_source_new_data_done(&source_data);
1635
1636 if (!u->source) {
1637 pa_log("Failed to create source.");
1638 goto fail;
1639 }
1640
1641 u->source->parent.process_msg = source_process_msg_cb;
1642 u->source->set_state = source_set_state_cb;
1643 u->source->update_requested_latency = source_update_requested_latency_cb;
1644 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1645 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1646 if (!use_volume_sharing) {
1647 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1648 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1649 pa_source_enable_decibel_volume(u->source, TRUE);
1650 }
1651 u->source->userdata = u;
1652
1653 pa_source_set_asyncmsgq(u->source, source_master->asyncmsgq);
1654
1655 /* Create sink */
1656 pa_sink_new_data_init(&sink_data);
1657 sink_data.driver = __FILE__;
1658 sink_data.module = m;
1659 if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL))))
1660 sink_data.name = pa_sprintf_malloc("%s.echo-cancel", sink_master->name);
1661 pa_sink_new_data_set_sample_spec(&sink_data, &sink_ss);
1662 pa_sink_new_data_set_channel_map(&sink_data, &sink_map);
1663 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, sink_master->name);
1664 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1665 if (!u->autoloaded)
1666 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1667 pa_proplist_sets(sink_data.proplist, "device.echo-cancel.name", sink_data.name);
1668
1669 if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) {
1670 pa_log("Invalid properties");
1671 pa_sink_new_data_done(&sink_data);
1672 goto fail;
1673 }
1674
1675 if ((u->sink_auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1676 const char *z;
1677
1678 z = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1679 pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Sink %s on %s", sink_data.name, z ? z : sink_master->name);
1680 }
1681
1682 u->sink = pa_sink_new(m->core, &sink_data, (sink_master->flags & (PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY))
1683 | (use_volume_sharing ? PA_SINK_SHARE_VOLUME_WITH_MASTER : 0));
1684 pa_sink_new_data_done(&sink_data);
1685
1686 if (!u->sink) {
1687 pa_log("Failed to create sink.");
1688 goto fail;
1689 }
1690
1691 u->sink->parent.process_msg = sink_process_msg_cb;
1692 u->sink->set_state = sink_set_state_cb;
1693 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1694 u->sink->request_rewind = sink_request_rewind_cb;
1695 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1696 if (!use_volume_sharing) {
1697 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1698 pa_sink_enable_decibel_volume(u->sink, TRUE);
1699 }
1700 u->sink->userdata = u;
1701
1702 pa_sink_set_asyncmsgq(u->sink, sink_master->asyncmsgq);
1703
1704 /* Create source output */
1705 pa_source_output_new_data_init(&source_output_data);
1706 source_output_data.driver = __FILE__;
1707 source_output_data.module = m;
1708 pa_source_output_new_data_set_source(&source_output_data, source_master, FALSE);
1709 source_output_data.destination_source = u->source;
1710 /* FIXME
1711 source_output_data.flags = PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND; */
1712
1713 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Source Stream");
1714 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1715 pa_source_output_new_data_set_sample_spec(&source_output_data, &source_ss);
1716 pa_source_output_new_data_set_channel_map(&source_output_data, &source_map);
1717
1718 pa_source_output_new(&u->source_output, m->core, &source_output_data);
1719 pa_source_output_new_data_done(&source_output_data);
1720
1721 if (!u->source_output)
1722 goto fail;
1723
1724 u->source_output->parent.process_msg = source_output_process_msg_cb;
1725 u->source_output->push = source_output_push_cb;
1726 u->source_output->process_rewind = source_output_process_rewind_cb;
1727 u->source_output->update_max_rewind = source_output_update_max_rewind_cb;
1728 u->source_output->update_source_requested_latency = source_output_update_source_requested_latency_cb;
1729 u->source_output->update_source_latency_range = source_output_update_source_latency_range_cb;
1730 u->source_output->update_source_fixed_latency = source_output_update_source_fixed_latency_cb;
1731 u->source_output->kill = source_output_kill_cb;
1732 u->source_output->attach = source_output_attach_cb;
1733 u->source_output->detach = source_output_detach_cb;
1734 u->source_output->state_change = source_output_state_change_cb;
1735 u->source_output->may_move_to = source_output_may_move_to_cb;
1736 u->source_output->moving = source_output_moving_cb;
1737 u->source_output->userdata = u;
1738
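/* The source output is the stream that records from the master source;
 * its push callback feeds the captured signal into the canceller, and the
 * cleaned-up result is posted to the filter source created above. */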
1739 u->source->output_from_master = u->source_output;
1740
1741 /* Create sink input */
1742 pa_sink_input_new_data_init(&sink_input_data);
1743 sink_input_data.driver = __FILE__;
1744 sink_input_data.module = m;
1745 pa_sink_input_new_data_set_sink(&sink_input_data, sink_master, FALSE);
1746 sink_input_data.origin_sink = u->sink;
1747 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Sink Stream");
1748 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1749 pa_sink_input_new_data_set_sample_spec(&sink_input_data, &sink_ss);
1750 pa_sink_input_new_data_set_channel_map(&sink_input_data, &sink_map);
1751 sink_input_data.flags = PA_SINK_INPUT_VARIABLE_RATE;
1752
1753 pa_sink_input_new(&u->sink_input, m->core, &sink_input_data);
1754 pa_sink_input_new_data_done(&sink_input_data);
1755
1756 if (!u->sink_input)
1757 goto fail;
1758
1759 u->sink_input->parent.process_msg = sink_input_process_msg_cb;
1760 u->sink_input->pop = sink_input_pop_cb;
1761 u->sink_input->process_rewind = sink_input_process_rewind_cb;
1762 u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
1763 u->sink_input->update_max_request = sink_input_update_max_request_cb;
1764 u->sink_input->update_sink_requested_latency = sink_input_update_sink_requested_latency_cb;
1765 u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb;
1766 u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb;
1767 u->sink_input->kill = sink_input_kill_cb;
1768 u->sink_input->attach = sink_input_attach_cb;
1769 u->sink_input->detach = sink_input_detach_cb;
1770 u->sink_input->state_change = sink_input_state_change_cb;
1771 u->sink_input->may_move_to = sink_input_may_move_to_cb;
1772 u->sink_input->moving = sink_input_moving_cb;
1773 if (!use_volume_sharing)
1774 u->sink_input->volume_changed = sink_input_volume_changed_cb;
1775 u->sink_input->mute_changed = sink_input_mute_changed_cb;
1776 u->sink_input->userdata = u;
1777
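/* The sink input is the stream that plays to the master sink; its pop
 * callback renders audio from the filter sink and keeps a copy of the
 * played data as the reference signal for the canceller. */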
1778 u->sink->input_to_master = u->sink_input;
1779
1780 pa_sink_input_get_silence(u->sink_input, &silence);
1781
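/* Two queues buffer the captured and the played (reference) samples until
 * the canceller can consume matching blocks of both. */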
1782 u->source_memblockq = pa_memblockq_new("module-echo-cancel source_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1783 &source_ss, 1, 1, 0, &silence);
1784 u->sink_memblockq = pa_memblockq_new("module-echo-cancel sink_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1785 &sink_ss, 1, 1, 0, &silence);
1786
1787 pa_memblock_unref(silence.memblock);
1788
1789 if (!u->source_memblockq || !u->sink_memblockq) {
1790 pa_log("Failed to create memblockq.");
1791 goto fail;
1792 }
1793
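/* If the canceller does its own drift compensation, skip the module's
 * periodic rate adjustment and only request one initial resync; otherwise
 * arm the readjustment timer. */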
1794 if (u->adjust_time > 0 && !u->ec->params.drift_compensation)
1795 u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time, time_callback, u);
1796 else if (u->ec->params.drift_compensation) {
1797 pa_log_info("Canceller does drift compensation -- built-in compensation will be disabled");
1798 u->adjust_time = 0;
1799 /* Perform resync just once to give the canceller a leg up */
1800 pa_atomic_store(&u->request_resync, 1);
1801 }
1802
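/* Debugging aid: dump the captured, played and cancelled streams as raw,
 * headerless sample data for offline inspection (for example by importing
 * them into a sound editor with the format specified manually). */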
1803 if (u->save_aec) {
1804 pa_log("Creating AEC files in /tmp");
1805 u->captured_file = fopen("/tmp/aec_rec.sw", "wb");
1806 if (u->captured_file == NULL)
1807 perror("fopen failed");
1808 u->played_file = fopen("/tmp/aec_play.sw", "wb");
1809 if (u->played_file == NULL)
1810 perror("fopen failed");
1811 u->canceled_file = fopen("/tmp/aec_out.sw", "wb");
1812 if (u->canceled_file == NULL)
1813 perror("fopen failed");
1814 }
1815
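/* Publish the new devices first, then start the streams that connect them
 * to their masters. */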
1816 pa_sink_put(u->sink);
1817 pa_source_put(u->source);
1818
1819 pa_sink_input_put(u->sink_input);
1820 pa_source_output_put(u->source_output);
1821
1822 pa_modargs_free(ma);
1823
1824 return 0;
1825
1826 fail:
1827 if (ma)
1828 pa_modargs_free(ma);
1829
1830 pa__done(m);
1831
1832 return -1;
1833 }
1834
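/* Count the streams connected to the filter sink and source so the core
 * does not auto-unload the module while either is still in use. */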
1835 int pa__get_n_used(pa_module *m) {
1836 struct userdata *u;
1837
1838 pa_assert(m);
1839 pa_assert_se(u = m->userdata);
1840
1841 return pa_sink_linked_by(u->sink) + pa_source_linked_by(u->source);
1842 }
1843
1844 void pa__done(pa_module*m) {
1845 struct userdata *u;
1846
1847 pa_assert(m);
1848
1849 if (!(u = m->userdata))
1850 return;
1851
1852 u->dead = TRUE;
1853
1854 /* See comments in source_output_kill_cb() above regarding
1855 * destruction order! */
1856
1857 if (u->time_event)
1858 u->core->mainloop->time_free(u->time_event);
1859
1860 if (u->source_output)
1861 pa_source_output_unlink(u->source_output);
1862 if (u->sink_input)
1863 pa_sink_input_unlink(u->sink_input);
1864
1865 if (u->source)
1866 pa_source_unlink(u->source);
1867 if (u->sink)
1868 pa_sink_unlink(u->sink);
1869
1870 if (u->source_output)
1871 pa_source_output_unref(u->source_output);
1872 if (u->sink_input)
1873 pa_sink_input_unref(u->sink_input);
1874
1875 if (u->source)
1876 pa_source_unref(u->source);
1877 if (u->sink)
1878 pa_sink_unref(u->sink);
1879
1880 if (u->source_memblockq)
1881 pa_memblockq_free(u->source_memblockq);
1882 if (u->sink_memblockq)
1883 pa_memblockq_free(u->sink_memblockq);
1884
1885 if (u->ec) {
1886 if (u->ec->done)
1887 u->ec->done(u->ec);
1888
1889 pa_xfree(u->ec);
1890 }
1891
1892 if (u->asyncmsgq)
1893 pa_asyncmsgq_unref(u->asyncmsgq);
1894
1895 if (u->save_aec) {
1896 if (u->played_file)
1897 fclose(u->played_file);
1898 if (u->captured_file)
1899 fclose(u->captured_file);
1900 if (u->canceled_file)
1901 fclose(u->canceled_file);
1902 }
1903
1904 pa_xfree(u);
1905 }
1906
1907 #ifdef ECHO_CANCEL_TEST
1908 /*
1909 * Stand-alone test program for running the canceller on pre-recorded files.
1910 */
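/*
 * Example invocation, matching the usage string printed below (the binary
 * and file names here are only illustrative):
 *
 *   echo-cancel-test played.sw captured.sw cancelled.sw aec_method=speex
 *
 * play_file and rec_file are read as headerless raw sample data and must
 * match the sample spec the canceller is initialized with.
 */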
1911 int main(int argc, char* argv[]) {
1912 struct userdata u;
1913 pa_sample_spec source_ss, sink_ss;
1914 pa_channel_map source_map, sink_map;
1915 pa_modargs *ma = NULL;
1916 uint8_t *rdata = NULL, *pdata = NULL, *cdata = NULL;
1917 int ret = 0, unused;
1918
1919 pa_memzero(&u, sizeof(u));
1920
1921 if (argc < 4 || argc > 6) {
1922 goto usage;
1923 }
1924
1925 u.ec = pa_xnew0(pa_echo_canceller, 1);
1926 if (!u.ec) {
1927 pa_log("Failed to alloc echo canceller");
1928 goto fail;
1929 }
1930
1931 u.captured_file = fopen(argv[2], "rb");
1932 if (u.captured_file == NULL) {
1933 perror("fopen failed");
1934 goto fail;
1935 }
1936 u.played_file = fopen(argv[1], "rb");
1937 if (u.played_file == NULL) {
1938 perror("fopen failed");
1939 goto fail;
1940 }
1941 u.canceled_file = fopen(argv[3], "wb");
1942 if (u.canceled_file == NULL) {
1943 perror("fopen failed");
1944 goto fail;
1945 }
1946
1947 u.core = pa_xnew0(pa_core, 1);
1948 u.core->cpu_info.cpu_type = PA_CPU_X86;
1949 u.core->cpu_info.flags.x86 |= PA_CPU_X86_SSE;
1950
1951 if (!(ma = pa_modargs_new(argc > 4 ? argv[4] : NULL, valid_modargs))) {
1952 pa_log("Failed to parse module arguments.");
1953 goto fail;
1954 }
1955
1956 source_ss.format = PA_SAMPLE_S16LE;
1957 source_ss.rate = DEFAULT_RATE;
1958 source_ss.channels = DEFAULT_CHANNELS;
1959 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1960
1961 init_common(ma, &u, &source_ss, &source_map);
1962
1963 if (!u.ec->init(u.core, u.ec, &source_ss, &source_map, &sink_ss, &sink_map, &u.blocksize,
1964 (argc > 5) ? argv[5] : NULL)) {
1965 pa_log("Failed to init AEC engine");
1966 goto fail;
1967 }
1968
1969 rdata = pa_xmalloc(u.blocksize);
1970 pdata = pa_xmalloc(u.blocksize);
1971 cdata = pa_xmalloc(u.blocksize);
1972
1973 while (fread(rdata, u.blocksize, 1, u.captured_file) > 0) {
1974 if (fread(pdata, u.blocksize, 1, u.played_file) == 0) {
1975 perror("played file ended before captured file");
1976 break;
1977 }
1978
1979 u.ec->run(u.ec, rdata, pdata, cdata);
1980
1981 unused = fwrite(cdata, u.blocksize, 1, u.canceled_file);
1982 }
1983
1984 u.ec->done(u.ec);
1985
1986 fclose(u.captured_file);
1987 fclose(u.played_file);
1988 fclose(u.canceled_file);
1989
1990 out:
1991 pa_xfree(rdata);
1992 pa_xfree(pdata);
1993 pa_xfree(cdata);
1994
1995 pa_xfree(u.ec);
1996 pa_xfree(u.core);
1997
1998 if (ma)
1999 pa_modargs_free(ma);
2000
2001 return ret;
2002
2003 usage:
2004 pa_log("Usage: %s play_file rec_file out_file [module args] [aec_args]",argv[0]);
2005
2006 fail:
2007 ret = -1;
2008 goto out;
2009 }
2010 #endif /* ECHO_CANCEL_TEST */