From: Arun Raghavan <git@xxxxxxxxxxxxxxxx>

Makes this part of the code just a little less verbose.
---
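(Illustrative aside, not part of the commit message.) The change below drops the
"priv" name from the union inside pa_echo_canceller_params, making it an
anonymous union so callers can write ec->params.speex.state instead of
ec->params.priv.speex.state. A minimal, self-contained sketch of the idea,
using hypothetical struct and member names rather than the real ones from
echo-cancel.h:

    /* Named union member: every access needs the extra "priv" component. */
    struct named_params {
        union {
            struct { int blocksize; } adrian;
        } priv;
    };

    /* Anonymous union (C11, long available as a GCC extension): its members
     * are accessed as if they belonged to the containing struct. */
    struct anon_params {
        union {
            struct { int blocksize; } adrian;
        };
    };

    int main(void) {
        struct named_params a = { .priv.adrian.blocksize = 512 };
        struct anon_params  b = { .adrian.blocksize = 512 };
        return a.priv.adrian.blocksize - b.adrian.blocksize; /* always 0 */
    }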
 src/modules/echo-cancel/adrian.c      | 18 +++++-----
 src/modules/echo-cancel/echo-cancel.h |  3 +-
 src/modules/echo-cancel/null.c        |  4 +--
 src/modules/echo-cancel/speex.c       | 50 +++++++++++++-------------
 src/modules/echo-cancel/webrtc.cc     | 66 +++++++++++++++++------------------
 5 files changed, 71 insertions(+), 70 deletions(-)

diff --git a/src/modules/echo-cancel/adrian.c b/src/modules/echo-cancel/adrian.c
index 60a2b66..3c47fae 100644
--- a/src/modules/echo-cancel/adrian.c
+++ b/src/modules/echo-cancel/adrian.c
@@ -78,16 +78,16 @@ bool pa_adrian_ec_init(pa_core *c, pa_echo_canceller *ec,
 
     rate = out_ss->rate;
     *nframes = (rate * frame_size_ms) / 1000;
-    ec->params.priv.adrian.blocksize = (*nframes) * pa_frame_size(out_ss);
+    ec->params.adrian.blocksize = (*nframes) * pa_frame_size(out_ss);
 
-    pa_log_debug ("Using nframes %d, blocksize %u, channels %d, rate %d", *nframes, ec->params.priv.adrian.blocksize, out_ss->channels, out_ss->rate);
+    pa_log_debug ("Using nframes %d, blocksize %u, channels %d, rate %d", *nframes, ec->params.adrian.blocksize, out_ss->channels, out_ss->rate);
 
     /* For now we only support SSE */
     if (c->cpu_info.cpu_type == PA_CPU_X86 && (c->cpu_info.flags.x86 & PA_CPU_X86_SSE))
         have_vector = 1;
 
-    ec->params.priv.adrian.aec = AEC_init(rate, have_vector);
-    if (!ec->params.priv.adrian.aec)
+    ec->params.adrian.aec = AEC_init(rate, have_vector);
+    if (!ec->params.adrian.aec)
         goto fail;
 
     pa_modargs_free(ma);
@@ -102,17 +102,17 @@ fail:
 void pa_adrian_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) {
     unsigned int i;
 
-    for (i = 0; i < ec->params.priv.adrian.blocksize; i += 2) {
+    for (i = 0; i < ec->params.adrian.blocksize; i += 2) {
         /* We know it's S16NE mono data */
         int r = *(int16_t *)(rec + i);
         int p = *(int16_t *)(play + i);
-        *(int16_t *)(out + i) = (int16_t) AEC_doAEC(ec->params.priv.adrian.aec, r, p);
+        *(int16_t *)(out + i) = (int16_t) AEC_doAEC(ec->params.adrian.aec, r, p);
     }
 }
 
 void pa_adrian_ec_done(pa_echo_canceller *ec) {
-    if (ec->params.priv.adrian.aec) {
-        AEC_done(ec->params.priv.adrian.aec);
-        ec->params.priv.adrian.aec = NULL;
+    if (ec->params.adrian.aec) {
+        AEC_done(ec->params.adrian.aec);
+        ec->params.adrian.aec = NULL;
     }
 }
diff --git a/src/modules/echo-cancel/echo-cancel.h b/src/modules/echo-cancel/echo-cancel.h
index cc554d5..a38b8f8 100644
--- a/src/modules/echo-cancel/echo-cancel.h
+++ b/src/modules/echo-cancel/echo-cancel.h
@@ -69,10 +69,11 @@ struct pa_echo_canceller_params {
             void *trace_callback;
             bool agc;
             bool first;
+            unsigned int agc_start_volume;
         } webrtc;
 #endif
         /* each canceller-specific structure goes here */
-    } priv;
+    };
 
     /* Set this if canceller can do drift compensation. Also see set_drift()
      * below */
diff --git a/src/modules/echo-cancel/null.c b/src/modules/echo-cancel/null.c
index 673b14f..c8ecf27 100644
--- a/src/modules/echo-cancel/null.c
+++ b/src/modules/echo-cancel/null.c
@@ -34,7 +34,7 @@ bool pa_null_ec_init(pa_core *c, pa_echo_canceller *ec,
     char strss_sink[PA_SAMPLE_SPEC_SNPRINT_MAX];
 
     *nframes = 256;
-    ec->params.priv.null.out_ss = *out_ss;
+    ec->params.null.out_ss = *out_ss;
 
     *rec_ss = *out_ss;
     *rec_map = *out_map;
@@ -49,7 +49,7 @@ bool pa_null_ec_init(pa_core *c, pa_echo_canceller *ec,
 
 void pa_null_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) {
     /* The null implementation simply copies the recorded buffer to the output buffer and ignores the play buffer. */
-    memcpy(out, rec, 256 * pa_frame_size(&ec->params.priv.null.out_ss));
+    memcpy(out, rec, 256 * pa_frame_size(&ec->params.null.out_ss));
 }
 
 void pa_null_ec_done(pa_echo_canceller *ec) {
diff --git a/src/modules/echo-cancel/speex.c b/src/modules/echo-cancel/speex.c
index 11e53b3..08c1027 100644
--- a/src/modules/echo-cancel/speex.c
+++ b/src/modules/echo-cancel/speex.c
@@ -111,26 +111,26 @@ static bool pa_speex_ec_preprocessor_init(pa_echo_canceller *ec, pa_sample_spec
         goto fail;
     }
 
-    ec->params.priv.speex.pp_state = speex_preprocess_state_init(nframes, out_ss->rate);
+    ec->params.speex.pp_state = speex_preprocess_state_init(nframes, out_ss->rate);
 
     tmp = agc;
-    speex_preprocess_ctl(ec->params.priv.speex.pp_state, SPEEX_PREPROCESS_SET_AGC, &tmp);
+    speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_AGC, &tmp);
 
     tmp = denoise;
-    speex_preprocess_ctl(ec->params.priv.speex.pp_state, SPEEX_PREPROCESS_SET_DENOISE, &tmp);
+    speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_DENOISE, &tmp);
 
     if (echo_suppress) {
         if (echo_suppress_attenuation)
-            speex_preprocess_ctl(ec->params.priv.speex.pp_state, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS,
+            speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS,
                                  &echo_suppress_attenuation);
 
         if (echo_suppress_attenuation_active) {
-            speex_preprocess_ctl(ec->params.priv.speex.pp_state, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS_ACTIVE,
+            speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS_ACTIVE,
                                  &echo_suppress_attenuation_active);
         }
 
-        speex_preprocess_ctl(ec->params.priv.speex.pp_state, SPEEX_PREPROCESS_SET_ECHO_STATE,
-                             ec->params.priv.speex.state);
+        speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_ECHO_STATE,
+                             ec->params.speex.state);
     }
 
     pa_log_info("Loaded speex preprocessor with params: agc=%s, denoise=%s, echo_suppress=%s", pa_yes_no(agc),
@@ -176,12 +176,12 @@ bool pa_speex_ec_init(pa_core *c, pa_echo_canceller *ec,
     *nframes = pa_echo_canceller_blocksize_power2(rate, frame_size_ms);
     pa_log_debug ("Using nframes %d, channels %d, rate %d", *nframes, out_ss->channels, out_ss->rate);
 
-    ec->params.priv.speex.state = speex_echo_state_init_mc(*nframes, (rate * filter_size_ms) / 1000, out_ss->channels, out_ss->channels);
+    ec->params.speex.state = speex_echo_state_init_mc(*nframes, (rate * filter_size_ms) / 1000, out_ss->channels, out_ss->channels);
 
-    if (!ec->params.priv.speex.state)
+    if (!ec->params.speex.state)
         goto fail;
 
-    speex_echo_ctl(ec->params.priv.speex.state, SPEEX_ECHO_SET_SAMPLING_RATE, &rate);
+    speex_echo_ctl(ec->params.speex.state, SPEEX_ECHO_SET_SAMPLING_RATE, &rate);
 
     if (!pa_speex_ec_preprocessor_init(ec, out_ss, *nframes, ma))
         goto fail;
@@ -192,34 +192,34 @@ bool pa_speex_ec_init(pa_core *c, pa_echo_canceller *ec,
 fail:
     if (ma)
         pa_modargs_free(ma);
-    if (ec->params.priv.speex.pp_state) {
-        speex_preprocess_state_destroy(ec->params.priv.speex.pp_state);
-        ec->params.priv.speex.pp_state = NULL;
+    if (ec->params.speex.pp_state) {
+        speex_preprocess_state_destroy(ec->params.speex.pp_state);
+        ec->params.speex.pp_state = NULL;
     }
-    if (ec->params.priv.speex.state) {
-        speex_echo_state_destroy(ec->params.priv.speex.state);
-        ec->params.priv.speex.state = NULL;
+    if (ec->params.speex.state) {
+        speex_echo_state_destroy(ec->params.speex.state);
+        ec->params.speex.state = NULL;
     }
     return false;
 }
 
 void pa_speex_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) {
-    speex_echo_cancellation(ec->params.priv.speex.state, (const spx_int16_t *) rec, (const spx_int16_t *) play,
+    speex_echo_cancellation(ec->params.speex.state, (const spx_int16_t *) rec, (const spx_int16_t *) play,
                             (spx_int16_t *) out);
 
     /* preprecessor is run after AEC. This is not a mistake! */
-    if (ec->params.priv.speex.pp_state)
-        speex_preprocess_run(ec->params.priv.speex.pp_state, (spx_int16_t *) out);
+    if (ec->params.speex.pp_state)
+        speex_preprocess_run(ec->params.speex.pp_state, (spx_int16_t *) out);
 }
 
 void pa_speex_ec_done(pa_echo_canceller *ec) {
-    if (ec->params.priv.speex.pp_state) {
-        speex_preprocess_state_destroy(ec->params.priv.speex.pp_state);
-        ec->params.priv.speex.pp_state = NULL;
+    if (ec->params.speex.pp_state) {
+        speex_preprocess_state_destroy(ec->params.speex.pp_state);
+        ec->params.speex.pp_state = NULL;
     }
 
-    if (ec->params.priv.speex.state) {
-        speex_echo_state_destroy(ec->params.priv.speex.state);
-        ec->params.priv.speex.state = NULL;
+    if (ec->params.speex.state) {
+        speex_echo_state_destroy(ec->params.speex.state);
+        ec->params.speex.state = NULL;
     }
 }
diff --git a/src/modules/echo-cancel/webrtc.cc b/src/modules/echo-cancel/webrtc.cc
index 768f631..6f15d00 100644
--- a/src/modules/echo-cancel/webrtc.cc
+++ b/src/modules/echo-cancel/webrtc.cc
@@ -259,8 +259,8 @@ bool pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
     if (trace) {
         webrtc::Trace::CreateTrace();
         webrtc::Trace::set_level_filter(webrtc::kTraceAll);
-        ec->params.priv.webrtc.trace_callback = new PaWebrtcTraceCallback();
-        webrtc::Trace::SetTraceCallback((PaWebrtcTraceCallback *) ec->params.priv.webrtc.trace_callback);
+        ec->params.webrtc.trace_callback = new PaWebrtcTraceCallback();
+        webrtc::Trace::SetTraceCallback((PaWebrtcTraceCallback *) ec->params.webrtc.trace_callback);
     }
 
     pa_webrtc_ec_fixate_spec(rec_ss, rec_map, play_ss, play_map, out_ss, out_map);
@@ -296,17 +296,17 @@ bool pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
         if (mobile && rm <= webrtc::EchoControlMobile::kEarpiece) {
             /* Maybe this should be a knob, but we've got a lot of knobs already */
             apm->gain_control()->set_mode(webrtc::GainControl::kFixedDigital);
-            ec->params.priv.webrtc.agc = false;
+            ec->params.webrtc.agc = false;
         } else if (dgc) {
             apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveDigital);
-            ec->params.priv.webrtc.agc = false;
+            ec->params.webrtc.agc = false;
         } else {
             apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveAnalog);
             if (apm->gain_control()->set_analog_level_limits(0, WEBRTC_AGC_MAX_VOLUME) != apm->kNoError) {
                 pa_log("Failed to initialise AGC");
                 goto fail;
             }
-            ec->params.priv.webrtc.agc = true;
+            ec->params.webrtc.agc = true;
         }
 
         apm->gain_control()->Enable(true);
@@ -315,11 +315,11 @@ bool pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
     if (vad)
         apm->voice_detection()->Enable(true);
 
-    ec->params.priv.webrtc.apm = apm;
-    ec->params.priv.webrtc.sample_spec = *out_ss;
-    ec->params.priv.webrtc.blocksize = (uint64_t)pa_bytes_per_second(out_ss) * BLOCK_SIZE_US / PA_USEC_PER_SEC;
-    *nframes = ec->params.priv.webrtc.blocksize / pa_frame_size(out_ss);
-    ec->params.priv.webrtc.first = true;
+    ec->params.webrtc.apm = apm;
+    ec->params.webrtc.sample_spec = *out_ss;
+    ec->params.webrtc.blocksize = (uint64_t)pa_bytes_per_second(out_ss) * BLOCK_SIZE_US / PA_USEC_PER_SEC;
+    *nframes = ec->params.webrtc.blocksize / pa_frame_size(out_ss);
+    ec->params.webrtc.first = true;
 
     pa_modargs_free(ma);
     return true;
@@ -327,9 +327,9 @@ fail:
     if (ma)
         pa_modargs_free(ma);
 
-    if (ec->params.priv.webrtc.trace_callback) {
+    if (ec->params.webrtc.trace_callback) {
         webrtc::Trace::ReturnTrace();
-        delete ((PaWebrtcTraceCallback *) ec->params.priv.webrtc.trace_callback);
+        delete ((PaWebrtcTraceCallback *) ec->params.webrtc.trace_callback);
     }
     if (apm)
         delete apm;
@@ -337,37 +337,37 @@ fail:
 void pa_webrtc_ec_play(pa_echo_canceller *ec, const uint8_t *play) {
-    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
+    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.webrtc.apm;
     webrtc::AudioFrame play_frame;
-    const pa_sample_spec *ss = &ec->params.priv.webrtc.sample_spec;
+    const pa_sample_spec *ss = &ec->params.webrtc.sample_spec;
 
     play_frame.num_channels_ = ss->channels;
     play_frame.sample_rate_hz_ = ss->rate;
     play_frame.interleaved_ = true;
-    play_frame.samples_per_channel_ = ec->params.priv.webrtc.blocksize / pa_frame_size(ss);
+    play_frame.samples_per_channel_ = ec->params.webrtc.blocksize / pa_frame_size(ss);
 
     pa_assert(play_frame.samples_per_channel_ <= webrtc::AudioFrame::kMaxDataSizeSamples);
 
-    memcpy(play_frame.data_, play, ec->params.priv.webrtc.blocksize);
+    memcpy(play_frame.data_, play, ec->params.webrtc.blocksize);
 
     apm->ProcessReverseStream(&play_frame);
 }
 
 void pa_webrtc_ec_record(pa_echo_canceller *ec, const uint8_t *rec, uint8_t *out) {
-    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
+    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.webrtc.apm;
     webrtc::AudioFrame out_frame;
-    const pa_sample_spec *ss = &ec->params.priv.webrtc.sample_spec;
+    const pa_sample_spec *ss = &ec->params.webrtc.sample_spec;
     pa_cvolume v;
     int old_volume, new_volume;
 
     out_frame.num_channels_ = ss->channels;
     out_frame.sample_rate_hz_ = ss->rate;
     out_frame.interleaved_ = true;
-    out_frame.samples_per_channel_ = ec->params.priv.webrtc.blocksize / pa_frame_size(ss);
+    out_frame.samples_per_channel_ = ec->params.webrtc.blocksize / pa_frame_size(ss);
 
     pa_assert(out_frame.samples_per_channel_ <= webrtc::AudioFrame::kMaxDataSizeSamples);
 
-    memcpy(out_frame.data_, rec, ec->params.priv.webrtc.blocksize);
+    memcpy(out_frame.data_, rec, ec->params.webrtc.blocksize);
 
-    if (ec->params.priv.webrtc.agc) {
+    if (ec->params.webrtc.agc) {
         pa_cvolume_init(&v);
         pa_echo_canceller_get_capture_volume(ec, &v);
         old_volume = webrtc_volume_from_pa(pa_cvolume_avg(&v));
@@ -377,13 +377,13 @@ void pa_webrtc_ec_record(pa_echo_canceller *ec, const uint8_t *rec, uint8_t *out
     apm->set_stream_delay_ms(0);
     apm->ProcessStream(&out_frame);
 
-    if (ec->params.priv.webrtc.agc) {
-        if (PA_UNLIKELY(ec->params.priv.webrtc.first)) {
+    if (ec->params.webrtc.agc) {
+        if (PA_UNLIKELY(ec->params.webrtc.first)) {
             /* We start at a sane default volume (taken from the Chromium
              * condition on the experimental AGC in audio_processing.h). This is
              * needed to make sure that there's enough energy in the capture
             * signal for the AGC to work */
-            ec->params.priv.webrtc.first = false;
+            ec->params.webrtc.first = false;
             new_volume = WEBRTC_AGC_START_VOLUME;
         } else {
             new_volume = apm->gain_control()->stream_analog_level();
@@ -395,14 +395,14 @@ void pa_webrtc_ec_record(pa_echo_canceller *ec, const uint8_t *rec, uint8_t *out
         }
     }
 
-    memcpy(out, out_frame.data_, ec->params.priv.webrtc.blocksize);
+    memcpy(out, out_frame.data_, ec->params.webrtc.blocksize);
 }
 
 void pa_webrtc_ec_set_drift(pa_echo_canceller *ec, float drift) {
-    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
-    const pa_sample_spec *ss = &ec->params.priv.webrtc.sample_spec;
+    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.webrtc.apm;
+    const pa_sample_spec *ss = &ec->params.webrtc.sample_spec;
 
-    apm->echo_cancellation()->set_stream_drift_samples(drift * ec->params.priv.webrtc.blocksize / pa_frame_size(ss));
+    apm->echo_cancellation()->set_stream_drift_samples(drift * ec->params.webrtc.blocksize / pa_frame_size(ss));
 }
 
 void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) {
@@ -411,13 +411,13 @@ void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *
 }
 
 void pa_webrtc_ec_done(pa_echo_canceller *ec) {
-    if (ec->params.priv.webrtc.trace_callback) {
+    if (ec->params.webrtc.trace_callback) {
         webrtc::Trace::ReturnTrace();
-        delete ((PaWebrtcTraceCallback *) ec->params.priv.webrtc.trace_callback);
+        delete ((PaWebrtcTraceCallback *) ec->params.webrtc.trace_callback);
     }
 
-    if (ec->params.priv.webrtc.apm) {
-        delete (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
-        ec->params.priv.webrtc.apm = NULL;
+    if (ec->params.webrtc.apm) {
+        delete (webrtc::AudioProcessing*)ec->params.webrtc.apm;
+        ec->params.webrtc.apm = NULL;
     }
 }
-- 
2.5.0