Reduce complexity in the APM pipeline when the output is not used

This CL selectively turns off parts of the audio processing when
the output of APM is not used. The parts turned off are those that
do not need to be continuously trained, but rather can be
temporarily deactivated.

The purpose of this CL is to allow CPU usage to be reduced when the
client is muted.

This CL will be followed by additional CLs adding similar
functionality in the echo canceller and the noise suppressor.

Bug: b/177830919
Change-Id: I72d24505197a53872562c0955f3e7b670c43df6b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/209703
Commit-Queue: Per Åhgren <peah@webrtc.org>
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#33431}
This commit is contained in:
Per Åhgren 2021-03-11 08:57:07 +00:00 committed by Commit Bot
parent 54dbc3be3f
commit aa6adffba3
2 changed files with 109 additions and 94 deletions

View file

@ -115,6 +115,10 @@ GainControl::Mode Agc1ConfigModeToInterfaceMode(
RTC_CHECK_NOTREACHED(); RTC_CHECK_NOTREACHED();
} }
bool MinimizeProcessingForUnusedOutput() {
return !field_trial::IsEnabled("WebRTC-MutedStateKillSwitch");
}
// Maximum lengths that frame of samples being passed from the render side to // Maximum lengths that frame of samples being passed from the render side to
// the capture side can have (does not apply to AEC3). // the capture side can have (does not apply to AEC3).
static const size_t kMaxAllowedValuesOfSamplesPerBand = 160; static const size_t kMaxAllowedValuesOfSamplesPerBand = 160;
@ -267,7 +271,9 @@ AudioProcessingImpl::AudioProcessingImpl(
"WebRTC-ApmExperimentalMultiChannelRenderKillSwitch"), "WebRTC-ApmExperimentalMultiChannelRenderKillSwitch"),
!field_trial::IsEnabled( !field_trial::IsEnabled(
"WebRTC-ApmExperimentalMultiChannelCaptureKillSwitch"), "WebRTC-ApmExperimentalMultiChannelCaptureKillSwitch"),
EnforceSplitBandHpf()), EnforceSplitBandHpf(),
MinimizeProcessingForUnusedOutput()),
capture_(),
capture_nonlocked_() { capture_nonlocked_() {
RTC_LOG(LS_INFO) << "Injected APM submodules:" RTC_LOG(LS_INFO) << "Injected APM submodules:"
"\nEcho control factory: " "\nEcho control factory: "
@ -667,7 +673,9 @@ void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
void AudioProcessingImpl::HandleCaptureOutputUsedSetting( void AudioProcessingImpl::HandleCaptureOutputUsedSetting(
bool capture_output_used) { bool capture_output_used) {
capture_.capture_output_used = capture_output_used; capture_.capture_output_used =
capture_output_used || !constants_.minimize_processing_for_unused_output;
if (submodules_.agc_manager.get()) { if (submodules_.agc_manager.get()) {
submodules_.agc_manager->HandleCaptureOutputUsedChange( submodules_.agc_manager->HandleCaptureOutputUsedChange(
capture_.capture_output_used); capture_.capture_output_used);
@ -874,11 +882,7 @@ void AudioProcessingImpl::HandleCaptureRuntimeSettings() {
void AudioProcessingImpl::HandleOverrunInCaptureRuntimeSettingsQueue() { void AudioProcessingImpl::HandleOverrunInCaptureRuntimeSettingsQueue() {
// Fall back to a safe state for the case when a setting for capture output // Fall back to a safe state for the case when a setting for capture output
// usage setting has been missed. // usage setting has been missed.
capture_.capture_output_used = true; HandleCaptureOutputUsedSetting(/*capture_output_used=*/true);
if (submodules_.echo_controller) {
submodules_.echo_controller->SetCaptureOutputUsage(
capture_.capture_output_used);
}
} }
void AudioProcessingImpl::HandleRenderRuntimeSettings() { void AudioProcessingImpl::HandleRenderRuntimeSettings() {
@ -1226,6 +1230,8 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() {
capture_buffer, /*stream_has_echo*/ false)); capture_buffer, /*stream_has_echo*/ false));
} }
capture_.stats.output_rms_dbfs = absl::nullopt;
if (capture_.capture_output_used) {
if (submodule_states_.CaptureMultiBandProcessingPresent() && if (submodule_states_.CaptureMultiBandProcessingPresent() &&
SampleRateSupportsMultiBand( SampleRateSupportsMultiBand(
capture_nonlocked_.capture_processing_format.sample_rate_hz())) { capture_nonlocked_.capture_processing_format.sample_rate_hz())) {
@ -1245,14 +1251,16 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() {
if (config_.residual_echo_detector.enabled) { if (config_.residual_echo_detector.enabled) {
RTC_DCHECK(submodules_.echo_detector); RTC_DCHECK(submodules_.echo_detector);
submodules_.echo_detector->AnalyzeCaptureAudio(rtc::ArrayView<const float>( submodules_.echo_detector->AnalyzeCaptureAudio(
capture_buffer->channels()[0], capture_buffer->num_frames())); rtc::ArrayView<const float>(capture_buffer->channels()[0],
capture_buffer->num_frames()));
} }
// TODO(aluebs): Investigate if the transient suppression placement should be // TODO(aluebs): Investigate if the transient suppression placement should
// before or after the AGC. // be before or after the AGC.
if (submodules_.transient_suppressor) { if (submodules_.transient_suppressor) {
float voice_probability = submodules_.agc_manager.get() float voice_probability =
submodules_.agc_manager.get()
? submodules_.agc_manager->voice_probability() ? submodules_.agc_manager->voice_probability()
: 1.f; : 1.f;
@ -1284,9 +1292,8 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() {
// The level estimator operates on the recombined data. // The level estimator operates on the recombined data.
if (config_.level_estimation.enabled) { if (config_.level_estimation.enabled) {
submodules_.output_level_estimator->ProcessStream(*capture_buffer); submodules_.output_level_estimator->ProcessStream(*capture_buffer);
capture_.stats.output_rms_dbfs = submodules_.output_level_estimator->RMS(); capture_.stats.output_rms_dbfs =
} else { submodules_.output_level_estimator->RMS();
capture_.stats.output_rms_dbfs = absl::nullopt;
} }
capture_output_rms_.Analyze(rtc::ArrayView<const float>( capture_output_rms_.Analyze(rtc::ArrayView<const float>(
@ -1294,8 +1301,9 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() {
capture_nonlocked_.capture_processing_format.num_frames())); capture_nonlocked_.capture_processing_format.num_frames()));
if (log_rms) { if (log_rms) {
RmsLevel::Levels levels = capture_output_rms_.AverageAndPeak(); RmsLevel::Levels levels = capture_output_rms_.AverageAndPeak();
RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureOutputLevelAverageRms", RTC_HISTOGRAM_COUNTS_LINEAR(
levels.average, 1, RmsLevel::kMinLevelDb, 64); "WebRTC.Audio.ApmCaptureOutputLevelAverageRms", levels.average, 1,
RmsLevel::kMinLevelDb, 64);
RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureOutputLevelPeakRms", RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureOutputLevelPeakRms",
levels.peak, 1, RmsLevel::kMinLevelDb, 64); levels.peak, 1, RmsLevel::kMinLevelDb, 64);
} }
@ -1306,14 +1314,7 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() {
&level); &level);
} }
// Compute echo-related stats. // Compute echo-detector stats.
if (submodules_.echo_controller) {
auto ec_metrics = submodules_.echo_controller->GetMetrics();
capture_.stats.echo_return_loss = ec_metrics.echo_return_loss;
capture_.stats.echo_return_loss_enhancement =
ec_metrics.echo_return_loss_enhancement;
capture_.stats.delay_ms = ec_metrics.delay_ms;
}
if (config_.residual_echo_detector.enabled) { if (config_.residual_echo_detector.enabled) {
RTC_DCHECK(submodules_.echo_detector); RTC_DCHECK(submodules_.echo_detector);
auto ed_metrics = submodules_.echo_detector->GetMetrics(); auto ed_metrics = submodules_.echo_detector->GetMetrics();
@ -1321,6 +1322,16 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() {
capture_.stats.residual_echo_likelihood_recent_max = capture_.stats.residual_echo_likelihood_recent_max =
ed_metrics.echo_likelihood_recent_max; ed_metrics.echo_likelihood_recent_max;
} }
}
// Compute echo-controller stats.
if (submodules_.echo_controller) {
auto ec_metrics = submodules_.echo_controller->GetMetrics();
capture_.stats.echo_return_loss = ec_metrics.echo_return_loss;
capture_.stats.echo_return_loss_enhancement =
ec_metrics.echo_return_loss_enhancement;
capture_.stats.delay_ms = ec_metrics.delay_ms;
}
// Pass stats for reporting. // Pass stats for reporting.
stats_reporter_.UpdateStatistics(capture_.stats); stats_reporter_.UpdateStatistics(capture_.stats);

View file

@ -419,13 +419,17 @@ class AudioProcessingImpl : public AudioProcessing {
const struct ApmConstants { const struct ApmConstants {
ApmConstants(bool multi_channel_render_support, ApmConstants(bool multi_channel_render_support,
bool multi_channel_capture_support, bool multi_channel_capture_support,
bool enforce_split_band_hpf) bool enforce_split_band_hpf,
bool minimize_processing_for_unused_output)
: multi_channel_render_support(multi_channel_render_support), : multi_channel_render_support(multi_channel_render_support),
multi_channel_capture_support(multi_channel_capture_support), multi_channel_capture_support(multi_channel_capture_support),
enforce_split_band_hpf(enforce_split_band_hpf) {} enforce_split_band_hpf(enforce_split_band_hpf),
minimize_processing_for_unused_output(
minimize_processing_for_unused_output) {}
bool multi_channel_render_support; bool multi_channel_render_support;
bool multi_channel_capture_support; bool multi_channel_capture_support;
bool enforce_split_band_hpf; bool enforce_split_band_hpf;
bool minimize_processing_for_unused_output;
} constants_; } constants_;
struct ApmCaptureState { struct ApmCaptureState {