diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc
index 73031b9597..b410654a14 100644
--- a/modules/audio_device/audio_device_impl.cc
+++ b/modules/audio_device/audio_device_impl.cc
@@ -280,7 +280,8 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
 // iOS ADM implementation.
 #if defined(WEBRTC_IOS)
   if (audio_layer == kPlatformDefaultAudio) {
-    audio_device_.reset(new ios_adm::AudioDeviceIOS());
+    audio_device_.reset(
+        new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false));
     RTC_LOG(INFO) << "iPhone Audio APIs will be utilized.";
   }
 // END #if defined(WEBRTC_IOS)
diff --git a/sdk/objc/native/api/audio_device_module.h b/sdk/objc/native/api/audio_device_module.h
index 08e48ff392..8925f307a3 100644
--- a/sdk/objc/native/api/audio_device_module.h
+++ b/sdk/objc/native/api/audio_device_module.h
@@ -17,7 +17,13 @@
 
 namespace webrtc {
 
-rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule();
+// If |bypass_voice_processing| is true, WebRTC will attempt to disable hardware
+// audio processing on iOS.
+// Warning: Setting |bypass_voice_processing| will have unpredictable
+// consequences for the audio path in the device. It is not advisable to use in
+// most scenarios.
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+    bool bypass_voice_processing = false);
 
 }  // namespace webrtc
 
diff --git a/sdk/objc/native/api/audio_device_module.mm b/sdk/objc/native/api/audio_device_module.mm
index e8f2b9a0bc..0968af1dcf 100644
--- a/sdk/objc/native/api/audio_device_module.mm
+++ b/sdk/objc/native/api/audio_device_module.mm
@@ -17,10 +17,10 @@
 
 namespace webrtc {
 
-rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule() {
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(bool bypass_voice_processing) {
   RTC_LOG(INFO) << __FUNCTION__;
 #if defined(WEBRTC_IOS)
-  return new rtc::RefCountedObject<ios_adm::AudioDeviceModuleIOS>();
+  return new rtc::RefCountedObject<ios_adm::AudioDeviceModuleIOS>(bypass_voice_processing);
 #else
   RTC_LOG(LERROR)
       << "current platform is not supported => this module will self destruct!";
diff --git a/sdk/objc/native/src/audio/audio_device_ios.h b/sdk/objc/native/src/audio/audio_device_ios.h
index 9d251724a5..ac2dc34b9a 100644
--- a/sdk/objc/native/src/audio/audio_device_ios.h
+++ b/sdk/objc/native/src/audio/audio_device_ios.h
@@ -48,7 +48,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
                        public VoiceProcessingAudioUnitObserver,
                        public rtc::MessageHandler {
  public:
-  AudioDeviceIOS();
+  explicit AudioDeviceIOS(bool bypass_voice_processing);
   ~AudioDeviceIOS() override;
 
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
@@ -205,6 +205,9 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
   // Resets thread-checkers before a call is restarted.
   void PrepareForNewStart();
 
+  // Determines whether voice processing should be enabled or disabled.
+  const bool bypass_voice_processing_;
+
   // Ensures that methods are called from the same thread as this object is
   // created on.
   rtc::ThreadChecker thread_checker_;
diff --git a/sdk/objc/native/src/audio/audio_device_ios.mm b/sdk/objc/native/src/audio/audio_device_ios.mm
index 3d953c0331..5ffeaa0cc5 100644
--- a/sdk/objc/native/src/audio/audio_device_ios.mm
+++ b/sdk/objc/native/src/audio/audio_device_ios.mm
@@ -100,8 +100,9 @@ static void LogDeviceInfo() {
 }
 #endif  // !defined(NDEBUG)
 
-AudioDeviceIOS::AudioDeviceIOS()
-    : audio_device_buffer_(nullptr),
+AudioDeviceIOS::AudioDeviceIOS(bool bypass_voice_processing)
+    : bypass_voice_processing_(bypass_voice_processing),
+      audio_device_buffer_(nullptr),
       audio_unit_(nullptr),
       recording_(0),
       playing_(0),
@@ -113,7 +114,8 @@ AudioDeviceIOS::AudioDeviceIOS()
       last_playout_time_(0),
       num_playout_callbacks_(0),
       last_output_volume_change_time_(0) {
-  LOGI() << "ctor" << ios::GetCurrentThreadDescription();
+  LOGI() << "ctor" << ios::GetCurrentThreadDescription()
+         << ",bypass_voice_processing=" << bypass_voice_processing_;
   io_thread_checker_.Detach();
   thread_checker_.Detach();
   thread_ = rtc::Thread::Current();
@@ -731,7 +733,7 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
 
 bool AudioDeviceIOS::CreateAudioUnit() {
   RTC_DCHECK(!audio_unit_);
-  audio_unit_.reset(new VoiceProcessingAudioUnit(this));
+  audio_unit_.reset(new VoiceProcessingAudioUnit(bypass_voice_processing_, this));
   if (!audio_unit_->Init()) {
     audio_unit_.reset();
     return false;
diff --git a/sdk/objc/native/src/audio/audio_device_module_ios.h b/sdk/objc/native/src/audio/audio_device_module_ios.h
index fcd3bd7bc1..9bcf114e32 100644
--- a/sdk/objc/native/src/audio/audio_device_module_ios.h
+++ b/sdk/objc/native/src/audio/audio_device_module_ios.h
@@ -30,7 +30,7 @@ class AudioDeviceModuleIOS : public AudioDeviceModule {
  public:
   int32_t AttachAudioBuffer();
 
-  AudioDeviceModuleIOS();
+  explicit AudioDeviceModuleIOS(bool bypass_voice_processing);
   ~AudioDeviceModuleIOS() override;
 
   // Retrieve the currently utilized audio layer
@@ -131,6 +131,7 @@ class AudioDeviceModuleIOS : public AudioDeviceModule {
   int GetRecordAudioParameters(AudioParameters* params) const override;
 #endif  // WEBRTC_IOS
  private:
+  const bool bypass_voice_processing_;
   bool initialized_ = false;
   const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
   std::unique_ptr<AudioDeviceIOS> audio_device_;
diff --git a/sdk/objc/native/src/audio/audio_device_module_ios.mm b/sdk/objc/native/src/audio/audio_device_module_ios.mm
index fbfa88f9b9..611b5297d4 100644
--- a/sdk/objc/native/src/audio/audio_device_module_ios.mm
+++ b/sdk/objc/native/src/audio/audio_device_module_ios.mm
@@ -40,8 +40,9 @@ namespace webrtc {
 namespace ios_adm {
 
-AudioDeviceModuleIOS::AudioDeviceModuleIOS()
-    : task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing)
+    : bypass_voice_processing_(bypass_voice_processing),
+      task_queue_factory_(CreateDefaultTaskQueueFactory()) {
   RTC_LOG(INFO) << "current platform is IOS";
   RTC_LOG(INFO) << "iPhone Audio APIs will be utilized.";
 }
@@ -72,7 +73,7 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS()
     return 0;
 
   audio_device_buffer_.reset(new webrtc::AudioDeviceBuffer(task_queue_factory_.get()));
-  audio_device_.reset(new ios_adm::AudioDeviceIOS());
+  audio_device_.reset(new ios_adm::AudioDeviceIOS(bypass_voice_processing_));
   RTC_CHECK(audio_device_);
 
   this->AttachAudioBuffer();
diff --git a/sdk/objc/native/src/audio/voice_processing_audio_unit.h b/sdk/objc/native/src/audio/voice_processing_audio_unit.h
index 7293032f6f..72e29c0d67 100644
--- a/sdk/objc/native/src/audio/voice_processing_audio_unit.h
+++ b/sdk/objc/native/src/audio/voice_processing_audio_unit.h
@@ -46,7 +46,8 @@ class VoiceProcessingAudioUnitObserver {
 // VoIP applications.
 class VoiceProcessingAudioUnit {
  public:
-  explicit VoiceProcessingAudioUnit(VoiceProcessingAudioUnitObserver* observer);
+  VoiceProcessingAudioUnit(bool bypass_voice_processing,
+                           VoiceProcessingAudioUnitObserver* observer);
   ~VoiceProcessingAudioUnit();
 
   // TODO(tkchin): enum for state and state checking.
@@ -129,6 +130,7 @@ class VoiceProcessingAudioUnit {
   // Deletes the underlying audio unit.
   void DisposeAudioUnit();
 
+  const bool bypass_voice_processing_;
   VoiceProcessingAudioUnitObserver* observer_;
   AudioUnit vpio_unit_;
   VoiceProcessingAudioUnit::State state_;
diff --git a/sdk/objc/native/src/audio/voice_processing_audio_unit.mm b/sdk/objc/native/src/audio/voice_processing_audio_unit.mm
index a2aa7f323b..2325b2ed2e 100644
--- a/sdk/objc/native/src/audio/voice_processing_audio_unit.mm
+++ b/sdk/objc/native/src/audio/voice_processing_audio_unit.mm
@@ -72,9 +72,12 @@ static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) {
   return result;
 }
 
-VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(
-    VoiceProcessingAudioUnitObserver* observer)
-    : observer_(observer), vpio_unit_(nullptr), state_(kInitRequired) {
+VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(bool bypass_voice_processing,
+                                                   VoiceProcessingAudioUnitObserver* observer)
+    : bypass_voice_processing_(bypass_voice_processing),
+      observer_(observer),
+      vpio_unit_(nullptr),
+      state_(kInitRequired) {
   RTC_DCHECK(observer);
 }
 
@@ -250,6 +253,24 @@ bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
     RTCLog(@"Voice Processing I/O unit is now initialized.");
   }
 
+  if (bypass_voice_processing_) {
+    // Attempt to disable builtin voice processing.
+    UInt32 toggle = 1;
+    result = AudioUnitSetProperty(vpio_unit_,
+                                  kAUVoiceIOProperty_BypassVoiceProcessing,
+                                  kAudioUnitScope_Global,
+                                  kInputBus,
+                                  &toggle,
+                                  sizeof(toggle));
+    if (result == noErr) {
+      RTCLog(@"Successfully bypassed voice processing.");
+    } else {
+      RTCLogError(@"Failed to bypass voice processing. Error=%ld.", (long)result);
+    }
+    state_ = kInitialized;
+    return true;
+  }
+
   // AGC should be enabled by default for Voice Processing I/O units but it is
   // checked below and enabled explicitly if needed. This scheme is used
   // to be absolutely sure that the AGC is enabled since we have seen cases
diff --git a/sdk/objc/unittests/RTCAudioDevice_xctest.mm b/sdk/objc/unittests/RTCAudioDevice_xctest.mm
index c936399f34..e01fdbd6e3 100644
--- a/sdk/objc/unittests/RTCAudioDevice_xctest.mm
+++ b/sdk/objc/unittests/RTCAudioDevice_xctest.mm
@@ -33,7 +33,7 @@
   [super setUp];
 
   _audioDeviceModule = webrtc::CreateAudioDeviceModule();
-  _audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS());
+  _audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false));
   self.audioSession = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
 
   NSError *error = nil;