Add support for toggling builtin voice processing on iOS

Bug: None
Change-Id: I3b64afdaed4777960124f248840f36598bba2ed4
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/195443
Reviewed-by: Henrik Andreasson <henrika@webrtc.org>
Commit-Queue: Sam Zackrisson <saza@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32742}
This commit is contained in:
Sam Zackrisson 2020-11-26 12:18:11 +01:00 committed by Commit Bot
parent 01b3e24a83
commit 76443eafa9
10 changed files with 55 additions and 18 deletions

View file

@@ -280,7 +280,8 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
// iOS ADM implementation.
#if defined(WEBRTC_IOS)
if (audio_layer == kPlatformDefaultAudio) {
audio_device_.reset(new ios_adm::AudioDeviceIOS());
audio_device_.reset(
new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false));
RTC_LOG(INFO) << "iPhone Audio APIs will be utilized.";
}
// END #if defined(WEBRTC_IOS)

View file

@@ -17,7 +17,13 @@
namespace webrtc {
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule();
// If |bypass_voice_processing| is true, WebRTC will attempt to disable hardware
// audio processing on iOS.
// Warning: Setting |bypass_voice_processing| will have unpredictable
// consequences for the audio path in the device. It is not advisable to use in
// most scenarios.
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
bool bypass_voice_processing = false);
} // namespace webrtc

View file

@@ -17,10 +17,10 @@
namespace webrtc {
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule() {
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(bool bypass_voice_processing) {
RTC_LOG(INFO) << __FUNCTION__;
#if defined(WEBRTC_IOS)
return new rtc::RefCountedObject<ios_adm::AudioDeviceModuleIOS>();
return new rtc::RefCountedObject<ios_adm::AudioDeviceModuleIOS>(bypass_voice_processing);
#else
RTC_LOG(LERROR)
<< "current platform is not supported => this module will self destruct!";

View file

@@ -48,7 +48,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
public VoiceProcessingAudioUnitObserver,
public rtc::MessageHandler {
public:
AudioDeviceIOS();
explicit AudioDeviceIOS(bool bypass_voice_processing);
~AudioDeviceIOS() override;
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
@@ -205,6 +205,9 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
// Resets thread-checkers before a call is restarted.
void PrepareForNewStart();
// Determines whether voice processing should be enabled or disabled.
const bool bypass_voice_processing_;
// Ensures that methods are called from the same thread as this object is
// created on.
rtc::ThreadChecker thread_checker_;

View file

@@ -100,8 +100,9 @@ static void LogDeviceInfo() {
}
#endif // !defined(NDEBUG)
AudioDeviceIOS::AudioDeviceIOS()
: audio_device_buffer_(nullptr),
AudioDeviceIOS::AudioDeviceIOS(bool bypass_voice_processing)
: bypass_voice_processing_(bypass_voice_processing),
audio_device_buffer_(nullptr),
audio_unit_(nullptr),
recording_(0),
playing_(0),
@@ -113,7 +114,8 @@ AudioDeviceIOS::AudioDeviceIOS()
last_playout_time_(0),
num_playout_callbacks_(0),
last_output_volume_change_time_(0) {
LOGI() << "ctor" << ios::GetCurrentThreadDescription();
LOGI() << "ctor" << ios::GetCurrentThreadDescription()
<< ",bypass_voice_processing=" << bypass_voice_processing_;
io_thread_checker_.Detach();
thread_checker_.Detach();
thread_ = rtc::Thread::Current();
@@ -731,7 +733,7 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
bool AudioDeviceIOS::CreateAudioUnit() {
RTC_DCHECK(!audio_unit_);
audio_unit_.reset(new VoiceProcessingAudioUnit(this));
audio_unit_.reset(new VoiceProcessingAudioUnit(bypass_voice_processing_, this));
if (!audio_unit_->Init()) {
audio_unit_.reset();
return false;

View file

@@ -30,7 +30,7 @@ class AudioDeviceModuleIOS : public AudioDeviceModule {
public:
int32_t AttachAudioBuffer();
AudioDeviceModuleIOS();
explicit AudioDeviceModuleIOS(bool bypass_voice_processing);
~AudioDeviceModuleIOS() override;
// Retrieve the currently utilized audio layer
@@ -131,6 +131,7 @@ class AudioDeviceModuleIOS : public AudioDeviceModule {
int GetRecordAudioParameters(AudioParameters* params) const override;
#endif // WEBRTC_IOS
private:
const bool bypass_voice_processing_;
bool initialized_ = false;
const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
std::unique_ptr<AudioDeviceIOS> audio_device_;

View file

@@ -40,8 +40,9 @@
namespace webrtc {
namespace ios_adm {
AudioDeviceModuleIOS::AudioDeviceModuleIOS()
: task_queue_factory_(CreateDefaultTaskQueueFactory()) {
AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing)
: bypass_voice_processing_(bypass_voice_processing),
task_queue_factory_(CreateDefaultTaskQueueFactory()) {
RTC_LOG(INFO) << "current platform is IOS";
RTC_LOG(INFO) << "iPhone Audio APIs will be utilized.";
}
@@ -72,7 +73,7 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS()
return 0;
audio_device_buffer_.reset(new webrtc::AudioDeviceBuffer(task_queue_factory_.get()));
audio_device_.reset(new ios_adm::AudioDeviceIOS());
audio_device_.reset(new ios_adm::AudioDeviceIOS(bypass_voice_processing_));
RTC_CHECK(audio_device_);
this->AttachAudioBuffer();

View file

@@ -46,7 +46,8 @@ class VoiceProcessingAudioUnitObserver {
// VoIP applications.
class VoiceProcessingAudioUnit {
public:
explicit VoiceProcessingAudioUnit(VoiceProcessingAudioUnitObserver* observer);
VoiceProcessingAudioUnit(bool bypass_voice_processing,
VoiceProcessingAudioUnitObserver* observer);
~VoiceProcessingAudioUnit();
// TODO(tkchin): enum for state and state checking.
@@ -129,6 +130,7 @@ class VoiceProcessingAudioUnit {
// Deletes the underlying audio unit.
void DisposeAudioUnit();
const bool bypass_voice_processing_;
VoiceProcessingAudioUnitObserver* observer_;
AudioUnit vpio_unit_;
VoiceProcessingAudioUnit::State state_;

View file

@@ -72,9 +72,12 @@ static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) {
return result;
}
VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(
VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(bool bypass_voice_processing,
VoiceProcessingAudioUnitObserver* observer)
: observer_(observer), vpio_unit_(nullptr), state_(kInitRequired) {
: bypass_voice_processing_(bypass_voice_processing),
observer_(observer),
vpio_unit_(nullptr),
state_(kInitRequired) {
RTC_DCHECK(observer);
}
@@ -250,6 +253,24 @@ bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
RTCLog(@"Voice Processing I/O unit is now initialized.");
}
if (bypass_voice_processing_) {
// Attempt to disable builtin voice processing.
UInt32 toggle = 1;
result = AudioUnitSetProperty(vpio_unit_,
kAUVoiceIOProperty_BypassVoiceProcessing,
kAudioUnitScope_Global,
kInputBus,
&toggle,
sizeof(toggle));
if (result == noErr) {
RTCLog(@"Successfully bypassed voice processing.");
} else {
RTCLogError(@"Failed to bypass voice processing. Error=%ld.", (long)result);
}
state_ = kInitialized;
return true;
}
// AGC should be enabled by default for Voice Processing I/O units but it is
// checked below and enabled explicitly if needed. This scheme is used
// to be absolutely sure that the AGC is enabled since we have seen cases

View file

@@ -33,7 +33,7 @@
[super setUp];
_audioDeviceModule = webrtc::CreateAudioDeviceModule();
_audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS());
_audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false));
self.audioSession = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
NSError *error = nil;