Allow AudioAttributes to be app/client-configurable

WebRtcAudioTrack is hardcoded to configure AudioAttributes with
1. usage=USAGE_VOICE_COMMUNICATION
2. contentType=CONTENT_TYPE_SPEECH

This change allows AudioAttributes to be configured via the
JavaAudioDeviceModule.

Bug: webrtc:12153
Change-Id: I67c7f6e572c5a9f3a8fde674b6600d2adaf17895
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/191941
Commit-Queue: Gaurav Vaish <gvaish@chromium.org>
Reviewed-by: Henrik Andersson <henrika@webrtc.org>
Reviewed-by: Paulina Hensman <phensman@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32583}
Authored by Gaurav Vaish on 2020-11-09 11:08:17 -08:00, committed by Commit Bot
parent 0bfdbc37e9
commit b249d0a905
2 changed files with 39 additions and 11 deletions
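
For illustration, here is a minimal client-side sketch of the new option (hypothetical app code, not part of the diff; appContext stands for any application Context, and AdmFactoryExample is an invented name):

    // Hypothetical client code: play out WebRTC audio as media instead of
    // the previously hardcoded voice-communication defaults.
    import android.content.Context;
    import android.media.AudioAttributes;
    import org.webrtc.audio.AudioDeviceModule;
    import org.webrtc.audio.JavaAudioDeviceModule;

    final class AdmFactoryExample {
      static AudioDeviceModule createMediaAdm(Context appContext) {
        AudioAttributes audioAttributes = new AudioAttributes.Builder()
            .setUsage(AudioAttributes.USAGE_MEDIA)
            .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
            .build();
        return JavaAudioDeviceModule.builder(appContext)
            .setAudioAttributes(audioAttributes)
            .createAudioDeviceModule();
      }
    }

If setAudioAttributes() is never called, audioAttributes remains null and WebRtcAudioTrack falls back to its previous hardcoded behavior, as the diff below shows.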

JavaAudioDeviceModule.java

@@ -11,6 +11,7 @@
 package org.webrtc.audio;
 
 import android.content.Context;
+import android.media.AudioAttributes;
 import android.media.AudioDeviceInfo;
 import android.media.AudioManager;
 import android.os.Build;
@@ -47,6 +48,7 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
     private boolean useHardwareNoiseSuppressor = isBuiltInNoiseSuppressorSupported();
     private boolean useStereoInput;
     private boolean useStereoOutput;
+    private AudioAttributes audioAttributes;
 
     private Builder(Context context) {
       this.context = context;
@@ -193,6 +195,14 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
       return this;
     }
 
+    /**
+     * Set custom {@link AudioAttributes} to use.
+     */
+    public Builder setAudioAttributes(AudioAttributes audioAttributes) {
+      this.audioAttributes = audioAttributes;
+      return this;
+    }
+
     /**
      * Construct an AudioDeviceModule based on the supplied arguments. The caller takes ownership
      * and is responsible for calling release().
@@ -223,7 +233,7 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
           audioSource, audioFormat, audioRecordErrorCallback, audioRecordStateCallback,
           samplesReadyCallback, useHardwareAcousticEchoCanceler, useHardwareNoiseSuppressor);
       final WebRtcAudioTrack audioOutput = new WebRtcAudioTrack(
-          context, audioManager, audioTrackErrorCallback, audioTrackStateCallback);
+          context, audioManager, audioAttributes, audioTrackErrorCallback, audioTrackStateCallback);
       return new JavaAudioDeviceModule(context, audioManager, audioInput, audioOutput,
           inputSampleRate, outputSampleRate, useStereoInput, useStereoOutput);
     }

WebRtcAudioTrack.java

@@ -71,6 +71,7 @@ class WebRtcAudioTrack {
   private ByteBuffer byteBuffer;
 
+  private @Nullable final AudioAttributes audioAttributes;
   private @Nullable AudioTrack audioTrack;
   private @Nullable AudioTrackThread audioThread;
 
   private final VolumeLogger volumeLogger;
@@ -162,15 +163,17 @@ class WebRtcAudioTrack {
   @CalledByNative
   WebRtcAudioTrack(Context context, AudioManager audioManager) {
-    this(context, audioManager, null /* errorCallback */, null /* stateCallback */);
+    this(context, audioManager, null /* audioAttributes */, null /* errorCallback */,
+        null /* stateCallback */);
   }
 
   WebRtcAudioTrack(Context context, AudioManager audioManager,
-      @Nullable AudioTrackErrorCallback errorCallback,
+      @Nullable AudioAttributes audioAttributes, @Nullable AudioTrackErrorCallback errorCallback,
       @Nullable AudioTrackStateCallback stateCallback) {
     threadChecker.detachThread();
     this.context = context;
     this.audioManager = audioManager;
+    this.audioAttributes = audioAttributes;
     this.errorCallback = errorCallback;
     this.stateCallback = stateCallback;
     this.volumeLogger = new VolumeLogger(audioManager);
@@ -231,8 +234,8 @@ class WebRtcAudioTrack {
       // supersede the notion of stream types for defining the behavior of audio playback,
       // and to allow certain platforms or routing policies to use this information for more
       // refined volume or routing decisions.
-      audioTrack =
-          createAudioTrackOnLollipopOrHigher(sampleRate, channelConfig, minBufferSizeInBytes);
+      audioTrack = createAudioTrackOnLollipopOrHigher(
+          sampleRate, channelConfig, minBufferSizeInBytes, audioAttributes);
     } else {
       // Use default constructor for API levels below 21.
       audioTrack =
@@ -383,8 +386,8 @@ class WebRtcAudioTrack {
   // It allows certain platforms or routing policies to use this information for more
   // refined volume or routing decisions.
   @TargetApi(Build.VERSION_CODES.LOLLIPOP)
-  private static AudioTrack createAudioTrackOnLollipopOrHigher(
-      int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
+  private static AudioTrack createAudioTrackOnLollipopOrHigher(int sampleRateInHz,
+      int channelConfig, int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) {
     Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
     // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
     // performance when Android O is supported. Add some logging in the mean time.
@@ -394,11 +397,26 @@ class WebRtcAudioTrack {
     if (sampleRateInHz != nativeOutputSampleRate) {
       Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
     }
+
+    AudioAttributes.Builder attributesBuilder =
+        new AudioAttributes.Builder()
+            .setUsage(DEFAULT_USAGE)
+            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH);
+
+    if (overrideAttributes != null) {
+      if (overrideAttributes.getUsage() != AudioAttributes.USAGE_UNKNOWN) {
+        attributesBuilder.setUsage(overrideAttributes.getUsage());
+      }
+      if (overrideAttributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN) {
+        attributesBuilder.setContentType(overrideAttributes.getContentType());
+      }
+      attributesBuilder.setAllowedCapturePolicy(overrideAttributes.getAllowedCapturePolicy())
+          .setFlags(overrideAttributes.getFlags());
+    }
+
     // Create an audio track where the audio usage is for VoIP and the content type is speech.
-    return new AudioTrack(new AudioAttributes.Builder()
-                              .setUsage(DEFAULT_USAGE)
-                              .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
-                              .build(),
+    return new AudioTrack(attributesBuilder.build(),
         new AudioFormat.Builder()
             .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
             .setSampleRate(sampleRateInHz)
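
For context, a standalone restatement of the merge rule the hunk above applies (illustrative only; MergeRuleExample is an invented name, and DEFAULT_USAGE resolves to USAGE_VOICE_COMMUNICATION as the commit message notes):

    import android.media.AudioAttributes;

    final class MergeRuleExample {
      // Mirrors the fallback logic: fields left unset on the caller's
      // AudioAttributes read back as *_UNKNOWN and keep the WebRTC defaults.
      static AudioAttributes merge(AudioAttributes override) { // override may be null
        AudioAttributes.Builder builder = new AudioAttributes.Builder()
            .setUsage(AudioAttributes.USAGE_VOICE_COMMUNICATION) // DEFAULT_USAGE
            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH);
        if (override != null) {
          if (override.getUsage() != AudioAttributes.USAGE_UNKNOWN) {
            builder.setUsage(override.getUsage());
          }
          if (override.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN) {
            builder.setContentType(override.getContentType());
          }
        }
        return builder.build();
      }
    }

An override built with only setContentType(AudioAttributes.CONTENT_TYPE_MUSIC) therefore changes the content type but keeps the voice-communication usage, since its usage reads back as USAGE_UNKNOWN. (The patch additionally copies the override's allowed-capture policy and flags unconditionally.)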