Mirror of https://github.com/mollyim/webrtc.git, synced 2025-05-12 21:30:45 +01:00
Update AudioFrameOperations to require ArrayView
Bug: chromium:335805780
Change-Id: I14d97315f4cffa21bcc11b063e86c5adcebe78ae
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/348800
Commit-Queue: Tomas Gunnarsson <tommi@webrtc.org>
Reviewed-by: Per Åhgren <peah@webrtc.org>
Reviewed-by: Olga Sharonova <olka@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#42204}
Parent: acfd279a14
Commit: 57b09eca64
7 changed files with 108 additions and 100 deletions
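The change applies one pattern throughout: a raw pointer plus an implicitly trusted element count becomes an rtc::ArrayView that carries its size with it. A minimal sketch of the idea, using C++20 std::span as a stand-in for rtc::ArrayView (ProcessAudio is a hypothetical function, not part of this commit):

#include <cstddef>
#include <cstdint>
#include <span>
#include <vector>

// Before: the callee must trust that `num_samples` matches the buffer.
void ProcessAudioOld(const int16_t* samples, size_t num_samples);

// After: the view knows its own size, so the callee can check it, which is
// what the RTC_DCHECK_EQ calls added in this commit do.
void ProcessAudioNew(std::span<const int16_t> samples) {
  for (int16_t sample : samples) {
    (void)sample;  // ... process each sample ...
  }
}

int main() {
  std::vector<int16_t> buffer(2 * 320);  // Interleaved stereo, 320 per channel.
  ProcessAudioNew(buffer);  // Containers convert implicitly, as with ArrayView.
}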
@@ -91,6 +91,16 @@ void AudioFrame::CopyFrom(const AudioFrame& src) {
   if (this == &src)
     return;
 
+  if (muted_ && !src.muted()) {
+    // TODO: bugs.webrtc.org/5647 - Since the default value for `muted_` is
+    // false and `data_` may still be uninitialized (because we don't initialize
+    // data_ as part of construction), we clear the full buffer here before
+    // copying over new values. If we don't, msan might complain in some tests.
+    // Consider locking down construction, avoiding the default constructor and
+    // prefering construction that initializes all state.
+    memset(data_, 0, kMaxDataSizeBytes);
+  }
+
   timestamp_ = src.timestamp_;
   elapsed_time_ms_ = src.elapsed_time_ms_;
   ntp_time_ms_ = src.ntp_time_ms_;
@@ -104,11 +114,10 @@ void AudioFrame::CopyFrom(const AudioFrame& src) {
   channel_layout_ = src.channel_layout_;
   absolute_capture_timestamp_ms_ = src.absolute_capture_timestamp_ms();
 
-  const size_t length = samples_per_channel_ * num_channels_;
-  RTC_CHECK_LE(length, kMaxDataSizeSamples);
-  if (!src.muted()) {
-    memcpy(data_, src.data(), sizeof(int16_t) * length);
-    muted_ = false;
+  auto data = src.data_view();
+  RTC_CHECK_LE(data.size(), kMaxDataSizeSamples);
+  if (!muted_ && !data.empty()) {
+    memcpy(&data_[0], &data[0], sizeof(int16_t) * data.size());
   }
 }
 
@@ -158,10 +167,12 @@ rtc::ArrayView<int16_t> AudioFrame::mutable_data(size_t samples_per_channel,
   RTC_CHECK_LE(total_samples, kMaxDataSizeSamples);
   RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
   // Sanity check for valid argument values during development.
-  // If `samples_per_channel` is <= kMaxConcurrentChannels but larger than 0,
+  // If `samples_per_channel` is < `num_channels` but larger than 0,
   // then chances are the order of arguments is incorrect.
   RTC_DCHECK((samples_per_channel == 0 && num_channels == 0) ||
-             samples_per_channel > kMaxConcurrentChannels);
+             num_channels <= samples_per_channel)
+      << "samples_per_channel=" << samples_per_channel
+      << "num_channels=" << num_channels;
 
   // TODO: bugs.webrtc.org/5647 - Can we skip zeroing the buffer?
   // Consider instead if we should rather zero the whole buffer when `muted_` is
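In the new AudioFrame API, reads go through data_view() and writes through mutable_data(samples_per_channel, num_channels), which also resizes the frame; the RTC_DCHECK above exists because the two size arguments are easy to swap. A hedged usage sketch, assuming the WebRTC tree (FillWithSilence is illustrative only):

#include "api/audio/audio_frame.h"

void FillWithSilence(webrtc::AudioFrame* frame) {
  // Samples-per-channel comes first, then the channel count; swapping them
  // would trip the argument-order RTC_DCHECK shown above.
  rtc::ArrayView<int16_t> data =
      frame->mutable_data(/*samples_per_channel=*/320, /*num_channels=*/2);
  for (int16_t& sample : data) {
    sample = 0;
  }
}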
@@ -31,6 +31,7 @@ void RemixAndResample(const AudioFrame& src_frame,
   dst_frame->packet_infos_ = src_frame.packet_infos_;
 }
 
+// TODO: b/335805780 - Accept ArrayView.
 void RemixAndResample(const int16_t* src_data,
                       size_t samples_per_channel,
                       size_t num_channels,
@@ -49,8 +50,11 @@ void RemixAndResample(const int16_t* src_data,
         << "dst_frame->num_channels_: " << dst_frame->num_channels_;
 
     AudioFrameOperations::DownmixChannels(
-        src_data, num_channels, samples_per_channel, dst_frame->num_channels_,
-        downmixed_audio);
+        rtc::ArrayView<const int16_t>(src_data,
+                                      num_channels * samples_per_channel),
+        num_channels, samples_per_channel, dst_frame->num_channels_,
+        rtc::ArrayView<int16_t>(&downmixed_audio[0], dst_frame->num_channels_ *
+                                                         samples_per_channel));
     audio_ptr = downmixed_audio;
    audio_ptr_num_channels = dst_frame->num_channels_;
   }
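Call sites that still receive raw pointers, as RemixAndResample does, wrap them in views at the boundary; an interleaved buffer holds channels times samples-per-channel elements in total. A hedged sketch of the same wrapping, assuming the WebRTC tree and the audio/utility header path:

#include <cstddef>
#include <cstdint>

#include "api/array_view.h"
#include "audio/utility/audio_frame_operations.h"

// Downmix an interleaved stereo buffer to mono via the new overload.
void StereoBufferToMono(const int16_t* src,
                        size_t samples_per_channel,
                        int16_t* dst) {
  webrtc::AudioFrameOperations::DownmixChannels(
      rtc::ArrayView<const int16_t>(src, 2 * samples_per_channel),
      /*src_channels=*/2, samples_per_channel, /*dst_channels=*/1,
      rtc::ArrayView<int16_t>(dst, samples_per_channel));
}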
@@ -23,6 +23,7 @@ rtc_library("audio_frame_operations") {
   ]
 
   deps = [
+    "../../api:array_view",
     "../../api/audio:audio_frame_api",
     "../../common_audio",
     "../../rtc_base:checks",
@@ -56,14 +56,13 @@ void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
   result_frame->speech_type_ = AudioFrame::kUndefined;
 
   if (!frame_to_add.muted()) {
-    const int16_t* in_data = frame_to_add.data();
-    int16_t* out_data = result_frame->mutable_data();
-    size_t length =
-        frame_to_add.samples_per_channel_ * frame_to_add.num_channels_;
+    auto in_data = frame_to_add.data_view();
+    auto out_data = result_frame->mutable_data(
+        frame_to_add.samples_per_channel_, frame_to_add.num_channels_);
     if (no_previous_data) {
-      std::copy(in_data, in_data + length, out_data);
+      std::copy(in_data.begin(), in_data.end(), out_data.data());
     } else {
-      for (size_t i = 0; i < length; i++) {
+      for (size_t i = 0; i < in_data.size(); ++i) {
         const int32_t wrap_guard = static_cast<int32_t>(out_data[i]) +
                                    static_cast<int32_t>(in_data[i]);
         out_data[i] = rtc::saturated_cast<int16_t>(wrap_guard);
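The summing loop widens to int32_t before clamping back into int16_t range, so simultaneous loud samples saturate instead of wrapping. A standalone sketch of that step (std::clamp stands in for rtc::saturated_cast, which the real code uses):

#include <algorithm>
#include <cstdint>
#include <iostream>

int16_t SaturatedAdd(int16_t a, int16_t b) {
  // Widen first so the intermediate sum cannot overflow.
  const int32_t wrap_guard = static_cast<int32_t>(a) + static_cast<int32_t>(b);
  return static_cast<int16_t>(
      std::clamp<int32_t>(wrap_guard, INT16_MIN, INT16_MAX));
}

int main() {
  // 30000 + 10000 saturates to 32767; naive int16_t math would wrap to -25536.
  std::cout << SaturatedAdd(30000, 10000) << "\n";
}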
@@ -72,9 +71,11 @@ void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
   }
 }
 
-void AudioFrameOperations::QuadToStereo(const int16_t* src_audio,
+void AudioFrameOperations::QuadToStereo(rtc::ArrayView<const int16_t> src_audio,
                                         size_t samples_per_channel,
-                                        int16_t* dst_audio) {
+                                        rtc::ArrayView<int16_t> dst_audio) {
+  RTC_DCHECK_EQ(src_audio.size(), samples_per_channel * 4);
+  RTC_DCHECK_EQ(dst_audio.size(), samples_per_channel * 2);
   for (size_t i = 0; i < samples_per_channel; i++) {
     dst_audio[i * 2] =
         (static_cast<int32_t>(src_audio[4 * i]) + src_audio[4 * i + 1]) >> 1;
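The two RTC_DCHECK_EQs spell out the size contract: the quad source holds 4 x samples_per_channel interleaved values and the stereo destination 2 x samples_per_channel. A hedged call sketch, assuming the WebRTC tree:

#include <cstddef>
#include <cstdint>
#include <vector>

#include "api/array_view.h"
#include "audio/utility/audio_frame_operations.h"

void QuadBufferToStereo(const std::vector<int16_t>& quad,
                        size_t samples_per_channel,
                        std::vector<int16_t>* stereo) {
  // The views must match the documented sizes or the new DCHECKs fire.
  webrtc::AudioFrameOperations::QuadToStereo(
      rtc::ArrayView<const int16_t>(quad.data(), 4 * samples_per_channel),
      samples_per_channel,
      rtc::ArrayView<int16_t>(stereo->data(), 2 * samples_per_channel));
}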
@@ -93,30 +94,33 @@ int AudioFrameOperations::QuadToStereo(AudioFrame* frame) {
                AudioFrame::kMaxDataSizeSamples);
 
   if (!frame->muted()) {
-    QuadToStereo(frame->data(), frame->samples_per_channel_,
-                 frame->mutable_data());
-  }
-  frame->num_channels_ = 2;
+    auto current_data = frame->data_view();
+    QuadToStereo(current_data, frame->samples_per_channel_,
+                 frame->mutable_data(frame->samples_per_channel_, 2));
+  } else {
+    frame->num_channels_ = 2;
+  }
 
   return 0;
 }
 
-void AudioFrameOperations::DownmixChannels(const int16_t* src_audio,
-                                           size_t src_channels,
-                                           size_t samples_per_channel,
-                                           size_t dst_channels,
-                                           int16_t* dst_audio) {
+void AudioFrameOperations::DownmixChannels(
+    rtc::ArrayView<const int16_t> src_audio,
+    size_t src_channels,
+    size_t samples_per_channel,
+    size_t dst_channels,
+    rtc::ArrayView<int16_t> dst_audio) {
+  RTC_DCHECK_EQ(src_audio.size(), src_channels * samples_per_channel);
+  RTC_DCHECK_EQ(dst_audio.size(), dst_channels * samples_per_channel);
   if (src_channels > 1 && dst_channels == 1) {
-    DownmixInterleavedToMono(src_audio, samples_per_channel, src_channels,
-                             dst_audio);
-    return;
+    DownmixInterleavedToMono(src_audio.data(), samples_per_channel,
+                             src_channels, &dst_audio[0]);
   } else if (src_channels == 4 && dst_channels == 2) {
     QuadToStereo(src_audio, samples_per_channel, dst_audio);
-    return;
-  }
-
-  RTC_DCHECK_NOTREACHED() << "src_channels: " << src_channels
-                          << ", dst_channels: " << dst_channels;
+  } else {
+    RTC_DCHECK_NOTREACHED() << "src_channels: " << src_channels
+                            << ", dst_channels: " << dst_channels;
+  }
 }
 
 void AudioFrameOperations::DownmixChannels(size_t dst_channels,
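For whole frames, the in-place overload taking only the target channel count is unchanged; the tests below keep calling it as DownmixChannels(1, &frame_). A minimal hedged sketch, assuming the WebRTC tree:

#include "api/audio/audio_frame.h"
#include "audio/utility/audio_frame_operations.h"

// Downmix a multichannel frame to mono in place; the frame's
// num_channels_ is updated by the operation itself.
void FrameToMono(webrtc::AudioFrame* frame) {
  webrtc::AudioFrameOperations::DownmixChannels(/*dst_channels=*/1, frame);
}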
@@ -153,14 +157,16 @@ void AudioFrameOperations::UpmixChannels(size_t target_number_of_channels,
   if (!frame->muted()) {
     // Up-mixing done in place. Going backwards through the frame ensure nothing
     // is irrevocably overwritten.
-    int16_t* frame_data = frame->mutable_data();
-    for (int i = frame->samples_per_channel_ - 1; i >= 0; i--) {
+    auto frame_data = frame->mutable_data(frame->samples_per_channel_,
+                                          target_number_of_channels);
+    for (int i = frame->samples_per_channel_ - 1; i >= 0; --i) {
       for (size_t j = 0; j < target_number_of_channels; ++j) {
         frame_data[target_number_of_channels * i + j] = frame_data[i];
       }
     }
-  }
-  frame->num_channels_ = target_number_of_channels;
+  } else {
+    frame->num_channels_ = target_number_of_channels;
+  }
 }
 
 void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
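Up-mixing in place walks the frame backwards so mono sample i is still intact when it is fanned out to the interleaved positions starting at target_number_of_channels * i. A standalone sketch of that traversal with plain buffers:

#include <cstddef>
#include <cstdint>

// Expand `samples` mono values to `channels`-interleaved audio in the same
// buffer, which must have room for samples * channels elements.
void UpmixMonoInPlace(int16_t* data, size_t samples, size_t channels) {
  // Iterate backwards: position channels * i + j is always >= i, so the
  // source sample at i is never overwritten before it has been copied.
  for (size_t i = samples; i-- > 0;) {
    for (size_t j = 0; j < channels; ++j) {
      data[channels * i + j] = data[i];
    }
  }
}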
@@ -15,6 +15,7 @@
 #include <stdint.h>
 
 #include "absl/base/attributes.h"
+#include "api/array_view.h"
 #include "api/audio/audio_frame.h"
 
 namespace webrtc {
@@ -36,9 +37,9 @@ class AudioFrameOperations {
   // Downmixes 4 channels `src_audio` to stereo `dst_audio`. This is an in-place
   // operation, meaning `src_audio` and `dst_audio` may point to the same
   // buffer.
-  static void QuadToStereo(const int16_t* src_audio,
+  static void QuadToStereo(rtc::ArrayView<const int16_t> src_audio,
                            size_t samples_per_channel,
-                           int16_t* dst_audio);
+                           rtc::ArrayView<int16_t> dst_audio);
 
   // `frame.num_channels_` will be updated. This version checks that
   // `num_channels_` is 4 channels.
@@ -48,11 +49,11 @@ class AudioFrameOperations {
   // This is an in-place operation, meaning `src_audio` and `dst_audio`
   // may point to the same buffer. Supported channel combinations are
   // Stereo to Mono, Quad to Mono, and Quad to Stereo.
-  static void DownmixChannels(const int16_t* src_audio,
+  static void DownmixChannels(rtc::ArrayView<const int16_t> src_audio,
                               size_t src_channels,
                               size_t samples_per_channel,
                               size_t dst_channels,
-                              int16_t* dst_audio);
+                              rtc::ArrayView<int16_t> dst_audio);
 
   // `frame.num_channels_` will be updated. This version checks that
   // `num_channels_` and `dst_channels` are valid and performs relevant downmix.
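Since rtc::ArrayView converts implicitly from containers (and from the views AudioFrame hands out), most callers never spell the type. A hedged sketch, assuming the WebRTC tree:

#include <cstddef>
#include <cstdint>
#include <vector>

#include "audio/utility/audio_frame_operations.h"

void Example(size_t samples_per_channel) {
  std::vector<int16_t> quad(4 * samples_per_channel);
  std::vector<int16_t> stereo(2 * samples_per_channel);
  // The vectors convert to ArrayView implicitly, so the call site reads
  // like the old pointer version while still carrying the sizes along.
  webrtc::AudioFrameOperations::QuadToStereo(quad, samples_per_channel,
                                             stereo);
}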
@@ -18,13 +18,10 @@ namespace {
 
 class AudioFrameOperationsTest : public ::testing::Test {
  protected:
-  AudioFrameOperationsTest() {
-    // Set typical values.
-    frame_.samples_per_channel_ = 320;
-    frame_.num_channels_ = 2;
-  }
+  AudioFrameOperationsTest() = default;
 
-  AudioFrame frame_;
+  // Set typical values.
+  AudioFrame frame_{/*sample_rate=*/32000, /*num_channels*/ 2};
 };
 
 class AudioFrameOperationsDeathTest : public AudioFrameOperationsTest {};
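The fixture now uses AudioFrame's initializing constructor instead of assigning members after the fact; with WebRTC's usual 10 ms framing, 32000 Hz works out to the 320 samples per channel assumed by the tests. A small sketch of that relationship (the 10 ms convention is an assumption stated here, not part of the diff):

#include <cstddef>

// Audio frames span 10 ms, so samples per channel is sample_rate / 100.
constexpr size_t SamplesPerChannel(int sample_rate_hz) {
  return static_cast<size_t>(sample_rate_hz / 100);
}
static_assert(SamplesPerChannel(32000) == 320,
              "matches the fixture and the 320s used throughout the tests");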
@@ -34,7 +31,8 @@ void SetFrameData(int16_t ch1,
                   int16_t ch3,
                   int16_t ch4,
                   AudioFrame* frame) {
-  int16_t* frame_data = frame->mutable_data();
+  rtc::ArrayView<int16_t> frame_data =
+      frame->mutable_data(frame->samples_per_channel_, 4);
   for (size_t i = 0; i < frame->samples_per_channel_ * 4; i += 4) {
     frame_data[i] = ch1;
     frame_data[i + 1] = ch2;
@@ -44,7 +42,8 @@ void SetFrameData(int16_t ch1,
 }
 
 void SetFrameData(int16_t left, int16_t right, AudioFrame* frame) {
-  int16_t* frame_data = frame->mutable_data();
+  rtc::ArrayView<int16_t> frame_data =
+      frame->mutable_data(frame->samples_per_channel_, 2);
   for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
     frame_data[i] = left;
     frame_data[i + 1] = right;
@@ -52,7 +51,8 @@ void SetFrameData(int16_t left, int16_t right, AudioFrame* frame) {
 }
 
 void SetFrameData(int16_t data, AudioFrame* frame) {
-  int16_t* frame_data = frame->mutable_data();
+  rtc::ArrayView<int16_t> frame_data =
+      frame->mutable_data(frame->samples_per_channel_, 1);
   for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
        i++) {
     frame_data[i] = data;
@@ -60,15 +60,18 @@ void SetFrameData(int16_t data, AudioFrame* frame) {
 }
 
 void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
-  EXPECT_EQ(frame1.num_channels_, frame2.num_channels_);
-  EXPECT_EQ(frame1.samples_per_channel_, frame2.samples_per_channel_);
+  ASSERT_EQ(frame1.num_channels_, frame2.num_channels_);
+  ASSERT_EQ(frame1.samples_per_channel_, frame2.samples_per_channel_);
+  EXPECT_EQ(frame1.muted(), frame2.muted());
   const int16_t* frame1_data = frame1.data();
   const int16_t* frame2_data = frame2.data();
+  // TODO(tommi): Use sample_count() or data_view().
   for (size_t i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
        i++) {
     EXPECT_EQ(frame1_data[i], frame2_data[i]);
+    if (frame1_data[i] != frame2_data[i])
+      break;  // To avoid spamming the log.
   }
-  EXPECT_EQ(frame1.muted(), frame2.muted());
 }
 
 void InitFrame(AudioFrame* frame,
|
@ -76,17 +79,16 @@ void InitFrame(AudioFrame* frame,
|
|||
size_t samples_per_channel,
|
||||
int16_t left_data,
|
||||
int16_t right_data) {
|
||||
RTC_DCHECK(frame);
|
||||
RTC_DCHECK_GE(2, channels);
|
||||
RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples,
|
||||
samples_per_channel * channels);
|
||||
frame->samples_per_channel_ = samples_per_channel;
|
||||
frame->num_channels_ = channels;
|
||||
if (channels == 2) {
|
||||
SetFrameData(left_data, right_data, frame);
|
||||
} else if (channels == 1) {
|
||||
SetFrameData(left_data, frame);
|
||||
}
|
||||
ASSERT_EQ(frame->num_channels_, channels);
|
||||
}
|
||||
|
||||
int16_t GetChannelData(const AudioFrame& frame, size_t channel, size_t index) {
|
||||
|
@@ -116,7 +118,6 @@ TEST_F(AudioFrameOperationsDeathTest, MonoToStereoFailsWithBadParameters) {
 #endif
 
 TEST_F(AudioFrameOperationsTest, MonoToStereoSucceeds) {
-  frame_.num_channels_ = 1;
   SetFrameData(1, &frame_);
 
   AudioFrameOperations::UpmixChannels(2, &frame_);
@@ -124,7 +125,6 @@ TEST_F(AudioFrameOperationsTest, MonoToStereoSucceeds) {
 
   AudioFrame stereo_frame;
   stereo_frame.samples_per_channel_ = 320;
-  stereo_frame.num_channels_ = 2;
   SetFrameData(1, 1, &stereo_frame);
   VerifyFramesAreEqual(stereo_frame, frame_);
 }
@@ -151,7 +151,6 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoSucceeds) {
 
   AudioFrame mono_frame;
   mono_frame.samples_per_channel_ = 320;
-  mono_frame.num_channels_ = 1;
   SetFrameData(3, &mono_frame);
   VerifyFramesAreEqual(mono_frame, frame_);
 }
@@ -167,16 +166,12 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoBufferSucceeds) {
   AudioFrame target_frame;
   SetFrameData(4, 2, &frame_);
 
-  target_frame.num_channels_ = 1;
-  target_frame.samples_per_channel_ = frame_.samples_per_channel_;
-
-  AudioFrameOperations::DownmixChannels(frame_.data(), 2,
-                                        frame_.samples_per_channel_, 1,
-                                        target_frame.mutable_data());
+  AudioFrameOperations::DownmixChannels(
+      frame_.data_view(), 2, frame_.samples_per_channel_, 1,
+      target_frame.mutable_data(frame_.samples_per_channel_, 1));
 
   AudioFrame mono_frame;
   mono_frame.samples_per_channel_ = 320;
-  mono_frame.num_channels_ = 1;
   SetFrameData(3, &mono_frame);
   VerifyFramesAreEqual(mono_frame, target_frame);
 }
@@ -187,13 +182,11 @@ TEST_F(AudioFrameOperationsTest, StereoToMonoDoesNotWrapAround) {
   EXPECT_EQ(1u, frame_.num_channels_);
   AudioFrame mono_frame;
   mono_frame.samples_per_channel_ = 320;
-  mono_frame.num_channels_ = 1;
   SetFrameData(-32768, &mono_frame);
   VerifyFramesAreEqual(mono_frame, frame_);
 }
 
 TEST_F(AudioFrameOperationsTest, QuadToMonoSucceeds) {
-  frame_.num_channels_ = 4;
   SetFrameData(4, 2, 6, 8, &frame_);
 
   AudioFrameOperations::DownmixChannels(1, &frame_);
@@ -201,7 +194,6 @@ TEST_F(AudioFrameOperationsTest, QuadToMonoSucceeds) {
 
   AudioFrame mono_frame;
   mono_frame.samples_per_channel_ = 320;
-  mono_frame.num_channels_ = 1;
   SetFrameData(5, &mono_frame);
   VerifyFramesAreEqual(mono_frame, frame_);
 }
@@ -216,31 +208,24 @@ TEST_F(AudioFrameOperationsTest, QuadToMonoMuted) {
 
 TEST_F(AudioFrameOperationsTest, QuadToMonoBufferSucceeds) {
   AudioFrame target_frame;
-  frame_.num_channels_ = 4;
   SetFrameData(4, 2, 6, 8, &frame_);
 
-  target_frame.num_channels_ = 1;
-  target_frame.samples_per_channel_ = frame_.samples_per_channel_;
-
-  AudioFrameOperations::DownmixChannels(frame_.data(), 4,
-                                        frame_.samples_per_channel_, 1,
-                                        target_frame.mutable_data());
+  AudioFrameOperations::DownmixChannels(
+      frame_.data_view(), 4, frame_.samples_per_channel_, 1,
+      target_frame.mutable_data(frame_.samples_per_channel_, 1));
   AudioFrame mono_frame;
   mono_frame.samples_per_channel_ = 320;
-  mono_frame.num_channels_ = 1;
   SetFrameData(5, &mono_frame);
   VerifyFramesAreEqual(mono_frame, target_frame);
 }
 
 TEST_F(AudioFrameOperationsTest, QuadToMonoDoesNotWrapAround) {
-  frame_.num_channels_ = 4;
   SetFrameData(-32768, -32768, -32768, -32768, &frame_);
   AudioFrameOperations::DownmixChannels(1, &frame_);
   EXPECT_EQ(1u, frame_.num_channels_);
 
   AudioFrame mono_frame;
   mono_frame.samples_per_channel_ = 320;
-  mono_frame.num_channels_ = 1;
   SetFrameData(-32768, &mono_frame);
   VerifyFramesAreEqual(mono_frame, frame_);
 }
@@ -253,13 +238,11 @@ TEST_F(AudioFrameOperationsTest, QuadToStereoFailsWithBadParameters) {
 }
 
 TEST_F(AudioFrameOperationsTest, QuadToStereoSucceeds) {
-  frame_.num_channels_ = 4;
   SetFrameData(4, 2, 6, 8, &frame_);
   EXPECT_EQ(0, AudioFrameOperations::QuadToStereo(&frame_));
 
   AudioFrame stereo_frame;
   stereo_frame.samples_per_channel_ = 320;
-  stereo_frame.num_channels_ = 2;
   SetFrameData(3, 7, &stereo_frame);
   VerifyFramesAreEqual(stereo_frame, frame_);
 }
@@ -273,29 +256,23 @@ TEST_F(AudioFrameOperationsTest, QuadToStereoMuted) {
 
 TEST_F(AudioFrameOperationsTest, QuadToStereoBufferSucceeds) {
   AudioFrame target_frame;
-  frame_.num_channels_ = 4;
   SetFrameData(4, 2, 6, 8, &frame_);
 
-  target_frame.num_channels_ = 2;
-  target_frame.samples_per_channel_ = frame_.samples_per_channel_;
-
-  AudioFrameOperations::QuadToStereo(frame_.data(), frame_.samples_per_channel_,
-                                     target_frame.mutable_data());
+  AudioFrameOperations::QuadToStereo(
+      frame_.data_view(), frame_.samples_per_channel_,
+      target_frame.mutable_data(frame_.samples_per_channel_, 2));
   AudioFrame stereo_frame;
   stereo_frame.samples_per_channel_ = 320;
-  stereo_frame.num_channels_ = 2;
   SetFrameData(3, 7, &stereo_frame);
   VerifyFramesAreEqual(stereo_frame, target_frame);
 }
 
 TEST_F(AudioFrameOperationsTest, QuadToStereoDoesNotWrapAround) {
-  frame_.num_channels_ = 4;
   SetFrameData(-32768, -32768, -32768, -32768, &frame_);
   EXPECT_EQ(0, AudioFrameOperations::QuadToStereo(&frame_));
 
   AudioFrame stereo_frame;
   stereo_frame.samples_per_channel_ = 320;
-  stereo_frame.num_channels_ = 2;
   SetFrameData(-32768, -32768, &stereo_frame);
   VerifyFramesAreEqual(stereo_frame, frame_);
 }
@@ -305,7 +282,6 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsSucceedsOnStereo) {
 
   AudioFrame swapped_frame;
   swapped_frame.samples_per_channel_ = 320;
-  swapped_frame.num_channels_ = 2;
   SetFrameData(1, 0, &swapped_frame);
 
   AudioFrameOperations::SwapStereoChannels(&frame_);
@@ -319,9 +295,9 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsMuted) {
 }
 
 TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
-  frame_.num_channels_ = 1;
   // Set data to "stereo", despite it being a mono frame.
   SetFrameData(0, 1, &frame_);
+  frame_.num_channels_ = 1;  // Reset to mono after SetFrameData().
 
   AudioFrame orig_frame;
   orig_frame.CopyFrom(frame_);
@@ -336,7 +312,6 @@ TEST_F(AudioFrameOperationsTest, MuteDisabled) {
 
   AudioFrame muted_frame;
   muted_frame.samples_per_channel_ = 320;
-  muted_frame.num_channels_ = 2;
   SetFrameData(1000, -1000, &muted_frame);
   VerifyFramesAreEqual(muted_frame, frame_);
 }
@@ -506,7 +481,6 @@ TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) {
 
   AudioFrame clipped_frame;
   clipped_frame.samples_per_channel_ = 320;
-  clipped_frame.num_channels_ = 2;
   SetFrameData(32767, -32768, &clipped_frame);
   VerifyFramesAreEqual(clipped_frame, frame_);
 }
@@ -517,7 +491,6 @@ TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
 
   AudioFrame scaled_frame;
   scaled_frame.samples_per_channel_ = 320;
-  scaled_frame.num_channels_ = 2;
   SetFrameData(2, -3, &scaled_frame);
   VerifyFramesAreEqual(scaled_frame, frame_);
 }
@@ -534,13 +507,11 @@ TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) {
 }
 
 TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
-  frame_.num_channels_ = 1;
   SetFrameData(4000, &frame_);
   EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, &frame_));
 
   AudioFrame clipped_frame;
   clipped_frame.samples_per_channel_ = 320;
-  clipped_frame.num_channels_ = 1;
   SetFrameData(32767, &clipped_frame);
   VerifyFramesAreEqual(clipped_frame, frame_);
 
@@ -551,13 +522,11 @@ TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
 }
 
 TEST_F(AudioFrameOperationsTest, ScaleWithSatSucceeds) {
-  frame_.num_channels_ = 1;
   SetFrameData(1, &frame_);
   EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, &frame_));
 
   AudioFrame scaled_frame;
   scaled_frame.samples_per_channel_ = 320;
-  scaled_frame.num_channels_ = 1;
   SetFrameData(2, &scaled_frame);
   VerifyFramesAreEqual(scaled_frame, frame_);
 }
@@ -573,10 +542,11 @@ TEST_F(AudioFrameOperationsTest, AddingXToEmptyGivesX) {
   AudioFrame frame_to_add_to;
   frame_to_add_to.mutable_data();  // Unmute the frame.
+  ASSERT_FALSE(frame_to_add_to.muted());
 
+  SetFrameData(1000, &frame_);
   frame_to_add_to.samples_per_channel_ = 0;
   frame_to_add_to.num_channels_ = frame_.num_channels_;
 
-  SetFrameData(1000, &frame_);
   AudioFrameOperations::Add(frame_, &frame_to_add_to);
   VerifyFramesAreEqual(frame_, frame_to_add_to);
 }
@@ -584,19 +554,27 @@ TEST_F(AudioFrameOperationsTest, AddingXToEmptyGivesX) {
 TEST_F(AudioFrameOperationsTest, AddingXToMutedGivesX) {
   AudioFrame frame_to_add_to;
+  ASSERT_TRUE(frame_to_add_to.muted());
 
   frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+  SetFrameData(1000, &frame_);
   frame_to_add_to.num_channels_ = frame_.num_channels_;
 
-  SetFrameData(1000, &frame_);
   AudioFrameOperations::Add(frame_, &frame_to_add_to);
   VerifyFramesAreEqual(frame_, frame_to_add_to);
 }
 
 TEST_F(AudioFrameOperationsTest, AddingMutedToXGivesX) {
   AudioFrame frame_to_add_to;
+
+  // Clear the internal buffer to avoid msan issues since we're changing
+  // buffer dimension member variables outside of the class without updating
+  // the buffer.
+  RTC_DCHECK(frame_to_add_to.muted());
+  frame_to_add_to.mutable_data();
+
   frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+  SetFrameData(1000, &frame_to_add_to);  // sets frame to mono.
   frame_to_add_to.num_channels_ = frame_.num_channels_;
-  SetFrameData(1000, &frame_to_add_to);
 
   AudioFrame frame_copy;
   frame_copy.CopyFrom(frame_to_add_to);
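Add() accumulates the source frame into the destination with saturation, and a muted source contributes nothing; that is the behavior these tests pin down. A minimal hedged sketch, assuming the WebRTC tree:

#include "api/audio/audio_frame.h"
#include "audio/utility/audio_frame_operations.h"

// Mix `source` into `*accumulator`; matching dimensions are the caller's
// responsibility, and samples saturate rather than wrap.
void MixInto(const webrtc::AudioFrame& source,
             webrtc::AudioFrame* accumulator) {
  webrtc::AudioFrameOperations::Add(source, accumulator);
}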
@@ -609,7 +587,6 @@ TEST_F(AudioFrameOperationsTest, AddingMutedToXGivesX) {
 TEST_F(AudioFrameOperationsTest, AddingTwoFramesProducesTheirSum) {
   AudioFrame frame_to_add_to;
   frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
-  frame_to_add_to.num_channels_ = frame_.num_channels_;
   SetFrameData(1000, &frame_to_add_to);
   SetFrameData(2000, &frame_);
 
@@ -94,6 +94,7 @@ inline float FloatS16ToDbfs(float v) {
 // Copy audio from `src` channels to `dest` channels unless `src` and `dest`
 // point to the same address. `src` and `dest` must have the same number of
 // channels, and there must be sufficient space allocated in `dest`.
+// TODO: b/335805780 - Accept ArrayView.
 template <typename T>
 void CopyAudioIfNeeded(const T* const* src,
                        int num_frames,
@@ -110,6 +111,7 @@ void CopyAudioIfNeeded(const T* const* src,
 // by `deinterleaved`. There must be sufficient space allocated in the
 // `deinterleaved` buffers (`num_channel` buffers with `samples_per_channel`
 // per buffer).
+// TODO: b/335805780 - Accept ArrayView.
 template <typename T>
 void Deinterleave(const T* interleaved,
                   size_t samples_per_channel,
@@ -128,6 +130,7 @@ void Deinterleave(const T* interleaved,
 // Interleave audio from the channel buffers pointed to by `deinterleaved` to
 // `interleaved`. There must be sufficient space allocated in `interleaved`
 // (`samples_per_channel` * `num_channels`).
+// TODO: b/335805780 - Accept ArrayView.
 template <typename T>
 void Interleave(const T* const* deinterleaved,
                 size_t samples_per_channel,
@@ -146,6 +149,7 @@ void Interleave(const T* const* deinterleaved,
 // Copies audio from a single channel buffer pointed to by `mono` to each
 // channel of `interleaved`. There must be sufficient space allocated in
 // `interleaved` (`samples_per_channel` * `num_channels`).
+// TODO: b/335805780 - Accept ArrayView.
 template <typename T>
 void UpmixMonoToInterleaved(const T* mono,
                             int num_frames,
@@ -159,6 +163,7 @@ void UpmixMonoToInterleaved(const T* mono,
   }
 }
 
+// TODO: b/335805780 - Accept ArrayView.
 template <typename T, typename Intermediate>
 void DownmixToMono(const T* const* input_channels,
                    size_t num_frames,
@@ -175,6 +180,7 @@ void DownmixToMono(const T* const* input_channels,
 
 // Downmixes an interleaved multichannel signal to a single channel by averaging
 // all channels.
+// TODO: b/335805780 - Accept ArrayView.
 template <typename T, typename Intermediate>
 void DownmixInterleavedToMonoImpl(const T* interleaved,
                                   size_t num_frames,
@@ -197,12 +203,14 @@ void DownmixInterleavedToMonoImpl(const T* interleaved,
   }
 }
 
+// TODO: b/335805780 - Accept ArrayView.
 template <typename T>
 void DownmixInterleavedToMono(const T* interleaved,
                               size_t num_frames,
                               int num_channels,
                               T* deinterleaved);
 
+// TODO: b/335805780 - Accept ArrayView.
 template <>
 void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved,
                                        size_t num_frames,
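Every helper in this header still takes raw pointers, hence the uniform TODOs rather than signature changes. A hedged sketch of the one concrete instantiation declared above, assuming the WebRTC tree and the common_audio header path:

#include <cstddef>
#include <cstdint>
#include <vector>

#include "common_audio/include/audio_util.h"

// Average an interleaved stereo buffer down to mono.
void InterleavedStereoToMono(const std::vector<int16_t>& interleaved,
                             size_t num_frames,
                             std::vector<int16_t>* mono) {
  // Parameter order per the declaration above:
  // interleaved input, frame count, channel count, mono output.
  webrtc::DownmixInterleavedToMono<int16_t>(interleaved.data(), num_frames,
                                            /*num_channels=*/2, mono->data());
}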