Remove AudioFrameOperations::Add, ApplyHalfGain and Scale.

These methods are unused.

Bug: none
Change-Id: If1499c7c0bc925c2504b7a1318b2d7c4fc4240b1
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/349500
Reviewed-by: Per Åhgren <peah@webrtc.org>
Commit-Queue: Tomas Gunnarsson <tommi@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#42214}
This commit is contained in:
Tommi 2024-05-01 00:31:44 +02:00 committed by WebRTC LUCI CQ
parent 81eca8306b
commit 1a436f7e9e
3 changed files with 0 additions and 201 deletions

View file

@@ -29,48 +29,6 @@ const float kMuteFadeInc = 1.0f / kMuteFadeFrames;
} // namespace
// Mixes `frame_to_add` into `result_frame`: the per-sample sums are
// saturated to the int16_t range, and the VAD/speech-type metadata of the
// result is merged. An empty or muted `result_frame` simply receives a copy
// of `frame_to_add`'s samples.
void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
                               AudioFrame* result_frame) {
  // Sanity checks: valid destination with a matching channel count.
  RTC_DCHECK(result_frame);
  RTC_DCHECK_GT(result_frame->num_channels_, 0);
  RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_);

  // A muted result frame carries no prior audio data, so the first write
  // overwrites instead of accumulating.
  bool overwrite = result_frame->muted();
  if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) {
    // Only an empty result frame may differ in length; adopt the source's.
    RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0);
    result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_;
    overwrite = true;
  }

  // Merge VAD state: "active" dominates, then "unknown"; otherwise the
  // result's existing (passive) state is kept.
  const bool any_active =
      result_frame->vad_activity_ == AudioFrame::kVadActive ||
      frame_to_add.vad_activity_ == AudioFrame::kVadActive;
  const bool any_unknown =
      result_frame->vad_activity_ == AudioFrame::kVadUnknown ||
      frame_to_add.vad_activity_ == AudioFrame::kVadUnknown;
  if (any_active) {
    result_frame->vad_activity_ = AudioFrame::kVadActive;
  } else if (any_unknown) {
    result_frame->vad_activity_ = AudioFrame::kVadUnknown;
  }

  // Mixed frames with differing speech types have no single defined type.
  if (result_frame->speech_type_ != frame_to_add.speech_type_) {
    result_frame->speech_type_ = AudioFrame::kUndefined;
  }

  // A muted source contributes only zeros; nothing left to do.
  if (frame_to_add.muted()) {
    return;
  }

  auto src = frame_to_add.data_view();
  auto dst = result_frame->mutable_data(frame_to_add.samples_per_channel_,
                                        frame_to_add.num_channels_);
  if (overwrite) {
    // No previous data: plain copy of the source samples.
    std::copy(src.begin(), src.end(), dst.data());
    return;
  }
  for (size_t i = 0; i < src.size(); ++i) {
    // Sum in 32 bits, then clamp back into the int16_t range.
    const int32_t sum =
        static_cast<int32_t>(dst[i]) + static_cast<int32_t>(src[i]);
    dst[i] = rtc::saturated_cast<int16_t>(sum);
  }
}
void AudioFrameOperations::QuadToStereo(rtc::ArrayView<const int16_t> src_audio,
size_t samples_per_channel,
rtc::ArrayView<int16_t> dst_audio) {
@@ -240,35 +198,6 @@ void AudioFrameOperations::Mute(AudioFrame* frame) {
Mute(frame, true, true);
}
// Attenuates every sample in `frame` by 6 dB via an arithmetic right shift
// (each sample is halved, truncating toward negative infinity).
void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
  RTC_DCHECK(frame);
  RTC_DCHECK_GT(frame->num_channels_, 0);
  // Nothing to do for channel-less frames or muted (all-zero) frames.
  if (frame->num_channels_ < 1 || frame->muted()) {
    return;
  }
  const size_t total_samples =
      frame->samples_per_channel_ * frame->num_channels_;
  int16_t* data = frame->mutable_data();
  for (size_t i = 0; i < total_samples; ++i) {
    data[i] = data[i] >> 1;
  }
}
int AudioFrameOperations::Scale(float left, float right, AudioFrame* frame) {
if (frame->num_channels_ != 2) {
return -1;
} else if (frame->muted()) {
return 0;
}
int16_t* frame_data = frame->mutable_data();
for (size_t i = 0; i < frame->samples_per_channel_; i++) {
frame_data[2 * i] = static_cast<int16_t>(left * frame_data[2 * i]);
frame_data[2 * i + 1] = static_cast<int16_t>(right * frame_data[2 * i + 1]);
}
return 0;
}
int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame* frame) {
if (frame->muted()) {
return 0;

View file

@@ -25,15 +25,6 @@ namespace webrtc {
// than a class.
class AudioFrameOperations {
public:
// Add samples in `frame_to_add` with samples in `result_frame`
// putting the results in `result_frame`. The fields
// `vad_activity_` and `speech_type_` of the result frame are
// updated. If `result_frame` is empty (`samples_per_channel_`==0),
// the samples in `frame_to_add` are added to it. The number of
// channels and number of samples per channel must match except when
// `result_frame` is empty.
static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);
// Downmixes 4 channels `src_audio` to stereo `dst_audio`. This is an in-place
// operation, meaning `src_audio` and `dst_audio` may point to the same
// buffer.
@@ -83,11 +74,6 @@ class AudioFrameOperations {
// Zero out contents of frame.
static void Mute(AudioFrame* frame);
// Halve samples in `frame`.
static void ApplyHalfGain(AudioFrame* frame);
static int Scale(float left, float right, AudioFrame* frame);
static int ScaleWithSat(float scale, AudioFrame* frame);
};

View file

@@ -443,64 +443,6 @@ TEST_F(AudioFrameOperationsTest, MuteEndAlreadyMuted) {
EXPECT_TRUE(frame_.muted());
}
// ApplyHalfGain on a frame filled with 2s must produce a frame filled with
// 1s (each sample arithmetically shifted right by one bit).
TEST_F(AudioFrameOperationsTest, ApplyHalfGainSucceeds) {
  SetFrameData(2, &frame_);

  // Build the expected result: same geometry, samples of 1.
  AudioFrame half_gain_frame;
  half_gain_frame.num_channels_ = frame_.num_channels_;
  half_gain_frame.samples_per_channel_ = frame_.samples_per_channel_;
  SetFrameData(1, &half_gain_frame);

  AudioFrameOperations::ApplyHalfGain(&frame_);
  VerifyFramesAreEqual(half_gain_frame, frame_);
}
// A muted frame has no sample data; halving the gain must be a no-op that
// leaves the frame muted rather than unmuting it.
TEST_F(AudioFrameOperationsTest, ApplyHalfGainMuted) {
  ASSERT_TRUE(frame_.muted());
  AudioFrameOperations::ApplyHalfGain(&frame_);
  EXPECT_TRUE(frame_.muted());
}
// TODO(andrew): should not allow negative scales.
// Disabled: documents intended-but-unimplemented argument validation.
// Scale() should reject non-stereo frames and negative gains with -1.
TEST_F(AudioFrameOperationsTest, DISABLED_ScaleFailsWithBadParameters) {
  // Mono and 3-channel frames are not scalable by the stereo Scale().
  frame_.num_channels_ = 1;
  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_));
  frame_.num_channels_ = 3;
  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_));
  frame_.num_channels_ = 2;

  // Negative gains should be rejected (see the TODO above).
  EXPECT_EQ(-1, AudioFrameOperations::Scale(-1.0, 1.0, &frame_));
  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, -1.0, &frame_));
}
// TODO(andrew): fix the wraparound bug. We should always saturate.
// Disabled: documents intended saturation behavior. Scaling +/-4000 by 10
// overflows int16_t and should clamp to 32767/-32768 instead of wrapping.
TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) {
  SetFrameData(4000, -4000, &frame_);
  EXPECT_EQ(0, AudioFrameOperations::Scale(10.0, 10.0, &frame_));

  // Expected result after proper saturation.
  AudioFrame clipped_frame;
  clipped_frame.samples_per_channel_ = 320;
  SetFrameData(32767, -32768, &clipped_frame);
  VerifyFramesAreEqual(clipped_frame, frame_);
}
// Scaling a stereo frame of (1, -1) sample pairs by gains (2.0, 3.0) must
// yield (2, -3) pairs.
TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
  SetFrameData(1, -1, &frame_);
  EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_));

  // Expected result: left channel doubled, right channel tripled.
  AudioFrame scaled_frame;
  scaled_frame.samples_per_channel_ = 320;
  SetFrameData(2, -3, &scaled_frame);
  VerifyFramesAreEqual(scaled_frame, frame_);
}
// Scaling a muted (all-zero) frame is a successful no-op: it returns 0 and
// the frame stays muted.
TEST_F(AudioFrameOperationsTest, ScaleMuted) {
  ASSERT_TRUE(frame_.muted());
  EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_));
  EXPECT_TRUE(frame_.muted());
}
// TODO(andrew): should fail with a negative scale.
TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) {
EXPECT_EQ(-1, AudioFrameOperations::ScaleWithSat(-1.0, &frame_));
@@ -537,63 +479,5 @@ TEST_F(AudioFrameOperationsTest, ScaleWithSatMuted) {
EXPECT_TRUE(frame_.muted());
}
// Adding frame X to an empty result frame (samples_per_channel_ == 0) must
// copy X verbatim into the result instead of summing.
TEST_F(AudioFrameOperationsTest, AddingXToEmptyGivesX) {
  // When samples_per_channel_ is 0, the frame counts as empty and zero.
  AudioFrame frame_to_add_to;
  frame_to_add_to.mutable_data();  // Unmute the frame.
  ASSERT_FALSE(frame_to_add_to.muted());
  SetFrameData(1000, &frame_);
  frame_to_add_to.samples_per_channel_ = 0;
  frame_to_add_to.num_channels_ = frame_.num_channels_;

  AudioFrameOperations::Add(frame_, &frame_to_add_to);
  VerifyFramesAreEqual(frame_, frame_to_add_to);
}
// Adding frame X to a muted result frame must give exactly X: muted means
// "no previous data", so samples are copied rather than accumulated.
TEST_F(AudioFrameOperationsTest, AddingXToMutedGivesX) {
  AudioFrame frame_to_add_to;
  ASSERT_TRUE(frame_to_add_to.muted());
  frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
  SetFrameData(1000, &frame_);
  frame_to_add_to.num_channels_ = frame_.num_channels_;

  AudioFrameOperations::Add(frame_, &frame_to_add_to);
  VerifyFramesAreEqual(frame_, frame_to_add_to);
}
// Adding a muted source frame to X must leave X unchanged, since a muted
// frame contributes only zeros.
TEST_F(AudioFrameOperationsTest, AddingMutedToXGivesX) {
  AudioFrame frame_to_add_to;
  // Clear the internal buffer to avoid msan issues since we're changing
  // buffer dimension member variables outside of the class without updating
  // the buffer.
  RTC_DCHECK(frame_to_add_to.muted());
  frame_to_add_to.mutable_data();
  frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
  SetFrameData(1000, &frame_to_add_to);  // sets frame to mono.
  frame_to_add_to.num_channels_ = frame_.num_channels_;

  // Snapshot the destination so the post-Add state can be compared to it.
  AudioFrame frame_copy;
  frame_copy.CopyFrom(frame_to_add_to);

  ASSERT_TRUE(frame_.muted());
  AudioFrameOperations::Add(frame_, &frame_to_add_to);
  VerifyFramesAreEqual(frame_copy, frame_to_add_to);
}
// Adding two non-empty frames must produce the element-wise sum of their
// samples (1000 + 2000 per sample here).
TEST_F(AudioFrameOperationsTest, AddingTwoFramesProducesTheirSum) {
  AudioFrame frame_to_add_to;
  frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
  SetFrameData(1000, &frame_to_add_to);
  SetFrameData(2000, &frame_);

  AudioFrameOperations::Add(frame_, &frame_to_add_to);
  // Rebuild the expected frame in-place: each sample becomes 2000 + 1000.
  SetFrameData(frame_.data()[0] + 1000, &frame_);
  VerifyFramesAreEqual(frame_, frame_to_add_to);
}
} // namespace
} // namespace webrtc