diff --git a/audio/utility/audio_frame_operations.cc b/audio/utility/audio_frame_operations.cc index 23cc6453fd..c4582a4d3e 100644 --- a/audio/utility/audio_frame_operations.cc +++ b/audio/utility/audio_frame_operations.cc @@ -29,48 +29,6 @@ const float kMuteFadeInc = 1.0f / kMuteFadeFrames; } // namespace -void AudioFrameOperations::Add(const AudioFrame& frame_to_add, - AudioFrame* result_frame) { - // Sanity check. - RTC_DCHECK(result_frame); - RTC_DCHECK_GT(result_frame->num_channels_, 0); - RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_); - - bool no_previous_data = result_frame->muted(); - if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) { - // Special case we have no data to start with. - RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0); - result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_; - no_previous_data = true; - } - - if (result_frame->vad_activity_ == AudioFrame::kVadActive || - frame_to_add.vad_activity_ == AudioFrame::kVadActive) { - result_frame->vad_activity_ = AudioFrame::kVadActive; - } else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown || - frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) { - result_frame->vad_activity_ = AudioFrame::kVadUnknown; - } - - if (result_frame->speech_type_ != frame_to_add.speech_type_) - result_frame->speech_type_ = AudioFrame::kUndefined; - - if (!frame_to_add.muted()) { - auto in_data = frame_to_add.data_view(); - auto out_data = result_frame->mutable_data( - frame_to_add.samples_per_channel_, frame_to_add.num_channels_); - if (no_previous_data) { - std::copy(in_data.begin(), in_data.end(), out_data.data()); - } else { - for (size_t i = 0; i < in_data.size(); ++i) { - const int32_t wrap_guard = static_cast<int32_t>(out_data[i]) + - static_cast<int32_t>(in_data[i]); - out_data[i] = rtc::saturated_cast<int16_t>(wrap_guard); - } - } - } -} - void AudioFrameOperations::QuadToStereo(rtc::ArrayView<const int16_t> src_audio, size_t samples_per_channel, rtc::ArrayView<int16_t> 
dst_audio) { @@ -240,35 +198,6 @@ void AudioFrameOperations::Mute(AudioFrame* frame) { Mute(frame, true, true); } -void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) { - RTC_DCHECK(frame); - RTC_DCHECK_GT(frame->num_channels_, 0); - if (frame->num_channels_ < 1 || frame->muted()) { - return; - } - - int16_t* frame_data = frame->mutable_data(); - for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; - i++) { - frame_data[i] = frame_data[i] >> 1; - } -} - -int AudioFrameOperations::Scale(float left, float right, AudioFrame* frame) { - if (frame->num_channels_ != 2) { - return -1; - } else if (frame->muted()) { - return 0; - } - - int16_t* frame_data = frame->mutable_data(); - for (size_t i = 0; i < frame->samples_per_channel_; i++) { - frame_data[2 * i] = static_cast<int16_t>(left * frame_data[2 * i]); - frame_data[2 * i + 1] = static_cast<int16_t>(right * frame_data[2 * i + 1]); - } - return 0; -} - int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame* frame) { if (frame->muted()) { return 0; diff --git a/audio/utility/audio_frame_operations.h b/audio/utility/audio_frame_operations.h index 7b076e1487..3d1e996aec 100644 --- a/audio/utility/audio_frame_operations.h +++ b/audio/utility/audio_frame_operations.h @@ -25,15 +25,6 @@ namespace webrtc { // than a class. class AudioFrameOperations { public: - // Add samples in `frame_to_add` with samples in `result_frame` - // putting the results in `results_frame`. The fields - // `vad_activity_` and `speech_type_` of the result frame are - // updated. If `result_frame` is empty (`samples_per_channel_`==0), - // the samples in `frame_to_add` are added to it. The number of - // channels and number of samples per channel must match except when - // `result_frame` is empty. - static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame); - // Downmixes 4 channels `src_audio` to stereo `dst_audio`. 
This is an in-place // operation, meaning `src_audio` and `dst_audio` may point to the same // buffer. @@ -83,11 +74,6 @@ class AudioFrameOperations { // Zero out contents of frame. static void Mute(AudioFrame* frame); - // Halve samples in `frame`. - static void ApplyHalfGain(AudioFrame* frame); - - static int Scale(float left, float right, AudioFrame* frame); - static int ScaleWithSat(float scale, AudioFrame* frame); }; diff --git a/audio/utility/audio_frame_operations_unittest.cc b/audio/utility/audio_frame_operations_unittest.cc index a1f8db3ece..d50b685b05 100644 --- a/audio/utility/audio_frame_operations_unittest.cc +++ b/audio/utility/audio_frame_operations_unittest.cc @@ -443,64 +443,6 @@ TEST_F(AudioFrameOperationsTest, MuteEndAlreadyMuted) { EXPECT_TRUE(frame_.muted()); } -TEST_F(AudioFrameOperationsTest, ApplyHalfGainSucceeds) { - SetFrameData(2, &frame_); - - AudioFrame half_gain_frame; - half_gain_frame.num_channels_ = frame_.num_channels_; - half_gain_frame.samples_per_channel_ = frame_.samples_per_channel_; - SetFrameData(1, &half_gain_frame); - - AudioFrameOperations::ApplyHalfGain(&frame_); - VerifyFramesAreEqual(half_gain_frame, frame_); -} - -TEST_F(AudioFrameOperationsTest, ApplyHalfGainMuted) { - ASSERT_TRUE(frame_.muted()); - AudioFrameOperations::ApplyHalfGain(&frame_); - EXPECT_TRUE(frame_.muted()); -} - -// TODO(andrew): should not allow negative scales. -TEST_F(AudioFrameOperationsTest, DISABLED_ScaleFailsWithBadParameters) { - frame_.num_channels_ = 1; - EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_)); - - frame_.num_channels_ = 3; - EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_)); - - frame_.num_channels_ = 2; - EXPECT_EQ(-1, AudioFrameOperations::Scale(-1.0, 1.0, &frame_)); - EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, -1.0, &frame_)); -} - -// TODO(andrew): fix the wraparound bug. We should always saturate. 
-TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) { - SetFrameData(4000, -4000, &frame_); - EXPECT_EQ(0, AudioFrameOperations::Scale(10.0, 10.0, &frame_)); - - AudioFrame clipped_frame; - clipped_frame.samples_per_channel_ = 320; - SetFrameData(32767, -32768, &clipped_frame); - VerifyFramesAreEqual(clipped_frame, frame_); -} - -TEST_F(AudioFrameOperationsTest, ScaleSucceeds) { - SetFrameData(1, -1, &frame_); - EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_)); - - AudioFrame scaled_frame; - scaled_frame.samples_per_channel_ = 320; - SetFrameData(2, -3, &scaled_frame); - VerifyFramesAreEqual(scaled_frame, frame_); -} - -TEST_F(AudioFrameOperationsTest, ScaleMuted) { - ASSERT_TRUE(frame_.muted()); - EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_)); - EXPECT_TRUE(frame_.muted()); -} - // TODO(andrew): should fail with a negative scale. TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) { EXPECT_EQ(-1, AudioFrameOperations::ScaleWithSat(-1.0, &frame_)); @@ -537,63 +479,5 @@ TEST_F(AudioFrameOperationsTest, ScaleWithSatMuted) { EXPECT_TRUE(frame_.muted()); } -TEST_F(AudioFrameOperationsTest, AddingXToEmptyGivesX) { - // When samples_per_channel_ is 0, the frame counts as empty and zero. - AudioFrame frame_to_add_to; - frame_to_add_to.mutable_data(); // Unmute the frame. 
- ASSERT_FALSE(frame_to_add_to.muted()); - - SetFrameData(1000, &frame_); - frame_to_add_to.samples_per_channel_ = 0; - frame_to_add_to.num_channels_ = frame_.num_channels_; - - AudioFrameOperations::Add(frame_, &frame_to_add_to); - VerifyFramesAreEqual(frame_, frame_to_add_to); -} - -TEST_F(AudioFrameOperationsTest, AddingXToMutedGivesX) { - AudioFrame frame_to_add_to; - ASSERT_TRUE(frame_to_add_to.muted()); - - frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_; - SetFrameData(1000, &frame_); - frame_to_add_to.num_channels_ = frame_.num_channels_; - - AudioFrameOperations::Add(frame_, &frame_to_add_to); - VerifyFramesAreEqual(frame_, frame_to_add_to); -} - -TEST_F(AudioFrameOperationsTest, AddingMutedToXGivesX) { - AudioFrame frame_to_add_to; - - // Clear the internal buffer to avoid msan issues since we're changing - // buffer dimension member variables outside of the class without updating - // the buffer. - RTC_DCHECK(frame_to_add_to.muted()); - frame_to_add_to.mutable_data(); - - frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_; - SetFrameData(1000, &frame_to_add_to); // sets frame to mono. - frame_to_add_to.num_channels_ = frame_.num_channels_; - - AudioFrame frame_copy; - frame_copy.CopyFrom(frame_to_add_to); - - ASSERT_TRUE(frame_.muted()); - AudioFrameOperations::Add(frame_, &frame_to_add_to); - VerifyFramesAreEqual(frame_copy, frame_to_add_to); -} - -TEST_F(AudioFrameOperationsTest, AddingTwoFramesProducesTheirSum) { - AudioFrame frame_to_add_to; - frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_; - SetFrameData(1000, &frame_to_add_to); - SetFrameData(2000, &frame_); - - AudioFrameOperations::Add(frame_, &frame_to_add_to); - SetFrameData(frame_.data()[0] + 1000, &frame_); - VerifyFramesAreEqual(frame_, frame_to_add_to); -} - } // namespace } // namespace webrtc