diff --git a/api/audio/BUILD.gn b/api/audio/BUILD.gn
index de654c7b03..749a0d9261 100644
--- a/api/audio/BUILD.gn
+++ b/api/audio/BUILD.gn
@@ -34,6 +34,7 @@ rtc_library("audio_frame_api") {
   ]

   deps = [
+    "..:array_view",
     "..:rtp_packet_info",
     "../../rtc_base:checks",
     "../../rtc_base:logging",
diff --git a/api/audio/audio_frame.cc b/api/audio/audio_frame.cc
index 4ddaaf65d5..375e1b54f3 100644
--- a/api/audio/audio_frame.cc
+++ b/api/audio/audio_frame.cc
@@ -22,6 +22,20 @@ AudioFrame::AudioFrame() {
   static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
 }

+AudioFrame::AudioFrame(int sample_rate_hz,
+                       size_t num_channels,
+                       ChannelLayout layout /*= CHANNEL_LAYOUT_UNSUPPORTED*/)
+    : samples_per_channel_(SampleRateToDefaultChannelSize(sample_rate_hz)),
+      sample_rate_hz_(sample_rate_hz),
+      num_channels_(num_channels),
+      channel_layout_(layout == CHANNEL_LAYOUT_UNSUPPORTED
+                          ? GuessChannelLayout(num_channels)
+                          : layout) {
+  RTC_DCHECK_LE(num_channels_, kMaxConcurrentChannels);
+  RTC_DCHECK_GT(sample_rate_hz_, 0);
+  RTC_DCHECK_GT(samples_per_channel_, 0u);
+}
+
 void AudioFrame::Reset() {
   ResetWithoutMuting();
   muted_ = true;
@@ -51,6 +65,7 @@ void AudioFrame::UpdateFrame(uint32_t timestamp,
                              SpeechType speech_type,
                              VADActivity vad_activity,
                              size_t num_channels) {
+  RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
   timestamp_ = timestamp;
   samples_per_channel_ = samples_per_channel;
   sample_rate_hz_ = sample_rate_hz;
@@ -110,12 +125,26 @@ int64_t AudioFrame::ElapsedProfileTimeMs() const {
 }

 const int16_t* AudioFrame::data() const {
-  return muted_ ? empty_data() : data_;
+  return muted_ ? zeroed_data().begin() : data_;
+}
+
+rtc::ArrayView<const int16_t> AudioFrame::data_view() const {
+  const auto samples = samples_per_channel_ * num_channels_;
+  // If `data_view()` returns a view whose data() is nullptr, it is most
+  // likely because samples_per_channel_ and/or num_channels_ haven't been
+  // properly set. Since `data_view()` returns an rtc::ArrayView<>, we
+  // inherit ArrayView's behavior that data() always returns nullptr when the
+  // view size is 0. So even when a muted AudioFrame should return
+  // `zeroed_data()`, the view points to nullptr if samples_per_channel_ or
+  // num_channels_ is 0.
+  return muted_ ? zeroed_data().subview(0, samples)
+                : rtc::ArrayView<const int16_t>(&data_[0], samples);
 }

-// TODO(henrik.lundin) Can we skip zeroing the buffer?
-// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647.
 int16_t* AudioFrame::mutable_data() {
+  // TODO: bugs.webrtc.org/5647 - Can we skip zeroing the buffer?
+  // Consider instead zeroing the buffer when `muted_` is set to `true`.
   if (muted_) {
     memset(data_, 0, kMaxDataSizeBytes);
     muted_ = false;
@@ -123,6 +152,29 @@ int16_t* AudioFrame::mutable_data() {
   return data_;
 }

+rtc::ArrayView<int16_t> AudioFrame::mutable_data(size_t samples_per_channel,
+                                                 size_t num_channels) {
+  const size_t total_samples = samples_per_channel * num_channels;
+  RTC_CHECK_LE(total_samples, kMaxDataSizeSamples);
+  RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
+  // Sanity check of the argument values during development: if
+  // `samples_per_channel` is larger than 0 but <= kMaxConcurrentChannels,
+  // chances are the arguments were passed in the wrong order.
+  RTC_DCHECK((samples_per_channel == 0 && num_channels == 0) ||
+             samples_per_channel > kMaxConcurrentChannels);
+
+  // TODO: bugs.webrtc.org/5647 - Can we skip zeroing the buffer?
+  // Consider instead zeroing the whole buffer when `muted_` is set to
+  // `true`.
+  if (muted_) {
+    memset(data_, 0, total_samples * sizeof(int16_t));
+    muted_ = false;
+  }
+  samples_per_channel_ = samples_per_channel;
+  num_channels_ = num_channels;
+  return rtc::ArrayView<int16_t>(&data_[0], total_samples);
+}
+
 void AudioFrame::Mute() {
   muted_ = true;
 }
@@ -146,10 +198,20 @@ void AudioFrame::SetLayoutAndNumChannels(ChannelLayout layout,
   RTC_CHECK_LE(samples_per_channel_ * num_channels_, kMaxDataSizeSamples);
 }

+void AudioFrame::SetSampleRateAndChannelSize(int sample_rate) {
+  sample_rate_hz_ = sample_rate;
+  // We could call `AudioProcessing::GetFrameSize()` here, but that requires
+  // adding a dependency on the ":audio_processing" build target, which can
+  // complicate the dependency tree. Some refactoring is probably in order to
+  // get consistency around this, since many places across the code assume
+  // this default buffer size.
+  samples_per_channel_ = SampleRateToDefaultChannelSize(sample_rate_hz_);
+}
+
 // static
-const int16_t* AudioFrame::empty_data() {
+rtc::ArrayView<const int16_t> AudioFrame::zeroed_data() {
   static int16_t* null_data = new int16_t[kMaxDataSizeSamples]();
-  return &null_data[0];
+  return rtc::ArrayView<const int16_t>(null_data, kMaxDataSizeSamples);
 }

 }  // namespace webrtc
diff --git a/api/audio/audio_frame.h b/api/audio/audio_frame.h
index 81d1255b56..665127eca5 100644
--- a/api/audio/audio_frame.h
+++ b/api/audio/audio_frame.h
@@ -14,11 +14,30 @@
 #include <stddef.h>
 #include <stdint.h>

+#include "api/array_view.h"
 #include "api/audio/channel_layout.h"
 #include "api/rtp_packet_infos.h"
+#include "rtc_base/checks.h"

 namespace webrtc {

+// Default WebRTC buffer size in milliseconds.
+constexpr size_t kDefaultAudioBufferLengthMs = 10u;
+
+// Default total number of audio buffers per second, derived from the default
+// buffer length.
+constexpr size_t kDefaultAudioBuffersPerSec =
+    1000u / kDefaultAudioBufferLengthMs;
+
+// Returns the number of samples a buffer needs to hold for ~10ms of a single
+// audio channel at a given sample rate.
+// See also `AudioProcessing::GetFrameSize()`.
+inline size_t SampleRateToDefaultChannelSize(size_t sample_rate) {
+  // Basic sanity check. 192kHz is the highest supported input sample rate.
+  RTC_DCHECK_LE(sample_rate, 192000);
+  return sample_rate / kDefaultAudioBuffersPerSec;
+}
+/////////////////////////////////////////////////////////////////////
+
 /* This class holds up to 120 ms of super-wideband (32 kHz) stereo audio. It
  * allows for adding and subtracting frames while keeping track of the resulting
  * states.
@@ -57,6 +76,15 @@ class AudioFrame {

   AudioFrame();

+  // Constructs an audio frame with the given sample rate and channel
+  // information. `samples_per_channel()` is initialized to the default 10ms
+  // buffer size. If `layout` is not specified (i.e. left at the default,
+  // CHANNEL_LAYOUT_UNSUPPORTED), the channel layout is derived (guessed)
+  // from `num_channels`.
+  AudioFrame(int sample_rate_hz,
+             size_t num_channels,
+             ChannelLayout layout = CHANNEL_LAYOUT_UNSUPPORTED);
+
   AudioFrame(const AudioFrame&) = delete;
   AudioFrame& operator=(const AudioFrame&) = delete;

@@ -68,6 +96,7 @@ class AudioFrame {
   // ResetWithoutMuting() to skip this wasteful zeroing.
   void ResetWithoutMuting();

+  // TODO: b/335805780 - Accept ArrayView.
   void UpdateFrame(uint32_t timestamp,
                    const int16_t* data,
                    size_t samples_per_channel,
@@ -90,11 +119,29 @@ int64_t ElapsedProfileTimeMs() const;

   // data() returns a zeroed static buffer if the frame is muted.
-  // mutable_frame() always returns a non-static buffer; the first call to
-  // mutable_frame() zeros the non-static buffer and marks the frame unmuted.
+  // TODO: b/335805780 - Return ArrayView.
   const int16_t* data() const;
+
+  // Returns a read-only view of all the valid samples held by the AudioFrame.
+  // Note that if `samples_per_channel_` or `num_channels_` is 0 (e.g. on a
+  // default-constructed frame), the returned view is empty and its data() is
+  // nullptr.
+  rtc::ArrayView<const int16_t> data_view() const;
+
+  // mutable_data() always returns a non-static buffer; the first call to
+  // mutable_data() zeros the buffer and marks the frame as unmuted.
+  // TODO: b/335805780 - Return ArrayView based on the current values for
+  // samples per channel and num channels.
   int16_t* mutable_data();

+  // Grants write access to the audio buffer. The size of the returned
+  // writable view is determined by the `samples_per_channel` and
+  // `num_channels` dimensions, which the function checks for validity and
+  // stores in the members exposed as `samples_per_channel()` and
+  // `num_channels()` respectively. If the frame is currently muted, the
+  // returned range is zeroed out and the frame is marked as unmuted.
+  rtc::ArrayView<int16_t> mutable_data(size_t samples_per_channel,
+                                       size_t num_channels);
+
   // Prefer to mute frames using AudioFrameOperations::Mute.
   void Mute();

   // Frame is muted by default.
@@ -119,6 +166,10 @@
     return absolute_capture_timestamp_ms_;
   }

+  // Sets `sample_rate_hz_` and `samples_per_channel_` based on the given
+  // sample rate, using the default 10ms buffer size to compute
+  // `samples_per_channel_`.
+  void SetSampleRateAndChannelSize(int sample_rate);
+
   // RTP timestamp of the first sample in the AudioFrame.
   uint32_t timestamp_ = 0;
   // Time since the first frame in milliseconds.
@@ -157,9 +208,9 @@

  private:
   // A permanently zeroed out buffer to represent muted frames. This is a
-  // header-only class, so the only way to avoid creating a separate empty
+  // header-only class, so the only way to avoid creating a separate zeroed
   // buffer per translation unit is to wrap a static in an inline function.
-  static const int16_t* empty_data();
+  static rtc::ArrayView<const int16_t> zeroed_data();

   int16_t data_[kMaxDataSizeSamples];
   bool muted_ = true;
diff --git a/api/audio/test/audio_frame_unittest.cc b/api/audio/test/audio_frame_unittest.cc
index dbf45ceabc..52d7e424b9 100644
--- a/api/audio/test/audio_frame_unittest.cc
+++ b/api/audio/test/audio_frame_unittest.cc
@@ -19,10 +19,27 @@ namespace webrtc {

 namespace {

+bool AllSamplesAre(int16_t sample, rtc::ArrayView<const int16_t> samples) {
+  for (const auto s : samples) {
+    if (s != sample) {
+      return false;
+    }
+  }
+  return true;
+}
+
 bool AllSamplesAre(int16_t sample, const AudioFrame& frame) {
-  const int16_t* frame_data = frame.data();
-  for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
-    if (frame_data[i] != sample) {
+  return AllSamplesAre(sample, frame.data_view());
+}
+
+// Checks the values of all samples in the AudioFrame buffer, whether they are
+// valid or not, disregarding the `muted()` state of the frame. I.e. iterates
+// over `max_16bit_samples()` rather than the valid-audio size
+// `samples_per_channel() * num_channels()`.
+bool AllBufferSamplesAre(int16_t sample, const AudioFrame& frame) {
+  const auto* data = frame.data_view().data();
+  for (size_t i = 0; i < frame.max_16bit_samples(); ++i) {
+    if (data[i] != sample) {
       return false;
     }
   }
   return true;
 }

@@ -38,29 +55,46 @@ constexpr size_t kSamplesPerChannel = kSampleRateHz / 100;

 }  // namespace

-TEST(AudioFrameTest, FrameStartsMuted) {
+TEST(AudioFrameTest, FrameStartsZeroedAndMuted) {
   AudioFrame frame;
   EXPECT_TRUE(frame.muted());
+  EXPECT_TRUE(frame.data_view().empty());
   EXPECT_TRUE(AllSamplesAre(0, frame));
 }

+// TODO: b/335805780 - Delete test when `mutable_data()` returns ArrayView.
+TEST(AudioFrameTest, UnmutedFrameIsInitiallyZeroedLegacy) {
+  AudioFrame frame(kSampleRateHz, kNumChannelsMono, CHANNEL_LAYOUT_NONE);
+  frame.mutable_data();
+  EXPECT_FALSE(frame.muted());
+  EXPECT_TRUE(AllSamplesAre(0, frame));
+  EXPECT_TRUE(AllBufferSamplesAre(0, frame));
+}
+
 TEST(AudioFrameTest, UnmutedFrameIsInitiallyZeroed) {
   AudioFrame frame;
-  frame.mutable_data();
+  auto data = frame.mutable_data(kSamplesPerChannel, kNumChannelsMono);
   EXPECT_FALSE(frame.muted());
+  EXPECT_EQ(frame.data_view().size(), kSamplesPerChannel);
+  EXPECT_EQ(data.size(), kSamplesPerChannel);
   EXPECT_TRUE(AllSamplesAre(0, frame));
 }

 TEST(AudioFrameTest, MutedFrameBufferIsZeroed) {
   AudioFrame frame;
-  int16_t* frame_data = frame.mutable_data();
+  int16_t* frame_data =
+      frame.mutable_data(kSamplesPerChannel, kNumChannelsMono).begin();
+  EXPECT_FALSE(frame.muted());
+  // Fill the reserved buffer with non-zero data.
   for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
     frame_data[i] = 17;
   }
   ASSERT_TRUE(AllSamplesAre(17, frame));
+  ASSERT_TRUE(AllBufferSamplesAre(17, frame));
   frame.Mute();
   EXPECT_TRUE(frame.muted());
   EXPECT_TRUE(AllSamplesAre(0, frame));
+  ASSERT_TRUE(AllBufferSamplesAre(0, frame));
 }

 TEST(AudioFrameTest, UpdateFrameMono) {
@@ -95,11 +129,17 @@ TEST(AudioFrameTest, UpdateFrameMultiChannel) {
   EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
   EXPECT_EQ(kNumChannelsStereo, frame.num_channels());
   EXPECT_EQ(CHANNEL_LAYOUT_STEREO, frame.channel_layout());
+  EXPECT_TRUE(frame.muted());

-  frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
+  // Initialize the frame with valid `kNumChannels5_1` data to make sure we
+  // get an unmuted frame with valid samples.
+  int16_t samples[kSamplesPerChannel * kNumChannels5_1] = {17};
+  frame.UpdateFrame(kTimestamp, samples /* data */, kSamplesPerChannel,
                     kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                     kNumChannels5_1);
+  EXPECT_FALSE(frame.muted());
   EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
+  EXPECT_EQ(kSamplesPerChannel * kNumChannels5_1, frame.data_view().size());
   EXPECT_EQ(kNumChannels5_1, frame.num_channels());
   EXPECT_EQ(CHANNEL_LAYOUT_5_1, frame.channel_layout());
 }
@@ -121,6 +161,7 @@ TEST(AudioFrameTest, CopyFrom) {
   EXPECT_EQ(frame2.vad_activity_, frame1.vad_activity_);
   EXPECT_EQ(frame2.num_channels_, frame1.num_channels_);

+  EXPECT_EQ(frame2.data_view().size(), frame1.data_view().size());
   EXPECT_EQ(frame2.muted(), frame1.muted());
   EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
diff --git a/audio/audio_transport_impl.cc b/audio/audio_transport_impl.cc
index 42a81d5b4a..d1ecb0f87b 100644
--- a/audio/audio_transport_impl.cc
+++ b/audio/audio_transport_impl.cc
@@ -70,20 +70,21 @@ void ProcessCaptureFrame(uint32_t delay_ms,
 int Resample(const AudioFrame& frame,
              const int destination_sample_rate,
              PushResampler<int16_t>* resampler,
-             int16_t* destination) {
+             rtc::ArrayView<int16_t> destination) {
   TRACE_EVENT2("webrtc", "Resample", "frame sample rate",
                frame.sample_rate_hz_, "destination_sample_rate",
                destination_sample_rate);
   const int number_of_channels = static_cast<int>(frame.num_channels_);
   const int target_number_of_samples_per_channel =
       destination_sample_rate / 100;
+  RTC_CHECK_EQ(destination.size(),
+               frame.num_channels_ * target_number_of_samples_per_channel);
+
   resampler->InitializeIfNeeded(frame.sample_rate_hz_, destination_sample_rate,
                                 number_of_channels);

   // TODO(yujo): make resampler take an AudioFrame, and add special case
   // handling of muted frames.
-  return resampler->Resample(
-      frame.data(), frame.samples_per_channel_ * number_of_channels,
-      destination, number_of_channels * target_number_of_samples_per_channel);
+  return resampler->Resample(frame.data_view(), destination);
 }

 }  // namespace

@@ -232,8 +233,10 @@ int32_t AudioTransportImpl::NeedMorePlayData(const size_t nSamples,
     RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
   }

-  nSamplesOut = Resample(mixed_frame_, samplesPerSec, &render_resampler_,
-                         static_cast<int16_t*>(audioSamples));
+  nSamplesOut =
+      Resample(mixed_frame_, samplesPerSec, &render_resampler_,
+               rtc::ArrayView<int16_t>(static_cast<int16_t*>(audioSamples),
+                                       nSamples * nChannels));
   RTC_DCHECK_EQ(nSamplesOut, nChannels * nSamples);
   return 0;
 }
@@ -263,8 +266,10 @@ void AudioTransportImpl::PullRenderData(int bits_per_sample,
   *elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
   *ntp_time_ms = mixed_frame_.ntp_time_ms_;

-  auto output_samples = Resample(mixed_frame_, sample_rate, &render_resampler_,
-                                 static_cast<int16_t*>(audio_data));
+  int output_samples =
+      Resample(mixed_frame_, sample_rate, &render_resampler_,
+               rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_data),
+                                       number_of_channels * number_of_frames));
   RTC_DCHECK_EQ(output_samples, number_of_channels * number_of_frames);
 }
diff --git a/audio/remix_resample.cc b/audio/remix_resample.cc
index 178af622a1..a0cf7cc3bf 100644
--- a/audio/remix_resample.cc
+++ b/audio/remix_resample.cc
@@ -14,6 +14,7 @@
 #include "audio/utility/audio_frame_operations.h"
 #include "common_audio/resampler/include/push_resampler.h"
 #include "rtc_base/checks.h"
+#include "rtc_base/logging.h"

 namespace webrtc {
 namespace voe {
@@ -67,15 +68,22 @@ void RemixAndResample(const int16_t* src_data,
   // how much to zero here; or 2) make resampler accept a hint that the input is
   // zeroed.
   const size_t src_length = samples_per_channel * audio_ptr_num_channels;

-  int out_length =
-      resampler->Resample(audio_ptr, src_length, dst_frame->mutable_data(),
-                          AudioFrame::kMaxDataSizeSamples);
+  // Ensure the `samples_per_channel_` member is set correctly based on the
+  // destination sample rate, number of channels and assumed 10ms buffer size.
+  // TODO(tommi): Could we rather assume that this has been done by the caller?
+  dst_frame->SetSampleRateAndChannelSize(dst_frame->sample_rate_hz_);
+
+  int out_length = resampler->Resample(
+      rtc::ArrayView<const int16_t>(audio_ptr, src_length),
+      dst_frame->mutable_data(dst_frame->samples_per_channel_,
+                              dst_frame->num_channels_));
   if (out_length == -1) {
     RTC_FATAL() << "Resample failed: audio_ptr = " << audio_ptr
                 << ", src_length = " << src_length
                 << ", dst_frame->mutable_data() = "
                 << dst_frame->mutable_data();
   }
+
   dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;

   // Upmix after resampling.
diff --git a/audio/remix_resample.h b/audio/remix_resample.h
index bd8da76c6a..580ba40310 100644
--- a/audio/remix_resample.h
+++ b/audio/remix_resample.h
@@ -17,6 +17,8 @@
 namespace webrtc {
 namespace voe {

+// Note: The RemixAndResample methods assume 10ms buffer sizes.
+
 // Upmix or downmix and resample the audio to `dst_frame`. Expects `dst_frame`
 // to have its sample rate and channels members set to the desired values.
 // Updates the `samples_per_channel_` member accordingly.
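Reviewer note (not part of the diff): a minimal usage sketch of how the new
constructor and the view-based accessors introduced above are intended to fit
together. The 48 kHz stereo values are illustrative assumptions, not taken
from this CL.

  #include "api/array_view.h"
  #include "api/audio/audio_frame.h"
  #include "rtc_base/checks.h"

  void Sketch() {
    // samples_per_channel() is initialized to the default 10ms size, i.e.
    // 480 samples at 48 kHz; the layout is guessed from num_channels.
    webrtc::AudioFrame frame(/*sample_rate_hz=*/48000, /*num_channels=*/2);

    // Checked write access; zeroes the returned range first if the frame
    // was muted, and marks it unmuted.
    rtc::ArrayView<int16_t> samples =
        frame.mutable_data(frame.samples_per_channel(), frame.num_channels());
    for (int16_t& s : samples) {
      s = 17;
    }

    // Read-only view over samples_per_channel() * num_channels() samples.
    rtc::ArrayView<const int16_t> view = frame.data_view();
    RTC_DCHECK_EQ(view.size(), 480u * 2u);
  }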
diff --git a/common_audio/resampler/include/push_resampler.h b/common_audio/resampler/include/push_resampler.h
index 3da67120f0..35783b64f2 100644
--- a/common_audio/resampler/include/push_resampler.h
+++ b/common_audio/resampler/include/push_resampler.h
@@ -14,11 +14,14 @@
 #include <memory>
 #include <vector>

+#include "api/array_view.h"
+
 namespace webrtc {

 class PushSincResampler;

 // Wraps PushSincResampler to provide stereo support.
+// Note: This implementation assumes 10ms buffer sizes throughout.
 // TODO(ajm): add support for an arbitrary number of channels.
 template <typename T>
 class PushResampler {
@@ -34,7 +37,7 @@ class PushResampler {

   // Returns the total number of samples provided in destination (e.g. 32 kHz,
   // 2 channel audio gives 640 samples).
-  int Resample(const T* src, size_t src_length, T* dst, size_t dst_capacity);
+  int Resample(rtc::ArrayView<const T> src, rtc::ArrayView<T> dst);

  private:
   int src_sample_rate_hz_;
diff --git a/common_audio/resampler/push_resampler.cc b/common_audio/resampler/push_resampler.cc
index 810d778993..0af5ec783d 100644
--- a/common_audio/resampler/push_resampler.cc
+++ b/common_audio/resampler/push_resampler.cc
@@ -73,32 +73,31 @@ int PushResampler<T>::InitializeIfNeeded(int src_sample_rate_hz,
 }

 template <typename T>
-int PushResampler<T>::Resample(const T* src,
-                               size_t src_length,
-                               T* dst,
-                               size_t dst_capacity) {
+int PushResampler<T>::Resample(rtc::ArrayView<const T> src,
+                               rtc::ArrayView<T> dst) {
   // These checks used to be factored out of this template function due to
   // Windows debug build issues with clang. http://crbug.com/615050
   const size_t src_size_10ms = (src_sample_rate_hz_ / 100) * num_channels_;
   const size_t dst_size_10ms = (dst_sample_rate_hz_ / 100) * num_channels_;
-  RTC_DCHECK_EQ(src_length, src_size_10ms);
-  RTC_DCHECK_GE(dst_capacity, dst_size_10ms);
+  RTC_DCHECK_EQ(src.size(), src_size_10ms);
+  RTC_DCHECK_GE(dst.size(), dst_size_10ms);

   if (src_sample_rate_hz_ == dst_sample_rate_hz_) {
     // The old resampler provides this memcpy facility in the case of matching
     // sample rates, so reproduce it here for the sinc resampler.
-    memcpy(dst, src, src_length * sizeof(T));
-    return static_cast<int>(src_length);
+    memcpy(dst.data(), src.data(), src.size() * sizeof(T));
+    return static_cast<int>(src.size());
   }

-  const size_t src_length_mono = src_length / num_channels_;
-  const size_t dst_capacity_mono = dst_capacity / num_channels_;
+  const size_t src_length_mono = src.size() / num_channels_;
+  const size_t dst_capacity_mono = dst.size() / num_channels_;

   for (size_t ch = 0; ch < num_channels_; ++ch) {
     channel_data_array_[ch] = channel_resamplers_[ch].source.data();
   }

-  Deinterleave(src, src_length_mono, num_channels_, channel_data_array_.data());
+  Deinterleave(src.data(), src_length_mono, num_channels_,
+               channel_data_array_.data());

   size_t dst_length_mono = 0;

@@ -112,7 +111,8 @@ int PushResampler<T>::Resample(const T* src,
     channel_data_array_[ch] = channel_resamplers_[ch].destination.data();
   }

-  Interleave(channel_data_array_.data(), dst_length_mono, num_channels_, dst);
+  Interleave(channel_data_array_.data(), dst_length_mono, num_channels_,
+             dst.data());
   return static_cast<int>(dst_length_mono * num_channels_);
 }
diff --git a/modules/audio_coding/acm2/acm_resampler.cc b/modules/audio_coding/acm2/acm_resampler.cc
index e307c6ca57..bcac7b6ec5 100644
--- a/modules/audio_coding/acm2/acm_resampler.cc
+++ b/modules/audio_coding/acm2/acm_resampler.cc
@@ -45,8 +45,9 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio,
     return -1;
   }

-  int out_length =
-      resampler_.Resample(in_audio, in_length, out_audio, out_capacity_samples);
+  int out_length = resampler_.Resample(
+      rtc::ArrayView<const int16_t>(in_audio, in_length),
+      rtc::ArrayView<int16_t>(out_audio, out_capacity_samples));
   if (out_length == -1) {
     RTC_LOG(LS_ERROR) << "Resample(" << in_audio << ", " << in_length << ", "
                       << out_audio << ", " << out_capacity_samples
diff --git a/modules/audio_mixer/audio_mixer_impl_unittest.cc b/modules/audio_mixer/audio_mixer_impl_unittest.cc
index 2044cb9b90..b04b706902 100644
--- a/modules/audio_mixer/audio_mixer_impl_unittest.cc
+++ b/modules/audio_mixer/audio_mixer_impl_unittest.cc
@@ -517,13 +517,8 @@ TEST(AudioMixerDeathTest, MultipleChannelsAndHighRate) {
   other_frame->samples_per_channel_ = kSamplesPerChannel;
   mixer->AddSource(&other_source);

-#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
   EXPECT_DEATH(mixer->Mix(kNumberOfChannels, &frame_for_mixing), "");
-#elif !RTC_DCHECK_IS_ON
-  mixer->Mix(kNumberOfChannels, &frame_for_mixing);
-  EXPECT_EQ(frame_for_mixing.num_channels_, kNumberOfChannels);
-  EXPECT_EQ(frame_for_mixing.sample_rate_hz_,
-            HighOutputRateCalculator::kDefaultFrequency);
 #endif
 }

diff --git a/modules/audio_mixer/frame_combiner_unittest.cc b/modules/audio_mixer/frame_combiner_unittest.cc
index 6c64d0852a..486f551f78 100644
--- a/modules/audio_mixer/frame_combiner_unittest.cc
+++ b/modules/audio_mixer/frame_combiner_unittest.cc
@@ -139,8 +139,9 @@ TEST(FrameCombiner, ContainsAllRtpPacketInfos) {
   }
 }

-// There are DCHECKs in place to check for invalid parameters.
-TEST(FrameCombinerDeathTest, DebugBuildCrashesWithManyChannels) {
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// There are CHECKs in place to catch invalid parameters.
+TEST(FrameCombinerDeathTest, BuildCrashesWithManyChannels) {
   FrameCombiner combiner(true);
   for (const int rate : {8000, 18000, 34000, 48000}) {
     for (const int number_of_channels : {10, 20, 21}) {
@@ -149,7 +150,9 @@
         continue;
       }
       const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
-      SetUpFrames(rate, number_of_channels);
+      // With an unsupported channel count, this will crash in
+      // `AudioFrame::UpdateFrame`.
+      EXPECT_DEATH(SetUpFrames(rate, number_of_channels), "");

       const int number_of_frames = 2;
       SCOPED_TRACE(
@@ -157,18 +160,14 @@
       const std::vector<AudioFrame*> frames_to_combine(
           all_frames.begin(), all_frames.begin() + number_of_frames);
       AudioFrame audio_frame_for_mixing;
-#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
       EXPECT_DEATH(
           combiner.Combine(frames_to_combine, number_of_channels, rate,
                            frames_to_combine.size(), &audio_frame_for_mixing),
           "");
-#elif !RTC_DCHECK_IS_ON
-      combiner.Combine(frames_to_combine, number_of_channels, rate,
-                       frames_to_combine.size(), &audio_frame_for_mixing);
-#endif
     }
   }
 }
+#endif  // GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)

 TEST(FrameCombinerDeathTest, DebugBuildCrashesWithHighRate) {
   FrameCombiner combiner(true);
@@ -249,7 +248,8 @@ TEST(FrameCombiner, CombiningZeroFramesShouldProduceSilence) {
 TEST(FrameCombiner, CombiningOneFrameShouldNotChangeFrame) {
   FrameCombiner combiner(false);
   for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
-    for (const int number_of_channels : {1, 2, 4, 8, 10}) {
+    // kMaxConcurrentChannels is 8.
+    for (const int number_of_channels : {1, 2, 4, kMaxConcurrentChannels}) {
       SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 1));

       AudioFrame audio_frame_for_mixing;
diff --git a/modules/audio_processing/agc2/vad_wrapper.cc b/modules/audio_processing/agc2/vad_wrapper.cc
index af6325dea7..b39122495c 100644
--- a/modules/audio_processing/agc2/vad_wrapper.cc
+++ b/modules/audio_processing/agc2/vad_wrapper.cc
@@ -104,8 +104,7 @@ float VoiceActivityDetectorWrapper::Analyze(AudioFrameView<const float> frame) {
   }
   // Resample the first channel of `frame`.
   RTC_DCHECK_EQ(frame.samples_per_channel(), frame_size_);
-  resampler_.Resample(frame.channel(0).data(), frame_size_,
-                      resampled_buffer_.data(), resampled_buffer_.size());
+  resampler_.Resample(frame.channel(0), resampled_buffer_);

   return vad_->Analyze(resampled_buffer_);
 }
diff --git a/modules/audio_processing/audio_processing_unittest.cc b/modules/audio_processing/audio_processing_unittest.cc
index 4d3fc65daf..819e98059d 100644
--- a/modules/audio_processing/audio_processing_unittest.cc
+++ b/modules/audio_processing/audio_processing_unittest.cc
@@ -2198,7 +2198,8 @@ TEST_P(AudioProcessingTest, Formats) {
           // necessary.
           ASSERT_EQ(ref_length,
                     static_cast<size_t>(resampler.Resample(
-                        out_ptr, out_length, cmp_data.get(), ref_length)));
+                        rtc::ArrayView<const float>(out_ptr, out_length),
+                        rtc::ArrayView<float>(cmp_data.get(), ref_length))));
           out_ptr = cmp_data.get();
         }
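Reviewer note (not part of the diff): for completeness, a sketch of the
updated PushResampler call pattern under the 10 ms buffer assumption now
documented in push_resampler.h. The concrete rates and buffer sizes are
illustrative, not taken from this CL.

  #include "api/array_view.h"
  #include "common_audio/resampler/include/push_resampler.h"

  int Sketch() {
    webrtc::PushResampler<int16_t> resampler;
    resampler.InitializeIfNeeded(/*src_sample_rate_hz=*/16000,
                                 /*dst_sample_rate_hz=*/48000,
                                 /*num_channels=*/1);
    int16_t src[160] = {0};  // 10 ms of mono audio at 16 kHz.
    int16_t dst[480];        // 10 ms of mono audio at 48 kHz.
    // Returns the total number of samples written to `dst` (480 here),
    // or -1 on failure.
    return resampler.Resample(rtc::ArrayView<const int16_t>(src, 160),
                              rtc::ArrayView<int16_t>(dst, 480));
  }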