AudioFrameView: size_t -> int

Bug: webrtc:7494
Change-Id: I46b1328f3d7da721e144cc3752ed4f458084cf62
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/234522
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Commit-Queue: Alessio Bazzica <alessiob@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#35163}
This commit is contained in:
Alessio Bazzica 2021-10-07 14:08:59 +02:00 committed by WebRTC LUCI CQ
parent 82ea4ee9bf
commit 5c3ae49b44
11 changed files with 47 additions and 43 deletions

View file

@@ -155,7 +155,7 @@ void AecDumpImpl::WriteRenderStreamMessage(
audioproc::ReverseStream* msg = event->mutable_reverse_stream();
for (size_t i = 0; i < src.num_channels(); ++i) {
for (int i = 0; i < src.num_channels(); ++i) {
const auto& channel_view = src.channel(i);
msg->add_channel(channel_view.begin(), sizeof(float) * channel_view.size());
}

View file

@@ -23,7 +23,7 @@ void CaptureStreamInfo::AddInput(const AudioFrameView<const float>& src) {
RTC_DCHECK(task_);
auto* stream = task_->GetEvent()->mutable_stream();
for (size_t i = 0; i < src.num_channels(); ++i) {
for (int i = 0; i < src.num_channels(); ++i) {
const auto& channel_view = src.channel(i);
stream->add_input_channel(channel_view.begin(),
sizeof(float) * channel_view.size());
@@ -34,7 +34,7 @@ void CaptureStreamInfo::AddOutput(const AudioFrameView<const float>& src) {
RTC_DCHECK(task_);
auto* stream = task_->GetEvent()->mutable_stream();
for (size_t i = 0; i < src.num_channels(); ++i) {
for (int i = 0; i < src.num_channels(); ++i) {
const auto& channel_view = src.channel(i);
stream->add_output_channel(channel_view.begin(),
sizeof(float) * channel_view.size());

View file

@@ -91,6 +91,7 @@ rtc_library("fixed_digital") {
"../../../rtc_base:checks",
"../../../rtc_base:gtest_prod",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_conversions",
"../../../rtc_base:safe_minmax",
"../../../system_wrappers:metrics",
]

View file

@@ -106,7 +106,7 @@ void CopyAudio(AudioFrameView<const float> src,
RTC_DCHECK_GT(src.num_channels(), 0);
RTC_DCHECK_GT(src.samples_per_channel(), 0);
RTC_DCHECK_EQ(dst.size(), src.num_channels());
for (size_t c = 0; c < src.num_channels(); ++c) {
for (int c = 0; c < src.num_channels(); ++c) {
rtc::ArrayView<const float> channel_view = src.channel(c);
RTC_DCHECK_EQ(channel_view.size(), src.samples_per_channel());
RTC_DCHECK_EQ(dst[c].size(), src.samples_per_channel());

View file

@@ -57,7 +57,7 @@ std::array<float, kSubFramesInFrame> FixedDigitalLevelEstimator::ComputeLevel(
// Compute max envelope without smoothing.
std::array<float, kSubFramesInFrame> envelope{};
for (size_t channel_idx = 0; channel_idx < float_frame.num_channels();
for (int channel_idx = 0; channel_idx < float_frame.num_channels();
++channel_idx) {
const auto channel = float_frame.channel(channel_idx);
for (int sub_frame = 0; sub_frame < kSubFramesInFrame; ++sub_frame) {

View file

@@ -25,7 +25,7 @@ bool GainCloseToOne(float gain_factor) {
}
void ClipSignal(AudioFrameView<float> signal) {
for (size_t k = 0; k < signal.num_channels(); ++k) {
for (int k = 0; k < signal.num_channels(); ++k) {
rtc::ArrayView<float> channel_view = signal.channel(k);
for (auto& sample : channel_view) {
sample = rtc::SafeClamp(sample, kMinFloatS16Value, kMaxFloatS16Value);
@@ -45,7 +45,7 @@ void ApplyGainWithRamping(float last_gain_linear,
// Gain is constant and different from 1.
if (last_gain_linear == gain_at_end_of_frame_linear) {
for (size_t k = 0; k < float_frame.num_channels(); ++k) {
for (int k = 0; k < float_frame.num_channels(); ++k) {
rtc::ArrayView<float> channel_view = float_frame.channel(k);
for (auto& sample : channel_view) {
sample *= gain_at_end_of_frame_linear;
@@ -58,8 +58,8 @@ void ApplyGainWithRamping(float last_gain_linear,
const float increment = (gain_at_end_of_frame_linear - last_gain_linear) *
inverse_samples_per_channel;
float gain = last_gain_linear;
for (size_t i = 0; i < float_frame.samples_per_channel(); ++i) {
for (size_t ch = 0; ch < float_frame.num_channels(); ++ch) {
for (int i = 0; i < float_frame.samples_per_channel(); ++i) {
for (int ch = 0; ch < float_frame.num_channels(); ++ch) {
float_frame.channel(ch)[i] *= gain;
}
gain += increment;
@@ -93,7 +93,7 @@ void GainApplier::SetGainFactor(float gain_factor) {
current_gain_factor_ = gain_factor;
}
void GainApplier::Initialize(size_t samples_per_channel) {
void GainApplier::Initialize(int samples_per_channel) {
RTC_DCHECK_GT(samples_per_channel, 0);
samples_per_channel_ = static_cast<int>(samples_per_channel);
inverse_samples_per_channel_ = 1.f / samples_per_channel_;

View file

@@ -25,7 +25,7 @@ class GainApplier {
float GetGainFactor() const { return current_gain_factor_; }
private:
void Initialize(size_t samples_per_channel);
void Initialize(int samples_per_channel);
// Whether to clip samples after gain is applied. If 'true', result
// will fit in FloatS16 range.

View file

@@ -18,6 +18,7 @@
#include "modules/audio_processing/agc2/agc2_common.h"
#include "modules/audio_processing/logging/apm_data_dumper.h"
#include "rtc_base/checks.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/numerics/safe_minmax.h"
namespace webrtc {
@@ -29,14 +30,14 @@ namespace {
// sub-frame, linear interpolation is replaced with a power function which
// reduces the chances of over-shooting (and hence saturation), however reducing
// the fixed gain effectiveness.
constexpr float kAttackFirstSubframeInterpolationPower = 8.f;
constexpr float kAttackFirstSubframeInterpolationPower = 8.0f;
void InterpolateFirstSubframe(float last_factor,
float current_factor,
rtc::ArrayView<float> subframe) {
const auto n = subframe.size();
constexpr auto p = kAttackFirstSubframeInterpolationPower;
for (size_t i = 0; i < n; ++i) {
const int n = rtc::dchecked_cast<int>(subframe.size());
constexpr float p = kAttackFirstSubframeInterpolationPower;
for (int i = 0; i < n; ++i) {
subframe[i] = std::pow(1.f - i / n, p) * (last_factor - current_factor) +
current_factor;
}
@@ -44,10 +45,10 @@ void InterpolateFirstSubframe(float last_factor,
void ComputePerSampleSubframeFactors(
const std::array<float, kSubFramesInFrame + 1>& scaling_factors,
size_t samples_per_channel,
int samples_per_channel,
rtc::ArrayView<float> per_sample_scaling_factors) {
const size_t num_subframes = scaling_factors.size() - 1;
const size_t subframe_size =
const int num_subframes = scaling_factors.size() - 1;
const int subframe_size =
rtc::CheckedDivExact(samples_per_channel, num_subframes);
// Handle first sub-frame differently in case of attack.
@@ -59,12 +60,12 @@ void ComputePerSampleSubframeFactors(
per_sample_scaling_factors.subview(0, subframe_size)));
}
for (size_t i = is_attack ? 1 : 0; i < num_subframes; ++i) {
const size_t subframe_start = i * subframe_size;
for (int i = is_attack ? 1 : 0; i < num_subframes; ++i) {
const int subframe_start = i * subframe_size;
const float scaling_start = scaling_factors[i];
const float scaling_end = scaling_factors[i + 1];
const float scaling_diff = (scaling_end - scaling_start) / subframe_size;
for (size_t j = 0; j < subframe_size; ++j) {
for (int j = 0; j < subframe_size; ++j) {
per_sample_scaling_factors[subframe_start + j] =
scaling_start + scaling_diff * j;
}
@@ -73,18 +74,18 @@
void ScaleSamples(rtc::ArrayView<const float> per_sample_scaling_factors,
AudioFrameView<float> signal) {
const size_t samples_per_channel = signal.samples_per_channel();
const int samples_per_channel = signal.samples_per_channel();
RTC_DCHECK_EQ(samples_per_channel, per_sample_scaling_factors.size());
for (size_t i = 0; i < signal.num_channels(); ++i) {
auto channel = signal.channel(i);
for (size_t j = 0; j < samples_per_channel; ++j) {
for (int i = 0; i < signal.num_channels(); ++i) {
rtc::ArrayView<float> channel = signal.channel(i);
for (int j = 0; j < samples_per_channel; ++j) {
channel[j] = rtc::SafeClamp(channel[j] * per_sample_scaling_factors[j],
kMinFloatS16Value, kMaxFloatS16Value);
}
}
}
void CheckLimiterSampleRate(size_t sample_rate_hz) {
void CheckLimiterSampleRate(int sample_rate_hz) {
// Check that per_sample_scaling_factors_ is large enough.
RTC_DCHECK_LE(sample_rate_hz,
kMaximalNumberOfSamplesPerChannel * 1000 / kFrameDurationMs);
@@ -92,7 +93,7 @@ void CheckLimiterSampleRate(size_t sample_rate_hz) {
} // namespace
Limiter::Limiter(size_t sample_rate_hz,
Limiter::Limiter(int sample_rate_hz,
ApmDataDumper* apm_data_dumper,
const std::string& histogram_name)
: interp_gain_curve_(apm_data_dumper, histogram_name),
@@ -104,7 +105,8 @@ Limiter::Limiter(size_t sample_rate_hz,
Limiter::~Limiter() = default;
void Limiter::Process(AudioFrameView<float> signal) {
const auto level_estimate = level_estimator_.ComputeLevel(signal);
const std::array<float, kSubFramesInFrame> level_estimate =
level_estimator_.ComputeLevel(signal);
RTC_DCHECK_EQ(level_estimate.size() + 1, scaling_factors_.size());
scaling_factors_[0] = last_scaling_factor_;
@@ -113,7 +115,7 @@ void Limiter::Process(AudioFrameView<float> signal) {
return interp_gain_curve_.LookUpGainToApply(x);
});
const size_t samples_per_channel = signal.samples_per_channel();
const int samples_per_channel = signal.samples_per_channel();
RTC_DCHECK_LE(samples_per_channel, kMaximalNumberOfSamplesPerChannel);
auto per_sample_scaling_factors = rtc::ArrayView<float>(
@@ -136,7 +138,7 @@ InterpolatedGainCurve::Stats Limiter::GetGainCurveStats() const {
return interp_gain_curve_.get_stats();
}
void Limiter::SetSampleRate(size_t sample_rate_hz) {
void Limiter::SetSampleRate(int sample_rate_hz) {
CheckLimiterSampleRate(sample_rate_hz);
level_estimator_.SetSampleRate(sample_rate_hz);
}

View file

@@ -24,7 +24,7 @@ class ApmDataDumper;
class Limiter {
public:
Limiter(size_t sample_rate_hz,
Limiter(int sample_rate_hz,
ApmDataDumper* apm_data_dumper,
const std::string& histogram_name_prefix);
Limiter(const Limiter& limiter) = delete;
@@ -40,7 +40,7 @@ class Limiter {
// * below kMaximalNumberOfSamplesPerChannel*1000/kFrameDurationMs
// so that samples_per_channel fit in the
// per_sample_scaling_factors_ array.
void SetSampleRate(size_t sample_rate_hz);
void SetSampleRate(int sample_rate_hz);
// Resets the internal state.
void Reset();

View file

@@ -27,7 +27,7 @@ constexpr int kFramesPerSecond = 100;
float FrameEnergy(const AudioFrameView<const float>& audio) {
float energy = 0.0f;
for (size_t k = 0; k < audio.num_channels(); ++k) {
for (int k = 0; k < audio.num_channels(); ++k) {
float channel_energy =
std::accumulate(audio.channel(k).begin(), audio.channel(k).end(), 0.0f,
[](float a, float b) -> float { return a + b * b; });

View file

@@ -22,12 +22,13 @@ class AudioFrameView {
// `num_channels` and `channel_size` describe the T**
// `audio_samples`. `audio_samples` is assumed to point to a
// two-dimensional |num_channels * channel_size| array of floats.
AudioFrameView(T* const* audio_samples,
size_t num_channels,
size_t channel_size)
AudioFrameView(T* const* audio_samples, int num_channels, int channel_size)
: audio_samples_(audio_samples),
num_channels_(num_channels),
channel_size_(channel_size) {}
channel_size_(channel_size) {
RTC_DCHECK_GE(num_channels_, 0);
RTC_DCHECK_GE(channel_size_, 0);
}
// Implicit cast to allow converting Frame<float> to
// Frame<const float>.
@@ -39,17 +40,17 @@
AudioFrameView() = delete;
size_t num_channels() const { return num_channels_; }
int num_channels() const { return num_channels_; }
size_t samples_per_channel() const { return channel_size_; }
int samples_per_channel() const { return channel_size_; }
rtc::ArrayView<T> channel(size_t idx) {
rtc::ArrayView<T> channel(int idx) {
RTC_DCHECK_LE(0, idx);
RTC_DCHECK_LE(idx, num_channels_);
return rtc::ArrayView<T>(audio_samples_[idx], channel_size_);
}
rtc::ArrayView<const T> channel(size_t idx) const {
rtc::ArrayView<const T> channel(int idx) const {
RTC_DCHECK_LE(0, idx);
RTC_DCHECK_LE(idx, num_channels_);
return rtc::ArrayView<const T>(audio_samples_[idx], channel_size_);
@@ -59,8 +60,8 @@
private:
T* const* audio_samples_;
size_t num_channels_;
size_t channel_size_;
int num_channels_;
int channel_size_;
};
} // namespace webrtc