webrtc/modules/audio_processing/include/audio_frame_view.h
Tommi 6056976709 Updates to AudioFrameView and VectorFloatFrame
Using DeinterleavedView<> simplifies these two classes, so now the
classes are arguably thin wrappers on top of DeinterleavedView<> and
AudioFrameView<> can be replaced with DeinterleavedView<>.

The changes are:
* Make VectorFloatFrame not use a vector of vectors but rather
  just hold a one-dimensional vector of samples and leave the mapping
  into the buffer up to DeinterleavedView<>.
* Remove the `channel_ptrs_` vector which was required due to an
  issue with AudioFrameView.
* AudioFrameView is now a wrapper over DeinterleavedView<>. The most
  important change is to remove the `audio_samples_` pointer, which
  pointed into an externally owned pointer array (in addition to
  the array that holds the samples themselves). Now AudioFrameView
  can be initialized without requiring such a long-lived array.

Bug: chromium:335805780
Change-Id: I8f3c23c0ac4b5a337f68e9161fc3a97271f4e87d
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/352504
Commit-Queue: Tomas Gunnarsson <tommi@webrtc.org>
Reviewed-by: Per Åhgren <peah@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#42498}
2024-06-17 12:13:40 +00:00

66 lines
2.6 KiB
C++

/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_VIEW_H_
#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_VIEW_H_
#include "api/audio/audio_view.h"
namespace webrtc {
// Non-owning view over deinterleaved audio data in T** form, where T is a
// numeric sample type.
template <class T>
class AudioFrameView {
 public:
  // Constructs a view over `audio_samples`, which is assumed to reference a
  // two-dimensional |num_channels * channel_size| array of T where all
  // channels are laid out contiguously, one after the other. Only the first
  // channel pointer is ever dereferenced; the remaining channel offsets are
  // derived from `num_channels` and `channel_size` by the internal
  // DeinterleavedView<>, so no long-lived array of channel pointers needs to
  // be kept alive by the caller.
  AudioFrameView(T* const* audio_samples, int num_channels, int channel_size)
      : view_(num_channels != 0 && channel_size != 0 ? *audio_samples
                                                     : nullptr,
              channel_size,
              num_channels) {
    RTC_DCHECK_GE(view_.num_channels(), 0);
    RTC_DCHECK_GE(view_.samples_per_channel(), 0);
  }

  // Intentionally implicit: permits conversions such as
  // AudioFrameView<float> -> AudioFrameView<const float>.
  template <class U>
  AudioFrameView(AudioFrameView<U> other) : view_(other.view()) {}

  // Wraps an already-constructed DeinterleavedView.
  template <class U>
  explicit AudioFrameView(DeinterleavedView<U> view) : view_(view) {}

  AudioFrameView() = delete;

  int num_channels() const { return view_.num_channels(); }
  int samples_per_channel() const { return view_.samples_per_channel(); }

  // Per-channel accessors; `channel(i)` and `operator[](i)` are synonyms.
  MonoView<T> channel(int idx) { return view_[idx]; }
  MonoView<const T> channel(int idx) const { return view_[idx]; }
  MonoView<T> operator[](int idx) { return channel(idx); }
  MonoView<const T> operator[](int idx) const { return channel(idx); }

  // Access to the underlying view (const and non-const flavors).
  DeinterleavedView<T> view() { return view_; }
  DeinterleavedView<const T> view() const { return view_; }

 private:
  DeinterleavedView<T> view_;
};
} // namespace webrtc
#endif // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_VIEW_H_