Use backticks not vertical bars to denote variables in comments for /audio

Bug: webrtc:12338
Change-Id: Ief89269aa39d0cb6749a1c6cc995ce8830ca327f
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/226942
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34564}
Artem Titov 2021-07-26 11:47:07 +02:00 committed by WebRTC LUCI CQ
parent 179b46b5ae
commit b0ea637ec2
20 changed files with 85 additions and 85 deletions

View file

@@ -189,7 +189,7 @@ class AudioSendStream final : public webrtc::AudioSendStream,
   BitrateAllocatorInterface* const bitrate_allocator_
       RTC_GUARDED_BY(rtp_transport_queue_);
-  // Constrains cached to be accessed from |rtp_transport_queue_|.
+  // Constrains cached to be accessed from `rtp_transport_queue_`.
   absl::optional<AudioSendStream::TargetAudioBitrateConstraints>
       cached_constraints_ RTC_GUARDED_BY(rtp_transport_queue_) = absl::nullopt;
   RtpTransportControllerSendInterface* const rtp_transport_;
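
The RTC_GUARDED_BY annotations above drive Clang's thread-safety analysis: code that touches the member without holding the named capability is flagged at compile time. A minimal sketch of the same pattern with the raw Clang attribute and a std::mutex (illustrative only; WebRTC's real macros live in rtc_base/thread_annotations.h, and the guarding object must itself carry capability annotations for the warnings to fire):

  #include <mutex>

  class CachedConstraints {
   public:
    void Set(int value) {
      std::lock_guard<std::mutex> lock(mu_);
      cached_ = value;  // OK: mu_ is held here.
    }

   private:
    std::mutex mu_;
    // With -Wthread-safety, unguarded access to cached_ is diagnosed.
    int cached_ __attribute__((guarded_by(mu_))) = 0;
  };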

View file

@@ -172,7 +172,7 @@ struct ConfigHelper {
     SetupMockForSetupSendCodec(expect_set_encoder_call);
     SetupMockForCallEncoder();

-    // Use ISAC as default codec so as to prevent unnecessary |channel_proxy_|
+    // Use ISAC as default codec so as to prevent unnecessary `channel_proxy_`
     // calls from the default ctor behavior.
     stream_config_.send_codec_spec =
         AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);
@@ -336,7 +336,7 @@ struct ConfigHelper {
   ::testing::NiceMock<MockRtpRtcpInterface> rtp_rtcp_;
   ::testing::NiceMock<MockLimitObserver> limit_observer_;
   BitrateAllocator bitrate_allocator_;
-  // |worker_queue| is defined last to ensure all pending tasks are cancelled
+  // `worker_queue` is defined last to ensure all pending tasks are cancelled
   // and deleted before any other members.
   TaskQueueForTest worker_queue_;
   std::unique_ptr<AudioEncoder> audio_encoder_;
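
The comment relies on a core C++ rule: non-static data members are destroyed in reverse declaration order, so the member declared last is destroyed first. A standalone illustration (not WebRTC code):

  #include <iostream>

  struct Tracer {
    const char* name;
    explicit Tracer(const char* n) : name(n) {}
    ~Tracer() { std::cout << name << " destroyed\n"; }
  };

  struct Fixture {
    Tracer state{"state"};                // Declared first, destroyed last.
    Tracer worker_queue{"worker_queue"};  // Declared last, destroyed first, so
                                          // its pending tasks cannot touch an
                                          // already-destroyed `state`.
  };

  int main() {
    Fixture f;
    return 0;
  }  // Prints "worker_queue destroyed" before "state destroyed".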

View file

@@ -64,8 +64,8 @@ void ProcessCaptureFrame(uint32_t delay_ms,
   }
 }

-// Resample audio in |frame| to given sample rate preserving the
-// channel count and place the result in |destination|.
+// Resample audio in `frame` to given sample rate preserving the
+// channel count and place the result in `destination`.
 int Resample(const AudioFrame& frame,
              const int destination_sample_rate,
              PushResampler<int16_t>* resampler,
View file

@@ -429,8 +429,8 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
   }

   // Measure audio level (0-9)
-  // TODO(henrik.lundin) Use the |muted| information here too.
-  // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| (see
+  // TODO(henrik.lundin) Use the `muted` information here too.
+  // TODO(deadbeef): Use RmsLevel for `_outputAudioLevel` (see
   // https://crbug.com/webrtc/7517).
   _outputAudioLevel.ComputeLevel(*audio_frame, kAudioSampleDurationSeconds);
@@ -454,10 +454,10 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
     // Compute ntp time.
     audio_frame->ntp_time_ms_ =
         ntp_estimator_.Estimate(audio_frame->timestamp_);
-    // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
+    // `ntp_time_ms_` won't be valid until at least 2 RTCP SRs are received.
    if (audio_frame->ntp_time_ms_ > 0) {
-      // Compute |capture_start_ntp_time_ms_| so that
-      // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
+      // Compute `capture_start_ntp_time_ms_` so that
+      // `capture_start_ntp_time_ms_` + `elapsed_time_ms_` == `ntp_time_ms_`
       capture_start_ntp_time_ms_ =
           audio_frame->ntp_time_ms_ - audio_frame->elapsed_time_ms_;
     }
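
A worked instance of the invariant in the comment above, with made-up numbers:

  // Illustration only: if a frame's NTP time is 3'600'000'500 ms and 500 ms
  // of audio have elapsed since capture started, the capture start time is
  // recovered by subtraction.
  int64_t ntp_time_ms = 3'600'000'500;
  int64_t elapsed_time_ms = 500;
  int64_t capture_start_ntp_time_ms = ntp_time_ms - elapsed_time_ms;
  // capture_start_ntp_time_ms == 3'600'000'000, and
  // capture_start_ntp_time_ms + elapsed_time_ms == ntp_time_ms holds.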

View file

@@ -23,7 +23,7 @@ namespace webrtc {

 // Delegates calls to FrameTransformerInterface to transform frames, and to
 // ChannelReceive to receive the transformed frames using the
-// |receive_frame_callback_| on the |channel_receive_thread_|.
+// `receive_frame_callback_` on the `channel_receive_thread_`.
 class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback {
  public:
   using ReceiveFrameCallback =
@@ -34,12 +34,12 @@ class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback {
       rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
       TaskQueueBase* channel_receive_thread);

-  // Registers |this| as callback for |frame_transformer_|, to get the
+  // Registers `this` as callback for `frame_transformer_`, to get the
   // transformed frames.
   void Init();

-  // Unregisters and releases the |frame_transformer_| reference, and resets
-  // |receive_frame_callback_| on |channel_receive_thread_|. Called from
+  // Unregisters and releases the `frame_transformer_` reference, and resets
+  // `receive_frame_callback_` on `channel_receive_thread_`. Called from
   // ChannelReceive destructor to prevent running the callback on a dangling
   // channel.
   void Reset();
@@ -55,7 +55,7 @@ class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback {
       std::unique_ptr<TransformableFrameInterface> frame) override;

   // Delegates the call to ChannelReceive::OnReceivedPayloadData on the
-  // |channel_receive_thread_|, by calling |receive_frame_callback_|.
+  // `channel_receive_thread_`, by calling `receive_frame_callback_`.
   void ReceiveFrame(std::unique_ptr<TransformableFrameInterface> frame) const;

  protected:

View file

@@ -98,12 +98,12 @@ class ChannelSendInterface {
       std::unique_ptr<AudioFrame> audio_frame) = 0;
   virtual RtpRtcpInterface* GetRtpRtcp() const = 0;

-  // In RTP we currently rely on RTCP packets (|ReceivedRTCPPacket|) to inform
+  // In RTP we currently rely on RTCP packets (`ReceivedRTCPPacket`) to inform
   // about RTT.
   // In media transport we rely on the TargetTransferRateObserver instead.
   // In other words, if you are using RTP, you should expect
-  // |ReceivedRTCPPacket| to be called, if you are using media transport,
-  // |OnTargetTransferRate| will be called.
+  // `ReceivedRTCPPacket` to be called, if you are using media transport,
+  // `OnTargetTransferRate` will be called.
   //
   // In future, RTP media will move to the media transport implementation and
   // these conditions will be removed.

View file

@@ -23,8 +23,8 @@
 namespace webrtc {

 // Delegates calls to FrameTransformerInterface to transform frames, and to
-// ChannelSend to send the transformed frames using |send_frame_callback_| on
-// the |encoder_queue_|.
+// ChannelSend to send the transformed frames using `send_frame_callback_` on
+// the `encoder_queue_`.
 // OnTransformedFrame() can be called from any thread, the delegate ensures
 // thread-safe access to the ChannelSend callback.
 class ChannelSendFrameTransformerDelegate : public TransformedFrameCallback {
@@ -40,12 +40,12 @@ class ChannelSendFrameTransformerDelegate : public TransformedFrameCallback {
       rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
       rtc::TaskQueue* encoder_queue);

-  // Registers |this| as callback for |frame_transformer_|, to get the
+  // Registers `this` as callback for `frame_transformer_`, to get the
   // transformed frames.
   void Init();

-  // Unregisters and releases the |frame_transformer_| reference, and resets
-  // |send_frame_callback_| under lock. Called from ChannelSend destructor to
+  // Unregisters and releases the `frame_transformer_` reference, and resets
+  // `send_frame_callback_` under lock. Called from ChannelSend destructor to
   // prevent running the callback on a dangling channel.
   void Reset();
@@ -64,8 +64,8 @@ class ChannelSendFrameTransformerDelegate : public TransformedFrameCallback {
   void OnTransformedFrame(
       std::unique_ptr<TransformableFrameInterface> frame) override;

-  // Delegates the call to ChannelSend::SendRtpAudio on the |encoder_queue_|,
-  // by calling |send_audio_callback_|.
+  // Delegates the call to ChannelSend::SendRtpAudio on the `encoder_queue_`,
+  // by calling `send_audio_callback_`.
   void SendFrame(std::unique_ptr<TransformableFrameInterface> frame) const;

  protected:

View file

@@ -47,7 +47,7 @@ void NullAudioPoller::OnMessage(rtc::Message* msg) {
   // Buffer to hold the audio samples.
   int16_t buffer[kNumSamples * kNumChannels];

-  // Output variables from |NeedMorePlayData|.
+  // Output variables from `NeedMorePlayData`.
   size_t n_samples;
   int64_t elapsed_time_ms;
   int64_t ntp_time_ms;
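
For scale, the buffer above holds samples-per-channel times channel-count interleaved samples. A sketch with assumed constants (the real kNumSamples and kNumChannels are defined elsewhere in this file):

  constexpr size_t kSampleRateHz = 48000;              // Assumed rate.
  constexpr size_t kNumSamples = kSampleRateHz / 100;  // 10 ms: 480 samples.
  constexpr size_t kNumChannels = 2;                   // Assumed stereo.
  int16_t buffer[kNumSamples * kNumChannels];          // 960 int16_t values.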

View file

@@ -17,19 +17,19 @@
 namespace webrtc {
 namespace voe {

-// Upmix or downmix and resample the audio to |dst_frame|. Expects |dst_frame|
+// Upmix or downmix and resample the audio to `dst_frame`. Expects `dst_frame`
 // to have its sample rate and channels members set to the desired values.
-// Updates the |samples_per_channel_| member accordingly.
+// Updates the `samples_per_channel_` member accordingly.
 //
-// This version has an AudioFrame |src_frame| as input and sets the output
-// |timestamp_|, |elapsed_time_ms_| and |ntp_time_ms_| members equals to the
+// This version has an AudioFrame `src_frame` as input and sets the output
+// `timestamp_`, `elapsed_time_ms_` and `ntp_time_ms_` members equals to the
 // input ones.
 void RemixAndResample(const AudioFrame& src_frame,
                       PushResampler<int16_t>* resampler,
                       AudioFrame* dst_frame);

-// This version has a pointer to the samples |src_data| as input and receives
-// |samples_per_channel|, |num_channels| and |sample_rate_hz| of the data as
+// This version has a pointer to the samples `src_data` as input and receives
+// `samples_per_channel`, `num_channels` and `sample_rate_hz` of the data as
 // parameters.
 void RemixAndResample(const int16_t* src_data,
                       size_t samples_per_channel,
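
A hedged usage sketch of the first overload, derived only from the declarations above: the caller describes the desired output format on `dst_frame` itself (assuming AudioFrame's public sample_rate_hz_ and num_channels_ members), and the helper fills in the rest.

  webrtc::AudioFrame src;  // Filled elsewhere, e.g. 16 kHz mono capture.
  webrtc::AudioFrame dst;
  webrtc::PushResampler<int16_t> resampler;
  dst.sample_rate_hz_ = 48000;  // Desired output sample rate.
  dst.num_channels_ = 2;        // Desired output channel count.
  webrtc::voe::RemixAndResample(src, &resampler, &dst);
  // dst.samples_per_channel_ is updated; timestamp_, elapsed_time_ms_ and
  // ntp_time_ms_ are copied over from src.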

View file

@@ -43,7 +43,7 @@ class UtilityTest : public ::testing::Test {
   AudioFrame golden_frame_;
 };

-// Sets the signal value to increase by |data| with every sample. Floats are
+// Sets the signal value to increase by `data` with every sample. Floats are
 // used so non-integer values result in rounding error, but not an accumulating
 // error.
 void SetMonoFrame(float data, int sample_rate_hz, AudioFrame* frame) {
@@ -62,7 +62,7 @@ void SetMonoFrame(float data, AudioFrame* frame) {
   SetMonoFrame(data, frame->sample_rate_hz_, frame);
 }

-// Sets the signal value to increase by |left| and |right| with every sample in
+// Sets the signal value to increase by `left` and `right` with every sample in
 // each channel respectively.
 void SetStereoFrame(float left,
                     float right,
@@ -84,7 +84,7 @@ void SetStereoFrame(float left, float right, AudioFrame* frame) {
   SetStereoFrame(left, right, frame->sample_rate_hz_, frame);
 }

-// Sets the signal value to increase by |ch1|, |ch2|, |ch3|, |ch4| with every
+// Sets the signal value to increase by `ch1`, `ch2`, `ch3`, `ch4` with every
 // sample in each channel respectively.
 void SetQuadFrame(float ch1,
                   float ch2,
@@ -111,8 +111,8 @@ void VerifyParams(const AudioFrame& ref_frame, const AudioFrame& test_frame) {
   EXPECT_EQ(ref_frame.sample_rate_hz_, test_frame.sample_rate_hz_);
 }

-// Computes the best SNR based on the error between |ref_frame| and
-// |test_frame|. It allows for up to a |max_delay| in samples between the
+// Computes the best SNR based on the error between `ref_frame` and
+// `test_frame`. It allows for up to a `max_delay` in samples between the
 // signals to compensate for the resampling delay.
 float ComputeSNR(const AudioFrame& ref_frame,
                  const AudioFrame& test_frame,
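
A self-contained sketch of the delay-compensated SNR search the comment describes, reduced to a mono int16_t signal (the real helper operates on whole AudioFrames; this is an assumption-laden illustration, not the test's code):

  #include <cmath>
  #include <cstddef>
  #include <cstdint>
  #include <limits>

  float BestSnrDb(const int16_t* ref, const int16_t* test, size_t len,
                  size_t max_delay) {
    float best_snr = 0.0f;
    for (size_t delay = 0; delay <= max_delay; ++delay) {
      double signal = 0.0;
      double error = 0.0;
      for (size_t i = 0; i + delay < len; ++i) {
        double diff = ref[i] - test[i + delay];  // Align test by `delay`.
        signal += static_cast<double>(ref[i]) * ref[i];
        error += diff * diff;
      }
      float snr = error > 0.0
                      ? static_cast<float>(10.0 * std::log10(signal / error))
                      : std::numeric_limits<float>::infinity();
      if (snr > best_snr) best_snr = snr;
    }
    return best_snr;  // Best SNR in dB over all tried delays.
  }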

View file

@@ -222,14 +222,14 @@ void AudioFrameOperations::Mute(AudioFrame* frame,
   size_t end = count;
   float start_g = 0.0f;
   if (current_frame_muted) {
-    // Fade out the last |count| samples of frame.
+    // Fade out the last `count` samples of frame.
     RTC_DCHECK(!previous_frame_muted);
     start = frame->samples_per_channel_ - count;
     end = frame->samples_per_channel_;
     start_g = 1.0f;
     inc = -inc;
   } else {
-    // Fade in the first |count| samples of frame.
+    // Fade in the first `count` samples of frame.
     RTC_DCHECK(previous_frame_muted);
   }
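
The branches above only set up the ramp; a plausible reconstruction of the fade loop that follows, for one channel of samples (not the verbatim implementation, which also iterates over interleaved channels):

  #include <cstddef>
  #include <cstdint>

  // Applies a linear gain ramp starting at start_g and stepping by inc
  // (inc = 1.0f / count; negated for fade-out) to samples [start, end).
  void ApplyLinearFade(int16_t* data, size_t start, size_t end, float start_g,
                       float inc) {
    float g = start_g;
    for (size_t i = start; i < end; ++i) {
      g += inc;  // Ramps 0 -> 1 for fade-in, 1 -> 0 for fade-out.
      data[i] = static_cast<int16_t>(data[i] * g);
    }
  }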

View file

@@ -24,40 +24,40 @@ namespace webrtc {
 // than a class.
 class AudioFrameOperations {
  public:
-  // Add samples in |frame_to_add| with samples in |result_frame|
-  // putting the results in |results_frame|. The fields
-  // |vad_activity_| and |speech_type_| of the result frame are
-  // updated. If |result_frame| is empty (|samples_per_channel_|==0),
-  // the samples in |frame_to_add| are added to it. The number of
+  // Add samples in `frame_to_add` with samples in `result_frame`
+  // putting the results in `results_frame`. The fields
+  // `vad_activity_` and `speech_type_` of the result frame are
+  // updated. If `result_frame` is empty (`samples_per_channel_`==0),
+  // the samples in `frame_to_add` are added to it. The number of
   // channels and number of samples per channel must match except when
-  // |result_frame| is empty.
+  // `result_frame` is empty.
   static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);

   // |frame.num_channels_| will be updated. This version checks for sufficient
-  // buffer size and that |num_channels_| is mono. Use UpmixChannels
+  // buffer size and that `num_channels_` is mono. Use UpmixChannels
   // instead. TODO(bugs.webrtc.org/8649): remove.
   ABSL_DEPRECATED("bugs.webrtc.org/8649")
   static int MonoToStereo(AudioFrame* frame);

   // |frame.num_channels_| will be updated. This version checks that
-  // |num_channels_| is stereo. Use DownmixChannels
+  // `num_channels_` is stereo. Use DownmixChannels
   // instead. TODO(bugs.webrtc.org/8649): remove.
   ABSL_DEPRECATED("bugs.webrtc.org/8649")
   static int StereoToMono(AudioFrame* frame);

-  // Downmixes 4 channels |src_audio| to stereo |dst_audio|. This is an in-place
-  // operation, meaning |src_audio| and |dst_audio| may point to the same
+  // Downmixes 4 channels `src_audio` to stereo `dst_audio`. This is an in-place
+  // operation, meaning `src_audio` and `dst_audio` may point to the same
   // buffer.
   static void QuadToStereo(const int16_t* src_audio,
                            size_t samples_per_channel,
                            int16_t* dst_audio);

   // |frame.num_channels_| will be updated. This version checks that
-  // |num_channels_| is 4 channels.
+  // `num_channels_` is 4 channels.
   static int QuadToStereo(AudioFrame* frame);

-  // Downmixes |src_channels| |src_audio| to |dst_channels| |dst_audio|.
-  // This is an in-place operation, meaning |src_audio| and |dst_audio|
+  // Downmixes `src_channels` `src_audio` to `dst_channels` `dst_audio`.
+  // This is an in-place operation, meaning `src_audio` and `dst_audio`
   // may point to the same buffer. Supported channel combinations are
   // Stereo to Mono, Quad to Mono, and Quad to Stereo.
   static void DownmixChannels(const int16_t* src_audio,
@@ -67,26 +67,26 @@ class AudioFrameOperations {
                               int16_t* dst_audio);

   // |frame.num_channels_| will be updated. This version checks that
-  // |num_channels_| and |dst_channels| are valid and performs relevant downmix.
+  // `num_channels_` and `dst_channels` are valid and performs relevant downmix.
   // Supported channel combinations are N channels to Mono, and Quad to Stereo.
   static void DownmixChannels(size_t dst_channels, AudioFrame* frame);

   // |frame.num_channels_| will be updated. This version checks that
-  // |num_channels_| and |dst_channels| are valid and performs relevant
+  // `num_channels_` and `dst_channels` are valid and performs relevant
   // downmix. Supported channel combinations are Mono to N
   // channels. The single channel is replicated.
   static void UpmixChannels(size_t target_number_of_channels,
                             AudioFrame* frame);

-  // Swap the left and right channels of |frame|. Fails silently if |frame| is
+  // Swap the left and right channels of `frame`. Fails silently if `frame` is
   // not stereo.
   static void SwapStereoChannels(AudioFrame* frame);

-  // Conditionally zero out contents of |frame| for implementing audio mute:
-  // |previous_frame_muted| && |current_frame_muted| - Zero out whole frame.
-  // |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start.
-  // !|previous_frame_muted| && |current_frame_muted| - Fade-out at frame end.
-  // !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched.
+  // Conditionally zero out contents of `frame` for implementing audio mute:
+  // `previous_frame_muted` && `current_frame_muted` - Zero out whole frame.
+  // `previous_frame_muted` && !`current_frame_muted` - Fade-in at frame start.
+  // !`previous_frame_muted` && `current_frame_muted` - Fade-out at frame end.
+  // !`previous_frame_muted` && !`current_frame_muted` - Leave frame untouched.
   static void Mute(AudioFrame* frame,
                    bool previous_frame_muted,
                    bool current_frame_muted);
@@ -94,7 +94,7 @@ class AudioFrameOperations {
   // Zero out contents of frame.
   static void Mute(AudioFrame* frame);

-  // Halve samples in |frame|.
+  // Halve samples in `frame`.
   static void ApplyHalfGain(AudioFrame* frame);

   static int Scale(float left, float right, AudioFrame* frame);
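
The in-place guarantee for the pointer-based QuadToStereo works because each interleaved quad is read in full before the corresponding stereo pair is written. A hedged sketch of such a downmix; the pairwise averaging is an assumption for illustration, and the real mix is defined in audio_frame_operations.cc:

  #include <cstddef>
  #include <cstdint>

  void QuadToStereoSketch(const int16_t* src_audio, size_t samples_per_channel,
                          int16_t* dst_audio) {
    for (size_t i = 0; i < samples_per_channel; ++i) {
      // Read all four input channels before writing the two output channels;
      // dst indices 2i and 2i+1 never run ahead of src index 4i, which is
      // what makes src_audio == dst_audio safe.
      int16_t ch1 = src_audio[4 * i];
      int16_t ch2 = src_audio[4 * i + 1];
      int16_t ch3 = src_audio[4 * i + 2];
      int16_t ch4 = src_audio[4 * i + 3];
      dst_audio[2 * i] = static_cast<int16_t>((ch1 + ch2) / 2);      // Left.
      dst_audio[2 * i + 1] = static_cast<int16_t>((ch3 + ch4) / 2);  // Right.
    }
  }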

View file

@@ -90,7 +90,7 @@ void ChannelMixer::Transform(AudioFrame* frame) {
   frame->num_channels_ = output_channels_;
   frame->channel_layout_ = output_layout_;

-  // Copy the output result to the audio frame in |frame|.
+  // Copy the output result to the audio frame in `frame`.
   memcpy(
       frame->mutable_data(), out_audio,
       sizeof(int16_t) * frame->samples_per_channel() * frame->num_channels());

View file

@@ -38,8 +38,8 @@ class ChannelMixer {
   ChannelMixer(ChannelLayout input_layout, ChannelLayout output_layout);
   ~ChannelMixer();

-  // Transforms all input channels corresponding to the selected |input_layout|
-  // to the number of channels in the selected |output_layout|.
+  // Transforms all input channels corresponding to the selected `input_layout`
+  // to the number of channels in the selected `output_layout`.
   // Example usage (downmix from stereo to mono):
   //
   //   ChannelMixer mixer(CHANNEL_LAYOUT_STEREO, CHANNEL_LAYOUT_MONO);
@@ -69,11 +69,11 @@ class ChannelMixer {
   // 1D array used as temporary storage during the transformation.
   std::unique_ptr<int16_t[]> audio_vector_;

-  // Number of elements allocated for |audio_vector_|.
+  // Number of elements allocated for `audio_vector_`.
   size_t audio_vector_size_ = 0;

   // Optimization case for when we can simply remap the input channels to output
-  // channels, i.e., when all scaling factors in |matrix_| equals 1.0.
+  // channels, i.e., when all scaling factors in `matrix_` equals 1.0.
   bool remapping_;

   // Delete the copy constructor and assignment operator.

View file

@@ -274,7 +274,7 @@ bool ChannelMixingMatrix::CreateTransformationMatrix(
   // All channels should now be accounted for.
   RTC_DCHECK(unaccounted_inputs_.empty());

-  // See if the output |matrix_| is simply a remapping matrix. If each input
+  // See if the output `matrix_` is simply a remapping matrix. If each input
   // channel maps to a single output channel we can simply remap. Doing this
   // programmatically is less fragile than logic checks on channel mappings.
   for (int output_ch = 0; output_ch < output_channels_; ++output_ch) {
@@ -287,7 +287,7 @@ bool ChannelMixingMatrix::CreateTransformationMatrix(
     }
   }

-  // If we've gotten here, |matrix_| is simply a remapping.
+  // If we've gotten here, `matrix_` is simply a remapping.
   return true;
 }
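
The check the two hunks above implement can be summarized as: the matrix is a pure remapping iff every output row has at most one non-zero entry and that entry equals 1.0. A standalone sketch of the predicate (container type assumed for illustration):

  #include <vector>

  bool IsRemapping(const std::vector<std::vector<float>>& matrix) {
    for (const auto& row : matrix) {  // One row per output channel.
      int nonzero = 0;
      for (float scale : row) {
        if (scale != 0.0f) {
          if (scale != 1.0f) return false;  // Scaled mix, not a remap.
          ++nonzero;
        }
      }
      if (nonzero > 1) return false;  // Output mixes several inputs.
    }
    return true;
  }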

View file

@@ -29,7 +29,7 @@ class ChannelMixingMatrix {
   // Create the transformation matrix of input channels to output channels.
   // Updates the empty matrix with the transformation, and returns true
   // if the transformation is just a remapping of channels (no mixing).
-  // The size of |matrix| is |output_channels| x |input_channels|, i.e., the
+  // The size of `matrix` is `output_channels` x `input_channels`, i.e., the
   // number of rows equals the number of output channels and the number of
   // columns corresponds to the number of input channels.
   // This file is derived from Chromium's media/base/channel_mixing_matrix.h.
@@ -55,14 +55,14 @@ class ChannelMixingMatrix {
   void AccountFor(Channels ch);
   bool IsUnaccounted(Channels ch) const;

-  // Helper methods for checking if |ch| exists in either |input_layout_| or
-  // |output_layout_| respectively.
+  // Helper methods for checking if `ch` exists in either `input_layout_` or
+  // `output_layout_` respectively.
   bool HasInputChannel(Channels ch) const;
   bool HasOutputChannel(Channels ch) const;

-  // Helper methods for updating |matrix_| with the proper value for
-  // mixing |input_ch| into |output_ch|. MixWithoutAccounting() does not
-  // remove the channel from |unaccounted_inputs_|.
+  // Helper methods for updating `matrix_` with the proper value for
+  // mixing `input_ch` into `output_ch`. MixWithoutAccounting() does not
+  // remove the channel from `unaccounted_inputs_`.
   void Mix(Channels input_ch, Channels output_ch, float scale);
   void MixWithoutAccounting(Channels input_ch, Channels output_ch, float scale);

View file

@@ -75,7 +75,7 @@ AudioChannel::~AudioChannel() {
   audio_mixer_->RemoveSource(ingress_.get());

-  // TODO(bugs.webrtc.org/11581): unclear if we still need to clear |egress_|
+  // TODO(bugs.webrtc.org/11581): unclear if we still need to clear `egress_`
   // here.
   egress_.reset();
   ingress_.reset();

View file

@@ -52,7 +52,7 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback {
   // Set the encoder format and payload type for AudioCodingModule.
   // It's possible to change the encoder type during its active usage.
-  // |payload_type| must be the type that is negotiated with peer through
+  // `payload_type` must be the type that is negotiated with peer through
   // offer/answer.
   void SetEncoder(int payload_type,
                   const SdpAudioFormat& encoder_format,
@@ -84,7 +84,7 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback {
   // Send DTMF named event as specified by
   // https://tools.ietf.org/html/rfc4733#section-3.2
-  // |duration_ms| specifies the duration of DTMF packets that will be emitted
+  // `duration_ms` specifies the duration of DTMF packets that will be emitted
   // in place of real RTP packets instead.
   // This will return true when requested dtmf event is successfully scheduled
   // otherwise false when the dtmf queue reached maximum of 20 events.
@@ -139,7 +139,7 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback {
   // newly received audio frame from AudioTransport.
   uint32_t frame_rtp_timestamp_ = 0;

-  // Flag to track mute state from caller. |previously_muted_| is used to
+  // Flag to track mute state from caller. `previously_muted_` is used to
   // track previous state as part of input to AudioFrameOperations::Mute
   // to implement fading effect when (un)mute is invoked.
   bool mute_ = false;

View file

@@ -55,7 +55,7 @@ VoipCore::VoipCore(rtc::scoped_refptr<AudioEncoderFactory> encoder_factory,
 }

 bool VoipCore::InitializeIfNeeded() {
-  // |audio_device_module_| internally owns a lock and the whole logic here
+  // `audio_device_module_` internally owns a lock and the whole logic here
   // needs to be executed atomically once using another lock in VoipCore.
   // Further changes in this method will need to make sure that no deadlock is
   // introduced in the future.
@@ -178,7 +178,7 @@ VoipResult VoipCore::ReleaseChannel(ChannelId channel_id) {
   }

   if (no_channels_after_release) {
-    // TODO(bugs.webrtc.org/11581): unclear if we still need to clear |channel|
+    // TODO(bugs.webrtc.org/11581): unclear if we still need to clear `channel`
     // here.
     channel = nullptr;

View file

@@ -53,7 +53,7 @@ class VoipCore : public VoipEngine,
                  public VoipVolumeControl {
  public:
   // Construct VoipCore with provided arguments.
-  // ProcessThread implementation can be injected by |process_thread|
+  // ProcessThread implementation can be injected by `process_thread`
   // (mainly for testing purpose) and when set to nullptr, default
   // implementation will be used.
   VoipCore(rtc::scoped_refptr<AudioEncoderFactory> encoder_factory,
@@ -128,7 +128,7 @@ class VoipCore : public VoipEngine,
   // mode. Therefore it would be better to delay the logic as late as possible.
   bool InitializeIfNeeded();

-  // Fetches the corresponding AudioChannel assigned with given |channel|.
+  // Fetches the corresponding AudioChannel assigned with given `channel`.
   // Returns nullptr if not found.
   rtc::scoped_refptr<AudioChannel> GetChannel(ChannelId channel_id);
@@ -144,15 +144,15 @@ class VoipCore : public VoipEngine,
   std::unique_ptr<TaskQueueFactory> task_queue_factory_;

   // Synchronization is handled internally by AudioProcessing.
-  // Must be placed before |audio_device_module_| for proper destruction.
+  // Must be placed before `audio_device_module_` for proper destruction.
   rtc::scoped_refptr<AudioProcessing> audio_processing_;

   // Synchronization is handled internally by AudioMixer.
-  // Must be placed before |audio_device_module_| for proper destruction.
+  // Must be placed before `audio_device_module_` for proper destruction.
   rtc::scoped_refptr<AudioMixer> audio_mixer_;

   // Synchronization is handled internally by AudioTransportImpl.
-  // Must be placed before |audio_device_module_| for proper destruction.
+  // Must be placed before `audio_device_module_` for proper destruction.
   std::unique_ptr<AudioTransportImpl> audio_transport_;

   // Synchronization is handled internally by AudioDeviceModule.