Mirror of https://github.com/mollyim/webrtc.git (synced 2025-05-13 05:40:42 +01:00)
Use backticks not vertical bars to denote variables in comments for /audio

Bug: webrtc:12338
Change-Id: Ief89269aa39d0cb6749a1c6cc995ce8830ca327f
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/226942
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34564}
This commit is contained in:
  parent 179b46b5ae
  commit b0ea637ec2

20 changed files with 85 additions and 85 deletions
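Note: the change is mechanical — inside `//` comments, |identifier| becomes `identifier`. Occurrences like |frame.num_channels_| stay untouched in the hunks below, consistent with a substitution that matches only plain word-character identifiers. A minimal sketch of such a rewrite (hypothetical; the tool actually used for this CL is not recorded here):

#include <iostream>
#include <regex>
#include <string>

// Rewrites |identifier| to `identifier`, but only in the text after "//",
// and only for plain identifiers (no dots), mirroring the pattern of
// changed vs. unchanged lines in this CL.
std::string RewriteCommentLine(const std::string& line) {
  const size_t pos = line.find("//");
  if (pos == std::string::npos) return line;
  static const std::regex kVar(R"(\|([A-Za-z_][A-Za-z0-9_]*)\|)");
  return line.substr(0, pos) +
         std::regex_replace(line.substr(pos), kVar, "`$1`");
}

int main() {
  std::cout << RewriteCommentLine("  // Resample audio in |frame|.") << "\n";
  // Prints:   // Resample audio in `frame`.
}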
@@ -189,7 +189,7 @@ class AudioSendStream final : public webrtc::AudioSendStream,
   BitrateAllocatorInterface* const bitrate_allocator_
       RTC_GUARDED_BY(rtp_transport_queue_);
-  // Constrains cached to be accessed from |rtp_transport_queue_|.
+  // Constrains cached to be accessed from `rtp_transport_queue_`.
   absl::optional<AudioSendStream::TargetAudioBitrateConstraints>
       cached_constraints_ RTC_GUARDED_BY(rtp_transport_queue_) = absl::nullopt;
   RtpTransportControllerSendInterface* const rtp_transport_;

@@ -172,7 +172,7 @@ struct ConfigHelper {
     SetupMockForSetupSendCodec(expect_set_encoder_call);
     SetupMockForCallEncoder();

-    // Use ISAC as default codec so as to prevent unnecessary |channel_proxy_|
+    // Use ISAC as default codec so as to prevent unnecessary `channel_proxy_`
     // calls from the default ctor behavior.
     stream_config_.send_codec_spec =
         AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);

@@ -336,7 +336,7 @@ struct ConfigHelper {
   ::testing::NiceMock<MockRtpRtcpInterface> rtp_rtcp_;
   ::testing::NiceMock<MockLimitObserver> limit_observer_;
   BitrateAllocator bitrate_allocator_;
-  // |worker_queue| is defined last to ensure all pending tasks are cancelled
+  // `worker_queue` is defined last to ensure all pending tasks are cancelled
   // and deleted before any other members.
   TaskQueueForTest worker_queue_;
   std::unique_ptr<AudioEncoder> audio_encoder_;

@@ -64,8 +64,8 @@ void ProcessCaptureFrame(uint32_t delay_ms,
   }
 }

-// Resample audio in |frame| to given sample rate preserving the
-// channel count and place the result in |destination|.
+// Resample audio in `frame` to given sample rate preserving the
+// channel count and place the result in `destination`.
 int Resample(const AudioFrame& frame,
              const int destination_sample_rate,
              PushResampler<int16_t>* resampler,

@@ -429,8 +429,8 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
   }

   // Measure audio level (0-9)
-  // TODO(henrik.lundin) Use the |muted| information here too.
-  // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| (see
+  // TODO(henrik.lundin) Use the `muted` information here too.
+  // TODO(deadbeef): Use RmsLevel for `_outputAudioLevel` (see
   // https://crbug.com/webrtc/7517).
   _outputAudioLevel.ComputeLevel(*audio_frame, kAudioSampleDurationSeconds);

@@ -454,10 +454,10 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
   // Compute ntp time.
   audio_frame->ntp_time_ms_ =
       ntp_estimator_.Estimate(audio_frame->timestamp_);
-  // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
+  // `ntp_time_ms_` won't be valid until at least 2 RTCP SRs are received.
   if (audio_frame->ntp_time_ms_ > 0) {
-    // Compute |capture_start_ntp_time_ms_| so that
-    // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
+    // Compute `capture_start_ntp_time_ms_` so that
+    // `capture_start_ntp_time_ms_` + `elapsed_time_ms_` == `ntp_time_ms_`
     capture_start_ntp_time_ms_ =
         audio_frame->ntp_time_ms_ - audio_frame->elapsed_time_ms_;
   }

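Note: the relation in the comment above is plain bookkeeping — once `ntp_time_ms_` is valid (> 0), the capture start is derived so that capture_start_ntp_time_ms_ + elapsed_time_ms_ == ntp_time_ms_. A standalone sketch with hypothetical names (not the ChannelReceive implementation itself):

#include <cstdint>
#include <iostream>

// Derives the capture-start NTP time from a frame's NTP timestamp and its
// elapsed time, maintaining:
//   capture_start_ntp_time_ms + elapsed_time_ms == ntp_time_ms
int64_t DeriveCaptureStartNtpMs(int64_t ntp_time_ms, int64_t elapsed_time_ms) {
  // `ntp_time_ms` is only valid once the estimator has seen at least
  // two RTCP sender reports; callers check ntp_time_ms > 0 first.
  return ntp_time_ms - elapsed_time_ms;
}

int main() {
  std::cout << DeriveCaptureStartNtpMs(3900000500, 500) << "\n";  // 3900000000
}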
@@ -23,7 +23,7 @@ namespace webrtc {

 // Delegates calls to FrameTransformerInterface to transform frames, and to
 // ChannelReceive to receive the transformed frames using the
-// |receive_frame_callback_| on the |channel_receive_thread_|.
+// `receive_frame_callback_` on the `channel_receive_thread_`.
 class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback {
  public:
   using ReceiveFrameCallback =

@@ -34,12 +34,12 @@ class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback {
       rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
       TaskQueueBase* channel_receive_thread);

-  // Registers |this| as callback for |frame_transformer_|, to get the
+  // Registers `this` as callback for `frame_transformer_`, to get the
   // transformed frames.
   void Init();

-  // Unregisters and releases the |frame_transformer_| reference, and resets
-  // |receive_frame_callback_| on |channel_receive_thread_|. Called from
+  // Unregisters and releases the `frame_transformer_` reference, and resets
+  // `receive_frame_callback_` on `channel_receive_thread_`. Called from
   // ChannelReceive destructor to prevent running the callback on a dangling
   // channel.
   void Reset();

@@ -55,7 +55,7 @@ class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback {
       std::unique_ptr<TransformableFrameInterface> frame) override;

   // Delegates the call to ChannelReceive::OnReceivedPayloadData on the
-  // |channel_receive_thread_|, by calling |receive_frame_callback_|.
+  // `channel_receive_thread_`, by calling `receive_frame_callback_`.
   void ReceiveFrame(std::unique_ptr<TransformableFrameInterface> frame) const;

  protected:

@@ -98,12 +98,12 @@ class ChannelSendInterface {
       std::unique_ptr<AudioFrame> audio_frame) = 0;
   virtual RtpRtcpInterface* GetRtpRtcp() const = 0;

-  // In RTP we currently rely on RTCP packets (|ReceivedRTCPPacket|) to inform
+  // In RTP we currently rely on RTCP packets (`ReceivedRTCPPacket`) to inform
   // about RTT.
   // In media transport we rely on the TargetTransferRateObserver instead.
   // In other words, if you are using RTP, you should expect
-  // |ReceivedRTCPPacket| to be called, if you are using media transport,
-  // |OnTargetTransferRate| will be called.
+  // `ReceivedRTCPPacket` to be called, if you are using media transport,
+  // `OnTargetTransferRate` will be called.
   //
   // In future, RTP media will move to the media transport implementation and
   // these conditions will be removed.

@@ -23,8 +23,8 @@
 namespace webrtc {

 // Delegates calls to FrameTransformerInterface to transform frames, and to
-// ChannelSend to send the transformed frames using |send_frame_callback_| on
-// the |encoder_queue_|.
+// ChannelSend to send the transformed frames using `send_frame_callback_` on
+// the `encoder_queue_`.
 // OnTransformedFrame() can be called from any thread, the delegate ensures
 // thread-safe access to the ChannelSend callback.
 class ChannelSendFrameTransformerDelegate : public TransformedFrameCallback {

@@ -40,12 +40,12 @@ class ChannelSendFrameTransformerDelegate : public TransformedFrameCallback {
       rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
       rtc::TaskQueue* encoder_queue);

-  // Registers |this| as callback for |frame_transformer_|, to get the
+  // Registers `this` as callback for `frame_transformer_`, to get the
   // transformed frames.
   void Init();

-  // Unregisters and releases the |frame_transformer_| reference, and resets
-  // |send_frame_callback_| under lock. Called from ChannelSend destructor to
+  // Unregisters and releases the `frame_transformer_` reference, and resets
+  // `send_frame_callback_` under lock. Called from ChannelSend destructor to
   // prevent running the callback on a dangling channel.
   void Reset();

@@ -64,8 +64,8 @@ class ChannelSendFrameTransformerDelegate : public TransformedFrameCallback {
   void OnTransformedFrame(
       std::unique_ptr<TransformableFrameInterface> frame) override;

-  // Delegates the call to ChannelSend::SendRtpAudio on the |encoder_queue_|,
-  // by calling |send_audio_callback_|.
+  // Delegates the call to ChannelSend::SendRtpAudio on the `encoder_queue_`,
+  // by calling `send_audio_callback_`.
   void SendFrame(std::unique_ptr<TransformableFrameInterface> frame) const;

  protected:

@@ -47,7 +47,7 @@ void NullAudioPoller::OnMessage(rtc::Message* msg) {

   // Buffer to hold the audio samples.
   int16_t buffer[kNumSamples * kNumChannels];
-  // Output variables from |NeedMorePlayData|.
+  // Output variables from `NeedMorePlayData`.
   size_t n_samples;
   int64_t elapsed_time_ms;
   int64_t ntp_time_ms;

@@ -17,19 +17,19 @@
 namespace webrtc {
 namespace voe {

-// Upmix or downmix and resample the audio to |dst_frame|. Expects |dst_frame|
+// Upmix or downmix and resample the audio to `dst_frame`. Expects `dst_frame`
 // to have its sample rate and channels members set to the desired values.
-// Updates the |samples_per_channel_| member accordingly.
+// Updates the `samples_per_channel_` member accordingly.
 //
-// This version has an AudioFrame |src_frame| as input and sets the output
-// |timestamp_|, |elapsed_time_ms_| and |ntp_time_ms_| members equals to the
+// This version has an AudioFrame `src_frame` as input and sets the output
+// `timestamp_`, `elapsed_time_ms_` and `ntp_time_ms_` members equals to the
 // input ones.
 void RemixAndResample(const AudioFrame& src_frame,
                       PushResampler<int16_t>* resampler,
                       AudioFrame* dst_frame);

-// This version has a pointer to the samples |src_data| as input and receives
-// |samples_per_channel|, |num_channels| and |sample_rate_hz| of the data as
+// This version has a pointer to the samples `src_data` as input and receives
+// `samples_per_channel`, `num_channels` and `sample_rate_hz` of the data as
 // parameters.
 void RemixAndResample(const int16_t* src_data,
                       size_t samples_per_channel,

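Note: a usage sketch for the first overload above — the destination frame carries the desired output format in its own members (illustrative and untested; the include paths are assumptions):

#include "api/audio/audio_frame.h"                          // assumed path
#include "audio/remix_resample.h"                           // assumed path
#include "common_audio/resampler/include/push_resampler.h"  // assumed path

void To16kMono(const webrtc::AudioFrame& src_frame,
               webrtc::PushResampler<int16_t>* resampler,
               webrtc::AudioFrame* dst_frame) {
  dst_frame->sample_rate_hz_ = 16000;  // desired output rate
  dst_frame->num_channels_ = 1;        // desired output channel count
  webrtc::voe::RemixAndResample(src_frame, resampler, dst_frame);
  // dst_frame->samples_per_channel_ has been updated accordingly.
}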
@@ -43,7 +43,7 @@ class UtilityTest : public ::testing::Test {
   AudioFrame golden_frame_;
 };

-// Sets the signal value to increase by |data| with every sample. Floats are
+// Sets the signal value to increase by `data` with every sample. Floats are
 // used so non-integer values result in rounding error, but not an accumulating
 // error.
 void SetMonoFrame(float data, int sample_rate_hz, AudioFrame* frame) {

@@ -62,7 +62,7 @@ void SetMonoFrame(float data, AudioFrame* frame) {
   SetMonoFrame(data, frame->sample_rate_hz_, frame);
 }

-// Sets the signal value to increase by |left| and |right| with every sample in
+// Sets the signal value to increase by `left` and `right` with every sample in
 // each channel respectively.
 void SetStereoFrame(float left,
                     float right,

@@ -84,7 +84,7 @@ void SetStereoFrame(float left, float right, AudioFrame* frame) {
   SetStereoFrame(left, right, frame->sample_rate_hz_, frame);
 }

-// Sets the signal value to increase by |ch1|, |ch2|, |ch3|, |ch4| with every
+// Sets the signal value to increase by `ch1`, `ch2`, `ch3`, `ch4` with every
 // sample in each channel respectively.
 void SetQuadFrame(float ch1,
                   float ch2,

@@ -111,8 +111,8 @@ void VerifyParams(const AudioFrame& ref_frame, const AudioFrame& test_frame) {
   EXPECT_EQ(ref_frame.sample_rate_hz_, test_frame.sample_rate_hz_);
 }

-// Computes the best SNR based on the error between |ref_frame| and
-// |test_frame|. It allows for up to a |max_delay| in samples between the
+// Computes the best SNR based on the error between `ref_frame` and
+// `test_frame`. It allows for up to a `max_delay` in samples between the
 // signals to compensate for the resampling delay.
 float ComputeSNR(const AudioFrame& ref_frame,
                  const AudioFrame& test_frame,

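Note: per the comment above, the helper reports the best signal-to-error ratio over alignments of up to `max_delay` samples. A standalone sketch under that assumed definition (not the test's exact implementation):

#include <cmath>
#include <cstdint>
#include <vector>

// Returns the highest 10*log10(signal_power / error_power) in dB over
// candidate delays 0..max_delay of `test` relative to `ref`.
float BestSnrDb(const std::vector<int16_t>& ref,
                const std::vector<int16_t>& test,
                size_t max_delay) {
  float best = -1e9f;
  for (size_t delay = 0; delay <= max_delay; ++delay) {
    double signal = 0.0, error = 0.0;
    for (size_t i = 0; i < ref.size() && i + delay < test.size(); ++i) {
      const double diff = ref[i] - test[i + delay];
      signal += static_cast<double>(ref[i]) * ref[i];
      error += diff * diff;
    }
    const float snr =
        error > 0.0 ? static_cast<float>(10.0 * std::log10(signal / error))
                    : 1e9f;  // Perfect match at this alignment.
    if (snr > best) best = snr;
  }
  return best;
}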
@@ -222,14 +222,14 @@ void AudioFrameOperations::Mute(AudioFrame* frame,
   size_t end = count;
   float start_g = 0.0f;
   if (current_frame_muted) {
-    // Fade out the last |count| samples of frame.
+    // Fade out the last `count` samples of frame.
     RTC_DCHECK(!previous_frame_muted);
     start = frame->samples_per_channel_ - count;
     end = frame->samples_per_channel_;
     start_g = 1.0f;
     inc = -inc;
   } else {
-    // Fade in the first |count| samples of frame.
+    // Fade in the first `count` samples of frame.
     RTC_DCHECK(previous_frame_muted);
   }

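Note: the fade logic above picks a window and a gain ramp from the mute state. A simplified sketch of that selection (assumed reduction of AudioFrameOperations::Mute, not the real code):

#include <cstddef>

// Selects the fade window [start, end) and the starting gain `start_g`.
// The caller initializes *inc to the per-sample gain step (e.g. 1.0f/count).
void SelectFadeWindow(bool current_frame_muted, size_t samples_per_channel,
                      size_t count, size_t* start, size_t* end,
                      float* start_g, float* inc) {
  *start = 0;
  *end = count;
  *start_g = 0.0f;  // Fade in: gain ramps 0 -> 1 over the first `count`.
  if (current_frame_muted) {
    // Fade out: gain ramps 1 -> 0 over the last `count` samples.
    *start = samples_per_channel - count;
    *end = samples_per_channel;
    *start_g = 1.0f;
    *inc = -*inc;
  }
}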
@@ -24,40 +24,40 @@ namespace webrtc {
 // than a class.
 class AudioFrameOperations {
  public:
-  // Add samples in |frame_to_add| with samples in |result_frame|
-  // putting the results in |results_frame|. The fields
-  // |vad_activity_| and |speech_type_| of the result frame are
-  // updated. If |result_frame| is empty (|samples_per_channel_|==0),
-  // the samples in |frame_to_add| are added to it. The number of
+  // Add samples in `frame_to_add` with samples in `result_frame`
+  // putting the results in `results_frame`. The fields
+  // `vad_activity_` and `speech_type_` of the result frame are
+  // updated. If `result_frame` is empty (`samples_per_channel_`==0),
+  // the samples in `frame_to_add` are added to it. The number of
   // channels and number of samples per channel must match except when
-  // |result_frame| is empty.
+  // `result_frame` is empty.
   static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);

   // |frame.num_channels_| will be updated. This version checks for sufficient
-  // buffer size and that |num_channels_| is mono. Use UpmixChannels
+  // buffer size and that `num_channels_` is mono. Use UpmixChannels
   // instead. TODO(bugs.webrtc.org/8649): remove.
   ABSL_DEPRECATED("bugs.webrtc.org/8649")
   static int MonoToStereo(AudioFrame* frame);

   // |frame.num_channels_| will be updated. This version checks that
-  // |num_channels_| is stereo. Use DownmixChannels
+  // `num_channels_` is stereo. Use DownmixChannels
   // instead. TODO(bugs.webrtc.org/8649): remove.
   ABSL_DEPRECATED("bugs.webrtc.org/8649")
   static int StereoToMono(AudioFrame* frame);

-  // Downmixes 4 channels |src_audio| to stereo |dst_audio|. This is an in-place
-  // operation, meaning |src_audio| and |dst_audio| may point to the same
+  // Downmixes 4 channels `src_audio` to stereo `dst_audio`. This is an in-place
+  // operation, meaning `src_audio` and `dst_audio` may point to the same
   // buffer.
   static void QuadToStereo(const int16_t* src_audio,
                            size_t samples_per_channel,
                            int16_t* dst_audio);

   // |frame.num_channels_| will be updated. This version checks that
-  // |num_channels_| is 4 channels.
+  // `num_channels_` is 4 channels.
   static int QuadToStereo(AudioFrame* frame);

-  // Downmixes |src_channels| |src_audio| to |dst_channels| |dst_audio|.
-  // This is an in-place operation, meaning |src_audio| and |dst_audio|
+  // Downmixes `src_channels` `src_audio` to `dst_channels` `dst_audio`.
+  // This is an in-place operation, meaning `src_audio` and `dst_audio`
   // may point to the same buffer. Supported channel combinations are
   // Stereo to Mono, Quad to Mono, and Quad to Stereo.
   static void DownmixChannels(const int16_t* src_audio,

@@ -67,26 +67,26 @@ class AudioFrameOperations {
                               int16_t* dst_audio);

   // |frame.num_channels_| will be updated. This version checks that
-  // |num_channels_| and |dst_channels| are valid and performs relevant downmix.
+  // `num_channels_` and `dst_channels` are valid and performs relevant downmix.
   // Supported channel combinations are N channels to Mono, and Quad to Stereo.
   static void DownmixChannels(size_t dst_channels, AudioFrame* frame);

   // |frame.num_channels_| will be updated. This version checks that
-  // |num_channels_| and |dst_channels| are valid and performs relevant
+  // `num_channels_` and `dst_channels` are valid and performs relevant
   // downmix. Supported channel combinations are Mono to N
   // channels. The single channel is replicated.
   static void UpmixChannels(size_t target_number_of_channels,
                             AudioFrame* frame);

-  // Swap the left and right channels of |frame|. Fails silently if |frame| is
+  // Swap the left and right channels of `frame`. Fails silently if `frame` is
   // not stereo.
   static void SwapStereoChannels(AudioFrame* frame);

-  // Conditionally zero out contents of |frame| for implementing audio mute:
-  // |previous_frame_muted| && |current_frame_muted| - Zero out whole frame.
-  // |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start.
-  // !|previous_frame_muted| && |current_frame_muted| - Fade-out at frame end.
-  // !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched.
+  // Conditionally zero out contents of `frame` for implementing audio mute:
+  // `previous_frame_muted` && `current_frame_muted` - Zero out whole frame.
+  // `previous_frame_muted` && !`current_frame_muted` - Fade-in at frame start.
+  // !`previous_frame_muted` && `current_frame_muted` - Fade-out at frame end.
+  // !`previous_frame_muted` && !`current_frame_muted` - Leave frame untouched.
   static void Mute(AudioFrame* frame,
                    bool previous_frame_muted,
                    bool current_frame_muted);

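Note: an illustrative caller of the Mute() contract documented above (hypothetical driver code; the include path is assumed) — pass last frame's and this frame's mute state so transitions fade instead of clicking:

#include "audio/utility/audio_frame_operations.h"  // assumed path

void PushFrame(webrtc::AudioFrame* frame, bool* previously_muted,
               bool muted_now) {
  // (false, true): fade-out at frame end; (true, false): fade-in at start;
  // (true, true): whole frame zeroed; (false, false): frame untouched.
  webrtc::AudioFrameOperations::Mute(frame, *previously_muted, muted_now);
  *previously_muted = muted_now;
}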
@@ -94,7 +94,7 @@ class AudioFrameOperations {
   // Zero out contents of frame.
   static void Mute(AudioFrame* frame);

-  // Halve samples in |frame|.
+  // Halve samples in `frame`.
   static void ApplyHalfGain(AudioFrame* frame);

   static int Scale(float left, float right, AudioFrame* frame);

@@ -90,7 +90,7 @@ void ChannelMixer::Transform(AudioFrame* frame) {
   frame->num_channels_ = output_channels_;
   frame->channel_layout_ = output_layout_;

-  // Copy the output result to the audio frame in |frame|.
+  // Copy the output result to the audio frame in `frame`.
   memcpy(
       frame->mutable_data(), out_audio,
       sizeof(int16_t) * frame->samples_per_channel() * frame->num_channels());

@@ -38,8 +38,8 @@ class ChannelMixer {
   ChannelMixer(ChannelLayout input_layout, ChannelLayout output_layout);
   ~ChannelMixer();

-  // Transforms all input channels corresponding to the selected |input_layout|
-  // to the number of channels in the selected |output_layout|.
+  // Transforms all input channels corresponding to the selected `input_layout`
+  // to the number of channels in the selected `output_layout`.
   // Example usage (downmix from stereo to mono):
   //
   //   ChannelMixer mixer(CHANNEL_LAYOUT_STEREO, CHANNEL_LAYOUT_MONO);

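Note: completing the usage example above as a sketch (assumes a populated stereo AudioFrame; the include path is an assumption):

#include "audio/utility/channel_mixer.h"  // assumed path

void DownmixToMono(webrtc::AudioFrame* frame) {
  webrtc::ChannelMixer mixer(webrtc::CHANNEL_LAYOUT_STEREO,
                             webrtc::CHANNEL_LAYOUT_MONO);
  mixer.Transform(frame);  // frame->num_channels_ becomes 1 afterwards.
}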
@@ -69,11 +69,11 @@ class ChannelMixer {
   // 1D array used as temporary storage during the transformation.
   std::unique_ptr<int16_t[]> audio_vector_;

-  // Number of elements allocated for |audio_vector_|.
+  // Number of elements allocated for `audio_vector_`.
   size_t audio_vector_size_ = 0;

   // Optimization case for when we can simply remap the input channels to output
-  // channels, i.e., when all scaling factors in |matrix_| equals 1.0.
+  // channels, i.e., when all scaling factors in `matrix_` equals 1.0.
   bool remapping_;

   // Delete the copy constructor and assignment operator.

@@ -274,7 +274,7 @@ bool ChannelMixingMatrix::CreateTransformationMatrix(
   // All channels should now be accounted for.
   RTC_DCHECK(unaccounted_inputs_.empty());

-  // See if the output |matrix_| is simply a remapping matrix. If each input
+  // See if the output `matrix_` is simply a remapping matrix. If each input
   // channel maps to a single output channel we can simply remap. Doing this
   // programmatically is less fragile than logic checks on channel mappings.
   for (int output_ch = 0; output_ch < output_channels_; ++output_ch) {

@@ -287,7 +287,7 @@ bool ChannelMixingMatrix::CreateTransformationMatrix(
     }
   }

-  // If we've gotten here, |matrix_| is simply a remapping.
+  // If we've gotten here, `matrix_` is simply a remapping.
   return true;
 }

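Note: one sound sufficient condition for the "simply a remapping" conclusion above is that every output row of the matrix has exactly one 1.0 entry and zeros elsewhere. A standalone sketch of that check (assumed simplification, not ChannelMixingMatrix's exact loop):

#include <vector>

bool IsRemapping(const std::vector<std::vector<float>>& matrix) {
  for (const std::vector<float>& output_row : matrix) {
    int ones = 0;
    for (float scale : output_row) {
      if (scale == 1.0f) {
        ++ones;
      } else if (scale != 0.0f) {
        return false;  // A fractional scale means real mixing.
      }
    }
    if (ones != 1) return false;  // Output must come from exactly one input.
  }
  return true;
}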
|
@ -29,7 +29,7 @@ class ChannelMixingMatrix {
|
|||
// Create the transformation matrix of input channels to output channels.
|
||||
// Updates the empty matrix with the transformation, and returns true
|
||||
// if the transformation is just a remapping of channels (no mixing).
|
||||
// The size of |matrix| is |output_channels| x |input_channels|, i.e., the
|
||||
// The size of `matrix` is `output_channels` x `input_channels`, i.e., the
|
||||
// number of rows equals the number of output channels and the number of
|
||||
// columns corresponds to the number of input channels.
|
||||
// This file is derived from Chromium's media/base/channel_mixing_matrix.h.
|
||||
|
@@ -55,14 +55,14 @@ class ChannelMixingMatrix {
   void AccountFor(Channels ch);
   bool IsUnaccounted(Channels ch) const;

-  // Helper methods for checking if |ch| exists in either |input_layout_| or
-  // |output_layout_| respectively.
+  // Helper methods for checking if `ch` exists in either `input_layout_` or
+  // `output_layout_` respectively.
   bool HasInputChannel(Channels ch) const;
   bool HasOutputChannel(Channels ch) const;

-  // Helper methods for updating |matrix_| with the proper value for
-  // mixing |input_ch| into |output_ch|. MixWithoutAccounting() does not
-  // remove the channel from |unaccounted_inputs_|.
+  // Helper methods for updating `matrix_` with the proper value for
+  // mixing `input_ch` into `output_ch`. MixWithoutAccounting() does not
+  // remove the channel from `unaccounted_inputs_`.
   void Mix(Channels input_ch, Channels output_ch, float scale);
   void MixWithoutAccounting(Channels input_ch, Channels output_ch, float scale);

@@ -75,7 +75,7 @@ AudioChannel::~AudioChannel() {

   audio_mixer_->RemoveSource(ingress_.get());

-  // TODO(bugs.webrtc.org/11581): unclear if we still need to clear |egress_|
+  // TODO(bugs.webrtc.org/11581): unclear if we still need to clear `egress_`
   // here.
   egress_.reset();
   ingress_.reset();

@@ -52,7 +52,7 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback {

   // Set the encoder format and payload type for AudioCodingModule.
   // It's possible to change the encoder type during its active usage.
-  // |payload_type| must be the type that is negotiated with peer through
+  // `payload_type` must be the type that is negotiated with peer through
   // offer/answer.
   void SetEncoder(int payload_type,
                   const SdpAudioFormat& encoder_format,

@@ -84,7 +84,7 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback {

   // Send DTMF named event as specified by
   // https://tools.ietf.org/html/rfc4733#section-3.2
-  // |duration_ms| specifies the duration of DTMF packets that will be emitted
+  // `duration_ms` specifies the duration of DTMF packets that will be emitted
   // in place of real RTP packets instead.
   // This will return true when requested dtmf event is successfully scheduled
   // otherwise false when the dtmf queue reached maximum of 20 events.

@@ -139,7 +139,7 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback {
   // newly received audio frame from AudioTransport.
   uint32_t frame_rtp_timestamp_ = 0;

-  // Flag to track mute state from caller. |previously_muted_| is used to
+  // Flag to track mute state from caller. `previously_muted_` is used to
   // track previous state as part of input to AudioFrameOperations::Mute
   // to implement fading effect when (un)mute is invoked.
   bool mute_ = false;

@@ -55,7 +55,7 @@ VoipCore::VoipCore(rtc::scoped_refptr<AudioEncoderFactory> encoder_factory,
 }

 bool VoipCore::InitializeIfNeeded() {
-  // |audio_device_module_| internally owns a lock and the whole logic here
+  // `audio_device_module_` internally owns a lock and the whole logic here
   // needs to be executed atomically once using another lock in VoipCore.
   // Further changes in this method will need to make sure that no deadlock is
   // introduced in the future.

@@ -178,7 +178,7 @@ VoipResult VoipCore::ReleaseChannel(ChannelId channel_id) {
   }

   if (no_channels_after_release) {
-    // TODO(bugs.webrtc.org/11581): unclear if we still need to clear |channel|
+    // TODO(bugs.webrtc.org/11581): unclear if we still need to clear `channel`
     // here.
     channel = nullptr;

@@ -53,7 +53,7 @@ class VoipCore : public VoipEngine,
                  public VoipVolumeControl {
  public:
   // Construct VoipCore with provided arguments.
-  // ProcessThread implementation can be injected by |process_thread|
+  // ProcessThread implementation can be injected by `process_thread`
   // (mainly for testing purpose) and when set to nullptr, default
   // implementation will be used.
   VoipCore(rtc::scoped_refptr<AudioEncoderFactory> encoder_factory,

@@ -128,7 +128,7 @@ class VoipCore : public VoipEngine,
   // mode. Therefore it would be better to delay the logic as late as possible.
   bool InitializeIfNeeded();

-  // Fetches the corresponding AudioChannel assigned with given |channel|.
+  // Fetches the corresponding AudioChannel assigned with given `channel`.
   // Returns nullptr if not found.
   rtc::scoped_refptr<AudioChannel> GetChannel(ChannelId channel_id);

@@ -144,15 +144,15 @@ class VoipCore : public VoipEngine,
   std::unique_ptr<TaskQueueFactory> task_queue_factory_;

   // Synchronization is handled internally by AudioProcessing.
-  // Must be placed before |audio_device_module_| for proper destruction.
+  // Must be placed before `audio_device_module_` for proper destruction.
   rtc::scoped_refptr<AudioProcessing> audio_processing_;

   // Synchronization is handled internally by AudioMixer.
-  // Must be placed before |audio_device_module_| for proper destruction.
+  // Must be placed before `audio_device_module_` for proper destruction.
   rtc::scoped_refptr<AudioMixer> audio_mixer_;

   // Synchronization is handled internally by AudioTransportImpl.
-  // Must be placed before |audio_device_module_| for proper destruction.
+  // Must be placed before `audio_device_module_` for proper destruction.
   std::unique_ptr<AudioTransportImpl> audio_transport_;

   // Synchronization is handled internally by AudioDeviceModule.

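Note: the "must be placed before `audio_device_module_`" comments rely on a general C++ rule — members are destroyed in reverse declaration order — so the device module is torn down while the members it may still call into remain alive. A minimal illustration with hypothetical types:

#include <iostream>

struct Tracer {
  const char* name;
  ~Tracer() { std::cout << "destroying " << name << "\n"; }
};

struct VoipCoreLike {
  Tracer audio_processing_{"audio_processing_"};        // declared first
  Tracer audio_device_module_{"audio_device_module_"};  // declared last
};

int main() {
  VoipCoreLike core;
  // Prints "destroying audio_device_module_" first, then
  // "destroying audio_processing_".
}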