Use backticks not vertical bars to denote variables in comments for /api
Bug: webrtc:12338
Change-Id: Ib97b2c3d64dbd895f261ffa76a2e885bd934a87f
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/226940
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34554}
parent 7750d802a5
commit 0e61fdd27c
94 changed files with 455 additions and 455 deletions
@@ -57,7 +57,7 @@ class RTC_EXPORT Resource : public rtc::RefCountInterface {
 ~Resource() override;

 virtual std::string Name() const = 0;
-// The |listener| may be informed of resource usage measurements on any task
+// The `listener` may be informed of resource usage measurements on any task
 // queue, but not after this method is invoked with the null argument.
 virtual void SetResourceListener(ResourceListener* listener) = 0;
 };
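For context, a minimal sketch of a Resource implementation based only on the pure-virtual methods visible in the hunk above; the class name and reporting policy are illustrative, and a concrete instance would normally be wrapped in rtc::RefCountedObject<> since Resource is ref-counted:

class CpuOveruseResource : public webrtc::Resource {
 public:
  std::string Name() const override { return "CpuOveruseResource"; }

  // Per the contract above: the listener may be notified on any task queue,
  // but never after SetResourceListener(nullptr) has been observed.
  void SetResourceListener(webrtc::ResourceListener* listener) override {
    listener_ = listener;  // A real implementation would guard this for thread safety.
  }

 private:
  webrtc::ResourceListener* listener_ = nullptr;
};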
@@ -41,10 +41,10 @@ namespace webrtc {
 class AsyncDnsResolverResult {
 public:
 virtual ~AsyncDnsResolverResult() = default;
-// Returns true iff the address from |Start| was successfully resolved.
-// If the address was successfully resolved, sets |addr| to a copy of the
-// address from |Start| with the IP address set to the top most resolved
-// address of |family| (|addr| will have both hostname and the resolved ip).
+// Returns true iff the address from `Start` was successfully resolved.
+// If the address was successfully resolved, sets `addr` to a copy of the
+// address from `Start` with the IP address set to the top most resolved
+// address of `family` (`addr` will have both hostname and the resolved ip).
 virtual bool GetResolvedAddress(int family,
 rtc::SocketAddress* addr) const = 0;
 // Returns error from resolver.
@@ -55,7 +55,7 @@ class RTC_EXPORT AsyncDnsResolverInterface {
 public:
 virtual ~AsyncDnsResolverInterface() = default;

-// Start address resolution of the hostname in |addr|.
+// Start address resolution of the hostname in `addr`.
 virtual void Start(const rtc::SocketAddress& addr,
 std::function<void()> callback) = 0;
 virtual const AsyncDnsResolverResult& result() const = 0;
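For context, a hedged usage sketch of the resolver interface above; the hostname and port are made up, and AF_INET comes from the platform socket headers:

void ResolveStunServer(webrtc::AsyncDnsResolverInterface& resolver) {
  rtc::SocketAddress server("stun.example.org", 3478);  // Illustrative address.
  resolver.Start(server, [&resolver] {
    rtc::SocketAddress resolved;
    // GetResolvedAddress() returns true iff the address passed to Start()
    // was successfully resolved; `resolved` then carries hostname + IP.
    if (resolver.result().GetResolvedAddress(AF_INET, &resolved)) {
      // Use `resolved` to open a connection.
    }
  });
}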
@@ -52,7 +52,7 @@ void AudioFrame::Reset() {
 }

 void AudioFrame::ResetWithoutMuting() {
-// TODO(wu): Zero is a valid value for |timestamp_|. We should initialize
+// TODO(wu): Zero is a valid value for `timestamp_`. We should initialize
 // to an invalid value, or add a new member to indicate invalidity.
 timestamp_ = 0;
 elapsed_time_ms_ = -1;
@@ -139,7 +139,7 @@ class AudioFrame {
 int64_t profile_timestamp_ms_ = 0;

 // Information about packets used to assemble this audio frame. This is needed
-// by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
+// by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's
 // MediaStreamTrack, in order to implement getContributingSources(). See:
 // https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
 //
@@ -149,7 +149,7 @@ class AudioFrame {
 // sync buffer is the small sample-holding buffer located after the audio
 // decoder and before where samples are assembled into output frames.
 //
-// |RtpPacketInfos| may also be empty if the audio samples did not come from
+// `RtpPacketInfos` may also be empty if the audio samples did not come from
 // RTP packets. E.g. if the audio were locally generated by packet loss
 // concealment, comfort noise generation, etc.
 RtpPacketInfos packet_infos_;
@@ -165,7 +165,7 @@ class AudioFrame {

 // Absolute capture timestamp when this audio frame was originally captured.
 // This is only valid for audio frames captured on this machine. The absolute
-// capture timestamp of a received frame is found in |packet_infos_|.
+// capture timestamp of a received frame is found in `packet_infos_`.
 // This timestamp MUST be based on the same clock as rtc::TimeMillis().
 absl::optional<int64_t> absolute_capture_timestamp_ms_;

@@ -28,12 +28,12 @@ class AudioFrameProcessor {

 // Processes the frame received from WebRTC, is called by WebRTC off the
 // realtime audio capturing path. AudioFrameProcessor must reply with
-// processed frames by calling |sink_callback| if it was provided in SetSink()
-// call. |sink_callback| can be called in the context of Process().
+// processed frames by calling `sink_callback` if it was provided in SetSink()
+// call. `sink_callback` can be called in the context of Process().
 virtual void Process(std::unique_ptr<AudioFrame> frame) = 0;

 // Atomically replaces the current sink with the new one. Before the
-// first call to this function, or if the provided |sink_callback| is nullptr,
+// first call to this function, or if the provided `sink_callback` is nullptr,
 // processed frames are simply discarded.
 virtual void SetSink(OnAudioFrameCallback sink_callback) = 0;
 };
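For context, a minimal AudioFrameProcessor sketch built from the two pure-virtual methods above; it assumes OnAudioFrameCallback is a callable taking the processed frame, as its use here implies:

class PassThroughFrameProcessor : public webrtc::AudioFrameProcessor {
 public:
  // Called off the realtime capture path; replies with the (unmodified) frame
  // through the sink callback, as the contract above requires.
  void Process(std::unique_ptr<webrtc::AudioFrame> frame) override {
    if (sink_) sink_(std::move(frame));
  }

  // Stores the new sink; with a null callback, processed frames are dropped.
  // A real implementation must make this replacement atomic/thread-safe.
  void SetSink(OnAudioFrameCallback sink_callback) override {
    sink_ = std::move(sink_callback);
  }

 private:
  OnAudioFrameCallback sink_;
};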
@@ -35,9 +35,9 @@ class AudioMixer : public rtc::RefCountInterface {
 kError, // The audio_frame will not be used.
 };

-// Overwrites |audio_frame|. The data_ field is overwritten with
+// Overwrites `audio_frame`. The data_ field is overwritten with
 // 10 ms of new audio (either 1 or 2 interleaved channels) at
-// |sample_rate_hz|. All fields in |audio_frame| must be updated.
+// `sample_rate_hz`. All fields in `audio_frame` must be updated.
 virtual AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
 AudioFrame* audio_frame) = 0;

@@ -66,7 +66,7 @@ class AudioMixer : public rtc::RefCountInterface {
 // should mix at a rate that doesn't cause quality loss of the
 // sources' audio. The mixing rate is one of the rates listed in
 // AudioProcessing::NativeRate. All fields in
-// |audio_frame_for_mixing| must be updated.
+// `audio_frame_for_mixing` must be updated.
 virtual void Mix(size_t number_of_channels,
 AudioFrame* audio_frame_for_mixing) = 0;
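For context, a small usage sketch of Mix(); `mixer` stands in for a concrete AudioMixer with sources already added elsewhere:

void PullTenMilliseconds(webrtc::AudioMixer& mixer) {
  webrtc::AudioFrame frame;
  // Mix() must fill `audio_frame_for_mixing` with 10 ms of audio and update
  // all of its fields, per the comment above.
  mixer.Mix(/*number_of_channels=*/2, &frame);
}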
@@ -53,8 +53,8 @@ class AudioDecoder {
 // Returns true if this packet contains DTX.
 virtual bool IsDtxPacket() const;

-// Decodes this frame of audio and writes the result in |decoded|.
-// |decoded| must be large enough to store as many samples as indicated by a
+// Decodes this frame of audio and writes the result in `decoded`.
+// `decoded` must be large enough to store as many samples as indicated by a
 // call to Duration() . On success, returns an absl::optional containing the
 // total number of samples across all channels, as well as whether the
 // decoder produced comfort noise or speech. On failure, returns an empty
@@ -85,8 +85,8 @@ class AudioDecoder {
 // Let the decoder parse this payload and prepare zero or more decodable
 // frames. Each frame must be between 10 ms and 120 ms long. The caller must
 // ensure that the AudioDecoder object outlives any frame objects returned by
-// this call. The decoder is free to swap or move the data from the |payload|
-// buffer. |timestamp| is the input timestamp, in samples, corresponding to
+// this call. The decoder is free to swap or move the data from the `payload`
+// buffer. `timestamp` is the input timestamp, in samples, corresponding to
 // the start of the payload.
 virtual std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
 uint32_t timestamp);
@@ -95,12 +95,12 @@ class AudioDecoder {
 // obsolete; callers should call ParsePayload instead. For now, subclasses
 // must still implement DecodeInternal.

-// Decodes |encode_len| bytes from |encoded| and writes the result in
-// |decoded|. The maximum bytes allowed to be written into |decoded| is
-// |max_decoded_bytes|. Returns the total number of samples across all
-// channels. If the decoder produced comfort noise, |speech_type|
+// Decodes `encode_len` bytes from `encoded` and writes the result in
+// `decoded`. The maximum bytes allowed to be written into `decoded` is
+// `max_decoded_bytes`. Returns the total number of samples across all
+// channels. If the decoder produced comfort noise, `speech_type`
 // is set to kComfortNoise, otherwise it is kSpeech. The desired output
-// sample rate is provided in |sample_rate_hz|, which must be valid for the
+// sample rate is provided in `sample_rate_hz`, which must be valid for the
 // codec at hand.
 int Decode(const uint8_t* encoded,
 size_t encoded_len,
@@ -123,11 +123,11 @@ class AudioDecoder {

 // Calls the packet-loss concealment of the decoder to update the state after
 // one or several lost packets. The caller has to make sure that the
-// memory allocated in |decoded| should accommodate |num_frames| frames.
+// memory allocated in `decoded` should accommodate `num_frames` frames.
 virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);

 // Asks the decoder to generate packet-loss concealment and append it to the
-// end of |concealment_audio|. The concealment audio should be in
+// end of `concealment_audio`. The concealment audio should be in
 // channel-interleaved format, with as many channels as the last decoded
 // packet produced. The implementation must produce at least
 // requested_samples_per_channel, or nothing at all. This is a signal to the
@@ -146,19 +146,19 @@ class AudioDecoder {
 // Returns the last error code from the decoder.
 virtual int ErrorCode();

-// Returns the duration in samples-per-channel of the payload in |encoded|
-// which is |encoded_len| bytes long. Returns kNotImplemented if no duration
+// Returns the duration in samples-per-channel of the payload in `encoded`
+// which is `encoded_len` bytes long. Returns kNotImplemented if no duration
 // estimate is available, or -1 in case of an error.
 virtual int PacketDuration(const uint8_t* encoded, size_t encoded_len) const;

 // Returns the duration in samples-per-channel of the redandant payload in
-// |encoded| which is |encoded_len| bytes long. Returns kNotImplemented if no
+// `encoded` which is `encoded_len` bytes long. Returns kNotImplemented if no
 // duration estimate is available, or -1 in case of an error.
 virtual int PacketDurationRedundant(const uint8_t* encoded,
 size_t encoded_len) const;

 // Detects whether a packet has forward error correction. The packet is
-// comprised of the samples in |encoded| which is |encoded_len| bytes long.
+// comprised of the samples in `encoded` which is `encoded_len` bytes long.
 // Returns true if the packet has FEC and false otherwise.
 virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;
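For context, a sketch of a Decode() call. Only the first two parameters are visible in the hunk above; the remaining ones are inferred from the comment (desired sample rate, maximum output bytes, output buffer, speech type), so treat the exact signature as an assumption:

int DecodeOnePacket(webrtc::AudioDecoder& decoder,
                    const uint8_t* payload,
                    size_t payload_len) {
  int16_t pcm[48000 / 100 * 2];  // Room for 10 ms of 48 kHz stereo.
  webrtc::AudioDecoder::SpeechType speech_type;
  // Returns the total number of samples across all channels, or a negative
  // value on error; `speech_type` reports kSpeech or kComfortNoise.
  return decoder.Decode(payload, payload_len, /*sample_rate_hz=*/48000,
                        sizeof(pcm), pcm, &speech_type);
}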
@@ -89,8 +89,8 @@ class AudioDecoderFactoryT : public AudioDecoderFactory {
 // Each decoder type is given as a template argument to the function; it should
 // be a struct with the following static member functions:
 //
-// // Converts |audio_format| to a ConfigType instance. Returns an empty
-// // optional if |audio_format| doesn't correctly specify a decoder of our
+// // Converts `audio_format` to a ConfigType instance. Returns an empty
+// // optional if `audio_format` doesn't correctly specify a decoder of our
 // // type.
 // absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
 //
@@ -95,13 +95,13 @@ class AudioEncoder {

 // This is the main struct for auxiliary encoding information. Each encoded
 // packet should be accompanied by one EncodedInfo struct, containing the
-// total number of |encoded_bytes|, the |encoded_timestamp| and the
-// |payload_type|. If the packet contains redundant encodings, the |redundant|
+// total number of `encoded_bytes`, the `encoded_timestamp` and the
+// `payload_type`. If the packet contains redundant encodings, the `redundant`
 // vector will be populated with EncodedInfoLeaf structs. Each struct in the
 // vector represents one encoding; the order of structs in the vector is the
 // same as the order in which the actual payloads are written to the byte
 // stream. When EncoderInfoLeaf structs are present in the vector, the main
-// struct's |encoded_bytes| will be the sum of all the |encoded_bytes| in the
+// struct's `encoded_bytes` will be the sum of all the `encoded_bytes` in the
 // vector.
 struct EncodedInfo : public EncodedInfoLeaf {
 EncodedInfo();
@@ -143,7 +143,7 @@ class AudioEncoder {

 // Accepts one 10 ms block of input audio (i.e., SampleRateHz() / 100 *
 // NumChannels() samples). Multi-channel audio must be sample-interleaved.
-// The encoder appends zero or more bytes of output to |encoded| and returns
+// The encoder appends zero or more bytes of output to `encoded` and returns
 // additional encoding information. Encode() checks some preconditions, calls
 // EncodeImpl() which does the actual work, and then checks some
 // postconditions.
@@ -205,7 +205,7 @@ class AudioEncoder {
 virtual void DisableAudioNetworkAdaptor();

 // Provides uplink packet loss fraction to this encoder to allow it to adapt.
-// |uplink_packet_loss_fraction| is in the range [0.0, 1.0].
+// `uplink_packet_loss_fraction` is in the range [0.0, 1.0].
 virtual void OnReceivedUplinkPacketLossFraction(
 float uplink_packet_loss_fraction);

@@ -103,8 +103,8 @@ class AudioEncoderFactoryT : public AudioEncoderFactory {
 // Each encoder type is given as a template argument to the function; it should
 // be a struct with the following static member functions:
 //
-// // Converts |audio_format| to a ConfigType instance. Returns an empty
-// // optional if |audio_format| doesn't correctly specify an encoder of our
+// // Converts `audio_format` to a ConfigType instance. Returns an empty
+// // optional if `audio_format` doesn't correctly specify an encoder of our
 // // type.
 // absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
 //
@@ -39,7 +39,7 @@ struct RTC_EXPORT SdpAudioFormat {
 Parameters&& param);
 ~SdpAudioFormat();

-// Returns true if this format is compatible with |o|. In SDP terminology:
+// Returns true if this format is compatible with `o`. In SDP terminology:
 // would it represent the same codec between an offer and an answer? As
 // opposed to operator==, this method disregards codec parameters.
 bool Matches(const SdpAudioFormat& o) const;
@@ -49,10 +49,10 @@ struct RTC_EXPORT AudioEncoderOpusConfig {
 bool cbr_enabled;
 int max_playback_rate_hz;

-// |complexity| is used when the bitrate goes above
-// |complexity_threshold_bps| + |complexity_threshold_window_bps|;
-// |low_rate_complexity| is used when the bitrate falls below
-// |complexity_threshold_bps| - |complexity_threshold_window_bps|. In the
+// `complexity` is used when the bitrate goes above
+// `complexity_threshold_bps` + `complexity_threshold_window_bps`;
+// `low_rate_complexity` is used when the bitrate falls below
+// `complexity_threshold_bps` - `complexity_threshold_window_bps`. In the
 // interval in the middle, we keep using the most recent of the two
 // complexity settings.
 int complexity;
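For context, the hysteresis described in the comment above written out as a sketch; the helper function is illustrative, but the field names are the ones referenced in the comment:

int ChooseOpusComplexity(const webrtc::AudioEncoderOpusConfig& config,
                         int bitrate_bps,
                         int previous_choice) {
  const int upper =
      config.complexity_threshold_bps + config.complexity_threshold_window_bps;
  const int lower =
      config.complexity_threshold_bps - config.complexity_threshold_window_bps;
  if (bitrate_bps > upper) return config.complexity;
  if (bitrate_bps < lower) return config.low_rate_complexity;
  return previous_choice;  // In the middle band, keep the most recent setting.
}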
@@ -32,7 +32,7 @@ struct BitrateAllocationUpdate {
 double packet_loss_ratio = 0;
 // Predicted round trip time.
 TimeDelta round_trip_time = TimeDelta::PlusInfinity();
-// |bwe_period| is deprecated, use |stable_target_bitrate| allocation instead.
+// `bwe_period` is deprecated, use `stable_target_bitrate` allocation instead.
 TimeDelta bwe_period = TimeDelta::PlusInfinity();
 // Congestion window pushback bitrate reduction fraction. Used in
 // VideoStreamEncoder to reduce the bitrate by the given fraction
@@ -92,7 +92,7 @@ uint32_t Candidate::GetPriority(uint32_t type_preference,
 // (2^8)*(local preference) +
 // (2^0)*(256 - component ID)

-// |local_preference| length is 2 bytes, 0-65535 inclusive.
+// `local_preference` length is 2 bytes, 0-65535 inclusive.
 // In our implemenation we will partion local_preference into
 // 0 1
 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
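For context, the priority formula quoted above worked through for a typical host candidate (type preference 126, local preference 65535, component 1); the values are illustrative:

#include <cstdint>

constexpr uint32_t kTypePreference = 126;
constexpr uint32_t kLocalPreference = 65535;
constexpr uint32_t kComponentId = 1;
// (2^24)*(type preference) + (2^8)*(local preference) + (2^0)*(256 - component ID)
constexpr uint32_t kPriority =
    (kTypePreference << 24) + (kLocalPreference << 8) + (256 - kComponentId);
static_assert(kPriority == 2130706431, "host candidate, first component");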
@@ -112,7 +112,7 @@ class RTC_EXPORT Candidate {
 uint32_t generation() const { return generation_; }
 void set_generation(uint32_t generation) { generation_ = generation; }

-// |network_cost| measures the cost/penalty of using this candidate. A network
+// `network_cost` measures the cost/penalty of using this candidate. A network
 // cost of 0 indicates this candidate can be used freely. A value of
 // rtc::kNetworkCostMax indicates it should be used only as the last resort.
 void set_network_cost(uint16_t network_cost) {
@@ -167,9 +167,9 @@ class RTC_EXPORT Candidate {
 bool operator!=(const Candidate& o) const;

 // Returns a sanitized copy configured by the given booleans. If
-// |use_host_address| is true, the returned copy has its IP removed from
-// |address()|, which leads |address()| to be a hostname address. If
-// |filter_related_address|, the returned copy has its related address reset
+// `use_host_address` is true, the returned copy has its IP removed from
+// `address()`, which leads `address()` to be a hostname address. If
+// `filter_related_address`, the returned copy has its related address reset
 // to the wildcard address (i.e. 0.0.0.0 for IPv4 and :: for IPv6). Note that
 // setting both booleans to false returns an identical copy to the original
 // candidate.
@@ -42,14 +42,14 @@ struct DataChannelInit {
 // The max period of time in milliseconds in which retransmissions will be
 // sent. After this time, no more retransmissions will be sent.
 //
-// Cannot be set along with |maxRetransmits|.
-// This is called |maxPacketLifeTime| in the WebRTC JS API.
+// Cannot be set along with `maxRetransmits`.
+// This is called `maxPacketLifeTime` in the WebRTC JS API.
 // Negative values are ignored, and positive values are clamped to [0-65535]
 absl::optional<int> maxRetransmitTime;

 // The max number of retransmissions.
 //
-// Cannot be set along with |maxRetransmitTime|.
+// Cannot be set along with `maxRetransmitTime`.
 // Negative values are ignored, and positive values are clamped to [0-65535]
 absl::optional<int> maxRetransmits;

@@ -57,7 +57,7 @@ struct DataChannelInit {
 std::string protocol;

 // True if the channel has been externally negotiated and we do not send an
-// in-band signalling in the form of an "open" message. If this is true, |id|
+// in-band signalling in the form of an "open" message. If this is true, `id`
 // below must be set; otherwise it should be unset and will be negotiated
 // in-band.
 bool negotiated = false;
@@ -70,7 +70,7 @@ struct DataChannelInit {
 };

 // At the JavaScript level, data can be passed in as a string or a blob, so
-// this structure's |binary| flag tells whether the data should be interpreted
+// this structure's `binary` flag tells whether the data should be interpreted
 // as binary or text.
 struct DataBuffer {
 DataBuffer(const rtc::CopyOnWriteBuffer& data, bool binary)
@@ -180,7 +180,7 @@ class RTC_EXPORT DataChannelInterface : public rtc::RefCountInterface {
 // https://tools.ietf.org/html/draft-ietf-rtcweb-data-channel-13#section-6.7
 virtual void Close() = 0;

-// Sends |data| to the remote peer. If the data can't be sent at the SCTP
+// Sends `data` to the remote peer. If the data can't be sent at the SCTP
 // level (due to congestion control), it's buffered at the data channel level,
 // up to a maximum of 16MB. If Send is called while this buffer is full, the
 // data channel will be closed abruptly.
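For context, an illustrative DataChannelInit configuration following the constraints in the comments above (only one of the two reliability knobs may be set):

webrtc::DataChannelInit init;
init.protocol = "chat";           // Illustrative subprotocol.
init.maxRetransmits = 3;          // Partial reliability by retransmission count.
// init.maxRetransmitTime = 500;  // Would conflict with maxRetransmits.
init.negotiated = false;          // Negotiate in-band via the "open" message.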
@@ -23,8 +23,8 @@ namespace webrtc {
 // DtmfSender.
 class DtmfSenderObserverInterface {
 public:
-// Triggered when DTMF |tone| is sent.
-// If |tone| is empty that means the DtmfSender has sent out all the given
+// Triggered when DTMF `tone` is sent.
+// If `tone` is empty that means the DtmfSender has sent out all the given
 // tones.
 // The callback includes the state of the tone buffer at the time when
 // the tone finished playing.
@@ -58,7 +58,7 @@ class DtmfSenderInterface : public rtc::RefCountInterface {
 // able to send packets, and a "telephone-event" codec must be negotiated.
 virtual bool CanInsertDtmf() = 0;

-// Queues a task that sends the DTMF |tones|. The |tones| parameter is treated
+// Queues a task that sends the DTMF `tones`. The `tones` parameter is treated
 // as a series of characters. The characters 0 through 9, A through D, #, and
 // * generate the associated DTMF tones. The characters a to d are equivalent
 // to A to D. The character ',' indicates a delay of 2 seconds before
@@ -66,18 +66,18 @@ class DtmfSenderInterface : public rtc::RefCountInterface {
 //
 // Unrecognized characters are ignored.
 //
-// The |duration| parameter indicates the duration in ms to use for each
-// character passed in the |tones| parameter. The duration cannot be more
+// The `duration` parameter indicates the duration in ms to use for each
+// character passed in the `tones` parameter. The duration cannot be more
 // than 6000 or less than 70.
 //
-// The |inter_tone_gap| parameter indicates the gap between tones in ms. The
-// |inter_tone_gap| must be at least 50 ms but should be as short as
+// The `inter_tone_gap` parameter indicates the gap between tones in ms. The
+// `inter_tone_gap` must be at least 50 ms but should be as short as
 // possible.
 //
-// The |comma_delay| parameter indicates the delay after the ','
-// character. InsertDtmf specifies |comma_delay| as an argument
+// The `comma_delay` parameter indicates the delay after the ','
+// character. InsertDtmf specifies `comma_delay` as an argument
 // with a default value of 2 seconds as per the WebRTC spec. This parameter
-// allows users to comply with legacy WebRTC clients. The |comma_delay|
+// allows users to comply with legacy WebRTC clients. The `comma_delay`
 // must be at least 50 ms.
 //
 // If InsertDtmf is called on the same object while an existing task for this
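For context, a hedged usage sketch; the InsertDtmf(tones, duration, inter_tone_gap) overload is assumed from the comments above rather than shown in the hunk:

void SendDigits(webrtc::DtmfSenderInterface& sender) {
  if (!sender.CanInsertDtmf())
    return;
  // 100 ms per tone (allowed range 70-6000) and a 70 ms gap (minimum 50 ms);
  // a ',' in the tone string would insert the comma_delay pause.
  sender.InsertDtmf("1234#", /*duration=*/100, /*inter_tone_gap=*/70);
}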
@@ -38,7 +38,7 @@ class VCMProtectionCallback {
 // FecController calculates how much of the allocated network
 // capacity that can be used by an encoder and how much that
 // is needed for redundant packets such as FEC and NACK. It uses an
-// implementation of |VCMProtectionCallback| to set new FEC parameters and get
+// implementation of `VCMProtectionCallback` to set new FEC parameters and get
 // the bitrate currently used for FEC and NACK.
 // Usage:
 // Setup by calling SetProtectionMethod and SetEncodingData.
@@ -30,7 +30,7 @@ class TransformableFrameInterface {
 // method call.
 virtual rtc::ArrayView<const uint8_t> GetData() const = 0;

-// Copies |data| into the owned frame payload data.
+// Copies `data` into the owned frame payload data.
 virtual void SetData(rtc::ArrayView<const uint8_t> data) = 0;

 virtual uint32_t GetTimestamp() const = 0;
@@ -78,7 +78,7 @@ class TransformedFrameCallback : public rtc::RefCountInterface {
 // the TransformedFrameCallback interface (see above).
 class FrameTransformerInterface : public rtc::RefCountInterface {
 public:
-// Transforms |frame| using the implementing class' processing logic.
+// Transforms `frame` using the implementing class' processing logic.
 virtual void Transform(
 std::unique_ptr<TransformableFrameInterface> transformable_frame) = 0;

api/jsep.h
@@ -73,7 +73,7 @@ class RTC_EXPORT IceCandidateInterface {

 // Creates a IceCandidateInterface based on SDP string.
 // Returns null if the sdp string can't be parsed.
-// |error| may be null.
+// `error` may be null.
 RTC_EXPORT IceCandidateInterface* CreateIceCandidate(const std::string& sdp_mid,
 int sdp_mline_index,
 const std::string& sdp,
@@ -91,7 +91,7 @@ class IceCandidateCollection {
 public:
 virtual ~IceCandidateCollection() {}
 virtual size_t count() const = 0;
-// Returns true if an equivalent |candidate| exist in the collection.
+// Returns true if an equivalent `candidate` exist in the collection.
 virtual bool HasCandidate(const IceCandidateInterface* candidate) const = 0;
 virtual const IceCandidateInterface* at(size_t index) const = 0;
 };
@@ -158,7 +158,7 @@ class RTC_EXPORT SessionDescriptionInterface {
 virtual SdpType GetType() const;

 // kOffer/kPrAnswer/kAnswer
-// TODO(steveanton): Remove this in favor of |GetType| that returns SdpType.
+// TODO(steveanton): Remove this in favor of `GetType` that returns SdpType.
 virtual std::string type() const = 0;

 // Adds the specified candidate to the description.
@@ -190,7 +190,7 @@ class RTC_EXPORT SessionDescriptionInterface {

 // Creates a SessionDescriptionInterface based on the SDP string and the type.
 // Returns null if the sdp string can't be parsed or the type is unsupported.
-// |error| may be null.
+// `error` may be null.
 // TODO(steveanton): This function is deprecated. Please use the functions below
 // which take an SdpType enum instead. Remove this once it is no longer used.
 RTC_EXPORT SessionDescriptionInterface* CreateSessionDescription(
@@ -200,8 +200,8 @@ RTC_EXPORT SessionDescriptionInterface* CreateSessionDescription(

 // Creates a SessionDescriptionInterface based on the SDP string and the type.
 // Returns null if the SDP string cannot be parsed.
-// If using the signature with |error_out|, details of the parsing error may be
-// written to |error_out| if it is not null.
+// If using the signature with `error_out`, details of the parsing error may be
+// written to `error_out` if it is not null.
 RTC_EXPORT std::unique_ptr<SessionDescriptionInterface>
 CreateSessionDescription(SdpType type, const std::string& sdp);
 RTC_EXPORT std::unique_ptr<SessionDescriptionInterface>
@@ -221,7 +221,7 @@ std::unique_ptr<SessionDescriptionInterface> CreateSessionDescription(
 class RTC_EXPORT CreateSessionDescriptionObserver
 : public rtc::RefCountInterface {
 public:
-// This callback transfers the ownership of the |desc|.
+// This callback transfers the ownership of the `desc`.
 // TODO(deadbeef): Make this take an std::unique_ptr<> to avoid confusion
 // around ownership.
 virtual void OnSuccess(SessionDescriptionInterface* desc) = 0;
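For context, a usage sketch of the SdpType-based factory declared above; `remote_sdp` is a placeholder for SDP received over the application's signaling channel:

std::unique_ptr<webrtc::SessionDescriptionInterface> ParseRemoteOffer(
    const std::string& remote_sdp) {
  // Returns null if the SDP string cannot be parsed.
  return webrtc::CreateSessionDescription(webrtc::SdpType::kOffer, remote_sdp);
}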
@@ -37,7 +37,7 @@ class RTC_EXPORT JsepIceCandidate : public IceCandidateInterface {
 JsepIceCandidate(const JsepIceCandidate&) = delete;
 JsepIceCandidate& operator=(const JsepIceCandidate&) = delete;
 ~JsepIceCandidate() override;
-// |err| may be null.
+// `err` may be null.
 bool Initialize(const std::string& sdp, SdpParseError* err);
 void SetCandidate(const cricket::Candidate& candidate) {
 candidate_ = candidate;
@@ -43,7 +43,7 @@ class JsepSessionDescription : public SessionDescriptionInterface {
 absl::string_view session_version);
 virtual ~JsepSessionDescription();

-// Takes ownership of |description|.
+// Takes ownership of `description`.
 bool Initialize(std::unique_ptr<cricket::SessionDescription> description,
 const std::string& session_id,
 const std::string& session_version);
@@ -200,7 +200,7 @@ class AudioTrackSinkInterface {
 RTC_NOTREACHED() << "This method must be overridden, or not used.";
 }

-// In this method, |absolute_capture_timestamp_ms|, when available, is
+// In this method, `absolute_capture_timestamp_ms`, when available, is
 // supposed to deliver the timestamp when this audio frame was originally
 // captured. This timestamp MUST be based on the same clock as
 // rtc::TimeMillis().
@@ -240,7 +240,7 @@ class RTC_EXPORT AudioSourceInterface : public MediaSourceInterface {
 // TODO(deadbeef): Makes all the interfaces pure virtual after they're
 // implemented in chromium.

-// Sets the volume of the source. |volume| is in the range of [0, 10].
+// Sets the volume of the source. `volume` is in the range of [0, 10].
 // TODO(tommi): This method should be on the track and ideally volume should
 // be applied in the track in a way that does not affect clones of the track.
 virtual void SetVolume(double volume) {}
@@ -268,7 +268,7 @@ class AudioProcessorInterface : public rtc::RefCountInterface {
 AudioProcessingStats apm_statistics;
 };

-// Get audio processor statistics. The |has_remote_tracks| argument should be
+// Get audio processor statistics. The `has_remote_tracks` argument should be
 // set if there are active remote tracks (this would usually be true during
 // a call). If there are no remote tracks some of the stats will not be set by
 // the AudioProcessor, because they only make sense if there is at least one
@@ -183,7 +183,7 @@ class NetEq {
 SdpAudioFormat sdp_format;
 };

-// Creates a new NetEq object, with parameters set in |config|. The |config|
+// Creates a new NetEq object, with parameters set in `config`. The `config`
 // object will only have to be valid for the duration of the call to this
 // method.
 static NetEq* Create(
@@ -205,15 +205,15 @@ class NetEq {
 virtual void InsertEmptyPacket(const RTPHeader& rtp_header) = 0;

 // Instructs NetEq to deliver 10 ms of audio data. The data is written to
-// |audio_frame|. All data in |audio_frame| is wiped; |data_|, |speech_type_|,
-// |num_channels_|, |sample_rate_hz_|, |samples_per_channel_|, and
-// |vad_activity_| are updated upon success. If an error is returned, some
+// `audio_frame`. All data in `audio_frame` is wiped; `data_`, `speech_type_`,
+// `num_channels_`, `sample_rate_hz_`, `samples_per_channel_`, and
+// `vad_activity_` are updated upon success. If an error is returned, some
 // fields may not have been updated, or may contain inconsistent values.
-// If muted state is enabled (through Config::enable_muted_state), |muted|
+// If muted state is enabled (through Config::enable_muted_state), `muted`
 // may be set to true after a prolonged expand period. When this happens, the
-// |data_| in |audio_frame| is not written, but should be interpreted as being
+// `data_` in `audio_frame` is not written, but should be interpreted as being
 // all zeros. For testing purposes, an override can be supplied in the
-// |action_override| argument, which will cause NetEq to take this action
+// `action_override` argument, which will cause NetEq to take this action
 // next, instead of the action it would normally choose. An optional output
 // argument for fetching the current sample rate can be provided, which
 // will return the same value as last_output_sample_rate_hz() but will avoid
@@ -228,12 +228,12 @@ class NetEq {
 // Replaces the current set of decoders with the given one.
 virtual void SetCodecs(const std::map<int, SdpAudioFormat>& codecs) = 0;

-// Associates |rtp_payload_type| with the given codec, which NetEq will
+// Associates `rtp_payload_type` with the given codec, which NetEq will
 // instantiate when it needs it. Returns true iff successful.
 virtual bool RegisterPayloadType(int rtp_payload_type,
 const SdpAudioFormat& audio_format) = 0;

-// Removes |rtp_payload_type| from the codec database. Returns 0 on success,
+// Removes `rtp_payload_type` from the codec database. Returns 0 on success,
 // -1 on failure. Removing a payload type that is not registered is ok and
 // will not result in an error.
 virtual int RemovePayloadType(uint8_t rtp_payload_type) = 0;
@@ -250,12 +250,12 @@ class NetEq {
 // Sets a maximum delay in milliseconds for packet buffer. The latency will
 // not exceed the given value, even required delay (given the channel
 // conditions) is higher. Calling this method has the same effect as setting
-// the |max_delay_ms| value in the NetEq::Config struct.
+// the `max_delay_ms` value in the NetEq::Config struct.
 virtual bool SetMaximumDelay(int delay_ms) = 0;

 // Sets a base minimum delay in milliseconds for packet buffer. The minimum
-// delay which is set via |SetMinimumDelay| can't be lower than base minimum
-// delay. Calling this method is similar to setting the |min_delay_ms| value
+// delay which is set via `SetMinimumDelay` can't be lower than base minimum
+// delay. Calling this method is similar to setting the `min_delay_ms` value
 // in the NetEq::Config struct. Returns true if the base minimum is
 // successfully applied, otherwise false is returned.
 virtual bool SetBaseMinimumDelayMs(int delay_ms) = 0;
@@ -272,7 +272,7 @@ class NetEq {
 // The packet buffer part of the delay is not updated during DTX/CNG periods.
 virtual int FilteredCurrentDelayMs() const = 0;

-// Writes the current network statistics to |stats|. The statistics are reset
+// Writes the current network statistics to `stats`. The statistics are reset
 // after the call.
 virtual int NetworkStatistics(NetEqNetworkStatistics* stats) = 0;
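For context, a usage sketch of RegisterPayloadType(); payload type 111 and the SdpAudioFormat constructor taking (name, clock rate, channels) are assumptions, not part of the change above:

void RegisterOpus(webrtc::NetEq& neteq) {
  // NetEq will instantiate the decoder lazily when it is first needed.
  neteq.RegisterPayloadType(111, webrtc::SdpAudioFormat("opus", 48000, 2));
}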
@@ -115,13 +115,13 @@ class NetEqController {
 virtual void SoftReset() = 0;

 // Given info about the latest received packet, and current jitter buffer
-// status, returns the operation. |target_timestamp| and |expand_mutefactor|
-// are provided for reference. |last_packet_samples| is the number of samples
+// status, returns the operation. `target_timestamp` and `expand_mutefactor`
+// are provided for reference. `last_packet_samples` is the number of samples
 // obtained from the last decoded frame. If there is a packet available, it
-// should be supplied in |packet|. The mode resulting from the last call to
-// NetEqImpl::GetAudio is supplied in |last_mode|. If there is a DTMF event to
-// play, |play_dtmf| should be set to true. The output variable
-// |reset_decoder| will be set to true if a reset is required; otherwise it is
+// should be supplied in `packet`. The mode resulting from the last call to
+// NetEqImpl::GetAudio is supplied in `last_mode`. If there is a DTMF event to
+// play, `play_dtmf` should be set to true. The output variable
+// `reset_decoder` will be set to true if a reset is required; otherwise it is
 // left unchanged (i.e., it can remain true if it was true before the call).
 virtual NetEq::Operation GetDecision(const NetEqStatus& status,
 bool* reset_decoder) = 0;
@@ -144,11 +144,11 @@ class NetEqController {
 virtual bool SetBaseMinimumDelay(int delay_ms) = 0;
 virtual int GetBaseMinimumDelay() const = 0;

-// These methods test the |cng_state_| for different conditions.
+// These methods test the `cng_state_` for different conditions.
 virtual bool CngRfc3389On() const = 0;
 virtual bool CngOff() const = 0;

-// Resets the |cng_state_| to kCngOff.
+// Resets the `cng_state_` to kCngOff.
 virtual void SetCngOff() = 0;

 // Reports back to DecisionLogic whether the decision to do expand remains or
@@ -157,7 +157,7 @@ class NetEqController {
 // sync buffer.
 virtual void ExpandDecision(NetEq::Operation operation) = 0;

-// Adds |value| to |sample_memory_|.
+// Adds `value` to `sample_memory_`.
 virtual void AddSampleMemory(int32_t value) = 0;

 // Returns the target buffer level in ms.
@@ -23,7 +23,7 @@ class NetEqControllerFactory {
 public:
 virtual ~NetEqControllerFactory() = default;

-// Creates a new NetEqController object, with parameters set in |config|.
+// Creates a new NetEqController object, with parameters set in `config`.
 virtual std::unique_ptr<NetEqController> CreateNetEqController(
 const NetEqController::Config& config) const = 0;
 };
@@ -24,7 +24,7 @@ class NetEqFactory {
 public:
 virtual ~NetEqFactory() = default;

-// Creates a new NetEq object, with parameters set in |config|. The |config|
+// Creates a new NetEq object, with parameters set in `config`. The `config`
 // object will only have to be valid for the duration of the call to this
 // method.
 virtual std::unique_ptr<NetEq> CreateNetEq(
@@ -82,7 +82,7 @@ class SamplesStatsCounter {
 // additions were done. This function may not be called if there are no
 // samples.
 //
-// |percentile| has to be in [0; 1]. 0 percentile is the min in the array and
+// `percentile` has to be in [0; 1]. 0 percentile is the min in the array and
 // 1 percentile is the max in the array.
 double GetPercentile(double percentile);
 // Returns array view with all samples added into counter. There are no
@@ -105,14 +105,14 @@ class SamplesStatsCounter {
 bool sorted_ = false;
 };

-// Multiply all sample values on |value| and return new SamplesStatsCounter
+// Multiply all sample values on `value` and return new SamplesStatsCounter
 // with resulted samples. Doesn't change origin SamplesStatsCounter.
 SamplesStatsCounter operator*(const SamplesStatsCounter& counter, double value);
 inline SamplesStatsCounter operator*(double value,
 const SamplesStatsCounter& counter) {
 return counter * value;
 }
-// Divide all sample values on |value| and return new SamplesStatsCounter with
+// Divide all sample values on `value` and return new SamplesStatsCounter with
 // resulted samples. Doesn't change origin SamplesStatsCounter.
 SamplesStatsCounter operator/(const SamplesStatsCounter& counter, double value);
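For context, a usage sketch of the operators and GetPercentile() declared above; AddSample() is assumed from the class's purpose:

webrtc::SamplesStatsCounter latency_ms;
latency_ms.AddSample(12.0);
latency_ms.AddSample(19.5);
latency_ms.AddSample(15.2);
// Scales every sample; the original counter is left unchanged.
webrtc::SamplesStatsCounter latency_us = latency_ms * 1000.0;
// The argument must lie in [0, 1]; 0.5 gives the median.
double median_us = latency_us.GetPercentile(0.5);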
@ -235,9 +235,9 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
std::string username;
|
||||
std::string password;
|
||||
TlsCertPolicy tls_cert_policy = kTlsCertPolicySecure;
|
||||
// If the URIs in |urls| only contain IP addresses, this field can be used
|
||||
// If the URIs in `urls` only contain IP addresses, this field can be used
|
||||
// to indicate the hostname, which may be necessary for TLS (using the SNI
|
||||
// extension). If |urls| itself contains the hostname, this isn't
|
||||
// extension). If `urls` itself contains the hostname, this isn't
|
||||
// necessary.
|
||||
std::string hostname;
|
||||
// List of protocols to be used in the TLS ALPN extension.
|
||||
|
@ -526,7 +526,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
// re-determining was removed in ICEbis (ICE v2).
|
||||
bool redetermine_role_on_ice_restart = true;
|
||||
|
||||
// This flag is only effective when |continual_gathering_policy| is
|
||||
// This flag is only effective when `continual_gathering_policy` is
|
||||
// GATHER_CONTINUALLY.
|
||||
//
|
||||
// If true, after the ICE transport type is changed such that new types of
|
||||
|
@ -712,8 +712,8 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
};
|
||||
|
||||
// Used by GetStats to decide which stats to include in the stats reports.
|
||||
// |kStatsOutputLevelStandard| includes the standard stats for Javascript API;
|
||||
// |kStatsOutputLevelDebug| includes both the standard stats and additional
|
||||
// `kStatsOutputLevelStandard` includes the standard stats for Javascript API;
|
||||
// `kStatsOutputLevelDebug` includes both the standard stats and additional
|
||||
// stats for debugging purposes.
|
||||
enum StatsOutputLevel {
|
||||
kStatsOutputLevelStandard,
|
||||
|
@ -754,10 +754,10 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
|
||||
// Add a new MediaStreamTrack to be sent on this PeerConnection, and return
|
||||
// the newly created RtpSender. The RtpSender will be associated with the
|
||||
// streams specified in the |stream_ids| list.
|
||||
// streams specified in the `stream_ids` list.
|
||||
//
|
||||
// Errors:
|
||||
// - INVALID_PARAMETER: |track| is null, has a kind other than audio or video,
|
||||
// - INVALID_PARAMETER: `track` is null, has a kind other than audio or video,
|
||||
// or a sender already exists for the track.
|
||||
// - INVALID_STATE: The PeerConnection is closed.
|
||||
virtual RTCErrorOr<rtc::scoped_refptr<RtpSenderInterface>> AddTrack(
|
||||
|
@ -774,7 +774,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
// corresponding RtpTransceiver direction as no longer sending.
|
||||
//
|
||||
// Errors:
|
||||
// - INVALID_PARAMETER: |sender| is null or (Plan B only) the sender is not
|
||||
// - INVALID_PARAMETER: `sender` is null or (Plan B only) the sender is not
|
||||
// associated with this PeerConnection.
|
||||
// - INVALID_STATE: PeerConnection is closed.
|
||||
// TODO(bugs.webrtc.org/9534): Rename to RemoveTrack once the other signature
|
||||
|
@ -786,7 +786,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
// transceivers. Adding a transceiver will cause future calls to CreateOffer
|
||||
// to add a media description for the corresponding transceiver.
|
||||
//
|
||||
// The initial value of |mid| in the returned transceiver is null. Setting a
|
||||
// The initial value of `mid` in the returned transceiver is null. Setting a
|
||||
// new session description may change it to a non-null value.
|
||||
//
|
||||
// https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-addtransceiver
|
||||
|
@ -805,7 +805,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
// of the transceiver (and sender/receiver) will be derived from the kind of
|
||||
// the track.
|
||||
// Errors:
|
||||
// - INVALID_PARAMETER: |track| is null.
|
||||
// - INVALID_PARAMETER: `track` is null.
|
||||
virtual RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>
|
||||
AddTransceiver(rtc::scoped_refptr<MediaStreamTrackInterface> track) = 0;
|
||||
virtual RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>
|
||||
|
@ -815,7 +815,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
// Adds a transceiver with the given kind. Can either be MEDIA_TYPE_AUDIO or
|
||||
// MEDIA_TYPE_VIDEO.
|
||||
// Errors:
|
||||
// - INVALID_PARAMETER: |media_type| is not MEDIA_TYPE_AUDIO or
|
||||
// - INVALID_PARAMETER: `media_type` is not MEDIA_TYPE_AUDIO or
|
||||
// MEDIA_TYPE_VIDEO.
|
||||
virtual RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>
|
||||
AddTransceiver(cricket::MediaType media_type) = 0;
|
||||
|
@ -830,9 +830,9 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
// The standard way to do this would be through "addTransceiver", but we
|
||||
// don't support that API yet.
|
||||
//
|
||||
// |kind| must be "audio" or "video".
|
||||
// `kind` must be "audio" or "video".
|
||||
//
|
||||
// |stream_id| is used to populate the msid attribute; if empty, one will
|
||||
// `stream_id` is used to populate the msid attribute; if empty, one will
|
||||
// be generated automatically.
|
||||
//
|
||||
// This method is not supported with kUnifiedPlan semantics. Please use
|
||||
|
@ -986,7 +986,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
// returned by CreateOffer() or CreateAnswer() or else the operation should
|
||||
// fail. Our implementation however allows some amount of "SDP munging", but
|
||||
// please note that this is HIGHLY DISCOURAGED. If you do not intent to munge
|
||||
// SDP, the method below that doesn't take |desc| as an argument will create
|
||||
// SDP, the method below that doesn't take `desc` as an argument will create
|
||||
// the offer or answer for you.
|
||||
//
|
||||
// The observer is invoked as soon as the operation completes, which could be
|
||||
|
@ -1044,10 +1044,10 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
|
||||
virtual PeerConnectionInterface::RTCConfiguration GetConfiguration() = 0;
|
||||
|
||||
// Sets the PeerConnection's global configuration to |config|.
|
||||
// Sets the PeerConnection's global configuration to `config`.
|
||||
//
|
||||
// The members of |config| that may be changed are |type|, |servers|,
|
||||
// |ice_candidate_pool_size| and |prune_turn_ports| (though the candidate
|
||||
// The members of `config` that may be changed are `type`, `servers`,
|
||||
// `ice_candidate_pool_size` and `prune_turn_ports` (though the candidate
|
||||
// pool size can't be changed after the first call to SetLocalDescription).
|
||||
// Note that this means the BUNDLE and RTCP-multiplexing policies cannot be
|
||||
// changed with this method.
|
||||
|
@ -1055,14 +1055,14 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
// Any changes to STUN/TURN servers or ICE candidate policy will affect the
|
||||
// next gathering phase, and cause the next call to createOffer to generate
|
||||
// new ICE credentials, as described in JSEP. This also occurs when
|
||||
// |prune_turn_ports| changes, for the same reasoning.
|
||||
// `prune_turn_ports` changes, for the same reasoning.
|
||||
//
|
||||
// If an error occurs, returns false and populates |error| if non-null:
|
||||
// - INVALID_MODIFICATION if |config| contains a modified parameter other
|
||||
// If an error occurs, returns false and populates `error` if non-null:
|
||||
// - INVALID_MODIFICATION if `config` contains a modified parameter other
|
||||
// than one of the parameters listed above.
|
||||
// - INVALID_RANGE if |ice_candidate_pool_size| is out of range.
|
||||
// - INVALID_RANGE if `ice_candidate_pool_size` is out of range.
|
||||
// - SYNTAX_ERROR if parsing an ICE server URL failed.
|
||||
// - INVALID_PARAMETER if a TURN server is missing |username| or |password|.
|
||||
// - INVALID_PARAMETER if a TURN server is missing `username` or `password`.
|
||||
// - INTERNAL_ERROR if an unexpected error occurred.
|
||||
//
|
||||
// TODO(nisse): Make this pure virtual once all Chrome subclasses of
|
||||
|
@ -1071,9 +1071,9 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
const PeerConnectionInterface::RTCConfiguration& config);
|
||||
|
||||
// Provides a remote candidate to the ICE Agent.
|
||||
// A copy of the |candidate| will be created and added to the remote
|
||||
// A copy of the `candidate` will be created and added to the remote
|
||||
// description. So the caller of this method still has the ownership of the
|
||||
// |candidate|.
|
||||
// `candidate`.
|
||||
// TODO(hbos): The spec mandates chaining this operation onto the operations
|
||||
// chain; deprecate and remove this version in favor of the callback-based
|
||||
// signature.
|
||||
|
@ -1096,13 +1096,13 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
// this PeerConnection. Other limitations might affect these limits and
|
||||
// are respected (for example "b=AS" in SDP).
|
||||
//
|
||||
// Setting |current_bitrate_bps| will reset the current bitrate estimate
|
||||
// Setting `current_bitrate_bps` will reset the current bitrate estimate
|
||||
// to the provided value.
|
||||
virtual RTCError SetBitrate(const BitrateSettings& bitrate) = 0;
|
||||
|
||||
// Enable/disable playout of received audio streams. Enabled by default. Note
|
||||
// that even if playout is enabled, streams will only be played out if the
|
||||
// appropriate SDP is also applied. Setting |playout| to false will stop
|
||||
// appropriate SDP is also applied. Setting `playout` to false will stop
|
||||
// playout of the underlying audio device but starts a task which will poll
|
||||
// for audio data every 10ms to ensure that audio processing happens and the
|
||||
// audio statistics are updated.
|
||||
|
@ -1157,13 +1157,13 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface {
|
|||
virtual void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) {}
|
||||
|
||||
// Start RtcEventLog using an existing output-sink. Takes ownership of
|
||||
// |output| and passes it on to Call, which will take the ownership. If the
|
||||
// `output` and passes it on to Call, which will take the ownership. If the
|
||||
// operation fails the output will be closed and deallocated. The event log
|
||||
// will send serialized events to the output object every |output_period_ms|.
|
||||
// will send serialized events to the output object every `output_period_ms`.
|
||||
// Applications using the event log should generally make their own trade-off
|
||||
// regarding the output period. A long period is generally more efficient,
|
||||
// with potential drawbacks being more bursty thread usage, and more events
|
||||
// lost in case the application crashes. If the |output_period_ms| argument is
|
||||
// lost in case the application crashes. If the `output_period_ms` argument is
|
||||
// omitted, webrtc selects a default deemed to be workable in most cases.
|
||||
virtual bool StartRtcEventLog(std::unique_ptr<RtcEventLogOutput> output,
|
||||
int64_t output_period_ms) = 0;
|
||||
|
@ -1222,7 +1222,7 @@ class PeerConnectionObserver {
|
|||
// Used to fire spec-compliant onnegotiationneeded events, which should only
|
||||
// fire when the Operations Chain is empty. The observer is responsible for
|
||||
// queuing a task (e.g. Chromium: jump to main thread) to maybe fire the
|
||||
// event. The event identified using |event_id| must only fire if
|
||||
// event. The event identified using `event_id` must only fire if
|
||||
// PeerConnection::ShouldFireNegotiationNeededEvent() returns true since it is
|
||||
// possible for the event to become invalidated by operations subsequently
|
||||
// chained.
|
||||
|
@ -1256,7 +1256,7 @@ class PeerConnectionObserver {
|
|||
|
||||
// Gathering of an ICE candidate failed.
|
||||
// See https://w3c.github.io/webrtc-pc/#event-icecandidateerror
|
||||
// |host_candidate| is a stringified socket address.
|
||||
// `host_candidate` is a stringified socket address.
|
||||
virtual void OnIceCandidateError(const std::string& host_candidate,
|
||||
const std::string& url,
|
||||
int error_code,
|
||||
|
@ -1393,7 +1393,7 @@ struct RTC_EXPORT PeerConnectionFactoryDependencies final {
|
|||
network_state_predictor_factory;
|
||||
std::unique_ptr<NetworkControllerFactoryInterface> network_controller_factory;
|
||||
// This will only be used if CreatePeerConnection is called without a
|
||||
// |port_allocator|, causing the default allocator and network manager to be
|
||||
// `port_allocator`, causing the default allocator and network manager to be
|
||||
// used.
|
||||
std::unique_ptr<rtc::NetworkMonitorFactory> network_monitor_factory;
|
||||
std::unique_ptr<NetEqFactory> neteq_factory;
|
||||
|
@ -1467,12 +1467,12 @@ class RTC_EXPORT PeerConnectionFactoryInterface
|
|||
const PeerConnectionInterface::RTCConfiguration& configuration,
|
||||
PeerConnectionDependencies dependencies);
|
||||
|
||||
// Deprecated; |allocator| and |cert_generator| may be null, in which case
|
||||
// Deprecated; `allocator` and `cert_generator` may be null, in which case
|
||||
// default implementations will be used.
|
||||
//
|
||||
// |observer| must not be null.
|
||||
// `observer` must not be null.
|
||||
//
|
||||
// Note that this method does not take ownership of |observer|; it's the
|
||||
// Note that this method does not take ownership of `observer`; it's the
|
||||
// responsibility of the caller to delete it. It can be safely deleted after
|
||||
// Close has been called on the returned PeerConnection, which ensures no
|
||||
// more observer callbacks will be invoked.
|
||||
|
@ -1483,13 +1483,13 @@ class RTC_EXPORT PeerConnectionFactoryInterface
|
|||
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator,
|
||||
PeerConnectionObserver* observer);
|
||||
|
||||
// Returns the capabilities of an RTP sender of type |kind|.
|
||||
// Returns the capabilities of an RTP sender of type `kind`.
|
||||
// If for some reason you pass in MEDIA_TYPE_DATA, returns an empty structure.
|
||||
// TODO(orphis): Make pure virtual when all subclasses implement it.
|
||||
virtual RtpCapabilities GetRtpSenderCapabilities(
|
||||
cricket::MediaType kind) const;
|
||||
|
||||
// Returns the capabilities of an RTP receiver of type |kind|.
|
||||
// Returns the capabilities of an RTP receiver of type `kind`.
|
||||
// If for some reason you pass in MEDIA_TYPE_DATA, returns an empty structure.
|
||||
// TODO(orphis): Make pure virtual when all subclasses implement it.
|
||||
virtual RtpCapabilities GetRtpReceiverCapabilities(
|
||||
|
@ -1499,22 +1499,22 @@ class RTC_EXPORT PeerConnectionFactoryInterface
|
|||
const std::string& stream_id) = 0;
|
||||
|
||||
// Creates an AudioSourceInterface.
|
||||
// |options| decides audio processing settings.
|
||||
// `options` decides audio processing settings.
|
||||
virtual rtc::scoped_refptr<AudioSourceInterface> CreateAudioSource(
|
||||
const cricket::AudioOptions& options) = 0;
|
||||
|
||||
// Creates a new local VideoTrack. The same |source| can be used in several
|
||||
// Creates a new local VideoTrack. The same `source` can be used in several
|
||||
// tracks.
|
||||
virtual rtc::scoped_refptr<VideoTrackInterface> CreateVideoTrack(
|
||||
const std::string& label,
|
||||
VideoTrackSourceInterface* source) = 0;
|
||||
|
||||
// Creates an new AudioTrack. At the moment |source| can be null.
|
||||
// Creates an new AudioTrack. At the moment `source` can be null.
|
||||
virtual rtc::scoped_refptr<AudioTrackInterface> CreateAudioTrack(
|
||||
const std::string& label,
|
||||
AudioSourceInterface* source) = 0;
|
||||
|
||||
// Starts AEC dump using existing file. Takes ownership of |file| and passes
|
||||
// Starts AEC dump using existing file. Takes ownership of `file` and passes
|
||||
// it on to VoiceEngine (via other objects) immediately, which will take
|
||||
// the ownerhip. If the operation fails, the file will be closed.
|
||||
// A maximum file size in bytes can be specified. When the file size limit is
|
||||
|
@@ -1549,8 +1549,8 @@ class RTC_EXPORT PeerConnectionFactoryInterface
// video-specific interfaces, and omit the corresponding modules from its
// build.
//
// If |network_thread| or |worker_thread| are null, the PeerConnectionFactory
// will create the necessary thread internally. If |signaling_thread| is null,
// If `network_thread` or `worker_thread` are null, the PeerConnectionFactory
// will create the necessary thread internally. If `signaling_thread` is null,
// the PeerConnectionFactory will use the thread on which this method is called
// as the signaling thread, wrapping it in an rtc::Thread object if needed.
RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
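
As a rough illustration of the factory methods documented in the hunks above, local sources and tracks are typically created along these lines. This is a sketch only: `factory` (a PeerConnectionFactoryInterface) and `video_source` (a VideoTrackSourceInterface) are assumed to come from elsewhere, and only the CreateAudioSource, CreateVideoTrack and CreateAudioTrack signatures shown here are used.

// Sketch only; `factory` and `video_source` are assumed to exist already.
rtc::scoped_refptr<webrtc::AudioSourceInterface> audio_source =
    factory->CreateAudioSource(cricket::AudioOptions());
rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track =
    factory->CreateAudioTrack("audio_label", audio_source.get());
rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track =
    factory->CreateVideoTrack("video_label", video_source.get());
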
@@ -176,7 +176,7 @@ inline std::ostream& operator<<(  // no-presubmit-check TODO(webrtc:8982)
#endif  // WEBRTC_UNIT_TEST

// Helper macro that can be used by implementations to create an error with a
// message and log it. |message| should be a string literal or movable
// message and log it. `message` should be a string literal or movable
// std::string.
#define LOG_AND_RETURN_ERROR_EX(type, message, severity) \
  { \
@@ -42,7 +42,7 @@ class RtcEventLog {
// which it would be permissible to read and/or modify it.
virtual void StopLogging() = 0;

// Stops logging to file and calls |callback| when the file has been closed.
// Stops logging to file and calls `callback` when the file has been closed.
// Note that it is not safe to call any other members, including the
// destructor, until the callback has been called.
// TODO(srte): Remove default implementation when it's safe to do so.
@@ -29,7 +29,7 @@ class RtcEventLogOutput {
// Write encoded events to an output. Returns true if the output was
// successfully written in its entirety. Otherwise, no guarantee is given
// about how much data was written, if any. The output sink becomes inactive
// after the first time |false| is returned. Write() may not be called on
// after the first time `false` is returned. Write() may not be called on
// an inactive output sink.
virtual bool Write(const std::string& output) = 0;
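
To make the Write() contract above concrete, a minimal sink could look like the sketch below. It is hypothetical: the class name and the buffering strategy are invented, and the IsActive() override assumes the interface's other pure-virtual accessor.

class InMemoryEventLogOutput : public webrtc::RtcEventLogOutput {
 public:
  // Assumed to exist on the interface alongside Write().
  bool IsActive() const override { return active_; }

  bool Write(const std::string& output) override {
    if (!active_)
      return false;     // Defensive; the contract says Write() stops once inactive.
    buffer_ += output;  // Either the whole chunk is kept or none of it.
    return true;
  }

 private:
  bool active_ = true;
  std::string buffer_;
};
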
@ -23,9 +23,9 @@
|
|||
namespace webrtc {
|
||||
|
||||
//
|
||||
// Structure to hold information about a received |RtpPacket|. It is primarily
|
||||
// Structure to hold information about a received `RtpPacket`. It is primarily
|
||||
// used to carry per-packet information from when a packet is received until
|
||||
// the information is passed to |SourceTracker|.
|
||||
// the information is passed to `SourceTracker`.
|
||||
//
|
||||
class RTC_EXPORT RtpPacketInfo {
|
||||
public:
|
||||
|
@ -102,8 +102,8 @@ class RTC_EXPORT RtpPacketInfo {
|
|||
|
||||
// Fields from the Absolute Capture Time header extension:
|
||||
// http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time
|
||||
// To not be confused with |local_capture_clock_offset_|, the
|
||||
// |estimated_capture_clock_offset| in |absolute_capture_time_| should
|
||||
// To not be confused with `local_capture_clock_offset_`, the
|
||||
// `estimated_capture_clock_offset` in `absolute_capture_time_` should
|
||||
// represent the clock offset between a remote sender and the capturer, and
|
||||
// thus equals to the corresponding values in the received RTP packets,
|
||||
// subjected to possible interpolations.
|
||||
|
|
|
@ -126,7 +126,7 @@ struct RTC_EXPORT RtpCodecCapability {
|
|||
RtpCodecCapability();
|
||||
~RtpCodecCapability();
|
||||
|
||||
// Build MIME "type/subtype" string from |name| and |kind|.
|
||||
// Build MIME "type/subtype" string from `name` and `kind`.
|
||||
std::string mime_type() const { return MediaTypeToString(kind) + "/" + name; }
|
||||
|
||||
// Used to identify the codec. Equivalent to MIME subtype.
|
||||
|
@ -537,7 +537,7 @@ struct RTC_EXPORT RtpCodecParameters {
|
|||
RtpCodecParameters(const RtpCodecParameters&);
|
||||
~RtpCodecParameters();
|
||||
|
||||
// Build MIME "type/subtype" string from |name| and |kind|.
|
||||
// Build MIME "type/subtype" string from `name` and `kind`.
|
||||
std::string mime_type() const { return MediaTypeToString(kind) + "/" + name; }
|
||||
|
||||
// Used to identify the codec. Equivalent to MIME subtype.
|
||||
|
@ -562,7 +562,7 @@ struct RTC_EXPORT RtpCodecParameters {
|
|||
absl::optional<int> num_channels;
|
||||
|
||||
// The maximum packetization time to be used by an RtpSender.
|
||||
// If |ptime| is also set, this will be ignored.
|
||||
// If `ptime` is also set, this will be ignored.
|
||||
// TODO(deadbeef): Not implemented.
|
||||
absl::optional<int> max_ptime;
|
||||
|
||||
|
@ -607,7 +607,7 @@ struct RTC_EXPORT RtpCapabilities {
|
|||
|
||||
// Supported Forward Error Correction (FEC) mechanisms. Note that the RED,
|
||||
// ulpfec and flexfec codecs used by these mechanisms will still appear in
|
||||
// |codecs|.
|
||||
// `codecs`.
|
||||
std::vector<FecMechanism> fec;
|
||||
|
||||
bool operator==(const RtpCapabilities& o) const {
|
||||
|
|
|
@ -54,7 +54,7 @@ class RTC_EXPORT RtpReceiverInterface : public rtc::RefCountInterface {
|
|||
// TODO(https://bugs.webrtc.org/907849) remove default implementation
|
||||
virtual rtc::scoped_refptr<DtlsTransportInterface> dtls_transport() const;
|
||||
|
||||
// The list of streams that |track| is associated with. This is the same as
|
||||
// The list of streams that `track` is associated with. This is the same as
|
||||
// the [[AssociatedRemoteMediaStreams]] internal slot in the spec.
|
||||
// https://w3c.github.io/webrtc-pc/#dfn-associatedremotemediastreams
|
||||
// TODO(hbos): Make pure virtual as soon as Chromium's mock implements this.
|
||||
|
@ -84,8 +84,8 @@ class RTC_EXPORT RtpReceiverInterface : public rtc::RefCountInterface {
|
|||
virtual void SetObserver(RtpReceiverObserverInterface* observer) = 0;
|
||||
|
||||
// Sets the jitter buffer minimum delay until media playout. Actual observed
|
||||
// delay may differ depending on the congestion control. |delay_seconds| is a
|
||||
// positive value including 0.0 measured in seconds. |nullopt| means default
|
||||
// delay may differ depending on the congestion control. `delay_seconds` is a
|
||||
// positive value including 0.0 measured in seconds. `nullopt` means default
|
||||
// value must be used.
|
||||
virtual void SetJitterBufferMinimumDelay(
|
||||
absl::optional<double> delay_seconds) = 0;
|
||||
|
|
|
@ -24,13 +24,13 @@
|
|||
// void some_function() {
|
||||
// scoped_refptr<MyFoo> foo = new MyFoo();
|
||||
// foo->Method(param);
|
||||
// // |foo| is released when this function returns
|
||||
// // `foo` is released when this function returns
|
||||
// }
|
||||
//
|
||||
// void some_other_function() {
|
||||
// scoped_refptr<MyFoo> foo = new MyFoo();
|
||||
// ...
|
||||
// foo = nullptr; // explicitly releases |foo|
|
||||
// foo = nullptr; // explicitly releases `foo`
|
||||
// ...
|
||||
// if (foo)
|
||||
// foo->Method(param);
|
||||
|
@ -45,10 +45,10 @@
|
|||
// scoped_refptr<MyFoo> b;
|
||||
//
|
||||
// b.swap(a);
|
||||
// // now, |b| references the MyFoo object, and |a| references null.
|
||||
// // now, `b` references the MyFoo object, and `a` references null.
|
||||
// }
|
||||
//
|
||||
// To make both |a| and |b| in the above example reference the same MyFoo
|
||||
// To make both `a` and `b` in the above example reference the same MyFoo
|
||||
// object, simply use the assignment operator:
|
||||
//
|
||||
// {
|
||||
|
@ -56,7 +56,7 @@
|
|||
// scoped_refptr<MyFoo> b;
|
||||
//
|
||||
// b = a;
|
||||
// // now, |a| and |b| each own a reference to the same MyFoo object.
|
||||
// // now, `a` and `b` each own a reference to the same MyFoo object.
|
||||
// }
|
||||
//
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ class RTCStatsMemberInterface;
|
|||
// static const char kType[];
|
||||
// It is used as a unique class identifier and a string representation of the
|
||||
// class type, see https://w3c.github.io/webrtc-stats/#rtcstatstype-str*.
|
||||
// Use the |WEBRTC_RTCSTATS_IMPL| macro when implementing subclasses, see macro
|
||||
// Use the `WEBRTC_RTCSTATS_IMPL` macro when implementing subclasses, see macro
|
||||
// for details.
|
||||
//
|
||||
// Derived classes list their dictionary members, RTCStatsMember<T>, as public
|
||||
|
@ -47,7 +47,7 @@ class RTCStatsMemberInterface;
|
|||
// foo.baz->push_back("hello world");
|
||||
// uint32_t x = *foo.bar;
|
||||
//
|
||||
// Pointers to all the members are available with |Members|, allowing iteration:
|
||||
// Pointers to all the members are available with `Members`, allowing iteration:
|
||||
//
|
||||
// for (const RTCStatsMemberInterface* member : foo.Members()) {
|
||||
// printf("%s = %s\n", member->name(), member->ValueToString().c_str());
|
||||
|
@ -65,11 +65,11 @@ class RTC_EXPORT RTCStats {
|
|||
const std::string& id() const { return id_; }
|
||||
// Time relative to the UNIX epoch (Jan 1, 1970, UTC), in microseconds.
|
||||
int64_t timestamp_us() const { return timestamp_us_; }
|
||||
// Returns the static member variable |kType| of the implementing class.
|
||||
// Returns the static member variable `kType` of the implementing class.
|
||||
virtual const char* type() const = 0;
|
||||
// Returns a vector of pointers to all the |RTCStatsMemberInterface| members
|
||||
// Returns a vector of pointers to all the `RTCStatsMemberInterface` members
|
||||
// of this class. This allows for iteration of members. For a given class,
|
||||
// |Members| always returns the same members in the same order.
|
||||
// `Members` always returns the same members in the same order.
|
||||
std::vector<const RTCStatsMemberInterface*> Members() const;
|
||||
// Checks if the two stats objects are of the same type and have the same
|
||||
// member values. Timestamps are not compared. These operators are exposed for
|
||||
|
@ -81,8 +81,8 @@ class RTC_EXPORT RTCStats {
|
|||
// object, listing all of its members (names and values).
|
||||
std::string ToJson() const;
|
||||
|
||||
// Downcasts the stats object to an |RTCStats| subclass |T|. DCHECKs that the
|
||||
// object is of type |T|.
|
||||
// Downcasts the stats object to an `RTCStats` subclass `T`. DCHECKs that the
|
||||
// object is of type `T`.
|
||||
template <typename T>
|
||||
const T& cast_to() const {
|
||||
RTC_DCHECK_EQ(type(), T::kType);
|
||||
|
@ -90,8 +90,8 @@ class RTC_EXPORT RTCStats {
|
|||
}
|
||||
|
||||
protected:
|
||||
// Gets a vector of all members of this |RTCStats| object, including members
|
||||
// derived from parent classes. |additional_capacity| is how many more members
|
||||
// Gets a vector of all members of this `RTCStats` object, including members
|
||||
// derived from parent classes. `additional_capacity` is how many more members
|
||||
// shall be reserved in the vector (so that subclasses can allocate a vector
|
||||
// with room for both parent and child members without it having to resize).
|
||||
virtual std::vector<const RTCStatsMemberInterface*>
|
||||
|
@@ -101,21 +101,21 @@ class RTC_EXPORT RTCStats {
int64_t timestamp_us_;
};

// All |RTCStats| classes should use these macros.
// |WEBRTC_RTCSTATS_DECL| is placed in a public section of the class definition.
// |WEBRTC_RTCSTATS_IMPL| is placed outside the class definition (in a .cc).
// All `RTCStats` classes should use these macros.
// `WEBRTC_RTCSTATS_DECL` is placed in a public section of the class definition.
// `WEBRTC_RTCSTATS_IMPL` is placed outside the class definition (in a .cc).
//
// These macros declare (in _DECL) and define (in _IMPL) the static |kType| and
// the method overrides required by subclasses of |RTCStats|: |copy|, |type| and
// |MembersOfThisObjectAndAncestors|. The |...| argument is a list of addresses
// These macros declare (in _DECL) and define (in _IMPL) the static `kType` and
// the method overrides required by subclasses of `RTCStats`: `copy`, `type` and
// `MembersOfThisObjectAndAncestors`. The |...| argument is a list of addresses
// to each member defined in the implementing class. The list must have at least
// one member.
//
// (Since class names need to be known to implement these methods this cannot be
// part of the base |RTCStats|. While these methods could be implemented using
// part of the base `RTCStats`. While these methods could be implemented using
// templates, that would only work for immediate subclasses. Subclasses of
// subclasses also have to override these methods, resulting in boilerplate
// code. Using a macro avoids this and works for any |RTCStats| class, including
// code. Using a macro avoids this and works for any `RTCStats` class, including
// grandchildren.)
//
// Sample usage:
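
The sample usage referenced above falls outside this hunk's context. For orientation, a subclass wired up with these macros looks roughly like the sketch below; `RTCFooStats` and its members are made-up names, and the authoritative example lives in the header itself.

// In a header (sketch):
class RTCFooStats final : public RTCStats {
 public:
  WEBRTC_RTCSTATS_DECL();
  RTCFooStats(const std::string& id, int64_t timestamp_us);

  RTCStatsMember<int32_t> foo;
  RTCStatsMember<std::string> bar;
};

// In the corresponding .cc (sketch):
WEBRTC_RTCSTATS_IMPL(RTCFooStats, RTCStats, "foo-stats",
    &foo,
    &bar)

RTCFooStats::RTCFooStats(const std::string& id, int64_t timestamp_us)
    : RTCStats(id, timestamp_us), foo("foo"), bar("bar") {}
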
@ -215,10 +215,10 @@ enum class NonStandardGroupId {
|
|||
kRtcStatsRelativePacketArrivalDelay,
|
||||
};
|
||||
|
||||
// Interface for |RTCStats| members, which have a name and a value of a type
|
||||
// defined in a subclass. Only the types listed in |Type| are supported, these
|
||||
// Interface for `RTCStats` members, which have a name and a value of a type
|
||||
// defined in a subclass. Only the types listed in `Type` are supported, these
|
||||
// are implemented by |RTCStatsMember<T>|. The value of a member may be
|
||||
// undefined, the value can only be read if |is_defined|.
|
||||
// undefined, the value can only be read if `is_defined`.
|
||||
class RTCStatsMemberInterface {
|
||||
public:
|
||||
// Member value types.
|
||||
|
@ -284,7 +284,7 @@ class RTCStatsMemberInterface {
|
|||
bool is_defined_;
|
||||
};
|
||||
|
||||
// Template implementation of |RTCStatsMemberInterface|.
|
||||
// Template implementation of `RTCStatsMemberInterface`.
|
||||
// The supported types are the ones described by
|
||||
// |RTCStatsMemberInterface::Type|.
|
||||
template <typename T>
|
||||
|
|
|
@ -30,7 +30,7 @@
|
|||
namespace webrtc {
|
||||
|
||||
// A collection of stats.
|
||||
// This is accessible as a map from |RTCStats::id| to |RTCStats|.
|
||||
// This is accessible as a map from `RTCStats::id` to `RTCStats`.
|
||||
class RTC_EXPORT RTCStatsReport final
|
||||
: public rtc::RefCountedNonVirtual<RTCStatsReport> {
|
||||
public:
|
||||
|
@ -71,8 +71,8 @@ class RTC_EXPORT RTCStatsReport final
|
|||
const RTCStats* Get(const std::string& id) const;
|
||||
size_t size() const { return stats_.size(); }
|
||||
|
||||
// Gets the stat object of type |T| by ID, where |T| is any class descending
|
||||
// from |RTCStats|.
|
||||
// Gets the stat object of type `T` by ID, where `T` is any class descending
|
||||
// from `RTCStats`.
|
||||
// Returns null if there is no stats object for the given ID or it is the
|
||||
// wrong type.
|
||||
template <typename T>
|
||||
|
@@ -85,17 +85,17 @@ class RTC_EXPORT RTCStatsReport final
}

// Removes the stats object from the report, returning ownership of it or null
// if there is no object with |id|.
// if there is no object with `id`.
std::unique_ptr<const RTCStats> Take(const std::string& id);
// Takes ownership of all the stats in |other|, leaving it empty.
// Takes ownership of all the stats in `other`, leaving it empty.
void TakeMembersFrom(rtc::scoped_refptr<RTCStatsReport> other);

// Stats iterators. Stats are ordered lexicographically on |RTCStats::id|.
ConstIterator begin() const;
ConstIterator end() const;

// Gets the subset of stats that are of type |T|, where |T| is any class
// descending from |RTCStats|.
// Gets the subset of stats that are of type `T`, where `T` is any class
// descending from `RTCStats`.
template <typename T>
std::vector<const T*> GetStatsOfType() const {
  std::vector<const T*> stats_of_type;
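
A brief, hypothetical usage sketch of the report interface above; it assumes `report` is an rtc::scoped_refptr<const RTCStatsReport> obtained from a stats callback and that the usual logging macros are available.

for (const webrtc::RTCStats& stats : *report) {
  // Iteration is ordered lexicographically on RTCStats::id.
  RTC_LOG(LS_INFO) << stats.ToJson();
}
// Filter by a concrete stats subclass.
std::vector<const webrtc::RTCIceCandidatePairStats*> pairs =
    report->GetStatsOfType<webrtc::RTCIceCandidatePairStats>();
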
@ -197,7 +197,7 @@ class RTC_EXPORT RTCIceCandidatePairStats final : public RTCStats {
|
|||
};
|
||||
|
||||
// https://w3c.github.io/webrtc-stats/#icecandidate-dict*
|
||||
// TODO(hbos): |RTCStatsCollector| only collects candidates that are part of
|
||||
// TODO(hbos): `RTCStatsCollector` only collects candidates that are part of
|
||||
// ice candidate pairs, but there could be candidates not paired with anything.
|
||||
// crbug.com/632723
|
||||
// TODO(qingsi): Add the stats of STUN binding requests (keepalives) and collect
|
||||
|
@ -221,7 +221,7 @@ class RTC_EXPORT RTCIceCandidateStats : public RTCStats {
|
|||
// TODO(hbos): Support enum types? "RTCStatsMember<RTCIceCandidateType>"?
|
||||
RTCStatsMember<std::string> candidate_type;
|
||||
RTCStatsMember<int32_t> priority;
|
||||
// TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/632723
|
||||
// TODO(hbos): Not collected by `RTCStatsCollector`. crbug.com/632723
|
||||
RTCStatsMember<std::string> url;
|
||||
|
||||
protected:
|
||||
|
@ -232,8 +232,8 @@ class RTC_EXPORT RTCIceCandidateStats : public RTCStats {
|
|||
};
|
||||
|
||||
// In the spec both local and remote varieties are of type RTCIceCandidateStats.
|
||||
// But here we define them as subclasses of |RTCIceCandidateStats| because the
// |kType| needs to be different ("RTCStatsType type") in the local/remote case.
// But here we define them as subclasses of `RTCIceCandidateStats` because the
// `kType` needs to be different ("RTCStatsType type") in the local/remote case.
|
||||
// https://w3c.github.io/webrtc-stats/#rtcstatstype-str*
|
||||
// This forces us to have to override copy() and type().
|
||||
class RTC_EXPORT RTCLocalIceCandidateStats final : public RTCIceCandidateStats {
|
||||
|
@ -289,28 +289,28 @@ class RTC_EXPORT RTCMediaStreamTrackStats final : public RTCStats {
|
|||
RTCStatsMember<std::string> media_source_id;
|
||||
RTCStatsMember<bool> remote_source;
|
||||
RTCStatsMember<bool> ended;
|
||||
// TODO(hbos): |RTCStatsCollector| does not return stats for detached tracks.
|
||||
// TODO(hbos): `RTCStatsCollector` does not return stats for detached tracks.
|
||||
// crbug.com/659137
|
||||
RTCStatsMember<bool> detached;
|
||||
// See |RTCMediaStreamTrackKind| for valid values.
|
||||
// See `RTCMediaStreamTrackKind` for valid values.
|
||||
RTCStatsMember<std::string> kind;
|
||||
RTCStatsMember<double> jitter_buffer_delay;
|
||||
RTCStatsMember<uint64_t> jitter_buffer_emitted_count;
|
||||
// Video-only members
|
||||
RTCStatsMember<uint32_t> frame_width;
|
||||
RTCStatsMember<uint32_t> frame_height;
|
||||
// TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/659137
|
||||
// TODO(hbos): Not collected by `RTCStatsCollector`. crbug.com/659137
|
||||
RTCStatsMember<double> frames_per_second;
|
||||
RTCStatsMember<uint32_t> frames_sent;
|
||||
RTCStatsMember<uint32_t> huge_frames_sent;
|
||||
RTCStatsMember<uint32_t> frames_received;
|
||||
RTCStatsMember<uint32_t> frames_decoded;
|
||||
RTCStatsMember<uint32_t> frames_dropped;
|
||||
// TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/659137
|
||||
// TODO(hbos): Not collected by `RTCStatsCollector`. crbug.com/659137
|
||||
RTCStatsMember<uint32_t> frames_corrupted;
|
||||
// TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/659137
|
||||
// TODO(hbos): Not collected by `RTCStatsCollector`. crbug.com/659137
|
||||
RTCStatsMember<uint32_t> partial_frames_lost;
|
||||
// TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/659137
|
||||
// TODO(hbos): Not collected by `RTCStatsCollector`. crbug.com/659137
|
||||
RTCStatsMember<uint32_t> full_frames_lost;
|
||||
// Audio-only members
|
||||
RTCStatsMember<double> audio_level; // Receive-only
|
||||
|
|
|
@ -835,7 +835,7 @@ StatsReport* StatsCollection::ReplaceOrAddNew(const StatsReport::Id& id) {
|
|||
return InsertNew(id);
|
||||
}
|
||||
|
||||
// Looks for a report with the given |id|. If one is not found, null
|
||||
// Looks for a report with the given `id`. If one is not found, null
|
||||
// will be returned.
|
||||
StatsReport* StatsCollection::Find(const StatsReport::Id& id) {
|
||||
RTC_DCHECK(thread_checker_.IsCurrent());
|
||||
|
|
|
@ -39,58 +39,58 @@ class RTC_EXPORT StatsReport {
|
|||
|
||||
enum StatsType {
|
||||
// StatsReport types.
|
||||
// A StatsReport of |type| = "googSession" contains overall information
|
||||
// A StatsReport of `type` = "googSession" contains overall information
|
||||
// about the thing libjingle calls a session (which may contain one
|
||||
// or more RTP sessions.
|
||||
kStatsReportTypeSession,
|
||||
|
||||
// A StatsReport of |type| = "googTransport" contains information
|
||||
// A StatsReport of `type` = "googTransport" contains information
|
||||
// about a libjingle "transport".
|
||||
kStatsReportTypeTransport,
|
||||
|
||||
// A StatsReport of |type| = "googComponent" contains information
|
||||
// A StatsReport of `type` = "googComponent" contains information
|
||||
// about a libjingle "channel" (typically, RTP or RTCP for a transport).
|
||||
// This is intended to be the same thing as an ICE "Component".
|
||||
kStatsReportTypeComponent,
|
||||
|
||||
// A StatsReport of |type| = "googCandidatePair" contains information
|
||||
// A StatsReport of `type` = "googCandidatePair" contains information
|
||||
// about a libjingle "connection" - a single source/destination port pair.
|
||||
// This is intended to be the same thing as an ICE "candidate pair".
|
||||
kStatsReportTypeCandidatePair,
|
||||
|
||||
// A StatsReport of |type| = "VideoBWE" is statistics for video Bandwidth
|
||||
// Estimation, which is global per-session. The |id| field is "bweforvideo"
|
||||
// A StatsReport of `type` = "VideoBWE" is statistics for video Bandwidth
|
||||
// Estimation, which is global per-session. The `id` field is "bweforvideo"
|
||||
// (will probably change in the future).
|
||||
kStatsReportTypeBwe,
|
||||
|
||||
// A StatsReport of |type| = "ssrc" is statistics for a specific rtp stream.
|
||||
// The |id| field is the SSRC in decimal form of the rtp stream.
|
||||
// A StatsReport of `type` = "ssrc" is statistics for a specific rtp stream.
|
||||
// The `id` field is the SSRC in decimal form of the rtp stream.
|
||||
kStatsReportTypeSsrc,
|
||||
|
||||
// A StatsReport of |type| = "remoteSsrc" is statistics for a specific
|
||||
// A StatsReport of `type` = "remoteSsrc" is statistics for a specific
|
||||
// rtp stream, generated by the remote end of the connection.
|
||||
kStatsReportTypeRemoteSsrc,
|
||||
|
||||
// A StatsReport of |type| = "googTrack" is statistics for a specific media
|
||||
// track. The |id| field is the track id.
|
||||
// A StatsReport of `type` = "googTrack" is statistics for a specific media
|
||||
// track. The `id` field is the track id.
|
||||
kStatsReportTypeTrack,
|
||||
|
||||
// A StatsReport of |type| = "localcandidate" or "remotecandidate" is
|
||||
// A StatsReport of `type` = "localcandidate" or "remotecandidate" is
|
||||
// attributes on a specific ICE Candidate. It links to its connection pair
|
||||
// by candidate id. The string value is taken from
|
||||
// http://w3c.github.io/webrtc-stats/#rtcstatstype-enum*.
|
||||
kStatsReportTypeIceLocalCandidate,
|
||||
kStatsReportTypeIceRemoteCandidate,
|
||||
|
||||
// A StatsReport of |type| = "googCertificate" contains an SSL certificate
|
||||
// transmitted by one of the endpoints of this connection. The |id| is
|
||||
// A StatsReport of `type` = "googCertificate" contains an SSL certificate
|
||||
// transmitted by one of the endpoints of this connection. The `id` is
|
||||
// controlled by the fingerprint, and is used to identify the certificate in
|
||||
// the Channel stats (as "googLocalCertificateId" or
|
||||
// "googRemoteCertificateId") and in any child certificates (as
|
||||
// "googIssuerId").
|
||||
kStatsReportTypeCertificate,
|
||||
|
||||
// A StatsReport of |type| = "datachannel" with statistics for a
|
||||
// A StatsReport of `type` = "datachannel" with statistics for a
|
||||
// particular DataChannel.
|
||||
kStatsReportTypeDataChannel,
|
||||
};
|
||||
|
@ -331,7 +331,7 @@ class RTC_EXPORT StatsReport {
|
|||
bool bool_val() const;
|
||||
const Id& id_val() const;
|
||||
|
||||
// Returns the string representation of |name|.
|
||||
// Returns the string representation of `name`.
|
||||
const char* display_name() const;
|
||||
|
||||
// Converts the native value to a string representation of the value.
|
||||
|
@ -339,7 +339,7 @@ class RTC_EXPORT StatsReport {
|
|||
|
||||
Type type() const { return type_; }
|
||||
|
||||
// TODO(tommi): Move |name| and |display_name| out of the Value struct.
|
||||
// TODO(tommi): Move `name` and `display_name` out of the Value struct.
|
||||
const StatsValueName name;
|
||||
|
||||
private:
|
||||
|
@ -364,7 +364,7 @@ class RTC_EXPORT StatsReport {
|
|||
typedef rtc::scoped_refptr<Value> ValuePtr;
|
||||
typedef std::map<StatsValueName, ValuePtr> Values;
|
||||
|
||||
// Ownership of |id| is passed to |this|.
|
||||
// Ownership of `id` is passed to `this`.
|
||||
explicit StatsReport(const Id& id);
|
||||
~StatsReport();
|
||||
|
||||
|
@ -434,13 +434,13 @@ class StatsCollection {
|
|||
const_iterator end() const;
|
||||
size_t size() const;
|
||||
|
||||
// Creates a new report object with |id| that does not already
|
||||
// Creates a new report object with `id` that does not already
|
||||
// exist in the list of reports.
|
||||
StatsReport* InsertNew(const StatsReport::Id& id);
|
||||
StatsReport* FindOrAddNew(const StatsReport::Id& id);
|
||||
StatsReport* ReplaceOrAddNew(const StatsReport::Id& id);
|
||||
|
||||
// Looks for a report with the given |id|. If one is not found, null
|
||||
// Looks for a report with the given `id`. If one is not found, null
|
||||
// will be returned.
|
||||
StatsReport* Find(const StatsReport::Id& id);
|
||||
|
||||
|
|
|
@@ -20,9 +20,9 @@ class QueuedTask {
virtual ~QueuedTask() = default;

// Main routine that will run when the task is executed on the desired queue.
// The task should return |true| to indicate that it should be deleted or
// |false| to indicate that the queue should consider ownership of the task
// having been transferred. Returning |false| can be useful if a task has
// The task should return `true` to indicate that it should be deleted or
// `false` to indicate that the queue should consider ownership of the task
// having been transferred. Returning `false` can be useful if a task has
// re-posted itself to a different queue or is otherwise being re-used.
virtual bool Run() = 0;
};
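
As a concrete reading of the ownership rule above, a task that stays owned by the caller simply returns false from Run(). The sketch below is hypothetical and relies only on the QueuedTask interface shown here.

#include <functional>
#include <utility>

class ReusableTask : public webrtc::QueuedTask {
 public:
  explicit ReusableTask(std::function<void()> work) : work_(std::move(work)) {}

  bool Run() override {
    work_();
    // Returning false tells the queue not to delete this task; the owner may
    // post the same instance again later.
    return false;
  }

 private:
  std::function<void()> work_;
};
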
@ -37,7 +37,7 @@ TEST_P(TaskQueueTest, PostAndCheckCurrent) {
|
|||
rtc::Event event;
|
||||
auto queue = CreateTaskQueue(factory, "PostAndCheckCurrent");
|
||||
|
||||
// We're not running a task, so |queue| shouldn't be current.
|
||||
// We're not running a task, so `queue` shouldn't be current.
|
||||
// Note that because rtc::Thread also supports the TQ interface and
|
||||
// TestMainImpl::Init wraps the main test thread (bugs.webrtc.org/9714), that
|
||||
// means that TaskQueueBase::Current() will still return a valid value.
|
||||
|
@ -190,7 +190,7 @@ TEST_P(TaskQueueTest, PostAndReuse) {
|
|||
}
|
||||
|
||||
TEST_P(TaskQueueTest, PostALot) {
|
||||
// Waits until DecrementCount called |count| times. Thread safe.
|
||||
// Waits until DecrementCount called `count` times. Thread safe.
|
||||
class BlockingCounter {
|
||||
public:
|
||||
explicit BlockingCounter(int initial_count) : count_(initial_count) {}
|
||||
|
|
|
@ -25,9 +25,9 @@ class AudioQualityAnalyzerInterface : public StatsObserverInterface {
|
|||
~AudioQualityAnalyzerInterface() override = default;
|
||||
|
||||
// Will be called by the framework before the test.
|
||||
// |test_case_name| is name of test case, that should be used to report all
|
||||
// `test_case_name` is name of test case, that should be used to report all
|
||||
// audio metrics.
|
||||
// |analyzer_helper| is a pointer to a class that will allow track_id to
|
||||
// `analyzer_helper` is a pointer to a class that will allow track_id to
|
||||
// stream_id matching. The caller is responsible for ensuring the
|
||||
// AnalyzerHelper outlives the instance of the AudioQualityAnalyzerInterface.
|
||||
virtual void Start(std::string test_case_name,
|
||||
|
|
|
@ -23,12 +23,12 @@ namespace test {
|
|||
// utility can be used to simulate the audioprocessing module using a recording
|
||||
// (either an AEC dump or wav files), and generate the output as a wav file.
|
||||
// Any audio_processing object specified in the input is used for the
|
||||
// simulation. The optional |audio_processing| object provides the
|
||||
// simulation. The optional `audio_processing` object provides the
|
||||
// AudioProcessing instance that is used during the simulation. Note that when
|
||||
// the audio_processing object is specified all functionality that relies on
|
||||
// using the AudioProcessingBuilder is deactivated, since the AudioProcessing
|
||||
// object is already created and the builder is not used in the simulation. It
|
||||
// is needed to pass the command line flags as |argc| and |argv|, so these can
|
||||
// is needed to pass the command line flags as `argc` and `argv`, so these can
|
||||
// be interpreted properly by the utility. To see a list of all supported
|
||||
// command line flags, run the executable with the '--help' flag.
|
||||
int AudioprocFloat(rtc::scoped_refptr<AudioProcessing> audio_processing,
|
||||
|
@ -38,10 +38,10 @@ int AudioprocFloat(rtc::scoped_refptr<AudioProcessing> audio_processing,
|
|||
// This is an interface for the audio processing simulation utility. This
|
||||
// utility can be used to simulate the audioprocessing module using a recording
|
||||
// (either an AEC dump or wav files), and generate the output as a wav file.
|
||||
// The |ap_builder| object will be used to create the AudioProcessing instance
|
||||
// that is used during the simulation. The |ap_builder| supports setting of
|
||||
// The `ap_builder` object will be used to create the AudioProcessing instance
|
||||
// that is used during the simulation. The `ap_builder` supports setting of
|
||||
// injectable components, which will be passed on to the created AudioProcessing
|
||||
// instance. It is needed to pass the command line flags as |argc| and |argv|,
|
||||
// instance. It is needed to pass the command line flags as `argc` and `argv`,
|
||||
// so these can be interpreted properly by the utility.
|
||||
// To get a fully-working audioproc_f utility, all that is needed is to write a
|
||||
// main function, create an AudioProcessingBuilder, optionally set custom
|
||||
|
@ -56,9 +56,9 @@ int AudioprocFloat(std::unique_ptr<AudioProcessingBuilder> ap_builder,
|
|||
// Interface for the audio processing simulation utility, which is similar to
|
||||
// the one above, but which adds the option of receiving the input as a string
|
||||
// and returning the output as an array. The first three arguments fulfill the
|
||||
// same purpose as above. Pass the |input_aecdump| to provide the content of an
|
||||
// same purpose as above. Pass the `input_aecdump` to provide the content of an
|
||||
// AEC dump file as a string. After the simulation is completed,
|
||||
// |processed_capture_samples| will contain the samples processed on the
// `processed_capture_samples` will contain the samples processed on the
|
||||
// capture side.
int AudioprocFloat(std::unique_ptr<AudioProcessingBuilder> ap_builder,
                   int argc,
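
The description above boils down to writing a small main() around AudioprocFloat(). A hypothetical wrapper could look like this; the include paths are assumptions and any extra builder setup is elided.

#include <memory>

#include "api/test/audioproc_float.h"                            // assumed path
#include "modules/audio_processing/include/audio_processing.h"   // assumed path

int main(int argc, char* argv[]) {
  auto builder = std::make_unique<webrtc::AudioProcessingBuilder>();
  // Optionally inject custom components into `builder` before handing it over.
  return webrtc::test::AudioprocFloat(std::move(builder), argc, argv);
}
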
@ -24,8 +24,8 @@ namespace test {
|
|||
|
||||
// Creates a frame generator that produces frames with small squares that
|
||||
// move randomly towards the lower right corner.
|
||||
// |type| has the default value FrameGeneratorInterface::OutputType::I420.
|
||||
// |num_squares| has the default value 10.
|
||||
// `type` has the default value FrameGeneratorInterface::OutputType::I420.
|
||||
// `num_squares` has the default value 10.
|
||||
std::unique_ptr<FrameGeneratorInterface> CreateSquareFrameGenerator(
|
||||
int width,
|
||||
int height,
|
||||
|
@ -66,7 +66,7 @@ CreateScrollingInputFromYuvFilesFrameGenerator(
|
|||
|
||||
// Creates a frame generator that produces randomly generated slides. It fills
|
||||
// the frames with randomly sized and colored squares.
|
||||
// |frame_repeat_count| determines how many times each slide is shown.
|
||||
// `frame_repeat_count` determines how many times each slide is shown.
|
||||
std::unique_ptr<FrameGeneratorInterface>
|
||||
CreateSlideFrameGenerator(int width, int height, int frame_repeat_count);
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ void ValidateScreenShareConfig(const VideoConfig& video_config,
|
|||
const ScreenShareConfig& screen_share_config) {
|
||||
if (screen_share_config.slides_yuv_file_names.empty()) {
|
||||
if (screen_share_config.scrolling_params) {
|
||||
// If we have scrolling params, then its |source_width| and |source_height|
// If we have scrolling params, then its `source_width` and `source_height`
|
||||
// will be used as width and height of video input, so we have to validate
|
||||
// it against width and height of default input.
|
||||
RTC_CHECK_EQ(screen_share_config.scrolling_params->source_width,
|
||||
|
|
|
@ -21,7 +21,7 @@ namespace webrtc {
|
|||
namespace webrtc_pc_e2e {
|
||||
|
||||
// Creates a frame generator that produces frames with small squares that move
|
||||
// randomly towards the lower right corner. |type| has the default value
|
||||
// randomly towards the lower right corner. `type` has the default value
|
||||
// FrameGeneratorInterface::OutputType::I420. video_config specifies frame
|
||||
// width and height.
|
||||
std::unique_ptr<test::FrameGeneratorInterface> CreateSquareFrameGenerator(
|
||||
|
|
|
@ -25,10 +25,10 @@ namespace webrtc_pc_e2e {
|
|||
|
||||
// Create test fixture to establish test call between Alice and Bob.
|
||||
// During the test Alice will be caller and Bob will answer the call.
|
||||
// |test_case_name| is a name of test case, that will be used for all metrics
|
||||
// `test_case_name` is a name of test case, that will be used for all metrics
|
||||
// reporting.
|
||||
// |time_controller| is used to manage all rtc::Thread's and TaskQueue
// instances. The instance of |time_controller| has to outlive the created fixture.
// `time_controller` is used to manage all rtc::Thread's and TaskQueue
// instances. The instance of `time_controller` has to outlive the created fixture.
|
||||
// Returns a non-null PeerConnectionE2EQualityTestFixture instance.
|
||||
std::unique_ptr<PeerConnectionE2EQualityTestFixture>
|
||||
CreatePeerConnectionE2EQualityTestFixture(
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
namespace webrtc {
|
||||
|
||||
// Creates a time controller that wraps |alarm|.
// Creates a time controller that wraps `alarm`.
|
||||
std::unique_ptr<TimeController> CreateTimeController(
|
||||
ControlledAlarmClock* alarm);
|
||||
|
||||
|
|
|
@ -27,12 +27,12 @@ class CrossTrafficRoute {
|
|||
public:
|
||||
virtual ~CrossTrafficRoute() = default;
|
||||
|
||||
// Triggers sending of dummy packets with size |packet_size| bytes.
|
||||
// Triggers sending of dummy packets with size `packet_size` bytes.
|
||||
virtual void TriggerPacketBurst(size_t num_packets, size_t packet_size) = 0;
|
||||
// Sends a packet over the nodes. The content of the packet is unspecified;
|
||||
// only the size matters for emulation purposes.
|
||||
virtual void SendPacket(size_t packet_size) = 0;
|
||||
// Sends a packet over the nodes and runs |action| when it has been delivered.
|
||||
// Sends a packet over the nodes and runs `action` when it has been delivered.
|
||||
virtual void NetworkDelayedAction(size_t packet_size,
|
||||
std::function<void()> action) = 0;
|
||||
};
|
||||
|
|
|
@ -204,9 +204,9 @@ class EmulatedNetworkStats {
|
|||
class EmulatedEndpoint : public EmulatedNetworkReceiverInterface {
|
||||
public:
|
||||
// Send packet into network.
|
||||
// |from| will be used to set source address for the packet in destination
|
||||
// `from` will be used to set source address for the packet in destination
|
||||
// socket.
|
||||
// |to| will be used for routing verification and picking right socket by port
|
||||
// `to` will be used for routing verification and picking right socket by port
|
||||
// on destination endpoint.
|
||||
virtual void SendPacket(const rtc::SocketAddress& from,
|
||||
const rtc::SocketAddress& to,
|
||||
|
@ -214,12 +214,12 @@ class EmulatedEndpoint : public EmulatedNetworkReceiverInterface {
|
|||
uint16_t application_overhead = 0) = 0;
|
||||
|
||||
// Binds receiver to this endpoint to send and receive data.
|
||||
// |desired_port| is a port that should be used. If it is equal to 0,
|
||||
// `desired_port` is a port that should be used. If it is equal to 0,
|
||||
// endpoint will pick the first available port starting from
|
||||
// |kFirstEphemeralPort|.
|
||||
// `kFirstEphemeralPort`.
|
||||
//
|
||||
// Returns the port that should be used (it will be equal to desired, if
|
||||
// |desired_port| != 0 and is free or will be the one, selected by endpoint)
|
||||
// `desired_port` != 0 and is free or will be the one, selected by endpoint)
|
||||
// or absl::nullopt if desired_port is in use. Also fails if there are no more
|
||||
// free ports to bind to.
|
||||
//
|
||||
|
@ -256,7 +256,7 @@ class EmulatedEndpoint : public EmulatedNetworkReceiverInterface {
|
|||
// they are guaranteed to be delivered eventually, even on lossy networks.
|
||||
class TcpMessageRoute {
|
||||
public:
|
||||
// Sends a TCP message of the given |size| over the route, |on_received| is
|
||||
// Sends a TCP message of the given `size` over the route, `on_received` is
|
||||
// called when the message has been delivered. Note that the connection
|
||||
// parameters are reset iff there's no currently pending message on the route.
|
||||
virtual void SendMessage(size_t size, std::function<void()> on_received) = 0;
|
||||
|
|
|
@ -130,7 +130,7 @@ class EmulatedNetworkManagerInterface {
|
|||
virtual std::vector<EmulatedEndpoint*> endpoints() const = 0;
|
||||
|
||||
// Passes summarized network stats for endpoints for this manager into
|
||||
// specified |stats_callback|. Callback will be executed on network emulation
|
||||
// specified `stats_callback`. Callback will be executed on network emulation
|
||||
// internal task queue.
|
||||
virtual void GetStats(
|
||||
std::function<void(std::unique_ptr<EmulatedNetworkStats>)> stats_callback)
|
||||
|
@ -180,13 +180,13 @@ class NetworkEmulationManager {
|
|||
|
||||
// Creates an emulated network node, which represents single network in
|
||||
// the emulated network layer. Uses default implementation on network behavior
|
||||
// which can be configured with |config|. |random_seed| can be provided to
|
||||
// which can be configured with `config`. `random_seed` can be provided to
|
||||
// alter randomization behavior.
|
||||
virtual EmulatedNetworkNode* CreateEmulatedNode(
|
||||
BuiltInNetworkBehaviorConfig config,
|
||||
uint64_t random_seed = 1) = 0;
|
||||
// Creates an emulated network node, which represents single network in
|
||||
// the emulated network layer. |network_behavior| determines how created node
|
||||
// the emulated network layer. `network_behavior` determines how created node
|
||||
// will forward incoming packets to the next receiver.
|
||||
virtual EmulatedNetworkNode* CreateEmulatedNode(
|
||||
std::unique_ptr<NetworkBehaviorInterface> network_behavior) = 0;
|
||||
|
@ -205,8 +205,8 @@ class NetworkEmulationManager {
|
|||
|
||||
// Creates a route between endpoints going through specified network nodes.
|
||||
// This route is single direction only and describes how traffic that was
|
||||
// sent by network interface |from| has to be delivered to the network
// interface |to|. Return object can be used to remove created route. The
// sent by network interface `from` has to be delivered to the network
|
||||
// interface `to`. Return object can be used to remove created route. The
|
||||
// route must contain at least one network node inside it.
|
||||
//
|
||||
// Assume that E{0-9} are endpoints and N{0-9} are network nodes, then
|
||||
|
@ -228,7 +228,7 @@ class NetworkEmulationManager {
|
|||
const std::vector<EmulatedNetworkNode*>& via_nodes,
|
||||
EmulatedEndpoint* to) = 0;
|
||||
|
||||
// Creates a route over the given |via_nodes| creating the required endpoints
|
||||
// Creates a route over the given `via_nodes` creating the required endpoints
|
||||
// in the process. The returned EmulatedRoute pointer can be used in other
|
||||
// calls as a transport route for message or cross traffic.
|
||||
virtual EmulatedRoute* CreateRoute(
|
||||
|
@ -239,7 +239,7 @@ class NetworkEmulationManager {
|
|||
// packet's destination IP.
|
||||
//
|
||||
// This route is single direction only and describes how traffic that was
|
||||
// sent by network interface |from| has to be delivered in case routing
// sent by network interface `from` has to be delivered in case routing
|
||||
// was unspecified. Return object can be used to remove created route. The
|
||||
// route must contain at least one network node inside it.
|
||||
//
|
||||
|
@ -269,29 +269,29 @@ class NetworkEmulationManager {
|
|||
// packets being dropped.
|
||||
virtual void ClearRoute(EmulatedRoute* route) = 0;
|
||||
|
||||
// Creates a simulated TCP connection using |send_route| for traffic and
|
||||
// |ret_route| for feedback. This can be used to emulate HTTP cross traffic
|
||||
// Creates a simulated TCP connection using `send_route` for traffic and
|
||||
// `ret_route` for feedback. This can be used to emulate HTTP cross traffic
|
||||
// and to implement realistic reliable signaling over lossy networks.
|
||||
// TODO(srte): Handle clearing of the routes involved.
|
||||
virtual TcpMessageRoute* CreateTcpRoute(EmulatedRoute* send_route,
|
||||
EmulatedRoute* ret_route) = 0;
|
||||
|
||||
// Creates a route over the given |via_nodes|. Returns an object that can be
|
||||
// Creates a route over the given `via_nodes`. Returns an object that can be
|
||||
// used to emulate network load with cross traffic over the created route.
|
||||
virtual CrossTrafficRoute* CreateCrossTrafficRoute(
|
||||
const std::vector<EmulatedNetworkNode*>& via_nodes) = 0;
|
||||
|
||||
// Starts generating cross traffic using given |generator|. Takes ownership
|
||||
// Starts generating cross traffic using given `generator`. Takes ownership
|
||||
// over the generator.
|
||||
virtual CrossTrafficGenerator* StartCrossTraffic(
|
||||
std::unique_ptr<CrossTrafficGenerator> generator) = 0;
|
||||
|
||||
// Stops generating cross traffic that was started using given |generator|.
|
||||
// The |generator| shouldn't be used after and the reference may be invalid.
|
||||
// Stops generating cross traffic that was started using given `generator`.
|
||||
// The `generator` shouldn't be used after and the reference may be invalid.
|
||||
virtual void StopCrossTraffic(CrossTrafficGenerator* generator) = 0;
|
||||
|
||||
// Creates EmulatedNetworkManagerInterface which can be used then to inject
|
||||
// network emulation layer into PeerConnection. |endpoints| - are available
|
||||
// network emulation layer into PeerConnection. `endpoints` - are available
|
||||
// network interfaces for PeerConnection. If endpoint is enabled, it will be
|
||||
// immediately available for PeerConnection, otherwise user will be able to
|
||||
// enable endpoint later to make it available for PeerConnection.
|
||||
|
@@ -299,8 +299,8 @@ class NetworkEmulationManager {
CreateEmulatedNetworkManagerInterface(
    const std::vector<EmulatedEndpoint*>& endpoints) = 0;

// Passes summarized network stats for specified |endpoints| into specified
// |stats_callback|. Callback will be executed on network emulation
// Passes summarized network stats for specified `endpoints` into specified
// `stats_callback`. Callback will be executed on network emulation
// internal task queue.
virtual void GetStats(
    rtc::ArrayView<EmulatedEndpoint* const> endpoints,
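
Putting the pieces of this header together, a rough sketch of building a lossy path and a reliable TCP-like message route over it could look like this; only methods shown above are used, `manager` is an assumed NetworkEmulationManager pointer, and the behavior config is left at its defaults.

webrtc::BuiltInNetworkBehaviorConfig config;  // defaults; tune as needed
webrtc::EmulatedNetworkNode* send_node = manager->CreateEmulatedNode(config);
webrtc::EmulatedNetworkNode* ret_node = manager->CreateEmulatedNode(config);
// Routes are one-directional; CreateRoute() creates the endpoints itself.
webrtc::EmulatedRoute* send_route = manager->CreateRoute({send_node});
webrtc::EmulatedRoute* ret_route = manager->CreateRoute({ret_node});
webrtc::TcpMessageRoute* tcp_route =
    manager->CreateTcpRoute(send_route, ret_route);
tcp_route->SendMessage(/*size=*/10000, [] { /* message delivered */ });
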
@ -67,17 +67,17 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
// bottom right corner of the picture.
|
||||
//
|
||||
// In such case source dimensions must be greater or equal to the sliding
|
||||
// window dimensions. So |source_width| and |source_height| are the dimensions
|
||||
// of the source frame, while |VideoConfig::width| and |VideoConfig::height|
|
||||
// window dimensions. So `source_width` and `source_height` are the dimensions
|
||||
// of the source frame, while `VideoConfig::width` and `VideoConfig::height`
|
||||
// are the dimensions of the sliding window.
|
||||
//
|
||||
// Because |source_width| and |source_height| are dimensions of the source
|
||||
// Because `source_width` and `source_height` are dimensions of the source
|
||||
// frame, they have to be width and height of videos from
|
||||
// |ScreenShareConfig::slides_yuv_file_names|.
|
||||
// `ScreenShareConfig::slides_yuv_file_names`.
|
||||
//
|
||||
// Because scrolling has to be done on a single slide, it also requires that
|
||||
// |duration| must be less or equal to
|
||||
// |ScreenShareConfig::slide_change_interval|.
|
||||
// `duration` must be less or equal to
|
||||
// `ScreenShareConfig::slide_change_interval`.
|
||||
struct ScrollingParams {
|
||||
ScrollingParams(TimeDelta duration,
|
||||
size_t source_width,
|
||||
|
@ -110,16 +110,16 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
// will be applied in such case.
|
||||
bool generate_slides = false;
|
||||
// If present scrolling will be applied. Please read extra requirement on
|
||||
// |slides_yuv_file_names| for scrolling.
|
||||
// `slides_yuv_file_names` for scrolling.
|
||||
absl::optional<ScrollingParams> scrolling_params;
|
||||
// Contains list of yuv files with slides.
|
||||
//
|
||||
// If empty, default set of slides will be used. In such case
|
||||
// |VideoConfig::width| must be equal to |kDefaultSlidesWidth| and
|
||||
// |VideoConfig::height| must be equal to |kDefaultSlidesHeight| or if
|
||||
// |scrolling_params| are specified, then |ScrollingParams::source_width|
|
||||
// must be equal to |kDefaultSlidesWidth| and
|
||||
// |ScrollingParams::source_height| must be equal to |kDefaultSlidesHeight|.
|
||||
// `VideoConfig::width` must be equal to `kDefaultSlidesWidth` and
|
||||
// `VideoConfig::height` must be equal to `kDefaultSlidesHeight` or if
|
||||
// `scrolling_params` are specified, then `ScrollingParams::source_width`
|
||||
// must be equal to `kDefaultSlidesWidth` and
|
||||
// `ScrollingParams::source_height` must be equal to `kDefaultSlidesHeight`.
|
||||
std::vector<std::string> slides_yuv_file_names;
|
||||
};
|
||||
|
||||
|
@ -128,7 +128,7 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
// SVC support is limited:
|
||||
// During SVC testing there is no SFU, so framework will try to emulate SFU
|
||||
// behavior in regular p2p call. Because of it there are such limitations:
|
||||
// * if |target_spatial_index| is not equal to the highest spatial layer
|
||||
// * if `target_spatial_index` is not equal to the highest spatial layer
|
||||
// then no packet/frame drops are allowed.
|
||||
//
|
||||
// If there will be any drops, that will affect requested layer, then
|
||||
|
@ -154,11 +154,11 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
// Specifies spatial index of the video stream to analyze.
|
||||
// There are 2 cases:
|
||||
// 1. simulcast encoder is used:
|
||||
// in such case |target_spatial_index| will specify the index of
|
||||
// in such case `target_spatial_index` will specify the index of
|
||||
// simulcast stream, that should be analyzed. Other streams will be
|
||||
// dropped.
|
||||
// 2. SVC encoder is used:
|
||||
// in such case |target_spatial_index| will specify the top interesting
|
||||
// in such case `target_spatial_index` will specify the top interesting
|
||||
// spatial layer and all layers below, including target one will be
|
||||
// processed. All layers above target one will be dropped.
|
||||
// If not specified than whatever stream will be received will be analyzed.
|
||||
|
@ -166,8 +166,8 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
// network.
|
||||
absl::optional<int> target_spatial_index;
|
||||
|
||||
// Encoding parameters per simulcast layer. If not empty, |encoding_params|
// size has to be equal to |simulcast_streams_count|. Will be used to set
// Encoding parameters per simulcast layer. If not empty, `encoding_params`
// size has to be equal to `simulcast_streams_count`. Will be used to set
|
||||
// transceiver send encoding params for simulcast layers. Applicable only
|
||||
// for codecs that support simulcast (ex. Vp8) and will be ignored
|
||||
// otherwise. RtpEncodingParameters::rid may be changed by fixture
|
||||
|
@ -220,7 +220,7 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
// was captured during the test for this video stream on sender side.
|
||||
// It is useful when generator is used as input.
|
||||
absl::optional<std::string> input_dump_file_name;
|
||||
// Used only if |input_dump_file_name| is set. Specifies the module for the
|
||||
// Used only if `input_dump_file_name` is set. Specifies the module for the
|
||||
// video frames to be dumped. Modulo equals X means every Xth frame will be
|
||||
// written to the dump file. The value must be greater than 0.
|
||||
int input_dump_sampling_modulo = 1;
|
||||
|
@ -229,7 +229,7 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
// output files will be appended with indexes. The produced files contains
|
||||
// what was rendered for this video stream on receiver side.
|
||||
absl::optional<std::string> output_dump_file_name;
|
||||
// Used only if |output_dump_file_name| is set. Specifies the module for the
|
||||
// Used only if `output_dump_file_name` is set. Specifies the module for the
|
||||
// video frames to be dumped. Modulo equals X means every Xth frame will be
|
||||
// written to the dump file. The value must be greater than 0.
|
||||
int output_dump_sampling_modulo = 1;
|
||||
|
@ -282,9 +282,9 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
std::string name = cricket::kVp8CodecName;
|
||||
// Map of parameters, that have to be specified on SDP codec. Each parameter
|
||||
// is described by key and value. Codec parameters will match the specified
|
||||
// map if and only if for each key from |required_params| there will be
|
||||
// map if and only if for each key from `required_params` there will be
|
||||
// a parameter with name equal to this key and parameter value will be equal
|
||||
// to the value from |required_params| for this key.
|
||||
// to the value from `required_params` for this key.
|
||||
// If empty then only name will be used to match the codec.
|
||||
std::map<std::string, std::string> required_params;
|
||||
};
|
||||
|
@ -351,7 +351,7 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
CapturingDeviceIndex capturing_device_index) = 0;
|
||||
// Set the list of video codecs used by the peer during the test. These
|
||||
// codecs will be negotiated in SDP during offer/answer exchange. The order
|
||||
// of these codecs during negotiation will be the same as in |video_codecs|.
|
||||
// of these codecs during negotiation will be the same as in `video_codecs`.
|
||||
// Codecs have to be available in codecs list provided by peer connection to
|
||||
// be negotiated. If some of specified codecs won't be found, the test will
|
||||
// crash.
|
||||
|
@ -416,9 +416,9 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
|
||||
// Invoked by framework after peer connection factory and peer connection
|
||||
// itself will be created but before offer/answer exchange will be started.
|
||||
// |test_case_name| is name of test case, that should be used to report all
|
||||
// `test_case_name` is name of test case, that should be used to report all
|
||||
// metrics.
|
||||
// |reporter_helper| is a pointer to a class that will allow track_id to
|
||||
// `reporter_helper` is a pointer to a class that will allow track_id to
|
||||
// stream_id matching. The caller is responsible for ensuring the
|
||||
// TrackIdStreamInfoMap will be valid from Start() to
|
||||
// StopAndReportResults().
|
||||
|
@ -433,14 +433,14 @@ class PeerConnectionE2EQualityTestFixture {
|
|||
virtual ~PeerConnectionE2EQualityTestFixture() = default;
|
||||
|
||||
// Add activity that will be executed on the best effort at least after
|
||||
// |target_time_since_start| after call will be set up (after offer/answer
|
||||
// `target_time_since_start` after call will be set up (after offer/answer
|
||||
// exchange, ICE gathering will be done and ICE candidates will be passed to
|
||||
// remote side). |func| param is amount of time spent from the call set up.
|
// remote side). `func` param is amount of time spent from the call set up.
virtual void ExecuteAt(TimeDelta target_time_since_start,
std::function<void(TimeDelta)> func) = 0;
// Add activity that will be executed every |interval| with first execution
// on the best effort at least after |initial_delay_since_start| after call
// will be set up (after all participants will be connected). |func| param is
// Add activity that will be executed every `interval` with first execution
// on the best effort at least after `initial_delay_since_start` after call
// will be set up (after all participants will be connected). `func` param is
// amount of time spent from the call set up.
virtual void ExecuteEvery(TimeDelta initial_delay_since_start,
TimeDelta interval,

@@ -452,15 +452,15 @@ class PeerConnectionE2EQualityTestFixture {
// Add a new peer to the call and return an object through which caller
// can configure peer's behavior.
// |network_thread| will be used as network thread for peer's peer connection
// |network_manager| will be used to provide network interfaces for peer's
// `network_thread` will be used as network thread for peer's peer connection
// `network_manager` will be used to provide network interfaces for peer's
// peer connection.
// |configurer| function will be used to configure peer in the call.
// `configurer` function will be used to configure peer in the call.
virtual void AddPeer(rtc::Thread* network_thread,
rtc::NetworkManager* network_manager,
rtc::FunctionView<void(PeerConfigurer*)> configurer) = 0;
// Runs the media quality test, which includes setting up the call with
// configured participants, running it according to provided |run_params| and
// configured participants, running it according to provided `run_params` and
// terminating it properly at the end. During call duration media quality
// metrics are gathered, which are then reported to stdout and (if configured)
// to the json/protobuf output file through the WebRTC perf test results

@@ -23,7 +23,7 @@ class StatsObserverInterface {
virtual ~StatsObserverInterface() = default;
// Method called when stats reports are available for the PeerConnection
// identified by |pc_label|.
// identified by `pc_label`.
virtual void OnStatsReports(
absl::string_view pc_label,
const rtc::scoped_refptr<const RTCStatsReport>& report) = 0;

@@ -44,7 +44,7 @@ class TimeController {
// Creates a process thread.
virtual std::unique_ptr<ProcessThread> CreateProcessThread(
const char* thread_name) = 0;
// Creates an rtc::Thread instance. If |socket_server| is nullptr, a default
// Creates an rtc::Thread instance. If `socket_server` is nullptr, a default
// noop socket server is created.
// Returned thread is not null and started.
virtual std::unique_ptr<rtc::Thread> CreateThread(

@@ -55,12 +55,12 @@ class TimeController {
// thread.
virtual rtc::Thread* GetMainThread() = 0;
// Allow task queues and process threads created by this instance to execute
// for the given |duration|.
// for the given `duration`.
virtual void AdvanceTime(TimeDelta duration) = 0;
// Waits until condition() == true, polling condition() in small time
// intervals.
// Returns true if condition() was evaluated to true before |max_duration|
// Returns true if condition() was evaluated to true before `max_duration`
// elapsed and false otherwise.
bool Wait(const std::function<bool()>& condition,
TimeDelta max_duration = TimeDelta::Seconds(5));
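
A minimal sketch of how the AdvanceTime()/Wait() pair above is typically driven; the TimeController reference and the `done` flag are assumed to come from the surrounding test, and the include paths are the usual api/ ones:

#include <atomic>

#include "api/test/time_controller.h"
#include "api/units/time_delta.h"

// Let task queues owned by `time_controller` run for one (possibly simulated)
// second, then poll `done` until it flips or the 5 s budget elapses.
bool AdvanceAndWait(webrtc::TimeController& time_controller,
                    std::atomic<bool>& done) {
  time_controller.AdvanceTime(webrtc::TimeDelta::Seconds(1));
  return time_controller.Wait([&done] { return done.load(); },
                              webrtc::TimeDelta::Seconds(5));
}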

@@ -75,17 +75,17 @@ class ControlledAlarmClock {
// Gets a clock that tells the alarm clock's notion of time.
virtual Clock* GetClock() = 0;
// Schedules the alarm to fire at |deadline|.
// An alarm clock only supports one deadline. Calls to |ScheduleAlarmAt| with
// Schedules the alarm to fire at `deadline`.
// An alarm clock only supports one deadline. Calls to `ScheduleAlarmAt` with
// an earlier deadline will reset the alarm to fire earlier.Calls to
// |ScheduleAlarmAt| with a later deadline are ignored. Returns true if the
// `ScheduleAlarmAt` with a later deadline are ignored. Returns true if the
// deadline changed, false otherwise.
virtual bool ScheduleAlarmAt(Timestamp deadline) = 0;
// Sets the callback that should be run when the alarm fires.
virtual void SetCallback(std::function<void()> callback) = 0;
// Waits for |duration| to pass, according to the alarm clock.
// Waits for `duration` to pass, according to the alarm clock.
virtual void Sleep(TimeDelta duration) = 0;
};
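
A hedged usage sketch of the alarm-clock interface above; `alarm` is assumed to be a concrete ControlledAlarmClock supplied by the test framework, and Clock::CurrentTime() is assumed to be the usual system_wrappers accessor:

#include <functional>
#include <utility>

#include "api/units/time_delta.h"
#include "api/units/timestamp.h"

// Arm the alarm 20 ms ahead of the clock's own notion of "now", then sleep
// long enough (by that same clock) for the callback to fire.
void FireOnceIn20Ms(webrtc::ControlledAlarmClock& alarm,
                    std::function<void()> callback) {
  alarm.SetCallback(std::move(callback));
  const webrtc::Timestamp deadline =
      alarm.GetClock()->CurrentTime() + webrtc::TimeDelta::Millis(20);
  alarm.ScheduleAlarmAt(deadline);  // True: this is the first deadline set.
  alarm.Sleep(webrtc::TimeDelta::Millis(20));
}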

@@ -16,7 +16,7 @@
namespace webrtc {
namespace webrtc_pc_e2e {
// Instances of |TrackIdStreamInfoMap| provide bookkeeping capabilities that
// Instances of `TrackIdStreamInfoMap` provide bookkeeping capabilities that
// are useful to associate stats reports track_ids to the remote stream info.
class TrackIdStreamInfoMap {
public:

@@ -26,12 +26,12 @@ class TrackIdStreamInfoMap {
// StatsObserverInterface::OnStatsReports is invoked.
// Returns a reference to a stream label owned by the TrackIdStreamInfoMap.
// Precondition: |track_id| must be already mapped to stream label.
// Precondition: `track_id` must be already mapped to stream label.
virtual absl::string_view GetStreamLabelFromTrackId(
absl::string_view track_id) const = 0;
// Returns a reference to a sync group name owned by the TrackIdStreamInfoMap.
// Precondition: |track_id| must be already mapped to sync group.
// Precondition: `track_id` must be already mapped to sync group.
virtual absl::string_view GetSyncGroupLabelFromTrackId(
absl::string_view track_id) const = 0;
};

@@ -72,9 +72,9 @@ class VideoQualityAnalyzerInterface : public StatsObserverInterface {
~VideoQualityAnalyzerInterface() override = default;
// Will be called by framework before test.
// |test_case_name| is name of test case, that should be used to report all
// `test_case_name` is name of test case, that should be used to report all
// video metrics.
// |threads_count| is number of threads that analyzer can use for heavy
// `threads_count` is number of threads that analyzer can use for heavy
// calculations. Analyzer can perform simple calculations on the calling
// thread in each method, but should remember, that it is the same thread,
// that is used in video pipeline.

@@ -83,57 +83,57 @@ class VideoQualityAnalyzerInterface : public StatsObserverInterface {
int max_threads_count) {}
// Will be called when frame was generated from the input stream.
// |peer_name| is name of the peer on which side frame was captured.
// `peer_name` is name of the peer on which side frame was captured.
// Returns frame id, that will be set by framework to the frame.
virtual uint16_t OnFrameCaptured(absl::string_view peer_name,
const std::string& stream_label,
const VideoFrame& frame) = 0;
// Will be called before calling the encoder.
// |peer_name| is name of the peer on which side frame came to encoder.
// `peer_name` is name of the peer on which side frame came to encoder.
virtual void OnFramePreEncode(absl::string_view peer_name,
const VideoFrame& frame) {}
// Will be called for each EncodedImage received from encoder. Single
// VideoFrame can produce multiple EncodedImages. Each encoded image will
// have id from VideoFrame.
// |peer_name| is name of the peer on which side frame was encoded.
// `peer_name` is name of the peer on which side frame was encoded.
virtual void OnFrameEncoded(absl::string_view peer_name,
uint16_t frame_id,
const EncodedImage& encoded_image,
const EncoderStats& stats) {}
// Will be called for each frame dropped by encoder.
// |peer_name| is name of the peer on which side frame drop was detected.
// `peer_name` is name of the peer on which side frame drop was detected.
virtual void OnFrameDropped(absl::string_view peer_name,
EncodedImageCallback::DropReason reason) {}
// Will be called before calling the decoder.
// |peer_name| is name of the peer on which side frame was received.
// `peer_name` is name of the peer on which side frame was received.
virtual void OnFramePreDecode(absl::string_view peer_name,
uint16_t frame_id,
const EncodedImage& encoded_image) {}
// Will be called after decoding the frame.
// |peer_name| is name of the peer on which side frame was decoded.
// `peer_name` is name of the peer on which side frame was decoded.
virtual void OnFrameDecoded(absl::string_view peer_name,
const VideoFrame& frame,
const DecoderStats& stats) {}
// Will be called when frame will be obtained from PeerConnection stack.
// |peer_name| is name of the peer on which side frame was rendered.
// `peer_name` is name of the peer on which side frame was rendered.
virtual void OnFrameRendered(absl::string_view peer_name,
const VideoFrame& frame) {}
// Will be called if encoder return not WEBRTC_VIDEO_CODEC_OK.
// All available codes are listed in
// modules/video_coding/include/video_error_codes.h
// |peer_name| is name of the peer on which side error acquired.
// `peer_name` is name of the peer on which side error acquired.
virtual void OnEncoderError(absl::string_view peer_name,
const VideoFrame& frame,
int32_t error_code) {}
// Will be called if decoder return not WEBRTC_VIDEO_CODEC_OK.
// All available codes are listed in
// modules/video_coding/include/video_error_codes.h
// |peer_name| is name of the peer on which side error acquired.
// `peer_name` is name of the peer on which side error acquired.
virtual void OnDecoderError(absl::string_view peer_name,
uint16_t frame_id,
int32_t error_code) {}
// Will be called every time new stats reports are available for the
// Peer Connection identified by |pc_label|.
// Peer Connection identified by `pc_label`.
void OnStatsReports(
absl::string_view pc_label,
const rtc::scoped_refptr<const RTCStatsReport>& report) override {}
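
To make the callback order above concrete, here is a hedged sketch of one frame's trip through an analyzer, with made-up peer names ("alice" sends, "bob" receives), default-constructed stats, and error/drop callbacks omitted; the relevant api/test headers are assumed to be included:

void DriveOneFrame(
    webrtc::webrtc_pc_e2e::VideoQualityAnalyzerInterface& analyzer,
    const webrtc::VideoFrame& frame,
    const webrtc::EncodedImage& encoded_image) {
  // Sending side: the framework stamps the returned id on the frame.
  uint16_t frame_id = analyzer.OnFrameCaptured("alice", "alice_video", frame);
  analyzer.OnFramePreEncode("alice", frame);
  analyzer.OnFrameEncoded("alice", frame_id, encoded_image, /*stats=*/{});
  // Receiving side.
  analyzer.OnFramePreDecode("bob", frame_id, encoded_image);
  analyzer.OnFrameDecoded("bob", frame, /*stats=*/{});
  analyzer.OnFrameRendered("bob", frame);
}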

@@ -98,7 +98,7 @@ class VideoQualityTestFixtureInterface {
InterLayerPredMode inter_layer_pred = InterLayerPredMode::kOn;
// If empty, bitrates are generated in VP9Impl automatically.
std::vector<SpatialLayer> spatial_layers;
// If set, default parameters will be used instead of |streams|.
// If set, default parameters will be used instead of `streams`.
bool infer_streams = false;
} ss[2];
struct Logging {

@@ -18,7 +18,7 @@
namespace webrtc {
// Configuration of send bitrate. The |start_bitrate_bps| value is
// Configuration of send bitrate. The `start_bitrate_bps` value is
// used for multiple purposes, both as a prior in the bandwidth
// estimator, and for initial configuration of the encoder. We may
// want to create separate apis for those, and use a smaller struct

@@ -48,14 +48,14 @@ struct SendDataParams {
// retransmitted by the transport before it is dropped.
// Setting this value to zero disables retransmission.
// Valid values are in the range [0-UINT16_MAX].
// |max_rtx_count| and |max_rtx_ms| may not be set simultaneously.
// `max_rtx_count` and `max_rtx_ms` may not be set simultaneously.
absl::optional<int> max_rtx_count;
// If set, the maximum number of milliseconds for which the transport
// may retransmit this message before it is dropped.
// Setting this value to zero disables retransmission.
// Valid values are in the range [0-UINT16_MAX].
// |max_rtx_count| and |max_rtx_ms| may not be set simultaneously.
// `max_rtx_count` and `max_rtx_ms` may not be set simultaneously.
absl::optional<int> max_rtx_ms;
};
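
A small sketch of the mutual-exclusion rule spelled out above: pick either a retransmission count or a time bound, never both (the include path is assumed to be the usual api/transport one):

#include "api/transport/data_channel_transport_interface.h"

webrtc::SendDataParams MakePartiallyReliableParams() {
  webrtc::SendDataParams params;
  params.max_rtx_count = 3;    // Give up after three retransmissions...
  // params.max_rtx_ms = 200;  // ...or bound by time instead (not both).
  return params;
}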

@@ -96,18 +96,18 @@ class DataChannelTransportInterface {
public:
virtual ~DataChannelTransportInterface() = default;
// Opens a data |channel_id| for sending. May return an error if the
// specified |channel_id| is unusable. Must be called before |SendData|.
// Opens a data `channel_id` for sending. May return an error if the
// specified `channel_id` is unusable. Must be called before `SendData`.
virtual RTCError OpenChannel(int channel_id) = 0;
// Sends a data buffer to the remote endpoint using the given send parameters.
// |buffer| may not be larger than 256 KiB. Returns an error if the send
// `buffer` may not be larger than 256 KiB. Returns an error if the send
// fails.
virtual RTCError SendData(int channel_id,
const SendDataParams& params,
const rtc::CopyOnWriteBuffer& buffer) = 0;
// Closes |channel_id| gracefully. Returns an error if |channel_id| is not
// Closes `channel_id` gracefully. Returns an error if `channel_id` is not
// open. Data sent after the closing procedure begins will not be
// transmitted. The channel becomes closed after pending data is transmitted.
virtual RTCError CloseChannel(int channel_id) = 0;
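
A hedged sketch of the open/send/close sequence these comments describe; the transport instance and payload are assumed to come from the surrounding code, and default SendDataParams are used:

webrtc::RTCError SendOneBuffer(
    webrtc::DataChannelTransportInterface& transport,
    int channel_id,
    const rtc::CopyOnWriteBuffer& buffer) {
  webrtc::RTCError error = transport.OpenChannel(channel_id);
  if (!error.ok())
    return error;
  webrtc::SendDataParams params;  // Defaults; see the struct above.
  error = transport.SendData(channel_id, params, buffer);
  if (!error.ok())
    return error;
  return transport.CloseChannel(channel_id);  // Graceful close.
}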

@@ -32,7 +32,7 @@ class SctpTransportFactoryInterface {
public:
virtual ~SctpTransportFactoryInterface() = default;
// Create an SCTP transport using |channel| for the underlying transport.
// Create an SCTP transport using `channel` for the underlying transport.
virtual std::unique_ptr<cricket::SctpTransportInternal> CreateSctpTransport(
rtc::PacketTransportInternal* channel) = 0;
};

@@ -254,11 +254,11 @@ class StunMessage {
// This is used for testing.
void SetStunMagicCookie(uint32_t val);
// Contruct a copy of |this|.
// Contruct a copy of `this`.
std::unique_ptr<StunMessage> Clone() const;
// Check if the attributes of this StunMessage equals those of |other|
// for all attributes that |attribute_type_mask| return true
// Check if the attributes of this StunMessage equals those of `other`
// for all attributes that `attribute_type_mask` return true
bool EqualAttributes(const StunMessage* other,
std::function<bool(int type)> attribute_type_mask) const;

@@ -570,11 +570,11 @@ class StunUInt16ListAttribute : public StunAttribute {
std::string StunMethodToString(int msg_type);
// Returns the (successful) response type for the given request type.
// Returns -1 if |request_type| is not a valid request type.
// Returns -1 if `request_type` is not a valid request type.
int GetStunSuccessResponseType(int request_type);
// Returns the error response type for the given request type.
// Returns -1 if |request_type| is not a valid request type.
// Returns -1 if `request_type` is not a valid request type.
int GetStunErrorResponseType(int request_type);
// Returns whether a given message is a request type.

@@ -595,13 +595,13 @@ bool ComputeStunCredentialHash(const std::string& username,
const std::string& password,
std::string* hash);
// Make a copy af |attribute| and return a new StunAttribute.
// Make a copy af `attribute` and return a new StunAttribute.
// This is useful if you don't care about what kind of attribute you
// are handling.
//
// The implementation copies by calling Write() followed by Read().
//
// If |tmp_buffer| is supplied this buffer will be used, otherwise
// If `tmp_buffer` is supplied this buffer will be used, otherwise
// a buffer will created in the method.
std::unique_ptr<StunAttribute> CopyStunAttribute(
const StunAttribute& attribute,

@@ -29,7 +29,7 @@ class TurnCustomizer {
cricket::StunMessage* message) = 0;
// TURN can send data using channel data messages or Send indication.
// This method should return false if |data| should be sent using
// This method should return false if `data` should be sent using
// a Send indication instead of a ChannelData message, even if a
// channel is bound.
virtual bool AllowChannelData(cricket::PortInterface* port,

@@ -12,7 +12,7 @@
namespace webrtc {
namespace {
// Try to convert |enum_value| into the enum class T. |enum_bitmask| is created
// Try to convert `enum_value` into the enum class T. `enum_bitmask` is created
// by the funciton below. Returns true if conversion was successful, false
// otherwise.
template <typename T>

@@ -43,7 +43,7 @@ constexpr int MakeMask(const int index, const int length, T (&values)[N]) {
}
// Create a bitmask where each bit corresponds to one potential enum value.
// |values| should be an array listing all possible enum values. The bit is set
// `values` should be an array listing all possible enum values. The bit is set
// to one if the corresponding enum exists. Only works for enums with values
// less than 64.
template <typename T, size_t N>
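
The bitmask trick described above, illustrated with a made-up enum rather than the real helpers: every declared value sets one bit of a 64-bit mask, so a validity check is a shift plus an AND.

#include <cstdint>

enum class Fruit : uint64_t { kApple = 0, kPear = 3, kPlum = 7 };

constexpr uint64_t kFruitBitmask = (uint64_t{1} << 0) |   // kApple
                                   (uint64_t{1} << 3) |   // kPear
                                   (uint64_t{1} << 7);    // kPlum

constexpr bool IsDeclaredFruit(uint64_t enum_value) {
  return enum_value < 64 && (kFruitBitmask & (uint64_t{1} << enum_value)) != 0;
}

static_assert(IsDeclaredFruit(3), "kPear maps to a set bit");
static_assert(!IsDeclaredFruit(5), "5 was never declared");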

@@ -46,7 +46,7 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {
int64_t Id() const { return id_; }
// TODO(philipel): Add simple modify/access functions to prevent adding too
// many |references|.
// many `references`.
size_t num_references = 0;
int64_t references[kMaxFrameReferences];
// Is this subframe the last one in the superframe (In RTP stream that would

@@ -98,7 +98,7 @@ class RTC_EXPORT EncodedImage {
}
// These methods can be used to set/get size of subframe with spatial index
// |spatial_index| on encoded frames that consist of multiple spatial layers.
// `spatial_index` on encoded frames that consist of multiple spatial layers.
absl::optional<size_t> SpatialLayerFrameSize(int spatial_index) const;
void SetSpatialLayerFrameSize(int spatial_index, size_t size_bytes);

@@ -195,7 +195,7 @@ class RTC_EXPORT EncodedImage {
// carries the webrtc::VideoFrame id field from the sender to the receiver.
absl::optional<uint16_t> video_frame_tracking_id_;
// Information about packets used to assemble this video frame. This is needed
// by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
// by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's
// MediaStreamTrack, in order to implement getContributingSources(). See:
// https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
RtpPacketInfos packet_infos_;

@@ -34,7 +34,7 @@ class I010Buffer : public I010BufferInterface {
// Convert and put I420 buffer into a new buffer.
static rtc::scoped_refptr<I010Buffer> Copy(const I420BufferInterface& buffer);
// Return a rotated copy of |src|.
// Return a rotated copy of `src`.
static rtc::scoped_refptr<I010Buffer> Rotate(const I010BufferInterface& src,
VideoRotation rotation);

@@ -55,15 +55,15 @@ class I010Buffer : public I010BufferInterface {
uint16_t* MutableDataU();
uint16_t* MutableDataV();
// Scale the cropped area of |src| to the size of |this| buffer, and
// write the result into |this|.
// Scale the cropped area of `src` to the size of `this` buffer, and
// write the result into `this`.
void CropAndScaleFrom(const I010BufferInterface& src,
int offset_x,
int offset_y,
int crop_width,
int crop_height);
// Scale all of |src| to the size of |this| buffer, with no cropping.
// Scale all of `src` to the size of `this` buffer, with no cropping.
void ScaleFrom(const I010BufferInterface& src);
// Pastes whole picture to canvas at (offset_row, offset_col).

@@ -49,7 +49,7 @@ class RTC_EXPORT I420Buffer : public I420BufferInterface {
const uint8_t* data_v,
int stride_v);
// Returns a rotated copy of |src|.
// Returns a rotated copy of `src`.
static rtc::scoped_refptr<I420Buffer> Rotate(const I420BufferInterface& src,
VideoRotation rotation);
// Deprecated.

@@ -83,8 +83,8 @@ class RTC_EXPORT I420Buffer : public I420BufferInterface {
uint8_t* MutableDataU();
uint8_t* MutableDataV();
// Scale the cropped area of |src| to the size of |this| buffer, and
// write the result into |this|.
// Scale the cropped area of `src` to the size of `this` buffer, and
// write the result into `this`.
void CropAndScaleFrom(const I420BufferInterface& src,
int offset_x,
int offset_y,

@@ -95,7 +95,7 @@ class RTC_EXPORT I420Buffer : public I420BufferInterface {
// aspect ratio without distorting the image.
void CropAndScaleFrom(const I420BufferInterface& src);
// Scale all of |src| to the size of |this| buffer, with no cropping.
// Scale all of `src` to the size of `this` buffer, with no cropping.
void ScaleFrom(const I420BufferInterface& src);
// Pastes whole picture to canvas at (offset_row, offset_col).

@@ -56,8 +56,8 @@ class RTC_EXPORT NV12Buffer : public NV12BufferInterface {
// are resolved in a better way. Or in the mean time, use SetBlack.
void InitializeData();
// Scale the cropped area of |src| to the size of |this| buffer, and
// write the result into |this|.
// Scale the cropped area of `src` to the size of `this` buffer, and
// write the result into `this`.
void CropAndScaleFrom(const NV12BufferInterface& src,
int offset_x,
int offset_y,

@@ -50,8 +50,8 @@ class RTC_EXPORT VideoBitrateAllocation {
// Get the sum of all the temporal layer for a specific spatial layer.
uint32_t GetSpatialLayerSum(size_t spatial_index) const;
// Sum of bitrates of temporal layers, from layer 0 to |temporal_index|
// inclusive, of specified spatial layer |spatial_index|. Bitrates of lower
// Sum of bitrates of temporal layers, from layer 0 to `temporal_index`
// inclusive, of specified spatial layer `spatial_index`. Bitrates of lower
// spatial layers are not included.
uint32_t GetTemporalLayerSum(size_t spatial_index,
size_t temporal_index) const;
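
A hedged sketch of the cumulative semantics of GetTemporalLayerSum(); SetBitrate() belongs to the same class but is not shown in this hunk, so treat its use here as an assumption:

#include "api/video/video_bitrate_allocation.h"

void TemporalLayerSumExample() {
  webrtc::VideoBitrateAllocation allocation;
  allocation.SetBitrate(/*spatial_index=*/0, /*temporal_index=*/0, 100000);
  allocation.SetBitrate(/*spatial_index=*/0, /*temporal_index=*/1, 50000);
  // Layers 0..1 of spatial layer 0 together: 150000 bps.
  const uint32_t sum = allocation.GetTemporalLayerSum(0, 1);
  (void)sum;
}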

@@ -272,7 +272,7 @@ class RTC_EXPORT VideoFrame {
// update_rect() will return a rectangle corresponding to the entire frame.
absl::optional<UpdateRect> update_rect_;
// Information about packets used to assemble this video frame. This is needed
// by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
// by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's
// MediaStreamTrack, in order to implement getContributingSources(). See:
// https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
RtpPacketInfos packet_infos_;

@@ -84,8 +84,8 @@ class RTC_EXPORT VideoFrameBuffer : public rtc::RefCountInterface {
// A format specific scale function. Default implementation works by
// converting to I420. But more efficient implementations may override it,
// especially for kNative.
// First, the image is cropped to |crop_width| and |crop_height| and then
// scaled to |scaled_width| and |scaled_height|.
// First, the image is cropped to `crop_width` and `crop_height` and then
// scaled to `scaled_width` and `scaled_height`.
virtual rtc::scoped_refptr<VideoFrameBuffer> CropAndScale(int offset_x,
int offset_y,
int crop_width,

@@ -54,7 +54,7 @@ struct RTC_EXPORT VideoSinkWants {
int max_framerate_fps = std::numeric_limits<int>::max();
// Tells the source that the sink wants width and height of the video frames
// to be divisible by |resolution_alignment|.
// to be divisible by `resolution_alignment`.
// For example: With I420, this value would be a multiple of 2.
// Note that this field is unrelated to any horizontal or vertical stride
// requirements the encoder has on the incoming video frame buffers.

@@ -71,13 +71,13 @@ struct RTC_EXPORT VideoSinkWants {
// to scaleResolutionDownBy or turning off simulcast or SVC layers.
//
// For example, we may capture at 720p and due to adaptation (e.g. applying
// |max_pixel_count| constraints) create webrtc::VideoFrames of size 480p, but
// `max_pixel_count` constraints) create webrtc::VideoFrames of size 480p, but
// if we do scaleResolutionDownBy:2 then the only resolution we end up
// encoding is 240p. In this case we still need to provide webrtc::VideoFrames
// of size 480p but we can optimize internal buffers for 240p, avoiding
// downsampling to 480p if possible.
//
// Note that the |resolutions| can change while frames are in flight and
// Note that the `resolutions` can change while frames are in flight and
// should only be used as a hint when constructing the webrtc::VideoFrame.
std::vector<FrameSize> resolutions;
};

@@ -20,7 +20,7 @@
#include "api/video_codecs/sdp_video_format.h"
namespace webrtc {
// The |decoder_settings| parameter is a map between:
// The `decoder_settings` parameter is a map between:
// <payload type> --> <<video format>, <number of cores>>.
// The video format is used when instantiating a decoder, and
// the number of cores is used when initializing the decoder.

@@ -68,9 +68,9 @@ class VideoStreamEncoderInterface : public rtc::VideoSinkInterface<VideoFrame> {
GetAdaptationResources() = 0;
// Sets the source that will provide video frames to the VideoStreamEncoder's
// OnFrame method. |degradation_preference| control whether or not resolution
// OnFrame method. `degradation_preference` control whether or not resolution
// or frame rate may be reduced. The VideoStreamEncoder registers itself with
// |source|, and signals adaptation decisions to the source in the form of
// `source`, and signals adaptation decisions to the source in the form of
// VideoSinkWants.
// TODO(nisse): When adaptation logic is extracted from this class,
// it no longer needs to know the source.

@@ -78,8 +78,8 @@ class VideoStreamEncoderInterface : public rtc::VideoSinkInterface<VideoFrame> {
rtc::VideoSourceInterface<VideoFrame>* source,
const DegradationPreference& degradation_preference) = 0;
// Sets the |sink| that gets the encoded frames. |rotation_applied| means
// that the source must support rotation. Only set |rotation_applied| if the
// Sets the `sink` that gets the encoded frames. `rotation_applied` means
// that the source must support rotation. Only set `rotation_applied` if the
// remote side does not support the rotation extension.
virtual void SetSink(EncoderSink* sink, bool rotation_applied) = 0;

@@ -102,13 +102,13 @@ class VideoStreamEncoderInterface : public rtc::VideoSinkInterface<VideoFrame> {
virtual void OnLossNotification(
const VideoEncoder::LossNotification& loss_notification) = 0;
// Set the currently estimated network properties. A |target_bitrate|
// Set the currently estimated network properties. A `target_bitrate`
// of zero pauses the encoder.
// |stable_target_bitrate| is a filtered version of |target_bitrate|. It is
// `stable_target_bitrate` is a filtered version of `target_bitrate`. It is
// always less or equal to it. It can be used to avoid rapid changes of
// expensive encoding settings, such as resolution.
// |link_allocation| is the bandwidth available for this video stream on the
// network link. It is always at least |target_bitrate| but may be higher
// `link_allocation` is the bandwidth available for this video stream on the
// network link. It is always at least `target_bitrate` but may be higher
// if we are not network constrained.
virtual void OnBitrateUpdated(DataRate target_bitrate,
DataRate stable_target_bitrate,

@@ -122,8 +122,8 @@ class VideoStreamEncoderInterface : public rtc::VideoSinkInterface<VideoFrame> {
virtual void SetFecControllerOverride(
FecControllerOverride* fec_controller_override) = 0;
// Creates and configures an encoder with the given |config|. The
// |max_data_payload_length| is used to support single NAL unit
// Creates and configures an encoder with the given `config`. The
// `max_data_payload_length` is used to support single NAL unit
// packetization for H.264.
virtual void ConfigureEncoder(VideoEncoderConfig config,
size_t max_data_payload_length) = 0;
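
Taken together, the comments above imply a wiring order along these lines; this is a sketch only, with the sink, source and config assumed to exist elsewhere and SetSource() assumed to be the setter that the `degradation_preference` parameter belongs to:

#include <utility>

void WireUpEncoder(webrtc::VideoStreamEncoderInterface& encoder,
                   webrtc::VideoStreamEncoderInterface::EncoderSink* sink,
                   rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
                   webrtc::VideoEncoderConfig config) {
  encoder.SetSink(sink, /*rotation_applied=*/false);
  encoder.SetSource(source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
  // Single NAL unit packetization for H.264 motivates the payload limit.
  encoder.ConfigureEncoder(std::move(config), /*max_data_payload_length=*/1200);
}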

@@ -101,7 +101,7 @@ class VideoStreamEncoderObserver : public CpuOveruseMetricsObserver {
const VideoBitrateAllocation& allocation) {}
// Informes observer if an internal encoder scaler has reduced video
// resolution or not. |is_scaled| is a flag indicating if the video is scaled
// resolution or not. `is_scaled` is a flag indicating if the video is scaled
// down.
virtual void OnEncoderInternalScalerUpdate(bool is_scaled) {}

@@ -55,7 +55,7 @@ struct TimingFrameInfo {
// synchronized, -1 otherwise.
int64_t EndToEndDelay() const;
// Returns true if current frame took longer to process than |other| frame.
// Returns true if current frame took longer to process than `other` frame.
// If other frame's clocks are not synchronized, current frame is always
// preferred.
bool IsLongerThan(const TimingFrameInfo& other) const;

@@ -75,7 +75,7 @@ class VideoDecoderSoftwareFallbackWrapperTest : public ::testing::Test {
int reset_count_ = 0;
};
test::ScopedFieldTrials override_field_trials_;
// |fake_decoder_| is owned and released by |fallback_wrapper_|.
// `fake_decoder_` is owned and released by `fallback_wrapper_`.
CountingFakeDecoder* fake_decoder_;
std::unique_ptr<VideoDecoder> fallback_wrapper_;
};

@@ -172,7 +172,7 @@ class VideoEncoderSoftwareFallbackWrapperTestBase : public ::testing::Test {
test::ScopedFieldTrials override_field_trials_;
FakeEncodedImageCallback callback_;
// |fake_encoder_| is owned and released by |fallback_wrapper_|.
// `fake_encoder_` is owned and released by `fallback_wrapper_`.
CountingFakeEncoder* fake_encoder_;
CountingFakeEncoder* fake_sw_encoder_;
bool wrapper_initialized_;

@@ -40,7 +40,7 @@ class RTC_EXPORT VideoDecoderFactory {
// power efficient, which is currently interpreted as if there is support for
// hardware acceleration.
// See https://w3c.github.io/webrtc-svc/#scalabilitymodes* for a specification
// of valid values for |scalability_mode|.
// of valid values for `scalability_mode`.
// NOTE: QueryCodecSupport is currently an experimental feature that is
// subject to change without notice.
virtual CodecSupport QueryCodecSupport(

@@ -167,7 +167,7 @@ class RTC_EXPORT VideoEncoder {
ScalingSettings scaling_settings;
// The width and height of the incoming video frames should be divisible
// by |requested_resolution_alignment|. If they are not, the encoder may
// by `requested_resolution_alignment`. If they are not, the encoder may
// drop the incoming frame.
// For example: With I420, this value would be a multiple of 2.
// Note that this field is unrelated to any horizontal or vertical stride

@@ -175,12 +175,12 @@ class RTC_EXPORT VideoEncoder {
int requested_resolution_alignment;
// Same as above but if true, each simulcast layer should also be divisible
// by |requested_resolution_alignment|.
// Note that scale factors |scale_resolution_down_by| may be adjusted so a
// by `requested_resolution_alignment`.
// Note that scale factors `scale_resolution_down_by` may be adjusted so a
// common multiple is not too large to avoid largely cropped frames and
// possibly with an aspect ratio far from the original.
// Warning: large values of scale_resolution_down_by could be changed
// considerably, especially if |requested_resolution_alignment| is large.
// considerably, especially if `requested_resolution_alignment` is large.
bool apply_alignment_to_all_simulcast_layers;
// If true, encoder supports working with a native handle (e.g. texture

@@ -215,7 +215,7 @@ class RTC_EXPORT VideoEncoder {
bool has_internal_source;
// For each spatial layer (simulcast stream or SVC layer), represented as an
// element in |fps_allocation| a vector indicates how many temporal layers
// element in `fps_allocation` a vector indicates how many temporal layers
// the encoder is using for that spatial layer.
// For each spatial/temporal layer pair, the frame rate fraction is given as
// an 8bit unsigned integer where 0 = 0% and 255 = 100%.
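
A hedged sketch of filling `fps_allocation` for one spatial layer with two temporal layers, where the base layer carries half the frame rate; fractions are cumulative and expressed in 1/255 units (include path assumed):

#include "api/video_codecs/video_encoder.h"

webrtc::VideoEncoder::EncoderInfo MakeEncoderInfo() {
  webrtc::VideoEncoder::EncoderInfo info;
  // Spatial layer 0: TL0 at ~50% of the full rate, TL0+TL1 at 100%.
  info.fps_allocation[0] = {255 / 2, 255};
  return info;
}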

@@ -243,8 +243,8 @@ class RTC_EXPORT VideoEncoder {
// Recommended bitrate limits for different resolutions.
std::vector<ResolutionBitrateLimits> resolution_bitrate_limits;
// Obtains the limits from |resolution_bitrate_limits| that best matches the
// |frame_size_pixels|.
// Obtains the limits from `resolution_bitrate_limits` that best matches the
// `frame_size_pixels`.
absl::optional<ResolutionBitrateLimits>
GetEncoderBitrateLimitsForResolution(int frame_size_pixels) const;

@@ -279,7 +279,7 @@ class RTC_EXPORT VideoEncoder {
VideoBitrateAllocation bitrate;
// Target framerate, in fps. A value <= 0.0 is invalid and should be
// interpreted as framerate target not available. In this case the encoder
// should fall back to the max framerate specified in |codec_settings| of
// should fall back to the max framerate specified in `codec_settings` of
// the last InitEncode() call.
double framerate_fps;
// The network bandwidth available for video. This is at least

@@ -299,15 +299,15 @@ class RTC_EXPORT VideoEncoder {
uint32_t timestamp_of_last_received;
// Describes whether the dependencies of the last received frame were
// all decodable.
// |false| if some dependencies were undecodable, |true| if all dependencies
// were decodable, and |nullopt| if the dependencies are unknown.
// `false` if some dependencies were undecodable, `true` if all dependencies
// were decodable, and `nullopt` if the dependencies are unknown.
absl::optional<bool> dependencies_of_last_received_decodable;
// Describes whether the received frame was decodable.
// |false| if some dependency was undecodable or if some packet belonging
// `false` if some dependency was undecodable or if some packet belonging
// to the last received frame was missed.
// |true| if all dependencies were decodable and all packets belonging
// `true` if all dependencies were decodable and all packets belonging
// to the last received frame were received.
// |nullopt| if no packet belonging to the last frame was missed, but the
// `nullopt` if no packet belonging to the last frame was missed, but the
// last packet in the frame was not yet received.
absl::optional<bool> last_received_decodable;
};

@@ -24,7 +24,7 @@
namespace webrtc {
// The |VideoStream| struct describes a simulcast layer, or "stream".
// The `VideoStream` struct describes a simulcast layer, or "stream".
struct VideoStream {
VideoStream();
~VideoStream();

@@ -46,7 +46,7 @@ struct VideoStream {
int max_bitrate_bps;
// Scaling factor applied to the stream size.
// |width| and |height| values are already scaled down.
// `width` and `height` values are already scaled down.
double scale_resolution_down_by;
// Maximum Quantization Parameter to use when encoding the stream.

@@ -171,7 +171,7 @@ class VideoEncoderConfig {
// The simulcast layer's configurations set by the application for this video
// sender. These are modified by the video_stream_factory before being passed
// down to lower layers for the video encoding.
// |simulcast_layers| is also used for configuring non-simulcast (when there
// `simulcast_layers` is also used for configuring non-simulcast (when there
// is a single VideoStream).
std::vector<VideoStream> simulcast_layers;

@@ -29,7 +29,7 @@ class VideoEncoderFactory {
public:
// TODO(magjed): Try to get rid of this struct.
struct CodecInfo {
// |has_internal_source| is true if encoders created by this factory of the
// `has_internal_source` is true if encoders created by this factory of the
// given codec will use internal camera sources, meaning that they don't
// require/expect frames to be delivered via webrtc::VideoEncoder::Encode.
// This flag is used as the internal_source parameter to

@@ -88,7 +88,7 @@ class VideoEncoderFactory {
// power efficient, which is currently interpreted as if there is support for
// hardware acceleration.
// See https://w3c.github.io/webrtc-svc/#scalabilitymodes* for a specification
// of valid values for |scalability_mode|.
// of valid values for `scalability_mode`.
// NOTE: QueryCodecSupport is currently an experimental feature that is
// subject to change without notice.
virtual CodecSupport QueryCodecSupport(

@@ -39,8 +39,8 @@ namespace {
// If forced fallback is allowed, either:
//
// 1) The forced fallback is requested if the resolution is less than or equal
// to |max_pixels_|. The resolution is allowed to be scaled down to
// |min_pixels_|.
// to `max_pixels_`. The resolution is allowed to be scaled down to
// `min_pixels_`.
//
// 2) The forced fallback is requested if temporal support is preferred and the
// SW fallback supports temporal layers while the HW encoder does not.

@@ -274,8 +274,8 @@ bool VideoEncoderSoftwareFallbackWrapper::InitFallbackEncoder(bool is_forced) {
void VideoEncoderSoftwareFallbackWrapper::SetFecControllerOverride(
FecControllerOverride* fec_controller_override) {
// It is important that only one of those would ever interact with the
// |fec_controller_override| at a given time. This is the responsibility
// of |this| to maintain.
// `fec_controller_override` at a given time. This is the responsibility
// of `this` to maintain.
fec_controller_override_ = fec_controller_override;
current_encoder()->SetFecControllerOverride(fec_controller_override);

@@ -32,7 +32,7 @@ CreateVideoEncoderSoftwareFallbackWrapper(
bool prefer_temporal_support);
// Default fallback for call-sites not yet updated with
// |prefer_temporal_support|.
// `prefer_temporal_support`.
// TODO(sprang): Remove when usage is gone.
RTC_EXPORT inline std::unique_ptr<VideoEncoder>
CreateVideoEncoderSoftwareFallbackWrapper(

@@ -66,7 +66,7 @@ struct Vp8EncoderConfig {
// Number of active temporal layers. Set to 0 if not used.
uint32_t ts_number_layers;
// Arrays of length |ts_number_layers|, indicating (cumulative) target
// Arrays of length `ts_number_layers`, indicating (cumulative) target
// bitrate and rate decimator (e.g. 4 if every 4th frame is in the given
// layer) for each active temporal layer, starting with temporal id 0.
std::array<uint32_t, kMaxLayers> ts_target_bitrate;

@@ -75,7 +75,7 @@ struct Vp8EncoderConfig {
// The periodicity of the temporal pattern. Set to 0 if not used.
uint32_t ts_periodicity;
// Array of length |ts_periodicity| indicating the sequence of temporal id's
// Array of length `ts_periodicity` indicating the sequence of temporal id's
// to assign to incoming frames.
std::array<uint32_t, kMaxPeriodicity> ts_layer_id;
};
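
To make the `ts_*` fields above concrete, here is an illustrative two-temporal-layer pattern written as free-standing constants (the real struct layout and the bitrate units may differ, so this is only a sketch of the encoding):

#include <cstdint>

// Two temporal layers: cumulative target bitrates, a rate decimator of 2 for
// TL0 (every 2nd frame), and an alternating 0/1 layer-id sequence.
constexpr uint32_t kTsNumberLayers = 2;
constexpr uint32_t kTsTargetBitrate[] = {300, 500};  // TL0, then TL0+TL1.
constexpr uint32_t kTsRateDecimator[] = {2, 1};
constexpr uint32_t kTsPeriodicity = 2;
constexpr uint32_t kTsLayerId[] = {0, 1};  // Frame n belongs to TL(n % 2).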

@@ -106,7 +106,7 @@ class Vp8FrameBufferController {
// The limits are suggestion-only; the controller is allowed to exceed them.
virtual void SetQpLimits(size_t stream_index, int min_qp, int max_qp) = 0;
// Number of streamed controlled by |this|.
// Number of streamed controlled by `this`.
virtual size_t StreamCount() const = 0;
// If this method returns true, the encoder is free to drop frames for

@@ -121,7 +121,7 @@ class Vp8FrameBufferController {
virtual bool SupportsEncoderFrameDropping(size_t stream_index) const = 0;
// New target bitrate for a stream (each entry in
// |bitrates_bps| is for another temporal layer).
// `bitrates_bps` is for another temporal layer).
virtual void OnRatesUpdated(size_t stream_index,
const std::vector<uint32_t>& bitrates_bps,
int framerate_fps) = 0;

@@ -130,7 +130,7 @@ class Vp8FrameBufferController {
// the controller wishes to enact in the encoder's configuration.
// If a value is not overridden, previous overrides are still in effect.
// However, if |Vp8EncoderConfig::reset_previous_configuration_overrides|
// is set to |true|, all previous overrides are reset.
// is set to `true`, all previous overrides are reset.
virtual Vp8EncoderConfig UpdateConfiguration(size_t stream_index) = 0;
// Returns the recommended VP8 encode flags needed.

@@ -142,13 +142,13 @@ class Vp8FrameBufferController {
virtual Vp8FrameConfig NextFrameConfig(size_t stream_index,
uint32_t rtp_timestamp) = 0;
// Called after the encode step is done. |rtp_timestamp| must match the
// Called after the encode step is done. `rtp_timestamp` must match the
// parameter use in the NextFrameConfig() call.
// |is_keyframe| must be true iff the encoder decided to encode this frame as
// `is_keyframe` must be true iff the encoder decided to encode this frame as
// a keyframe.
// If |info| is not null, the encoder may update |info| with codec specific
// data such as temporal id. |qp| should indicate the frame-level QP this
// frame was encoded at. If the encoder does not support extracting this, |qp|
// If `info` is not null, the encoder may update `info` with codec specific
// data such as temporal id. `qp` should indicate the frame-level QP this
// frame was encoded at. If the encoder does not support extracting this, `qp`
// should be set to 0.
virtual void OnEncodeDone(size_t stream_index,
uint32_t rtp_timestamp,

@@ -161,7 +161,7 @@ class Vp8FrameBufferController {
virtual void OnFrameDropped(size_t stream_index, uint32_t rtp_timestamp) = 0;
// Called by the encoder when the packet loss rate changes.
// |packet_loss_rate| runs between 0.0 (no loss) and 1.0 (everything lost).
// `packet_loss_rate` runs between 0.0 (no loss) and 1.0 (everything lost).
virtual void OnPacketLossRateUpdate(float packet_loss_rate) = 0;
// Called by the encoder when the round trip time changes.

@@ -15,9 +15,9 @@
namespace webrtc {
// Creates a proxy source for |source| which makes sure the real
// Creates a proxy source for `source` which makes sure the real
// VideoTrackSourceInterface implementation is destroyed on the signaling thread
// and marshals calls to |worker_thread| and |signaling_thread|.
// and marshals calls to `worker_thread` and `signaling_thread`.
rtc::scoped_refptr<VideoTrackSourceInterface> RTC_EXPORT
CreateVideoTrackSourceProxy(rtc::Thread* signaling_thread,
rtc::Thread* worker_thread,

@@ -56,53 +56,53 @@ class VoipBase {
// Creates a channel.
// Each channel handle maps into one audio media session where each has
// its own separate module for send/receive rtp packet with one peer.
// Caller must set |transport|, webrtc::Transport callback pointer to
// Caller must set `transport`, webrtc::Transport callback pointer to
// receive rtp/rtcp packets from corresponding media session in VoIP engine.
// VoipEngine framework expects applications to handle network I/O directly
// and injection for incoming RTP from remote endpoint is handled via
// VoipNetwork interface. |local_ssrc| is optional and when local_ssrc is not
// VoipNetwork interface. `local_ssrc` is optional and when local_ssrc is not
// set, some random value will be used by voip engine.
// Returns a ChannelId created for caller to handle subsequent Channel
// operations.
virtual ChannelId CreateChannel(Transport* transport,
absl::optional<uint32_t> local_ssrc) = 0;
// Releases |channel_id| that no longer has any use.
// Releases `channel_id` that no longer has any use.
// Returns following VoipResult;
// kOk - |channel_id| is released.
// kInvalidArgument - |channel_id| is invalid.
// kOk - `channel_id` is released.
// kInvalidArgument - `channel_id` is invalid.
// kInternal - Fails to stop audio output device.
virtual VoipResult ReleaseChannel(ChannelId channel_id) = 0;
// Starts sending on |channel_id|. This starts microphone if not started yet.
// Starts sending on `channel_id`. This starts microphone if not started yet.
// Returns following VoipResult;
// kOk - Channel successfully started to send.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
// kFailedPrecondition - Missing prerequisite on VoipCodec::SetSendCodec.
// kInternal - initialization has failed on selected microphone.
virtual VoipResult StartSend(ChannelId channel_id) = 0;
// Stops sending on |channel_id|. If this is the last active channel, it will
// Stops sending on `channel_id`. If this is the last active channel, it will
// stop microphone input from underlying audio platform layer.
// Returns following VoipResult;
// kOk - Channel successfully stopped to send.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
// kInternal - Failed to stop the active microphone device.
virtual VoipResult StopSend(ChannelId channel_id) = 0;
// Starts playing on speaker device for |channel_id|.
// Starts playing on speaker device for `channel_id`.
// This will start underlying platform speaker device if not started.
// Returns following VoipResult;
// kOk - Channel successfully started to play out.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
// kFailedPrecondition - Missing prerequisite on VoipCodec::SetReceiveCodecs.
// kInternal - Failed to initializate the selected speaker device.
virtual VoipResult StartPlayout(ChannelId channel_id) = 0;
// Stops playing on speaker device for |channel_id|.
// Stops playing on speaker device for `channel_id`.
// Returns following VoipResult;
// kOk - Channel successfully stopped t play out.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult StopPlayout(ChannelId channel_id) = 0;
protected:
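
Read together, the comments above imply the following channel lifecycle. This is a hedged sketch in which `voip_base` and `transport` come from the embedding application, and codec setup through VoipCodec (covered in the next hunks) is assumed to have happened before the Start calls:

void RunSimpleCall(webrtc::VoipBase& voip_base, webrtc::Transport* transport) {
  const webrtc::ChannelId channel =
      voip_base.CreateChannel(transport, /*local_ssrc=*/absl::nullopt);
  // VoipCodec::SetSendCodec / SetReceiveCodecs must already be done, or the
  // Start calls below return kFailedPrecondition.
  voip_base.StartSend(channel);
  voip_base.StartPlayout(channel);
  // ... the call runs; RTP/RTCP flows via VoipNetwork and `transport` ...
  voip_base.StopSend(channel);
  voip_base.StopPlayout(channel);
  voip_base.ReleaseChannel(channel);
}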

@@ -31,7 +31,7 @@ class VoipCodec {
// Set encoder type here along with its payload type to use.
// Returns following VoipResult;
// kOk - sending codec is set as provided.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult SetSendCodec(ChannelId channel_id,
int payload_type,
const SdpAudioFormat& encoder_spec) = 0;

@@ -42,7 +42,7 @@ class VoipCodec {
// direction.
// Returns following VoipResult;
// kOk - receiving codecs are set as provided.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult SetReceiveCodecs(
ChannelId channel_id,
const std::map<int, SdpAudioFormat>& decoder_specs) = 0;

@@ -45,20 +45,20 @@ class VoipDtmf {
// type has been negotiated with remote.
// Returns following VoipResult;
// kOk - telephone event type is registered as provided.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult RegisterTelephoneEventType(ChannelId channel_id,
int rtp_payload_type,
int sample_rate_hz) = 0;
// Send DTMF named event as specified by
// https://tools.ietf.org/html/rfc4733#section-3.2
// |duration_ms| specifies the duration of DTMF packets that will be emitted
// `duration_ms` specifies the duration of DTMF packets that will be emitted
// in place of real RTP packets instead.
// Must be called after RegisterTelephoneEventType and VoipBase::StartSend
// have been called.
// Returns following VoipResult;
// kOk - requested DTMF event is successfully scheduled.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
// kFailedPrecondition - Missing prerequisite on RegisterTelephoneEventType
// or sending state.
virtual VoipResult SendDtmfEvent(ChannelId channel_id,

@@ -24,7 +24,7 @@ class VoipNetwork {
// The data received from the network including RTP header is passed here.
// Returns following VoipResult;
// kOk - received RTP packet is processed.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult ReceivedRTPPacket(
ChannelId channel_id,
rtc::ArrayView<const uint8_t> rtp_packet) = 0;

@@ -32,7 +32,7 @@ class VoipNetwork {
// The data received from the network including RTCP header is passed here.
// Returns following VoipResult;
// kOk - received RTCP packet is processed.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult ReceivedRTCPPacket(
ChannelId channel_id,
rtc::ArrayView<const uint8_t> rtcp_packet) = 0;

@@ -75,17 +75,17 @@ struct ChannelStatistics {
// the jitter buffer (NetEq) performance.
class VoipStatistics {
public:
// Gets the audio ingress statistics by |ingress_stats| reference.
// Gets the audio ingress statistics by `ingress_stats` reference.
// Returns following VoipResult;
// kOk - successfully set provided IngressStatistics reference.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult GetIngressStatistics(ChannelId channel_id,
IngressStatistics& ingress_stats) = 0;
// Gets the channel statistics by |channel_stats| reference.
// Gets the channel statistics by `channel_stats` reference.
// Returns following VoipResult;
// kOk - successfully set provided ChannelStatistics reference.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult GetChannelStatistics(ChannelId channel_id,
ChannelStatistics& channel_stats) = 0;

@@ -37,21 +37,21 @@ class VoipVolumeControl {
// mute doesn't affect audio input level and energy values as input sample is
// silenced after the measurement.
// Returns following VoipResult;
// kOk - input source muted or unmuted as provided by |enable|.
// kInvalidArgument - |channel_id| is invalid.
// kOk - input source muted or unmuted as provided by `enable`.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult SetInputMuted(ChannelId channel_id, bool enable) = 0;
// Gets the microphone volume info via |volume_info| reference.
// Gets the microphone volume info via `volume_info` reference.
// Returns following VoipResult;
// kOk - successfully set provided input volume info.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult GetInputVolumeInfo(ChannelId channel_id,
VolumeInfo& volume_info) = 0;
// Gets the speaker volume info via |volume_info| reference.
// Gets the speaker volume info via `volume_info` reference.
// Returns following VoipResult;
// kOk - successfully set provided output volume info.
// kInvalidArgument - |channel_id| is invalid.
// kInvalidArgument - `channel_id` is invalid.
virtual VoipResult GetOutputVolumeInfo(ChannelId channel_id,
VolumeInfo& volume_info) = 0;