Mirror of https://github.com/mollyim/webrtc.git (synced 2025-05-13 05:40:42 +01:00)

Use backticks not vertical bars to denote variables in comments for /modules/video_coding

Bug: webrtc:12338
Change-Id: Ia8a9adea291d594e4f59a6a1203a7bfb0758adac
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227165
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34684}

Parent: 7f854bce1f
Commit: dcd7fc7ea8
83 changed files with 268 additions and 268 deletions
@@ -18,7 +18,7 @@ namespace {
 // The first kIgnoredSampleCount samples will be ignored.
 const int kIgnoredSampleCount = 5;
-// Return the |kPercentile| value in RequiredDecodeTimeMs().
+// Return the `kPercentile` value in RequiredDecodeTimeMs().
 const float kPercentile = 0.95f;
 // The window size in ms.
 const int64_t kTimeLimitMs = 10000;
@@ -30,7 +30,7 @@ VCMCodecTimer::VCMCodecTimer()
 VCMCodecTimer::~VCMCodecTimer() = default;
 
 void VCMCodecTimer::AddTiming(int64_t decode_time_ms, int64_t now_ms) {
-  // Ignore the first |kIgnoredSampleCount| samples.
+  // Ignore the first `kIgnoredSampleCount` samples.
   if (ignored_sample_count_ < kIgnoredSampleCount) {
     ++ignored_sample_count_;
     return;
@@ -40,7 +40,7 @@ class VCMCodecTimer {
   int ignored_sample_count_;
   // Queue with history of latest decode time values.
   std::queue<Sample> history_;
-  // |filter_| contains the same values as |history_|, but in a data structure
+  // `filter_` contains the same values as `history_`, but in a data structure
   // that allows efficient retrieval of the percentile value.
   PercentileFilter<int64_t> filter_;
 };
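For readers unfamiliar with the pattern these comments describe — a time-bounded history queue paired with a percentile structure — here is a minimal illustrative sketch. It is not the WebRTC implementation; the class name is hypothetical, and the 10-second window and 95th percentile merely mirror the constants visible in the hunks above.

    #include <cstdint>
    #include <iterator>
    #include <queue>
    #include <set>

    // Illustrative only: tracks decode times over a sliding 10 s window and
    // answers 95th-percentile queries, the same idea as history_/filter_.
    class DecodeTimePercentileTracker {
     public:
      void Add(int64_t decode_time_ms, int64_t now_ms) {
        history_.push({decode_time_ms, now_ms});
        sorted_.insert(decode_time_ms);
        // Drop samples that fell out of the 10 second window (kTimeLimitMs).
        while (!history_.empty() &&
               now_ms - history_.front().sample_time_ms > 10000) {
          sorted_.erase(sorted_.find(history_.front().decode_time_ms));
          history_.pop();
        }
      }
      // 95th percentile (kPercentile above) of the samples in the window.
      int64_t Percentile95() const {
        if (sorted_.empty())
          return -1;
        auto it = sorted_.begin();
        std::advance(it, static_cast<size_t>(0.95 * (sorted_.size() - 1)));
        return *it;
      }

     private:
      struct Sample {
        int64_t decode_time_ms;
        int64_t sample_time_ms;
      };
      std::queue<Sample> history_;
      std::multiset<int64_t> sorted_;
    };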
@@ -69,9 +69,9 @@ ScopedAVPacket MakeScopedAVPacket() {
 int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
                                   AVFrame* av_frame,
                                   int flags) {
-  // Set in |InitDecode|.
+  // Set in `InitDecode`.
   H264DecoderImpl* decoder = static_cast<H264DecoderImpl*>(context->opaque);
-  // DCHECK values set in |InitDecode|.
+  // DCHECK values set in `InitDecode`.
   RTC_DCHECK(decoder);
   // Necessary capability to be allowed to provide our own buffers.
   RTC_DCHECK(context->codec->capabilities | AV_CODEC_CAP_DR1);
@@ -85,12 +85,12 @@ int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
   // |context->coded_width| due to reordering.
   int width = av_frame->width;
   int height = av_frame->height;
-  // See |lowres|, if used the decoder scales the image by 1/2^(lowres). This
+  // See `lowres`, if used the decoder scales the image by 1/2^(lowres). This
   // has implications on which resolutions are valid, but we don't use it.
   RTC_CHECK_EQ(context->lowres, 0);
-  // Adjust the |width| and |height| to values acceptable by the decoder.
-  // Without this, FFmpeg may overflow the buffer. If modified, |width| and/or
-  // |height| are larger than the actual image and the image has to be cropped
+  // Adjust the `width` and `height` to values acceptable by the decoder.
+  // Without this, FFmpeg may overflow the buffer. If modified, `width` and/or
+  // `height` are larger than the actual image and the image has to be cropped
   // (top-left corner) after decoding to avoid visible borders to the right and
   // bottom of the actual image.
   avcodec_align_dimensions(context, &width, &height);
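The comment block above compresses a subtle FFmpeg contract: allocate using aligned dimensions, then crop the visible frame back afterwards. A hedged sketch of that idea — avcodec_align_dimensions() is a real libavcodec function, while the struct and helper here are illustrative:

    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    struct AlignedSize {
      int aligned_width;   // Allocate with these...
      int aligned_height;
      int visible_width;   // ...but crop the displayed frame back to these.
      int visible_height;
    };

    AlignedSize ComputeAlignedAllocation(AVCodecContext* context,
                                         const AVFrame* av_frame) {
      AlignedSize out;
      out.visible_width = av_frame->width;
      out.visible_height = av_frame->height;
      int w = av_frame->width;
      int h = av_frame->height;
      // FFmpeg may round these up; allocating anything smaller risks the
      // decoder writing past the end of the buffer.
      avcodec_align_dimensions(context, &w, &h);
      out.aligned_width = w;
      out.aligned_height = h;
      return out;
    }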
@@ -105,8 +105,8 @@ int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
     return ret;
   }
 
-  // The video frame is stored in |frame_buffer|. |av_frame| is FFmpeg's version
-  // of a video frame and will be set up to reference |frame_buffer|'s data.
+  // The video frame is stored in `frame_buffer`. `av_frame` is FFmpeg's version
+  // of a video frame and will be set up to reference `frame_buffer`'s data.
 
   // FFmpeg expects the initial allocation to be zero-initialized according to
   // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
@@ -125,7 +125,7 @@ int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
   av_frame->format = context->pix_fmt;
   av_frame->reordered_opaque = context->reordered_opaque;
 
-  // Set |av_frame| members as required by FFmpeg.
+  // Set `av_frame` members as required by FFmpeg.
   av_frame->data[kYPlaneIndex] = frame_buffer->MutableDataY();
   av_frame->linesize[kYPlaneIndex] = frame_buffer->StrideY();
   av_frame->data[kUPlaneIndex] = frame_buffer->MutableDataU();
@@ -152,8 +152,8 @@ int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
 }
 
 void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) {
-  // The buffer pool recycles the buffer used by |video_frame| when there are no
-  // more references to it. |video_frame| is a thin buffer holder and is not
+  // The buffer pool recycles the buffer used by `video_frame` when there are no
+  // more references to it. `video_frame` is a thin buffer holder and is not
   // recycled.
   VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
   delete video_frame;
@@ -208,8 +208,8 @@ int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings,
 
   // Function used by FFmpeg to get buffers to store decoded frames in.
   av_context_->get_buffer2 = AVGetBuffer2;
-  // |get_buffer2| is called with the context, there |opaque| can be used to get
-  // a pointer |this|.
+  // `get_buffer2` is called with the context, there `opaque` can be used to get
+  // a pointer `this`.
   av_context_->opaque = this;
 
   const AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
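The two comment lines changed here document a common FFmpeg idiom: a static C callback recovers its owning C++ object through AVCodecContext::opaque. A minimal sketch, with a hypothetical MyDecoder standing in for H264DecoderImpl (avcodec_default_get_buffer2() is a real libavcodec fallback):

    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    class MyDecoder {
     public:
      static int GetBuffer2(AVCodecContext* context, AVFrame* frame, int flags) {
        // FFmpeg hands back whatever was stored in context->opaque, letting a
        // static callback reach the owning object.
        MyDecoder* self = static_cast<MyDecoder*>(context->opaque);
        (void)self;  // A real decoder would allocate from its buffer pool here.
        return avcodec_default_get_buffer2(context, frame, flags);
      }

      void Attach(AVCodecContext* context) {
        context->get_buffer2 = &MyDecoder::GetBuffer2;
        context->opaque = this;  // Retrieved again inside GetBuffer2().
      }
    };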
@@ -311,7 +311,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   h264_bitstream_parser_.ParseBitstream(input_image);
   absl::optional<int> qp = h264_bitstream_parser_.GetLastSliceQp();
 
-  // Obtain the |video_frame| containing the decoded image.
+  // Obtain the `video_frame` containing the decoded image.
   VideoFrame* input_frame =
       static_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
   RTC_DCHECK(input_frame);
@@ -377,7 +377,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   // interface to pass a VideoFrameBuffer instead of a VideoFrame?
   decoded_image_callback_->Decoded(decoded_frame, absl::nullopt, qp);
 
-  // Stop referencing it, possibly freeing |input_frame|.
+  // Stop referencing it, possibly freeing `input_frame`.
   av_frame_unref(av_frame_.get());
   input_frame = nullptr;
 
@@ -60,8 +60,8 @@ class H264DecoderImpl : public H264Decoder {
   H264DecoderImpl();
   ~H264DecoderImpl() override;
 
-  // If |codec_settings| is NULL it is ignored. If it is not NULL,
-  // |codec_settings->codecType| must be |kVideoCodecH264|.
+  // If `codec_settings` is NULL it is ignored. If it is not NULL,
+  // |codec_settings->codecType| must be `kVideoCodecH264`.
   int32_t InitDecode(const VideoCodec* codec_settings,
                      int32_t number_of_cores) override;
   int32_t Release() override;
@@ -69,7 +69,7 @@ class H264DecoderImpl : public H264Decoder {
   int32_t RegisterDecodeCompleteCallback(
       DecodedImageCallback* callback) override;
 
-  // |missing_frames|, |fragmentation| and |render_time_ms| are ignored.
+  // `missing_frames`, `fragmentation` and `render_time_ms` are ignored.
   int32_t Decode(const EncodedImage& input_image,
                  bool /*missing_frames*/,
                  int64_t render_time_ms = -1) override;
@@ -78,12 +78,12 @@ class H264DecoderImpl : public H264Decoder {
 
  private:
   // Called by FFmpeg when it needs a frame buffer to store decoded frames in.
-  // The |VideoFrame| returned by FFmpeg at |Decode| originate from here. Their
-  // buffers are reference counted and freed by FFmpeg using |AVFreeBuffer2|.
+  // The `VideoFrame` returned by FFmpeg at `Decode` originate from here. Their
+  // buffers are reference counted and freed by FFmpeg using `AVFreeBuffer2`.
   static int AVGetBuffer2(AVCodecContext* context,
                           AVFrame* av_frame,
                           int flags);
-  // Called by FFmpeg when it is done with a video frame, see |AVGetBuffer2|.
+  // Called by FFmpeg when it is done with a video frame, see `AVGetBuffer2`.
   static void AVFreeBuffer2(void* opaque, uint8_t* data);
 
   bool IsInitialized() const;
@@ -92,7 +92,7 @@ class H264DecoderImpl : public H264Decoder {
   void ReportInit();
   void ReportError();
 
-  // Used by ffmpeg via |AVGetBuffer2()| to allocate I420 images.
+  // Used by ffmpeg via `AVGetBuffer2()` to allocate I420 images.
   VideoFrameBufferPool ffmpeg_buffer_pool_;
   // Used to allocate NV12 images if NV12 output is preferred.
   VideoFrameBufferPool output_buffer_pool_;
@@ -88,11 +88,11 @@ VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
 }  // namespace
 
 // Helper method used by H264EncoderImpl::Encode.
-// Copies the encoded bytes from |info| to |encoded_image|. The
+// Copies the encoded bytes from `info` to `encoded_image`. The
 // |encoded_image->_buffer| may be deleted and reallocated if a bigger buffer is
 // required.
 //
-// After OpenH264 encoding, the encoded bytes are stored in |info| spread out
+// After OpenH264 encoding, the encoded bytes are stored in `info` spread out
 // over a number of layers and "NAL units". Each NAL unit is a fragment starting
 // with the four-byte start code {0,0,0,1}. All of this data (including the
 // start codes) is copied to the |encoded_image->_buffer|.
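As a small illustration of the bitstream layout this comment describes, the following helper scans a buffer for the four-byte start codes that delimit NAL units. It is illustrative, not RtpFragmentize itself; note that general H.264 streams may also use three-byte start codes, but the comment above documents the four-byte form OpenH264 emits.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<size_t> FindNalStartOffsets(const uint8_t* data, size_t size) {
      std::vector<size_t> offsets;
      for (size_t i = 0; i + 4 <= size; ++i) {
        // Each NAL unit is preceded by the start code {0, 0, 0, 1}.
        if (data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 0 &&
            data[i + 3] == 1) {
          offsets.push_back(i);
        }
      }
      return offsets;
    }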
@@ -104,7 +104,7 @@ static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
     const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
     for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++fragments_count) {
       RTC_CHECK_GE(layerInfo.pNalLengthInByte[nal], 0);
-      // Ensure |required_capacity| will not overflow.
+      // Ensure `required_capacity` will not overflow.
       RTC_CHECK_LE(layerInfo.pNalLengthInByte[nal],
                    std::numeric_limits<size_t>::max() - required_capacity);
       required_capacity += layerInfo.pNalLengthInByte[nal];
@@ -124,8 +124,8 @@ static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
     // Iterate NAL units making up this layer, noting fragments.
     size_t layer_len = 0;
     for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++frag) {
-      // Because the sum of all layer lengths, |required_capacity|, fits in a
-      // |size_t|, we know that any indices in-between will not overflow.
+      // Because the sum of all layer lengths, `required_capacity`, fits in a
+      // `size_t`, we know that any indices in-between will not overflow.
       RTC_DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 0], start_code[0]);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
@@ -459,7 +459,7 @@ int32_t H264EncoderImpl::Encode(
     }
     if (send_key_frame) {
       // API doc says ForceIntraFrame(false) does nothing, but calling this
-      // function forces a key frame regardless of the |bIDR| argument's value.
+      // function forces a key frame regardless of the `bIDR` argument's value.
       // (If every frame is a key frame we get lag/delays.)
       encoders_[i]->ForceIntraFrame(true);
       configurations_[i].key_frame_request = false;
@@ -485,7 +485,7 @@ int32_t H264EncoderImpl::Encode(
     encoded_images_[i].SetSpatialIndex(configurations_[i].simulcast_idx);
 
     // Split encoded image up into fragments. This also updates
-    // |encoded_image_|.
+    // `encoded_image_`.
     RtpFragmentize(&encoded_images_[i], &info);
 
     // Encoder can skip frames to save bandwidth in which case
@@ -552,8 +552,8 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const {
   // The following parameters are extension parameters (they're in SEncParamExt,
   // not in SEncParamBase).
   encoder_params.bEnableFrameSkip = configurations_[i].frame_dropping_on;
-  // |uiIntraPeriod| - multiple of GOP size
-  // |keyFrameInterval| - number of frames
+  // `uiIntraPeriod` - multiple of GOP size
+  // `keyFrameInterval` - number of frames
   encoder_params.uiIntraPeriod = configurations_[i].key_frame_interval;
   // Reuse SPS id if possible. This helps to avoid reset of chromium HW decoder
   // on each key-frame.
@@ -58,7 +58,7 @@ class H264EncoderImpl : public H264Encoder {
   ~H264EncoderImpl() override;
 
   // |settings.max_payload_size| is ignored.
-  // The following members of |codec_settings| are used. The rest are ignored.
+  // The following members of `codec_settings` are used. The rest are ignored.
   // - codecType (must be kVideoCodecH264)
   // - targetBitrate
   // - maxFramerate
@@ -32,7 +32,7 @@ CreateH264Format(H264Profile profile,
                  const std::string& packetization_mode);
 
 // Set to disable the H.264 encoder/decoder implementations that are provided if
-// |rtc_use_h264| build flag is true (if false, this function does nothing).
+// `rtc_use_h264` build flag is true (if false, this function does nothing).
 // This function should only be called before or during WebRTC initialization
 // and is not thread-safe.
 RTC_EXPORT void DisableRtcUseH264();
@@ -24,7 +24,7 @@ namespace webrtc {
 
 class MultiplexDecoderAdapter : public VideoDecoder {
  public:
-  // |factory| is not owned and expected to outlive this class.
+  // `factory` is not owned and expected to outlive this class.
   MultiplexDecoderAdapter(VideoDecoderFactory* factory,
                           const SdpVideoFormat& associated_format,
                           bool supports_augmenting_data = false);
@@ -33,7 +33,7 @@ enum AlphaCodecStream {
 
 class MultiplexEncoderAdapter : public VideoEncoder {
  public:
-  // |factory| is not owned and expected to outlive this class.
+  // `factory` is not owned and expected to outlive this class.
   MultiplexEncoderAdapter(VideoEncoderFactory* factory,
                           const SdpVideoFormat& associated_format,
                           bool supports_augmenting_data = false);
@@ -25,11 +25,11 @@ namespace webrtc {
 // bitstream data.
 struct MultiplexImageHeader {
   // The number of frame components making up the complete picture data.
-  // For example, |frame_count| = 2 for the case of YUV frame with Alpha frame.
+  // For example, `frame_count` = 2 for the case of YUV frame with Alpha frame.
   uint8_t component_count;
 
   // The increasing image ID given by the encoder. For different components
-  // of a single picture, they have the same |picture_index|.
+  // of a single picture, they have the same `picture_index`.
   uint16_t image_index;
 
   // The location of the first MultiplexImageComponentHeader in the bitstream,
@@ -111,7 +111,7 @@ class MultiplexEncodedImagePacker {
   // Note: It is caller responsibility to release the buffer of the result.
   static EncodedImage PackAndRelease(const MultiplexImage& image);
 
-  // Note: The image components just share the memory with |combined_image|.
+  // Note: The image components just share the memory with `combined_image`.
   static MultiplexImage Unpack(const EncodedImage& combined_image);
 };
 
@@ -22,7 +22,7 @@
 
 namespace webrtc {
 
-// Callback wrapper that helps distinguish returned results from |encoders_|
+// Callback wrapper that helps distinguish returned results from `encoders_`
 // instances.
 class MultiplexEncoderAdapter::AdapterEncodedImageCallback
     : public webrtc::EncodedImageCallback {
@@ -158,7 +158,7 @@ int MultiplexEncoderAdapter::Encode(
   }
 
   // The input image is forwarded as-is, unless it is a native buffer and
-  // |supports_augmented_data_| is true in which case we need to map it in order
+  // `supports_augmented_data_` is true in which case we need to map it in order
   // to access the underlying AugmentedVideoFrameBuffer.
   VideoFrame forwarded_image = input_image;
   if (supports_augmented_data_ &&
@@ -216,7 +216,7 @@ int MultiplexEncoderAdapter::Encode(
       encoders_[kYUVStream]->Encode(forwarded_image, &adjusted_frame_types);
 
   // If we do not receive an alpha frame, we send a single frame for this
-  // |picture_index_|. The receiver will receive |frame_count| as 1 which
+  // `picture_index_`. The receiver will receive `frame_count` as 1 which
   // specifies this case.
   if (rv || !has_alpha)
     return rv;
@@ -259,7 +259,7 @@ void MultiplexEncoderAdapter::SetRates(
   bitrate_allocation.SetBitrate(
       0, 0, parameters.bitrate.GetBitrate(0, 0) - augmenting_data_size_);
   for (auto& encoder : encoders_) {
-    // TODO(emircan): |framerate| is used to calculate duration in encoder
+    // TODO(emircan): `framerate` is used to calculate duration in encoder
    // instances. We report the total frame rate to keep real time for now.
    // Remove this after refactoring duration logic.
     encoder->SetRates(RateControlParameters(
@@ -201,7 +201,7 @@ class TestMultiplexAdapter : public VideoCodecUnitTest,
 };
 
 // TODO(emircan): Currently VideoCodecUnitTest tests do a complete setup
-// step that goes beyond constructing |decoder_|. Simplify these tests to do
+// step that goes beyond constructing `decoder_`. Simplify these tests to do
 // less.
 TEST_P(TestMultiplexAdapter, ConstructAndDestructDecoder) {
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
@@ -83,7 +83,7 @@ class VideoCodecUnitTest : public ::testing::Test {
                     CodecSpecificInfo* codec_specific_info);
 
   // Helper methods for waiting for multiple encoded frames. Caller must
-  // define how many frames are to be waited for via |num_frames| before calling
+  // define how many frames are to be waited for via `num_frames` before calling
   // Encode(). Then, they can expect to retrive them via WaitForEncodedFrames().
   void SetWaitForEncodedFramesThreshold(size_t num_frames);
   bool WaitForEncodedFrames(
@@ -168,7 +168,7 @@ void VideoCodecTestFixtureImpl::Config::SetCodecSettings(
   VideoCodecType codec_type = PayloadStringToCodecType(codec_name);
   webrtc::test::CodecSettings(codec_type, &codec_settings);
 
-  // TODO(brandtr): Move the setting of |width| and |height| to the tests, and
+  // TODO(brandtr): Move the setting of `width` and `height` to the tests, and
   // DCHECK that they are set before initializing the codec instead.
   codec_settings.width = static_cast<uint16_t>(width);
   codec_settings.height = static_cast<uint16_t>(height);
@@ -202,7 +202,7 @@ VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic(
   const size_t target_bitrate_kbps =
       CalcLayerTargetBitrateKbps(first_frame_num, last_frame_num, spatial_idx,
                                  temporal_idx, aggregate_independent_layers);
-  RTC_CHECK_GT(target_bitrate_kbps, 0);  // We divide by |target_bitrate_kbps|.
+  RTC_CHECK_GT(target_bitrate_kbps, 0);  // We divide by `target_bitrate_kbps`.
 
   for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
        ++frame_num) {
@@ -31,7 +31,7 @@ class VideoCodecTestStatsImpl : public VideoCodecTestStats {
   // Creates a FrameStatistics for the next frame to be processed.
   void AddFrame(const FrameStatistics& frame_stat);
 
-  // Returns the FrameStatistics corresponding to |frame_number| or |timestamp|.
+  // Returns the FrameStatistics corresponding to `frame_number` or `timestamp`.
   FrameStatistics* GetFrame(size_t frame_number, size_t spatial_idx);
   FrameStatistics* GetFrameWithTimestamp(size_t timestamp, size_t spatial_idx);
 
@@ -591,7 +591,7 @@ void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame,
 
   // Erase all buffered input frames that we have moved past for all
   // simulcast/spatial layers. Never buffer more than
-  // |kMaxBufferedInputFrames| frames, to protect against long runs of
+  // `kMaxBufferedInputFrames` frames, to protect against long runs of
   // consecutive frame drops for a particular layer.
   const auto min_last_decoded_frame_num = std::min_element(
       last_decoded_frame_num_.cbegin(), last_decoded_frame_num_.cend());
@@ -219,7 +219,7 @@ class VideoProcessor {
   std::vector<std::unique_ptr<VideoProcessorDecodeCompleteCallback>>
       decode_callback_;
 
-  // Each call to ProcessFrame() will read one frame from |input_frame_reader_|.
+  // Each call to ProcessFrame() will read one frame from `input_frame_reader_`.
   FrameReader* const input_frame_reader_;
 
   // Input frames are used as reference for frame quality evaluations.
@@ -310,7 +310,7 @@ void DefaultTemporalLayers::OnRatesUpdated(
   RTC_DCHECK_LT(stream_index, StreamCount());
   RTC_DCHECK_GT(bitrates_bps.size(), 0);
   RTC_DCHECK_LE(bitrates_bps.size(), num_layers_);
-  // |bitrates_bps| uses individual rate per layer, but Vp8EncoderConfig wants
+  // `bitrates_bps` uses individual rate per layer, but Vp8EncoderConfig wants
   // the accumulated rate, so sum them up.
   new_bitrates_bps_ = bitrates_bps;
   new_bitrates_bps_->resize(num_layers_);
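The accumulation this comment refers to is simple but easy to get backwards; a sketch under the stated assumption that layer rates arrive as individual (non-cumulative) values, where temporal layer i's configured rate must include all lower layers:

    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> ToCumulativeBitratesBps(
        const std::vector<uint32_t>& per_layer_bps) {
      std::vector<uint32_t> cumulative(per_layer_bps.size());
      uint32_t sum = 0;
      for (size_t i = 0; i < per_layer_bps.size(); ++i) {
        sum += per_layer_bps[i];  // TL<i> rate includes all lower layers.
        cumulative[i] = sum;
      }
      return cumulative;
    }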
@@ -419,11 +419,11 @@ Vp8FrameConfig DefaultTemporalLayers::NextFrameConfig(size_t stream_index,
   // base-layer references).
   tl_config.layer_sync = IsSyncFrame(tl_config);
 
-  // Increment frame age, this needs to be in sync with |pattern_idx_|,
+  // Increment frame age, this needs to be in sync with `pattern_idx_`,
   // so must update it here. Resetting age to 0 must be done when encoding is
   // complete though, and so in the case of pipelining encoder it might lag.
   // To prevent this data spill over into the next iteration,
-  // the |pedning_frames_| map is reset in loops. If delay is constant,
+  // the `pedning_frames_` map is reset in loops. If delay is constant,
   // the relative age should still be OK for the search order.
   for (size_t& n : frames_since_buffer_refresh_) {
     ++n;
@@ -444,7 +444,7 @@
 
 void DefaultTemporalLayers::ValidateReferences(BufferFlags* flags,
                                                Vp8BufferReference ref) const {
-  // Check if the buffer specified by |ref| is actually referenced, and if so
+  // Check if the buffer specified by `ref` is actually referenced, and if so
   // if it also a dynamically updating one (buffers always just containing
   // keyframes are always safe to reference).
   if ((*flags & BufferFlags::kReference) &&
@@ -552,7 +552,7 @@ void DefaultTemporalLayers::OnEncodeDone(size_t stream_index,
     for (Vp8BufferReference buffer : kAllBuffers) {
       if (is_static_buffer_[BufferToIndex(buffer)]) {
         // Update frame count of all kf-only buffers, regardless of state of
-        // |pending_frames_|.
+        // `pending_frames_`.
         ResetNumFramesSinceBufferRefresh(buffer);
       } else {
         // Key-frames update all buffers, this should be reflected when
@@ -653,8 +653,8 @@ TEST_F(TemporalLayersTest, KeyFrame) {
 
   uint32_t timestamp = 0;
   for (int i = 0; i < 7; ++i) {
-    // Temporal pattern starts from 0 after key frame. Let the first |i| - 1
-    // frames be delta frames, and the |i|th one key frame.
+    // Temporal pattern starts from 0 after key frame. Let the first `i` - 1
+    // frames be delta frames, and the `i`th one key frame.
     for (int j = 1; j <= i; ++j) {
       // Since last frame was always a keyframe and thus index 0 in the pattern,
       // this loop starts at index 1.
@@ -780,7 +780,7 @@ TEST_P(TemporalLayersReferenceTest, ValidFrameConfigs) {
   // of the buffer state; which buffers references which temporal layers (if
   // (any). If a given buffer is never updated, it is legal to reference it
   // even for sync frames. In order to be general, don't assume TL0 always
-  // updates |last|.
+  // updates `last`.
   std::vector<Vp8FrameConfig> tl_configs(kMaxPatternLength);
   for (int i = 0; i < kMaxPatternLength; ++i) {
     Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp_);
@@ -46,7 +46,7 @@ class LibvpxVp8Decoder : public VideoDecoder {
     DeblockParams(int max_level, int degrade_qp, int min_qp)
         : max_level(max_level), degrade_qp(degrade_qp), min_qp(min_qp) {}
     int max_level;   // Deblocking strength: [0, 16].
-    int degrade_qp;  // If QP value is below, start lowering |max_level|.
+    int degrade_qp;  // If QP value is below, start lowering `max_level`.
     int min_qp;      // If QP value is below, turn off deblocking.
   };
 
@@ -107,10 +107,10 @@ bool MaybeSetNewValue(const absl::optional<T>& new_value,
   }
 }
 
-// Adds configuration from |new_config| to |base_config|. Both configs consist
-// of optionals, and only optionals which are set in |new_config| can have
-// an effect. (That is, set values in |base_config| cannot be unset.)
-// Returns |true| iff any changes were made to |base_config|.
+// Adds configuration from `new_config` to `base_config`. Both configs consist
+// of optionals, and only optionals which are set in `new_config` can have
+// an effect. (That is, set values in `base_config` cannot be unset.)
+// Returns `true` iff any changes were made to `base_config`.
 bool MaybeExtendVp8EncoderConfig(const Vp8EncoderConfig& new_config,
                                  Vp8EncoderConfig* base_config) {
   bool changes_made = false;
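A minimal sketch of the merge rule this comment block spells out, using std::optional in place of absl::optional (the body is illustrative, not the WebRTC code): a set value in the new config overrides, an unset one leaves the base untouched, and nothing can be unset.

    #include <optional>

    template <typename T>
    bool MaybeSetNewValue(const std::optional<T>& new_value,
                          std::optional<T>* base_value) {
      // Only a set optional in the new config can change the base config; a
      // value already set in the base config is never cleared.
      if (new_value.has_value() && new_value != *base_value) {
        *base_value = new_value;
        return true;  // A change was made.
      }
      return false;
    }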
@@ -711,7 +711,7 @@ int LibvpxVp8Encoder::GetCpuSpeed(int width, int height) {
 #else
   // For non-ARM, increase encoding complexity (i.e., use lower speed setting)
   // if resolution is below CIF. Otherwise, keep the default/user setting
-  // (|cpu_speed_default_|) set on InitEncode via VP8().complexity.
+  // (`cpu_speed_default_`) set on InitEncode via VP8().complexity.
   if (width * height < 352 * 288)
     return (cpu_speed_default_ < -4) ? -4 : cpu_speed_default_;
   else
@@ -976,8 +976,8 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
     flags[i] = send_key_frame ? VPX_EFLAG_FORCE_KF : EncodeFlags(tl_configs[i]);
   }
 
-  // Scale and map buffers and set |raw_images_| to hold pointers to the result.
-  // Because |raw_images_| are set to hold pointers to the prepared buffers, we
+  // Scale and map buffers and set `raw_images_` to hold pointers to the result.
+  // Because `raw_images_` are set to hold pointers to the prepared buffers, we
   // need to keep these buffers alive through reference counting until after
   // encoding is complete.
   std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers =
@@ -1017,7 +1017,7 @@
   // Set the encoder frame flags and temporal layer_id for each spatial stream.
   // Note that streams are defined starting from lowest resolution at
   // position 0 to highest resolution at position |encoders_.size() - 1|,
-  // whereas |encoder_| is from highest to lowest resolution.
+  // whereas `encoder_` is from highest to lowest resolution.
   for (size_t i = 0; i < encoders_.size(); ++i) {
     const size_t stream_idx = encoders_.size() - 1 - i;
 
@@ -1048,7 +1048,7 @@
          (num_tries == 1 &&
           error == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT)) {
     ++num_tries;
-    // Note we must pass 0 for |flags| field in encode call below since they are
+    // Note we must pass 0 for `flags` field in encode call below since they are
     // set above in |libvpx_interface_->vpx_codec_control_| function for each
     // encoder/spatial layer.
     error = libvpx_->codec_encode(&encoders_[0], &raw_images_[0], timestamp_,
@@ -1237,8 +1237,8 @@ VideoEncoder::EncoderInfo LibvpxVp8Encoder::GetEncoderInfo() const {
                                  VideoFrameBuffer::Type::kNV12};
 
   if (inited_) {
-    // |encoder_idx| is libvpx index where 0 is highest resolution.
-    // |si| is simulcast index, where 0 is lowest resolution.
+    // `encoder_idx` is libvpx index where 0 is highest resolution.
+    // `si` is simulcast index, where 0 is lowest resolution.
     for (size_t si = 0, encoder_idx = encoders_.size() - 1;
          si < encoders_.size(); ++si, --encoder_idx) {
       info.fps_allocation[si].clear();
@@ -1308,7 +1308,7 @@ LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr<VideoFrameBuffer> buffer) {
 
   rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
   if (buffer->type() != VideoFrameBuffer::Type::kNative) {
-    // |buffer| is already mapped.
+    // `buffer` is already mapped.
     mapped_buffer = buffer;
   } else {
     // Attempt to map to one of the supported formats.
@@ -1330,7 +1330,7 @@
     RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
               converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
 
-    // Because |buffer| had to be converted, use |converted_buffer| instead...
+    // Because `buffer` had to be converted, use `converted_buffer` instead...
     buffer = mapped_buffer = converted_buffer;
   }
 
@@ -1349,15 +1349,15 @@
       RTC_NOTREACHED();
   }
 
-  // Prepare |raw_images_| from |mapped_buffer| and, if simulcast, scaled
-  // versions of |buffer|.
+  // Prepare `raw_images_` from `mapped_buffer` and, if simulcast, scaled
+  // versions of `buffer`.
   std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers;
   SetRawImagePlanes(&raw_images_[0], mapped_buffer);
   prepared_buffers.push_back(mapped_buffer);
   for (size_t i = 1; i < encoders_.size(); ++i) {
     // Native buffers should implement optimized scaling and is the preferred
     // buffer to scale. But if the buffer isn't native, it should be cheaper to
-    // scale from the previously prepared buffer which is smaller than |buffer|.
+    // scale from the previously prepared buffer which is smaller than `buffer`.
     VideoFrameBuffer* buffer_to_scale =
         buffer->type() == VideoFrameBuffer::Type::kNative
             ? buffer.get()
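The scaling strategy in this hunk's comments can be summarized as: scale each simulcast layer from the previous, already-downscaled output instead of from the full-resolution input. A hedged sketch of that cascade — Buffer and Scale() are stand-ins, not the WebRTC VideoFrameBuffer API:

    #include <utility>
    #include <vector>

    struct Buffer {
      int width;
      int height;
    };

    // Stand-in for a real scaler; returns a buffer of the requested size.
    Buffer Scale(const Buffer& src, int w, int h) {
      (void)src;
      return Buffer{w, h};
    }

    std::vector<Buffer> PrepareLayers(
        const Buffer& input, const std::vector<std::pair<int, int>>& sizes) {
      std::vector<Buffer> prepared;
      prepared.push_back(input);  // Highest resolution layer, used as-is.
      for (size_t i = 1; i < sizes.size(); ++i) {
        // Scale from the previously prepared (smaller) buffer, not from the
        // full-size input, since fewer pixels need to be read.
        prepared.push_back(
            Scale(prepared.back(), sizes[i].first, sizes[i].second));
      }
      return prepared;
    }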
@@ -83,7 +83,7 @@ class LibvpxVp8Encoder : public VideoEncoder {
   int GetEncodedPartitions(const VideoFrame& input_image,
                            bool retransmission_allowed);
 
-  // Set the stream state for stream |stream_idx|.
+  // Set the stream state for stream `stream_idx`.
   void SetStreamState(bool send_stream, int stream_idx);
 
   uint32_t MaxIntraTarget(uint32_t optimal_buffer_size);
@@ -95,8 +95,8 @@ class LibvpxVp8Encoder : public VideoEncoder {
   bool UpdateVpxConfiguration(size_t stream_index);
 
   void MaybeUpdatePixelFormat(vpx_img_fmt fmt);
-  // Prepares |raw_image_| to reference image data of |buffer|, or of mapped or
-  // scaled versions of |buffer|. Returns a list of buffers that got referenced
+  // Prepares `raw_image_` to reference image data of `buffer`, or of mapped or
+  // scaled versions of `buffer`. Returns a list of buffers that got referenced
   // as a result, allowing the caller to keep references to them until after
   // encoding has finished. On failure to convert the buffer, an empty list is
   // returned.
@@ -255,7 +255,7 @@ void ScreenshareLayers::OnRatesUpdated(
   RTC_DCHECK_GE(bitrates_bps.size(), 1);
   RTC_DCHECK_LE(bitrates_bps.size(), 2);
 
-  // |bitrates_bps| uses individual rates per layer, but we want to use the
+  // `bitrates_bps` uses individual rates per layer, but we want to use the
   // accumulated rate here.
   uint32_t tl0_kbps = bitrates_bps[0] / 1000;
   uint32_t tl1_kbps = tl0_kbps;
@@ -354,7 +354,7 @@ void ScreenshareLayers::OnEncodeDone(size_t stream_index,
     RTC_DCHECK_EQ(vp8_info.referencedBuffersCount, 0u);
     RTC_DCHECK_EQ(vp8_info.updatedBuffersCount, 0u);
 
-    // Note that |frame_config| is not derefernced if |is_keyframe|,
+    // Note that `frame_config` is not derefernced if `is_keyframe`,
     // meaning it's never dereferenced if the optional may be unset.
     for (int i = 0; i < static_cast<int>(Vp8FrameConfig::Buffer::kCount); ++i) {
       bool references = false;
@@ -34,7 +34,7 @@ class VP9Encoder : public VideoEncoder {
   // Deprecated. Returns default implementation using VP9 Profile 0.
   // TODO(emircan): Remove once this is no longer used.
   static std::unique_ptr<VP9Encoder> Create();
-  // Parses VP9 Profile from |codec| and returns the appropriate implementation.
+  // Parses VP9 Profile from `codec` and returns the appropriate implementation.
   static std::unique_ptr<VP9Encoder> Create(const cricket::VideoCodec& codec);
 
   ~VP9Encoder() override {}
@@ -240,7 +240,7 @@ int LibvpxVp9Decoder::Decode(const EncodedImage& input_image,
     buffer = nullptr;  // Triggers full frame concealment.
   }
   // During decode libvpx may get and release buffers from
-  // |libvpx_buffer_pool_|. In practice libvpx keeps a few (~3-4) buffers alive
+  // `libvpx_buffer_pool_`. In practice libvpx keeps a few (~3-4) buffers alive
   // at a time.
   if (vpx_codec_decode(decoder_, buffer,
                        static_cast<unsigned int>(input_image.size()), 0,
@@ -273,7 +273,7 @@ int LibvpxVp9Decoder::ReturnFrame(
     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
   }
 
-  // This buffer contains all of |img|'s image data, a reference counted
+  // This buffer contains all of `img`'s image data, a reference counted
   // Vp9FrameBuffer. (libvpx is done with the buffers after a few
   // vpx_codec_decode calls or vpx_codec_destroy).
   rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer> img_buffer =
@@ -310,7 +310,7 @@
           img->stride[VPX_PLANE_V],
           // WrappedI420Buffer's mechanism for allowing the release of its
           // frame buffer is through a callback function. This is where we
-          // should release |img_buffer|.
+          // should release `img_buffer`.
           [img_buffer] {});
     }
   } else if (img->fmt == VPX_IMG_FMT_I444) {
@@ -321,7 +321,7 @@
         img->stride[VPX_PLANE_V],
         // WrappedI444Buffer's mechanism for allowing the release of its
         // frame buffer is through a callback function. This is where we
-        // should release |img_buffer|.
+        // should release `img_buffer`.
         [img_buffer] {});
   } else {
     RTC_LOG(LS_ERROR)
@@ -373,7 +373,7 @@ int LibvpxVp9Decoder::Release() {
   if (decoder_ != nullptr) {
     if (inited_) {
       // When a codec is destroyed libvpx will release any buffers of
-      // |libvpx_buffer_pool_| it is currently using.
+      // `libvpx_buffer_pool_` it is currently using.
       if (vpx_codec_destroy(decoder_)) {
         ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
       }
@@ -1041,7 +1041,7 @@ int LibvpxVp9Encoder::Encode(const VideoFrame& input_image,
   // doing this.
   input_image_ = &input_image;
 
-  // In case we need to map the buffer, |mapped_buffer| is used to keep it alive
+  // In case we need to map the buffer, `mapped_buffer` is used to keep it alive
   // through reference counting until after encoding has finished.
   rtc::scoped_refptr<const VideoFrameBuffer> mapped_buffer;
   const I010BufferInterface* i010_buffer;
@@ -1888,7 +1888,7 @@ rtc::scoped_refptr<VideoFrameBuffer> LibvpxVp9Encoder::PrepareBufferForProfile0(
 
   rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
   if (buffer->type() != VideoFrameBuffer::Type::kNative) {
-    // |buffer| is already mapped.
+    // `buffer` is already mapped.
     mapped_buffer = buffer;
   } else {
     // Attempt to map to one of the supported formats.
@@ -1910,11 +1910,11 @@ rtc::scoped_refptr<VideoFrameBuffer> LibvpxVp9Encoder::PrepareBufferForProfile0(
     RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
               converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
 
-    // Because |buffer| had to be converted, use |converted_buffer| instead.
+    // Because `buffer` had to be converted, use `converted_buffer` instead.
     buffer = mapped_buffer = converted_buffer;
   }
 
-  // Prepare |raw_| from |mapped_buffer|.
+  // Prepare `raw_` from `mapped_buffer`.
   switch (mapped_buffer->type()) {
     case VideoFrameBuffer::Type::kI420:
     case VideoFrameBuffer::Type::kI420A: {
@@ -103,8 +103,8 @@ class LibvpxVp9Encoder : public VP9Encoder {
   size_t SteadyStateSize(int sid, int tid);
 
   void MaybeRewrapRawWithFormat(const vpx_img_fmt fmt);
-  // Prepares |raw_| to reference image data of |buffer|, or of mapped or scaled
-  // versions of |buffer|. Returns the buffer that got referenced as a result,
+  // Prepares `raw_` to reference image data of `buffer`, or of mapped or scaled
+  // versions of `buffer`. Returns the buffer that got referenced as a result,
   // allowing the caller to keep a reference to it until after encoding has
   // finished. On failure to convert the buffer, null is returned.
   rtc::scoped_refptr<VideoFrameBuffer> PrepareBufferForProfile0(
@@ -202,9 +202,9 @@ class LibvpxVp9Encoder : public VP9Encoder {
   // Flags that can affect speed vs quality tradeoff, and are configureable per
   // resolution ranges.
   struct PerformanceFlags {
-    // If false, a lookup will be made in |settings_by_resolution| base on the
+    // If false, a lookup will be made in `settings_by_resolution` base on the
     // highest currently active resolution, and the overall speed then set to
-    // to the |base_layer_speed| matching that entry.
+    // to the `base_layer_speed` matching that entry.
     // If true, each active resolution will have it's speed and deblock_mode set
     // based on it resolution, and the high layer speed configured for non
     // base temporal layer frames.
@@ -223,9 +223,9 @@
     // setting B at wvga and above, you'd use map {{0, A}, {230400, B}}.
     std::map<int, ParameterSet> settings_by_resolution;
   };
-  // Performance flags, ordered by |min_pixel_count|.
+  // Performance flags, ordered by `min_pixel_count`.
   const PerformanceFlags performance_flags_;
-  // Caching of of |speed_configs_|, where index i maps to the resolution as
+  // Caching of of `speed_configs_`, where index i maps to the resolution as
   // specified in |codec_.spatialLayer[i]|.
   std::vector<PerformanceFlags::ParameterSet>
       performance_flags_by_spatial_index_;
@@ -538,7 +538,7 @@ TEST(Vp9ImplTest, EnableDisableSpatialLayersWithSvcController) {
         bitrate_allocation, codec_settings.maxFramerate));
 
     frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
-    // With |sl_idx| spatial layer disabled, there are |sl_idx| spatial layers
+    // With `sl_idx` spatial layer disabled, there are `sl_idx` spatial layers
     // left.
     ASSERT_THAT(frames, SizeIs(num_frames_to_encode * sl_idx));
     for (size_t i = 0; i < frames.size(); ++i) {
@@ -44,7 +44,7 @@ bool Vp9FrameBufferPool::InitializeVpxUsePool(
           &Vp9FrameBufferPool::VpxGetFrameBuffer,
           // Called by libvpx when it no longer uses a frame buffer.
           &Vp9FrameBufferPool::VpxReleaseFrameBuffer,
-          // |this| will be passed as |user_priv| to VpxGetFrameBuffer.
+          // `this` will be passed as `user_priv` to VpxGetFrameBuffer.
          this)) {
     // Failed to configure libvpx to use Vp9FrameBufferPool.
     return false;
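For context, the libvpx side of this registration looks roughly as follows. vpx_codec_set_frame_buffer_functions() and the callback signatures are the real libvpx API; the malloc-based callbacks are only a sketch of what the real pool does with recycled, reference-counted buffers.

    #include <cstdint>
    #include <cstdlib>
    #include <vpx/vpx_decoder.h>
    #include <vpx/vpx_frame_buffer.h>

    int GetFrameBuffer(void* user_priv, size_t min_size,
                       vpx_codec_frame_buffer_t* fb) {
      (void)user_priv;  // Would be the buffer pool in real code.
      fb->data = static_cast<uint8_t*>(calloc(min_size, 1));
      fb->size = min_size;
      fb->priv = nullptr;  // Real code stashes a handle for the release call.
      return fb->data != nullptr ? 0 : -1;
    }

    int ReleaseFrameBuffer(void* user_priv, vpx_codec_frame_buffer_t* fb) {
      (void)user_priv;
      free(fb->data);
      fb->data = nullptr;
      return 0;
    }

    bool UseExternalBuffers(vpx_codec_ctx_t* ctx, void* pool) {
      // `pool` is handed back as `user_priv` in both callbacks.
      return vpx_codec_set_frame_buffer_functions(ctx, &GetFrameBuffer,
                                                  &ReleaseFrameBuffer,
                                                  pool) == VPX_CODEC_OK;
    }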
@@ -152,11 +152,11 @@ int32_t Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
   rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
   fb->data = buffer->GetData();
   fb->size = buffer->GetDataSize();
-  // Store Vp9FrameBuffer* in |priv| for use in VpxReleaseFrameBuffer.
-  // This also makes vpx_codec_get_frame return images with their |fb_priv| set
-  // to |buffer| which is important for external reference counting.
-  // Release from refptr so that the buffer's |ref_count_| remains 1 when
-  // |buffer| goes out of scope.
+  // Store Vp9FrameBuffer* in `priv` for use in VpxReleaseFrameBuffer.
+  // This also makes vpx_codec_get_frame return images with their `fb_priv` set
+  // to `buffer` which is important for external reference counting.
+  // Release from refptr so that the buffer's `ref_count_` remains 1 when
+  // `buffer` goes out of scope.
   fb->priv = static_cast<void*>(buffer.release());
   return 0;
 }
@@ -171,7 +171,7 @@ int32_t Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
     buffer->Release();
     // When libvpx fails to decode and you continue to try to decode (and fail)
     // libvpx can for some reason try to release the same buffer multiple times.
-    // Setting |priv| to null protects against trying to Release multiple times.
+    // Setting `priv` to null protects against trying to Release multiple times.
     fb->priv = nullptr;
   }
   return 0;
@@ -83,7 +83,7 @@ class Vp9FrameBufferPool {
   // buffers used to decompress frames. This is only supported for VP9.
   bool InitializeVpxUsePool(vpx_codec_ctx* vpx_codec_context);
 
-  // Gets a frame buffer of at least |min_size|, recycling an available one or
+  // Gets a frame buffer of at least `min_size`, recycling an available one or
   // creating a new one. When no longer referenced from the outside the buffer
   // becomes recyclable.
   rtc::scoped_refptr<Vp9FrameBuffer> GetFrameBuffer(size_t min_size);
@@ -99,10 +99,10 @@
 
   // InitializeVpxUsePool configures libvpx to call this function when it needs
   // a new frame buffer. Parameters:
-  // |user_priv| Private data passed to libvpx, InitializeVpxUsePool sets it up
+  // `user_priv` Private data passed to libvpx, InitializeVpxUsePool sets it up
   //             to be a pointer to the pool.
-  // |min_size|  Minimum size needed by libvpx (to decompress a frame).
-  // |fb|        Pointer to the libvpx frame buffer object, this is updated to
+  // `min_size`  Minimum size needed by libvpx (to decompress a frame).
+  // `fb`        Pointer to the libvpx frame buffer object, this is updated to
   //             use the pool's buffer.
   // Returns 0 on success. Returns < 0 on failure.
   static int32_t VpxGetFrameBuffer(void* user_priv,
@@ -111,15 +111,15 @@
 
   // InitializeVpxUsePool configures libvpx to call this function when it has
   // finished using one of the pool's frame buffer. Parameters:
-  // |user_priv| Private data passed to libvpx, InitializeVpxUsePool sets it up
+  // `user_priv` Private data passed to libvpx, InitializeVpxUsePool sets it up
   //             to be a pointer to the pool.
-  // |fb|        Pointer to the libvpx frame buffer object, its |priv| will be
+  // `fb`        Pointer to the libvpx frame buffer object, its `priv` will be
   //             a pointer to one of the pool's Vp9FrameBuffer.
   static int32_t VpxReleaseFrameBuffer(void* user_priv,
                                        vpx_codec_frame_buffer* fb);
 
  private:
-  // Protects |allocated_buffers_|.
+  // Protects `allocated_buffers_`.
   mutable Mutex buffers_lock_;
   // All buffers, in use or ready to be recycled.
   std::vector<rtc::scoped_refptr<Vp9FrameBuffer>> allocated_buffers_
@@ -40,7 +40,7 @@ class VCMDecoderDataBase {
   bool DeregisterReceiveCodec(uint8_t payload_type);
 
   // Returns a decoder specified by frame.PayloadType. The decoded frame
-  // callback of the decoder is set to |decoded_frame_callback|. If no such
+  // callback of the decoder is set to `decoded_frame_callback`. If no such
   // decoder already exists an instance will be created and initialized.
   // nullptr is returned if no decoder with the specified payload type was found
   // and the function failed to create one.
@@ -130,7 +130,7 @@ int DEPRECATED_NackModule::OnReceivedPacket(uint16_t seq_num,
     return 0;
   }
 
-  // Since the |newest_seq_num_| is a packet we have actually received we know
+  // Since the `newest_seq_num_` is a packet we have actually received we know
   // that packet has never been Nacked.
   if (seq_num == newest_seq_num_)
     return 0;
@@ -93,7 +93,7 @@ class DEPRECATED_NackModule : public Module {
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
   // Returns how many packets we have to wait in order to receive the packet
-  // with probability |probabilty| or higher.
+  // with probability `probabilty` or higher.
   int WaitNumberOfPackets(float probability) const
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
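A hedged sketch of the idea behind WaitNumberOfPackets(): given an empirical distribution of how far behind reordered packets arrive, pick the smallest wait whose cumulative probability reaches the requested level. This is illustrative only; the real module derives its statistics from observed sequence numbers rather than taking a histogram argument.

    #include <vector>

    int WaitNumberOfPacketsSketch(const std::vector<float>& reorder_histogram,
                                  float probability) {
      float cumulative = 0.0f;
      for (size_t i = 0; i < reorder_histogram.size(); ++i) {
        cumulative += reorder_histogram[i];
        if (cumulative >= probability)
          return static_cast<int>(i);  // Wait this many packets.
      }
      return static_cast<int>(reorder_histogram.size());
    }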
@@ -103,7 +103,7 @@ class DEPRECATED_NackModule : public Module {
   KeyFrameRequestSender* const keyframe_request_sender_;
 
   // TODO(philipel): Some of the variables below are consistently used on a
-  // known thread (e.g. see |initialized_|). Those probably do not need
+  // known thread (e.g. see `initialized_`). Those probably do not need
   // synchronized access.
   std::map<uint16_t, NackInfo, DescendingSeqNumComp<uint16_t>> nack_list_
       RTC_GUARDED_BY(mutex_);
@@ -37,7 +37,7 @@ class EventWrapper {
   // be released. It is possible that multiple (random) threads are released
   // Depending on timing.
   //
-  // |max_time_ms| is the maximum time to wait in milliseconds.
+  // `max_time_ms` is the maximum time to wait in milliseconds.
   virtual EventTypeWrapper Wait(int max_time_ms) = 0;
 };
 
@@ -125,17 +125,17 @@ uint32_t FecControllerDefault::UpdateFecRates(
     // Get the FEC code rate for Delta frames (set to 0 when NA).
     delta_fec_params.fec_rate =
         loss_prot_logic_->SelectedMethod()->RequiredProtectionFactorD();
-    // The RTP module currently requires the same |max_fec_frames| for both
+    // The RTP module currently requires the same `max_fec_frames` for both
     // key and delta frames.
     delta_fec_params.max_fec_frames =
         loss_prot_logic_->SelectedMethod()->MaxFramesFec();
     key_fec_params.max_fec_frames =
         loss_prot_logic_->SelectedMethod()->MaxFramesFec();
   }
-  // Set the FEC packet mask type. |kFecMaskBursty| is more effective for
+  // Set the FEC packet mask type. `kFecMaskBursty` is more effective for
   // consecutive losses and little/no packet re-ordering. As we currently
   // do not have feedback data on the degree of correlated losses and packet
-  // re-ordering, we keep default setting to |kFecMaskRandom| for now.
+  // re-ordering, we keep default setting to `kFecMaskRandom` for now.
   delta_fec_params.fec_mask_type = kFecMaskRandom;
   key_fec_params.fec_mask_type = kFecMaskRandom;
   // Update protection callback with protection settings.
@@ -133,7 +133,7 @@ int64_t FrameBuffer::FindNextFrame(int64_t now_ms) {
   int64_t wait_ms = latest_return_time_ms_ - now_ms;
   frames_to_decode_.clear();
 
-  // |last_continuous_frame_| may be empty below, but nullopt is smaller
+  // `last_continuous_frame_` may be empty below, but nullopt is smaller
   // than everything else and loop will immediately terminate as expected.
   for (auto frame_it = frames_.begin();
        frame_it != frames_.end() && frame_it->first <= last_continuous_frame_;
@@ -232,7 +232,7 @@
 EncodedFrame* FrameBuffer::GetNextFrame() {
   RTC_DCHECK_RUN_ON(&callback_checker_);
   int64_t now_ms = clock_->TimeInMilliseconds();
-  // TODO(ilnik): remove |frames_out| use frames_to_decode_ directly.
+  // TODO(ilnik): remove `frames_out` use frames_to_decode_ directly.
   std::vector<EncodedFrame*> frames_out;
 
   RTC_DCHECK(!frames_to_decode_.empty());
@@ -549,13 +549,13 @@ bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
   auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
   RTC_DCHECK(!last_decoded_frame || *last_decoded_frame < info->first);
 
-  // In this function we determine how many missing dependencies this |frame|
-  // has to become continuous/decodable. If a frame that this |frame| depend
+  // In this function we determine how many missing dependencies this `frame`
+  // has to become continuous/decodable. If a frame that this `frame` depend
   // on has already been decoded then we can ignore that dependency since it has
   // already been fulfilled.
   //
-  // For all other frames we will register a backwards reference to this |frame|
-  // so that |num_missing_continuous| and |num_missing_decodable| can be
+  // For all other frames we will register a backwards reference to this `frame`
+  // so that `num_missing_continuous` and `num_missing_decodable` can be
   // decremented as frames become continuous/are decoded.
   struct Dependency {
     int64_t frame_id;
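The bookkeeping this comment block describes amounts to per-frame missing-dependency counters plus backwards references; a simplified sketch of the decrement step (illustrative, not the FrameBuffer code):

    #include <cstdint>
    #include <map>
    #include <vector>

    struct FrameInfoSketch {
      int num_missing_decodable = 0;
      std::vector<int64_t> dependent_frames;  // Backwards references.
    };

    void OnFrameDecodable(int64_t frame_id,
                          std::map<int64_t, FrameInfoSketch>* frames) {
      for (int64_t dependent : (*frames)[frame_id].dependent_frames) {
        auto it = frames->find(dependent);
        if (it != frames->end() &&
            --it->second.num_missing_decodable == 0) {
          // `dependent` now has all of its dependencies fulfilled and can be
          // considered for decoding.
        }
      }
    }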
@@ -565,9 +565,9 @@
 
   // Find all dependencies that have not yet been fulfilled.
   for (size_t i = 0; i < frame.num_references; ++i) {
-    // Does |frame| depend on a frame earlier than the last decoded one?
+    // Does `frame` depend on a frame earlier than the last decoded one?
     if (last_decoded_frame && frame.references[i] <= *last_decoded_frame) {
-      // Was that frame decoded? If not, this |frame| will never become
+      // Was that frame decoded? If not, this `frame` will never become
       // decodable.
       if (!decoded_frames_history_.WasDecoded(frame.references[i])) {
         int64_t now_ms = clock_->TimeInMilliseconds();
@@ -61,7 +61,7 @@ class FrameBuffer {
   int64_t InsertFrame(std::unique_ptr<EncodedFrame> frame);
 
   // Get the next frame for decoding. Will return at latest after
-  // |max_wait_time_ms|.
+  // `max_wait_time_ms`.
   void NextFrame(
       int64_t max_wait_time_ms,
       bool keyframe_required,
@@ -116,7 +116,7 @@
 
   using FrameMap = std::map<int64_t, FrameInfo>;
 
-  // Check that the references of |frame| are valid.
+  // Check that the references of `frame` are valid.
   bool ValidReferences(const EncodedFrame& frame) const;
 
   int64_t FindNextFrame(int64_t now_ms) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
@@ -134,9 +134,9 @@
   void PropagateDecodability(const FrameInfo& info)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
-  // Update the corresponding FrameInfo of |frame| and all FrameInfos that
-  // |frame| references.
-  // Return false if |frame| will never be decodable, true otherwise.
+  // Update the corresponding FrameInfo of `frame` and all FrameInfos that
+  // `frame` references.
+  // Return false if `frame` will never be decodable, true otherwise.
   bool UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
                                         FrameMap::iterator info)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
@@ -214,7 +214,7 @@ void VCMDecodedFrameCallback::Map(uint32_t timestamp,
     MutexLock lock(&lock_);
     int initial_size = _timestampMap.Size();
     _timestampMap.Add(timestamp, frameInfo);
-    // If no frame is dropped, the new size should be |initial_size| + 1
+    // If no frame is dropped, the new size should be `initial_size` + 1
     dropped_frames = (initial_size + 1) - _timestampMap.Size();
   }
   if (dropped_frames > 0) {
@@ -48,7 +48,7 @@ class VCMDecodedFrameCallback : public DecodedImageCallback {
 
  private:
   SequenceChecker construction_thread_;
-  // Protect |_timestampMap|.
+  // Protect `_timestampMap`.
   Clock* const _clock;
   // This callback must be set before the decoder thread starts running
   // and must only be unset when external threads (e.g decoder thread)
@@ -63,12 +63,12 @@
   // Set by the field trial WebRTC-SlowDownDecoder to simulate a slow decoder.
   FieldTrialOptional<TimeDelta> _extra_decode_time;
 
-  // Set by the field trial WebRTC-LowLatencyRenderer. The parameter |enabled|
+  // Set by the field trial WebRTC-LowLatencyRenderer. The parameter `enabled`
   // determines if the low-latency renderer algorithm should be used for the
   // case min playout delay=0 and max playout delay>0.
   FieldTrialParameter<bool> low_latency_renderer_enabled_;
   // Set by the field trial WebRTC-LowLatencyRenderer. The parameter
-  // |include_predecode_buffer| determines if the predecode buffer should be
+  // `include_predecode_buffer` determines if the predecode buffer should be
   // taken into account when calculating maximum number of frames in composition
   // queue.
   FieldTrialParameter<bool> low_latency_renderer_include_predecode_buffer_;
@@ -102,7 +102,7 @@ H264SpsPpsTracker::FixedBitstream H264SpsPpsTracker::CopyAndFixBitstream(
     video_header->height = sps->second.height;
 
     // If the SPS/PPS was supplied out of band then we will have saved
-    // the actual bitstream in |data|.
+    // the actual bitstream in `data`.
     if (sps->second.data && pps->second.data) {
       RTC_DCHECK_GT(sps->second.size, 0);
       RTC_DCHECK_GT(pps->second.size, 0);
@@ -35,7 +35,7 @@ class H264SpsPpsTracker {
   H264SpsPpsTracker();
   ~H264SpsPpsTracker();
 
-  // Returns fixed bitstream and modifies |video_header|.
+  // Returns fixed bitstream and modifies `video_header`.
   FixedBitstream CopyAndFixBitstream(rtc::ArrayView<const uint8_t> bitstream,
                                      RTPVideoHeader* video_header);
 
@@ -36,12 +36,12 @@ struct CodecSpecificInfoVP8 {
   int8_t keyIdx;  // Negative value to skip keyIdx.
 
   // Used to generate the list of dependency frames.
-  // |referencedBuffers| and |updatedBuffers| contain buffer IDs.
+  // `referencedBuffers` and `updatedBuffers` contain buffer IDs.
   // Note that the buffer IDs here have a one-to-one mapping with the actual
   // codec buffers, but the exact mapping (i.e. whether 0 refers to Last,
   // to Golden or to Arf) is not pre-determined.
   // More references may be specified than are strictly necessary, but not less.
-  // TODO(bugs.webrtc.org/10242): Remove |useExplicitDependencies| once all
+  // TODO(bugs.webrtc.org/10242): Remove `useExplicitDependencies` once all
   // encoder-wrappers are updated.
   bool useExplicitDependencies;
   static constexpr size_t kBuffersCount = 3;
@ -134,10 +134,10 @@ class VideoCodingModule : public Module {
|
|||
|
||||
// Sets the maximum number of sequence numbers that we are allowed to NACK
|
||||
// and the oldest sequence number that we will consider to NACK. If a
|
||||
// sequence number older than |max_packet_age_to_nack| is missing
|
||||
// sequence number older than `max_packet_age_to_nack` is missing
|
||||
// a key frame will be requested. A key frame will also be requested if the
|
||||
// time of incomplete or non-continuous frames in the jitter buffer is above
|
||||
// |max_incomplete_time_ms|.
|
||||
// `max_incomplete_time_ms`.
|
||||
virtual void SetNackSettings(size_t max_nack_list_size,
|
||||
int max_packet_age_to_nack,
|
||||
int max_incomplete_time_ms) = 0;
|
||||
|
|
|
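As a usage sketch of the interface above (the numeric values are assumptions for illustration, not recommendations): limit the NACK list to 250 sequence numbers, request a key frame once a missing packet is more than 450 packets old, and tolerate at most 1000 ms of incomplete frames.

// Hypothetical configuration; `vcm` is a VideoCodingModule*.
vcm->SetNackSettings(/*max_nack_list_size=*/250,
                     /*max_packet_age_to_nack=*/450,
                     /*max_incomplete_time_ms=*/1000);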
@@ -32,8 +32,8 @@ namespace webrtc {

enum {
// Timing frames settings. Timing frames are sent every
// |kDefaultTimingFramesDelayMs|, or if the frame is at least
// |kDefaultOutliserFrameSizePercent| in size of average frame.
// `kDefaultTimingFramesDelayMs`, or if the frame is at least
// `kDefaultOutliserFrameSizePercent` in size of average frame.
kDefaultTimingFramesDelayMs = 200,
kDefaultOutlierFrameSizePercent = 500,
// Maximum number of frames for what we store encode start timing information.

@@ -210,8 +210,8 @@ int VCMJitterBuffer::num_duplicated_packets() const {
return num_duplicated_packets_;
}

// Returns immediately or a |max_wait_time_ms| ms event hang waiting for a
// complete frame, |max_wait_time_ms| decided by caller.
// Returns immediately or a `max_wait_time_ms` ms event hang waiting for a
// complete frame, `max_wait_time_ms` decided by caller.
VCMEncodedFrame* VCMJitterBuffer::NextCompleteFrame(uint32_t max_wait_time_ms) {
MutexLock lock(&mutex_);
if (!running_) {

@@ -820,7 +820,7 @@ void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
}
}

// Must be called under the critical section |mutex_|.
// Must be called under the critical section `mutex_`.
void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
&free_frames_);

@@ -831,13 +831,13 @@ void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
}
}

// Must be called from within |mutex_|.
// Must be called from within `mutex_`.
bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const {
return missing_sequence_numbers_.find(packet.seqNum) !=
missing_sequence_numbers_.end();
}

// Must be called under the critical section |mutex_|. Should never be
// Must be called under the critical section `mutex_`. Should never be
// called with retransmitted frames, they must be filtered out before this
// function is called.
void VCMJitterBuffer::UpdateJitterEstimate(const VCMJitterSample& sample,

@@ -863,7 +863,7 @@ void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
frame.size(), incomplete_frame);
}

// Must be called under the critical section |mutex_|. Should never be
// Must be called under the critical section `mutex_`. Should never be
// called with retransmitted frames, they must be filtered out before this
// function is called.
void VCMJitterBuffer::UpdateJitterEstimate(int64_t latest_packet_time_ms,

@@ -93,7 +93,7 @@ class VCMJitterBuffer {
// Gets number of duplicated packets received.
int num_duplicated_packets() const;

// Wait |max_wait_time_ms| for a complete frame to arrive.
// Wait `max_wait_time_ms` for a complete frame to arrive.
// If found, a pointer to the frame is returned. Returns nullptr otherwise.
VCMEncodedFrame* NextCompleteFrame(uint32_t max_wait_time_ms);

@@ -112,7 +112,7 @@ class VCMJitterBuffer {
bool* retransmitted) const;

// Inserts a packet into a frame returned from GetFrame().
// If the return value is <= 0, |frame| is invalidated and the pointer must
// If the return value is <= 0, `frame` is invalidated and the pointer must
// be dropped after this function returns.
VCMFrameBufferEnum InsertPacket(const VCMPacket& packet, bool* retransmitted);

@@ -138,36 +138,36 @@ class VCMJitterBuffer {

// Gets the frame assigned to the timestamp of the packet. May recycle
// existing frames if no free frames are available. Returns an error code if
// failing, or kNoError on success. |frame_list| contains which list the
// failing, or kNoError on success. `frame_list` contains which list the
// packet was in, or NULL if it was not in a FrameList (a new frame).
VCMFrameBufferEnum GetFrame(const VCMPacket& packet,
VCMFrameBuffer** frame,
FrameList** frame_list)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

// Returns true if |frame| is continuous in |decoding_state|, not taking
// Returns true if `frame` is continuous in `decoding_state`, not taking
// decodable frames into account.
bool IsContinuousInState(const VCMFrameBuffer& frame,
const VCMDecodingState& decoding_state) const
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Returns true if |frame| is continuous in the |last_decoded_state_|, taking
// Returns true if `frame` is continuous in the `last_decoded_state_`, taking
// all decodable frames into account.
bool IsContinuous(const VCMFrameBuffer& frame) const
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Looks for frames in |incomplete_frames_| which are continuous in the
// provided |decoded_state|. Starts the search from the timestamp of
// |decoded_state|.
// Looks for frames in `incomplete_frames_` which are continuous in the
// provided `decoded_state`. Starts the search from the timestamp of
// `decoded_state`.
void FindAndInsertContinuousFramesWithState(
const VCMDecodingState& decoded_state)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Looks for frames in |incomplete_frames_| which are continuous in
// |last_decoded_state_| taking all decodable frames into account. Starts
// the search from |new_frame|.
// Looks for frames in `incomplete_frames_` which are continuous in
// `last_decoded_state_` taking all decodable frames into account. Starts
// the search from `new_frame`.
void FindAndInsertContinuousFrames(const VCMFrameBuffer& new_frame)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
VCMFrameBuffer* NextFrame() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Returns true if the NACK list was updated to cover sequence numbers up to
// |sequence_number|. If false a key frame is needed to get into a state where
// `sequence_number`. If false a key frame is needed to get into a state where
// we can continue decoding.
bool UpdateNackList(uint16_t sequence_number)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

@@ -182,7 +182,7 @@ class VCMJitterBuffer {
// continue decoding.
bool HandleTooOldPackets(uint16_t latest_sequence_number)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Drops all packets in the NACK list up until |last_decoded_sequence_number|.
// Drops all packets in the NACK list up until `last_decoded_sequence_number`.
void DropPacketsFromNackList(uint16_t last_decoded_sequence_number);

// Gets an empty frame, creating a new frame if necessary (i.e. increases

@@ -204,7 +204,7 @@ class VCMJitterBuffer {
// Should only be called prior to actual use.
void CleanUpOldOrEmptyFrames() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

// Returns true if |packet| is likely to have been retransmitted.
// Returns true if `packet` is likely to have been retransmitted.
bool IsPacketRetransmitted(const VCMPacket& packet) const;

// The following three functions update the jitter estimate with the

@@ -1544,7 +1544,7 @@ TEST_F(TestJitterBufferNack, NackTooOldPackets) {
EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
EXPECT_TRUE(DecodeCompleteFrame());

// Drop one frame and insert |kNackHistoryLength| to trigger NACKing a too
// Drop one frame and insert `kNackHistoryLength` to trigger NACKing a too
// old packet.
DropFrame(1);
// Insert a frame which should trigger a recycle until the next key frame.

@@ -1597,7 +1597,7 @@ TEST_F(TestJitterBufferNack, NackListFull) {
EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
EXPECT_TRUE(DecodeCompleteFrame());

// Generate and drop |kNackHistoryLength| packets to fill the NACK list.
// Generate and drop `kNackHistoryLength` packets to fill the NACK list.
DropFrame(max_nack_list_size_ + 1);
// Insert a frame which should trigger a recycle until the next key frame.
EXPECT_EQ(kFlushIndicator, InsertFrame(VideoFrameType::kVideoFrameDelta));

@@ -19,8 +19,8 @@

namespace webrtc {
namespace {
// Keep a container's size no higher than |max_allowed_size|, by paring its size
// down to |target_size| whenever it has more than |max_allowed_size| elements.
// Keep a container's size no higher than `max_allowed_size`, by paring its size
// down to `target_size` whenever it has more than `max_allowed_size` elements.
template <typename Container>
void PareDown(Container* container,
size_t max_allowed_size,
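The declaration above is truncated in this view; judging by the comment it also takes a `target_size` parameter. A plausible body under that assumption (a sketch, not necessarily the exact upstream implementation) erases the oldest elements from the front:

#include <cstddef>
#include <iterator>

template <typename Container>
void PareDown(Container* container,
              size_t max_allowed_size,
              size_t target_size) {
  if (container->size() > max_allowed_size) {
    // Erase from the front so that the most recent entries survive.
    const size_t entries_to_delete = container->size() - target_size;
    auto erase_to = container->begin();
    std::advance(erase_to, entries_to_delete);
    container->erase(container->begin(), erase_to);
  }
}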
@@ -67,7 +67,7 @@ void LossNotificationController::OnReceivedPacket(

last_received_seq_num_ = rtp_seq_num;

// |frame| is not nullptr iff the packet is the first packet in the frame.
// `frame` is not nullptr iff the packet is the first packet in the frame.
if (frame != nullptr) {
// Ignore repeated or reordered frames.
// TODO(bugs.webrtc.org/10336): Handle frame reordering.

@@ -36,7 +36,7 @@ class LossNotificationController {
~LossNotificationController();

// An RTP packet was received from the network.
// |frame| is non-null iff the packet is the first packet in the frame.
// `frame` is non-null iff the packet is the first packet in the frame.
void OnReceivedPacket(uint16_t rtp_seq_num, const FrameDetails* frame);

// A frame was assembled from packets previously received.

@@ -54,9 +54,9 @@ class LossNotificationController {

// When the loss of a packet or the non-decodability of a frame is detected,
// produces a key frame request or a loss notification.
// 1. |last_received_seq_num| is the last received sequence number.
// 2. |decodability_flag| refers to the frame associated with the last packet.
// It is set to |true| if and only if all of that frame's dependencies are
// 1. `last_received_seq_num` is the last received sequence number.
// 2. `decodability_flag` refers to the frame associated with the last packet.
// It is set to `true` if and only if all of that frame's dependencies are
// known to be decodable, and the frame itself is not yet known to be
// unassemblable (i.e. no earlier parts of it were lost).
// Clarifications:

@@ -90,7 +90,7 @@ class LossNotificationController {
// the last decodable-and-non-discardable frame. Since this is a bit of
// a mouthful, last_decodable_non_discardable_.first_seq_num is used,
// which hopefully is a bit easier for human beings to parse
// than |first_seq_num_of_last_decodable_non_discardable_|.
// than `first_seq_num_of_last_decodable_non_discardable_`.
struct FrameInfo {
explicit FrameInfo(uint16_t first_seq_num) : first_seq_num(first_seq_num) {}
uint16_t first_seq_num;

@@ -153,7 +153,7 @@ int VCMNackFecMethod::ComputeMaxFramesFec(
rtc::saturated_cast<int>(
2.0f * base_layer_framerate * parameters->rtt / 1000.0f + 0.5f),
1);
// |kUpperLimitFramesFec| is the upper limit on how many frames we
// `kUpperLimitFramesFec` is the upper limit on how many frames we
// allow any FEC to be based on.
if (max_frames_fec > kUpperLimitFramesFec) {
max_frames_fec = kUpperLimitFramesFec;

@@ -171,7 +171,7 @@ bool VCMNackFecMethod::BitRateTooLowForFec(
// The condition should depend on resolution and content. For now, use
// threshold on bytes per frame, with some effect for the frame size.
// The condition for turning off FEC is also based on other factors,
// such as |_numLayers|, |_maxFramesFec|, and |_rtt|.
// such as `_numLayers`, `_maxFramesFec`, and `_rtt`.
int estimate_bytes_per_frame = 1000 * BitsPerFrame(parameters) / 8;
int max_bytes_per_frame = kMaxBytesPerFrameForFec;
int num_pixels = parameters->codecWidth * parameters->codecHeight;

@@ -177,7 +177,7 @@ class VCMFecMethod : public VCMProtectionMethod {
protected:
enum { kUpperLimitFramesFec = 6 };
// Thresholds values for the bytes/frame and round trip time, below which we
// may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
// may turn off FEC, depending on `_numLayers` and `_maxFramesFec`.
// Max bytes/frame for VGA, corresponds to ~140k at 25fps.
enum { kMaxBytesPerFrameForFec = 700 };
// Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.

@@ -306,8 +306,8 @@ class VCMLossProtectionLogic {

// Updates the filtered loss for the average and max window packet loss,
// and returns the filtered loss probability in the interval [0, 255].
// The returned filtered loss value depends on the parameter |filter_mode|.
// The input parameter |lossPr255| is the received packet loss.
// The returned filtered loss value depends on the parameter `filter_mode`.
// The input parameter `lossPr255` is the received packet loss.

// Return value : The filtered loss probability
uint8_t FilteredLoss(int64_t nowMs,

@@ -195,7 +195,7 @@ int NackRequester::OnReceivedPacket(uint16_t seq_num,
return 0;
}

// Since the |newest_seq_num_| is a packet we have actually received we know
// Since the `newest_seq_num_` is a packet we have actually received we know
// that packet has never been Nacked.
if (seq_num == newest_seq_num_)
return 0;

@@ -129,7 +129,7 @@ class NackRequester final : public NackRequesterBase {
RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);

// Returns how many packets we have to wait in order to receive the packet
// with probability |probabilty| or higher.
// with probability `probabilty` or higher.
int WaitNumberOfPackets(float probability) const
RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_);

@@ -139,7 +139,7 @@ class NackRequester final : public NackRequesterBase {
KeyFrameRequestSender* const keyframe_request_sender_;

// TODO(philipel): Some of the variables below are consistently used on a
// known thread (e.g. see |initialized_|). Those probably do not need
// known thread (e.g. see `initialized_`). Those probably do not need
// synchronized access.
std::map<uint16_t, NackInfo, DescendingSeqNumComp<uint16_t>> nack_list_
RTC_GUARDED_BY(worker_thread_);

@@ -123,7 +123,7 @@ void PacketBuffer::ClearTo(uint16_t seq_num) {
return;

// Avoid iterating over the buffer more than once by capping the number of
// iterations to the |size_| of the buffer.
// iterations to the `size_` of the buffer.
++seq_num;
size_t diff = ForwardDiff<uint16_t>(first_seq_num_, seq_num);
size_t iterations = std::min(diff, buffer_.size());

@@ -135,8 +135,8 @@ void PacketBuffer::ClearTo(uint16_t seq_num) {
++first_seq_num_;
}

// If |diff| is larger than |iterations| it means that we don't increment
// |first_seq_num_| until we reach |seq_num|, so we set it here.
// If `diff` is larger than `iterations` it means that we don't increment
// `first_seq_num_` until we reach `seq_num`, so we set it here.
first_seq_num_ = seq_num;

is_cleared_to_first_seq_num_ = true;
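A short worked example of the two ClearTo hunks above, with illustrative values:

// Illustrative walk-through (values assumed, not from the source):
//   first_seq_num_ = 10, buffer_.size() = 8, ClearTo(500).
//   seq_num becomes 501; diff = ForwardDiff<uint16_t>(10, 501) = 491.
//   iterations = std::min<size_t>(491, 8) = 8, so each of the 8 slots is
//   visited exactly once instead of 491 times.
//   Since diff (491) > iterations (8), the loop alone cannot advance
//   first_seq_num_ all the way, so it is set to 501 directly afterwards.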
@@ -229,7 +229,7 @@ std::vector<std::unique_ptr<PacketBuffer::Packet>> PacketBuffer::FindFrames(
uint16_t start_seq_num = seq_num;

// Find the start index by searching backward until the packet with
// the |frame_begin| flag is set.
// the `frame_begin` flag is set.
int start_index = index;
size_t tested_packets = 0;
int64_t frame_timestamp = buffer_[start_index]->timestamp;

@@ -285,7 +285,7 @@ std::vector<std::unique_ptr<PacketBuffer::Packet>> PacketBuffer::FindFrames(
start_index = start_index > 0 ? start_index - 1 : buffer_.size() - 1;

// In the case of H264 we don't have a frame_begin bit (yes,
// |frame_begin| might be set to true but that is a lie). So instead
// `frame_begin` might be set to true but that is a lie). So instead
// we traverese backwards as long as we have a previous packet and
// the timestamp of that packet is the same as this one. This may cause
// the PacketBuffer to hand out incomplete frames.

@@ -71,7 +71,7 @@ class PacketBuffer {
bool buffer_cleared = false;
};

// Both |start_buffer_size| and |max_buffer_size| must be a power of 2.
// Both `start_buffer_size` and `max_buffer_size` must be a power of 2.
PacketBuffer(size_t start_buffer_size, size_t max_buffer_size);
~PacketBuffer();

@@ -107,7 +107,7 @@ class PacketBuffer {
// If the packet buffer has received its first packet.
bool first_packet_received_;

// If the buffer is cleared to |first_seq_num_|.
// If the buffer is cleared to `first_seq_num_`.
bool is_cleared_to_first_seq_num_;

// Buffer that holds the the inserted packets and information needed to

@@ -377,9 +377,9 @@ TEST_F(PacketBufferTest, InsertPacketAfterSequenceNumberWrapAround) {
EXPECT_THAT(packets, SizeIs(7));
}

// If |sps_pps_idr_is_keyframe| is true, we require keyframes to contain
// If `sps_pps_idr_is_keyframe` is true, we require keyframes to contain
// SPS/PPS/IDR and the keyframes we create as part of the test do contain
// SPS/PPS/IDR. If |sps_pps_idr_is_keyframe| is false, we only require and
// SPS/PPS/IDR. If `sps_pps_idr_is_keyframe` is false, we only require and
// create keyframes containing only IDR.
class PacketBufferH264Test : public PacketBufferTest {
protected:

@@ -244,15 +244,15 @@ class SimulatedClockWithFrames : public SimulatedClock {
receiver_(receiver) {}
virtual ~SimulatedClockWithFrames() {}

// If |stop_on_frame| is true and next frame arrives between now and
// now+|milliseconds|, the clock will be advanced to the arrival time of next
// If `stop_on_frame` is true and next frame arrives between now and
// now+`milliseconds`, the clock will be advanced to the arrival time of next
// frame.
// Otherwise, the clock will be advanced by |milliseconds|.
// Otherwise, the clock will be advanced by `milliseconds`.
//
// For both cases, a frame will be inserted into the jitter buffer at the
// instant when the clock time is timestamps_.front().arrive_time.
//
// Return true if some frame arrives between now and now+|milliseconds|.
// Return true if some frame arrives between now and now+`milliseconds`.
bool AdvanceTimeMilliseconds(int64_t milliseconds, bool stop_on_frame) {
return AdvanceTimeMicroseconds(milliseconds * 1000, stop_on_frame);
}

@@ -282,10 +282,10 @@ class SimulatedClockWithFrames : public SimulatedClock {
}

// Input timestamps are in unit Milliseconds.
// And |arrive_timestamps| must be positive and in increasing order.
// |arrive_timestamps| determine when we are going to insert frames into the
// And `arrive_timestamps` must be positive and in increasing order.
// `arrive_timestamps` determine when we are going to insert frames into the
// jitter buffer.
// |render_timestamps| are the timestamps on the frame.
// `render_timestamps` are the timestamps on the frame.
void SetFrames(const int64_t* arrive_timestamps,
const int64_t* render_timestamps,
size_t size) {

@@ -328,12 +328,12 @@ class SimulatedClockWithFrames : public SimulatedClock {

// Use a SimulatedClockWithFrames
// Wait call will do either of these:
// 1. If |stop_on_frame| is true, the clock will be turned to the exact instant
// 1. If `stop_on_frame` is true, the clock will be turned to the exact instant
// that the first frame comes and the frame will be inserted into the jitter
// buffer, or the clock will be turned to now + |max_time| if no frame comes in
// buffer, or the clock will be turned to now + `max_time` if no frame comes in
// the window.
// 2. If |stop_on_frame| is false, the clock will be turn to now + |max_time|,
// and all the frames arriving between now and now + |max_time| will be
// 2. If `stop_on_frame` is false, the clock will be turn to now + `max_time`,
// and all the frames arriving between now and now + `max_time` will be
// inserted into the jitter buffer.
//
// This is used to simulate the JitterBuffer getting packets from internet as

@@ -382,9 +382,9 @@ class VCMReceiverTimingTest : public ::testing::Test {
};

// Test whether VCMReceiver::FrameForDecoding handles parameter
// |max_wait_time_ms| correctly:
// 1. The function execution should never take more than |max_wait_time_ms|.
// 2. If the function exit before now + |max_wait_time_ms|, a frame must be
// `max_wait_time_ms` correctly:
// 1. The function execution should never take more than `max_wait_time_ms`.
// 2. If the function exit before now + `max_wait_time_ms`, a frame must be
// returned.
TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
const size_t kNumFrames = 100;

@@ -436,9 +436,9 @@ TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
}

// Test whether VCMReceiver::FrameForDecoding handles parameter
// |prefer_late_decoding| and |max_wait_time_ms| correctly:
// 1. The function execution should never take more than |max_wait_time_ms|.
// 2. If the function exit before now + |max_wait_time_ms|, a frame must be
// `prefer_late_decoding` and `max_wait_time_ms` correctly:
// 1. The function execution should never take more than `max_wait_time_ms`.
// 2. If the function exit before now + `max_wait_time_ms`, a frame must be
// returned and the end time must be equal to the render timestamp - delay
// for decoding and rendering.
TEST_F(VCMReceiverTimingTest, FrameForDecodingPreferLateDecoding) {

@@ -31,7 +31,7 @@ class RtpFrameReferenceFinder {
// The RtpFrameReferenceFinder will hold onto the frame until:
// - the required information to determine its references has been received,
// in which case it (and possibly other) frames are returned, or
// - There are too many stashed frames (determined by |kMaxStashedFrames|),
// - There are too many stashed frames (determined by `kMaxStashedFrames`),
// in which case it gets dropped, or
// - It gets cleared by ClearTo, in which case its dropped.
// - The frame is old, in which case it also gets dropped.

@@ -41,7 +41,7 @@ class RtpFrameReferenceFinder {
// might need to calculate the references of a frame.
ReturnVector PaddingReceived(uint16_t seq_num);

// Clear all stashed frames that include packets older than |seq_num|.
// Clear all stashed frames that include packets older than `seq_num`.
void ClearTo(uint16_t seq_num);

private:

@@ -49,7 +49,7 @@ class RtpFrameReferenceFinder {

// How far frames have been cleared out of the buffer by RTP sequence number.
// A frame will be cleared if it contains a packet with a sequence number
// older than |cleared_to_seq_num_|.
// older than `cleared_to_seq_num_`.
int cleared_to_seq_num_ = -1;
const int64_t picture_id_offset_;
std::unique_ptr<internal::RtpFrameReferenceFinderImpl> impl_;

@@ -106,9 +106,9 @@ class TestRtpFrameReferenceFinder : public ::testing::Test {
OnCompleteFrames(reference_finder_->PaddingReceived(seq_num));
}

// Check if a frame with picture id |pid| and spatial index |sidx| has been
// Check if a frame with picture id `pid` and spatial index `sidx` has been
// delivered from the packet buffer, and if so, if it has the references
// specified by |refs|.
// specified by `refs`.
template <typename... T>
void CheckReferences(int64_t picture_id_offset,
uint16_t sidx,

@@ -65,7 +65,7 @@ RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
last_picture_id_ = old_picture_id;
}
// Find if there has been a gap in fully received frames and save the picture
// id of those frames in |not_yet_received_frames_|.
// id of those frames in `not_yet_received_frames_`.
if (AheadOf<uint16_t, kFrameIdLength>(frame->Id(), last_picture_id_)) {
do {
last_picture_id_ = Add<kFrameIdLength>(last_picture_id_, 1);

@@ -80,7 +80,7 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(

// The VP9 `tl0_pic_idx` is 8 bits and therefor wraps often. In the case of
// packet loss the next received frame could have a `tl0_pic_idx` that looks
// older than the previously received frame. Always wrap forward if |frame| is
// older than the previously received frame. Always wrap forward if `frame` is
// newer in RTP packet sequence number order.
int64_t unwrapped_tl0;
auto tl0_it = gof_info_.rbegin();

@@ -233,7 +233,7 @@ bool RtpVp9RefFinder::MissingRequiredFrameVp9(uint16_t picture_id,
}

// For every reference this frame has, check if there is a frame missing in
// the interval (|ref_pid|, |picture_id|) in any of the lower temporal
// the interval (`ref_pid`, `picture_id`) in any of the lower temporal
// layers. If so, we are missing a required frame.
uint8_t num_references = info.gof->num_ref_pics[gof_idx];
for (size_t i = 0; i < num_references; ++i) {

@@ -72,7 +72,7 @@ class RtpVp9RefFinder {
std::deque<std::unique_ptr<RtpFrameObject>> stashed_frames_;

// Where the current scalability structure is in the
// |scalability_structures_| array.
// `scalability_structures_` array.
uint8_t current_ss_idx_ = 0;

// Holds received scalability structures.

@@ -292,7 +292,7 @@ bool VCMSessionInfo::complete() const {
return complete_;
}

// Find the end of the NAL unit which the packet pointed to by |packet_it|
// Find the end of the NAL unit which the packet pointed to by `packet_it`
// belongs to. Returns an iterator to the last packet of the frame if the end
// of the NAL unit wasn't found.
VCMSessionInfo::PacketIterator VCMSessionInfo::FindNaluEnd(

@@ -491,7 +491,7 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
}
}

// The insert operation invalidates the iterator |rit|.
// The insert operation invalidates the iterator `rit`.
PacketIterator packet_list_it = packets_.insert(rit.base(), packet);

size_t returnLength = InsertBuffer(frame_buffer, packet_list_it);

@@ -80,13 +80,13 @@ class VCMSessionInfo {

// Finds the packet of the beginning of the next VP8 partition. If
// none is found the returned iterator points to |packets_.end()|.
// |it| is expected to point to the last packet of the previous partition,
// or to the first packet of the frame. |packets_skipped| is incremented
// `it` is expected to point to the last packet of the previous partition,
// or to the first packet of the frame. `packets_skipped` is incremented
// for each packet found which doesn't have the beginning bit set.
PacketIterator FindNextPartitionBeginning(PacketIterator it) const;

// Returns an iterator pointing to the last packet of the partition pointed to
// by |it|.
// by `it`.
PacketIterator FindPartitionEnd(PacketIterator it) const;
static bool InSequence(const PacketIterator& it,
const PacketIterator& prev_it);

@@ -97,7 +97,7 @@ class VCMSessionInfo {
uint8_t* frame_buffer);
void ShiftSubsequentPackets(PacketIterator it, int steps_to_shift);
PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
// Deletes the data of all packets between |start| and |end|, inclusively.
// Deletes the data of all packets between `start` and `end`, inclusively.
// Note that this function doesn't delete the actual packets.
size_t DeletePacketData(PacketIterator start, PacketIterator end);
void UpdateCompleteSession();

@@ -117,8 +117,8 @@ static std::vector<DataRate> SplitBitrate(size_t num_layers,
return bitrates;
}

// Returns the minimum bitrate needed for |num_active_layers| spatial layers to
// become active using the configuration specified by |codec|.
// Returns the minimum bitrate needed for `num_active_layers` spatial layers to
// become active using the configuration specified by `codec`.
DataRate FindLayerTogglingThreshold(const VideoCodec& codec,
size_t first_active_layer,
size_t num_active_layers) {

@@ -142,7 +142,7 @@ DataRate FindLayerTogglingThreshold(const VideoCodec& codec,
.minBitrate);

// Do a binary search until upper and lower bound is the highest bitrate for
// |num_active_layers| - 1 layers and lowest bitrate for |num_active_layers|
// `num_active_layers` - 1 layers and lowest bitrate for `num_active_layers`
// layers respectively.
while (upper_bound - lower_bound > DataRate::BitsPerSec(1)) {
DataRate try_rate = (lower_bound + upper_bound) / 2;
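The loop above is a plain bisection over bitrate. A self-contained sketch of the same pattern, with a stand-in monotone predicate since the real allocator call is elided in this view:

#include <cstdint>

// Bisect to the lowest rate (bps) at which `enough_for_n_layers` holds,
// assuming the predicate is monotone: false below the threshold, true at
// or above it.
int64_t FindThresholdBps(int64_t lower_bound,
                         int64_t upper_bound,
                         bool (*enough_for_n_layers)(int64_t)) {
  // Invariant: predicate is false at lower_bound and true at upper_bound.
  while (upper_bound - lower_bound > 1) {
    const int64_t try_rate = (lower_bound + upper_bound) / 2;
    if (enough_for_n_layers(try_rate)) {
      upper_bound = try_rate;
    } else {
      lower_bound = try_rate;
    }
  }
  return upper_bound;
}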
@@ -31,8 +31,8 @@ class StreamGenerator {
StreamGenerator(uint16_t start_seq_num, int64_t current_time);
void Init(uint16_t start_seq_num, int64_t current_time);

// |time_ms| denotes the timestamp you want to put on the frame, and the unit
// is millisecond. GenerateFrame will translate |time_ms| into a 90kHz
// `time_ms` denotes the timestamp you want to put on the frame, and the unit
// is millisecond. GenerateFrame will translate `time_ms` into a 90kHz
// timestamp and put it on the frame.
void GenerateFrame(VideoFrameType type,
int num_media_packets,

@@ -62,7 +62,7 @@ bool VCMTimestampMap::IsEmpty() const {
}

size_t VCMTimestampMap::Size() const {
// The maximum number of elements in the list is |capacity_| - 1. The list is
// The maximum number of elements in the list is `capacity_` - 1. The list is
// empty if the add and pop indices are equal.
return next_add_idx_ >= next_pop_idx_
? next_add_idx_ - next_pop_idx_
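The return statement above is cut off in this view; the missing branch is the wrapped case. A standalone sketch of the full ring-buffer size computation, where one slot is sacrificed so that equal indices always mean empty (names and example values are illustrative):

#include <cstddef>

// Sketch: size of a ring buffer with `capacity` slots and one sacrificial
// slot, mirroring the snippet's index arithmetic.
size_t RingSize(size_t next_add_idx, size_t next_pop_idx, size_t capacity) {
  // Example: capacity = 8, next_add_idx = 2, next_pop_idx = 6
  //          -> size = 2 + 8 - 6 = 4.
  return next_add_idx >= next_pop_idx
             ? next_add_idx - next_pop_idx
             : next_add_idx + capacity - next_pop_idx;
}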
@@ -195,8 +195,8 @@ int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp,
estimated_complete_time_ms = now_ms;
}

// Make sure the actual delay stays in the range of |min_playout_delay_ms_|
// and |max_playout_delay_ms_|.
// Make sure the actual delay stays in the range of `min_playout_delay_ms_`
// and `max_playout_delay_ms_`.
int actual_delay = std::max(current_delay_ms_, min_playout_delay_ms_);
actual_delay = std::min(actual_delay, max_playout_delay_ms_);
return estimated_complete_time_ms + actual_delay;

@@ -213,10 +213,10 @@ int64_t VCMTiming::MaxWaitingTime(int64_t render_time_ms,
MutexLock lock(&mutex_);

if (render_time_ms == 0 && zero_playout_delay_min_pacing_->us() > 0) {
// |render_time_ms| == 0 indicates that the frame should be decoded and
// `render_time_ms` == 0 indicates that the frame should be decoded and
// rendered as soon as possible. However, the decoder can be choked if too
// many frames are sent at ones. Therefore, limit the interframe delay to
// |zero_playout_delay_min_pacing_|.
// `zero_playout_delay_min_pacing_`.
int64_t earliest_next_decode_start_time =
last_decode_scheduled_ts_ + zero_playout_delay_min_pacing_->ms();
int64_t max_wait_time_ms = now_ms >= earliest_next_decode_start_time
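The final statement above is truncated in this view. The idea, per the comment, is that the next decode may start no earlier than the previously scheduled decode plus the minimum pacing interval. A condensed sketch of that rule (the function name and the zero floor are assumptions consistent with the comment, not the upstream code):

#include <cstdint>

// Sketch: pace decode starts for zero-playout-delay frames.
int64_t MaxWaitTimeMs(int64_t now_ms,
                      int64_t last_decode_scheduled_ms,
                      int64_t min_pacing_ms) {
  const int64_t earliest_next_decode_start_time =
      last_decode_scheduled_ms + min_pacing_ms;
  // Already past the pacing deadline: decode immediately (wait 0 ms);
  // otherwise wait out the remaining gap.
  return now_ms >= earliest_next_decode_start_time
             ? 0
             : earliest_next_decode_start_time - now_ms;
}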
@@ -77,8 +77,8 @@ class VCMTiming {
void IncomingTimestamp(uint32_t time_stamp, int64_t last_packet_time_ms);

// Returns the receiver system time when the frame with timestamp
// |frame_timestamp| should be rendered, assuming that the system time
// currently is |now_ms|.
// `frame_timestamp` should be rendered, assuming that the system time
// currently is `now_ms`.
virtual int64_t RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms) const;

// Returns the maximum time in ms that we can wait for a frame to become

@@ -126,8 +126,8 @@ class VCMTiming {
RTC_PT_GUARDED_BY(mutex_);
int render_delay_ms_ RTC_GUARDED_BY(mutex_);
// Best-effort playout delay range for frames from capture to render.
// The receiver tries to keep the delay between |min_playout_delay_ms_|
// and |max_playout_delay_ms_| taking the network jitter into account.
// The receiver tries to keep the delay between `min_playout_delay_ms_`
// and `max_playout_delay_ms_` taking the network jitter into account.
// A special case is where min_playout_delay_ms_ = max_playout_delay_ms_ = 0,
// in which case the receiver tries to play the frames as they arrive.
int min_playout_delay_ms_ RTC_GUARDED_BY(mutex_);

@@ -26,7 +26,7 @@ class UniqueTimestampCounter {
~UniqueTimestampCounter() = default;

void Add(uint32_t timestamp);
// Returns number of different |timestamp| passed to the UniqueCounter.
// Returns number of different `timestamp` passed to the UniqueCounter.
int GetUniqueSeen() const { return unique_seen_; }

private:
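A minimal stand-in for the counter's contract above. This sketch keeps every timestamp ever seen, which the real class may avoid; it only illustrates the Add/GetUniqueSeen semantics:

#include <cstdint>
#include <set>

// Counts how many distinct timestamps have been added (unbounded sketch).
class UniqueTimestampCounterSketch {
 public:
  void Add(uint32_t timestamp) { seen_.insert(timestamp); }
  int GetUniqueSeen() const { return static_cast<int>(seen_.size()); }

 private:
  std::set<uint32_t> seen_;
};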
@@ -33,7 +33,7 @@ const float kDefaultTargetBitrateKbps = 300.0f;
const float kDefaultIncomingFrameRate = 30;
const float kLeakyBucketSizeSeconds = 0.5f;

// A delta frame that is bigger than |kLargeDeltaFactor| times the average
// A delta frame that is bigger than `kLargeDeltaFactor` times the average
// delta frame is a large frame that is spread out for accumulation.
const int kLargeDeltaFactor = 3;

@@ -67,13 +67,13 @@ class FrameDropper {
// drops on the following packets that may be much smaller. Instead these
// large frames are accumulated over several frames when the bucket leaks.

// |large_frame_accumulation_spread_| represents the number of frames over
// `large_frame_accumulation_spread_` represents the number of frames over
// which a large frame is accumulated.
float large_frame_accumulation_spread_;
// |large_frame_accumulation_count_| represents the number of frames left
// `large_frame_accumulation_count_` represents the number of frames left
// to finish accumulating a large frame.
int large_frame_accumulation_count_;
// |large_frame_accumulation_chunk_size_| represents the size of a single
// `large_frame_accumulation_chunk_size_` represents the size of a single
// chunk for large frame accumulation.
float large_frame_accumulation_chunk_size_;
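Putting the three members above together, a hedged member-function sketch of how a large frame could be spread into equal chunks. The real FrameDropper's update logic is more involved; the function name and the rounding choice are assumptions:

// Sketch: start accumulating a large frame over `spread` leak intervals.
void StartLargeFrameAccumulation(float frame_size_bytes, float spread) {
  large_frame_accumulation_spread_ = spread;
  // Round to the nearest whole number of frames still to be accumulated.
  large_frame_accumulation_count_ = static_cast<int>(spread + 0.5f);
  // Each leak interval then drains one equal-sized chunk of the frame.
  large_frame_accumulation_chunk_size_ =
      frame_size_bytes / large_frame_accumulation_spread_;
}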
@@ -44,7 +44,7 @@ class FrameDropperTest : public ::testing::Test {
void ValidateNoDropsAtTargetBitrate(int large_frame_size_bytes,
int large_frame_rate,
bool is_large_frame_delta) {
// Smaller frame size is computed to meet |kTargetBitRateKbps|.
// Smaller frame size is computed to meet `kTargetBitRateKbps`.
int small_frame_size_bytes =
kFrameSizeBytes -
(large_frame_size_bytes * large_frame_rate) / kIncomingFrameRate;

@@ -28,8 +28,8 @@ class IvfFileWriter {
public:
// Takes ownership of the file, which will be closed either through
// Close or ~IvfFileWriter. If writing a frame would take the file above the
// |byte_limit| the file will be closed, the write (and all future writes)
// will fail. A |byte_limit| of 0 is equivalent to no limit.
// `byte_limit` the file will be closed, the write (and all future writes)
// will fail. A `byte_limit` of 0 is equivalent to no limit.
static std::unique_ptr<IvfFileWriter> Wrap(FileWrapper file,
size_t byte_limit);
~IvfFileWriter();

@@ -108,7 +108,7 @@ class QualityScaler::CheckQpTask {
switch (quality_scaler_->CheckQp()) {
case QualityScaler::CheckQpResult::kInsufficientSamples: {
result_.observed_enough_frames = false;
// After this line, |this| may be deleted.
// After this line, `this` may be deleted.
break;
}
case QualityScaler::CheckQpResult::kNormalQp: {

@@ -133,7 +133,7 @@ class QualityScaler::CheckQpTask {
}
state_ = State::kCompleted;
// Starting the next task deletes the pending task. After this line,
// |this| has been deleted.
// `this` has been deleted.
quality_scaler_->StartNextCheckQpTask();
}),
GetCheckingQpDelayMs());

@@ -38,7 +38,7 @@ class QualityScalerQpUsageHandlerInterface;
// video stream down or up).
class QualityScaler {
public:
// Construct a QualityScaler with given |thresholds| and |handler|.
// Construct a QualityScaler with given `thresholds` and `handler`.
// This starts the quality scaler periodically checking what the average QP
// has been recently.
QualityScaler(QualityScalerQpUsageHandlerInterface* handler,

@@ -169,7 +169,7 @@ void SetPlane(uint8_t* data, uint8_t value, int width, int height, int stride) {
}
}

// Fills in an I420Buffer from |plane_colors|.
// Fills in an I420Buffer from `plane_colors`.
void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
int plane_colors[kNumOfPlanes]) {
SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),

@@ -465,7 +465,7 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {

void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
// We are just below limit of sending second stream, so we should get
// the first stream maxed out (at |maxBitrate|), and padding for two.
// the first stream maxed out (at `maxBitrate`), and padding for two.
SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
VideoFrameType::kVideoFrameDelta);

@@ -492,7 +492,7 @@ void SimulcastTestFixtureImpl::TestPaddingOneStream() {

void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
// We are just below limit of sending third stream, so we should get
// first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
// first stream's rate maxed out at `targetBitrate`, second at `maxBitrate`.
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
VideoFrameType::kVideoFrameDelta);

@@ -649,7 +649,7 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
SetRates(settings_.startBitrate, 30);
ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
// Resize |input_frame_| to the new resolution.
// Resize `input_frame_` to the new resolution.
input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
input_buffer_->InitializeData();
input_frame_ = std::make_unique<webrtc::VideoFrame>(

@@ -72,7 +72,7 @@ class BitstreamReader {
// Reads a bit from the input stream and returns:
// * false if bit cannot be read
// * true if bit matches expected_val
// * false if bit does not match expected_val - in which case |error_msg| is
// * false if bit does not match expected_val - in which case `error_msg` is
// logged as warning, if provided.
bool VerifyNextBooleanIs(bool expected_val, absl::string_view error_msg) {
uint32_t val;

@@ -88,7 +88,7 @@ class BitstreamReader {
return true;
}

// Reads |bits| bits from the bitstream and interprets them as an unsigned
// Reads `bits` bits from the bitstream and interprets them as an unsigned
// integer that gets cast to the type T before returning.
// Returns nullopt if all bits cannot be read.
// If number of bits matches size of data type, the bits parameter may be

@@ -106,11 +106,11 @@ class BitstreamReader {
return (static_cast<T>(val));
}

// Helper method that reads |num_bits| from the bitstream, returns:
// Helper method that reads `num_bits` from the bitstream, returns:
// * false if bits cannot be read.
// * true if |expected_val| matches the read bits
// * false if |expected_val| does not match the read bits, and logs
// |error_msg| as a warning (if provided).
// * true if `expected_val` matches the read bits
// * false if `expected_val` does not match the read bits, and logs
// `error_msg` as a warning (if provided).
bool VerifyNextUnsignedIs(int num_bits,
uint32_t expected_val,
absl::string_view error_msg) {
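A usage sketch of the verification helper above, for instance when checking a VP9-style 2-bit frame marker (the specific field and values are illustrative assumptions; `reader` is a BitstreamReader):

// Hypothetical parse step: a 2-bit frame marker that must equal 0b10.
if (!reader.VerifyNextUnsignedIs(2, 0x2, "Frame marker should be 2")) {
  return absl::nullopt;  // Malformed bitstream; bail out.
}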
@@ -128,7 +128,7 @@ class BitstreamReader {
}

// Basically the same as ReadUnsigned() - but for signed integers.
// Here |bits| indicates the size of the value - number of bits read from the
// Here `bits` indicates the size of the value - number of bits read from the
// bit buffer is one higher (the sign bit). This is made to matche the spec in
// which eg s(4) = f(1) sign-bit, plus an f(4).
template <typename T>

@@ -148,7 +148,7 @@ class BitstreamReader {
return {static_cast<T>(sign_val)};
}

// Reads |bits| from the bitstream, disregarding their value.
// Reads `bits` from the bitstream, disregarding their value.
// Returns true if full number of bits were read, false otherwise.
bool ConsumeBits(int bits) { return buffer_->ConsumeBits(bits); }
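A member-style sketch of the s(n) read described above, with assumed helper names (`ReadBit` and `ReadUnsignedBits` are stand-ins for whatever bit-reading primitives the class wraps): the magnitude is `bits` wide, preceded by one sign bit, and negated when the sign bit is set.

#include "absl/types/optional.h"

// Sketch: s(n) = f(1) sign bit followed by f(n) magnitude (names assumed).
template <typename T>
absl::optional<T> ReadSignedSketch(int bits) {
  uint32_t sign = 0;
  uint32_t value = 0;
  if (!ReadBit(&sign) || !ReadUnsignedBits(bits, &value))
    return absl::nullopt;  // Ran out of bits.
  int64_t sign_val = value;
  if (sign)
    sign_val = -sign_val;  // Sign bit set -> negative magnitude.
  return {static_cast<T>(sign_val)};
}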
@@ -76,8 +76,8 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
static_cast<unsigned char>(streams.size());
video_codec.minBitrate = streams[0].min_bitrate_bps / 1000;
bool codec_active = false;
// Active configuration might not be fully copied to |streams| for SVC yet.
// Therefore the |config| is checked here.
// Active configuration might not be fully copied to `streams` for SVC yet.
// Therefore the `config` is checked here.
for (const VideoStream& stream : config.simulcast_layers) {
if (stream.active) {
codec_active = true;

@@ -120,7 +120,7 @@ class VideoReceiver : public Module {
size_t max_nack_list_size_;

// Callbacks are set before the decoder thread starts.
// Once the decoder thread has been started, usage of |_codecDataBase| moves
// Once the decoder thread has been started, usage of `_codecDataBase` moves
// over to the decoder thread.
VCMDecoderDataBase _codecDataBase;

@@ -54,7 +54,7 @@ int32_t VideoReceiver2::RegisterReceiveCallback(

// Register an externally defined decoder object. This may be called on either
// the construction sequence or the decoder sequence to allow for lazy creation
// of video decoders. If called on the decoder sequence |externalDecoder| cannot
// of video decoders. If called on the decoder sequence `externalDecoder` cannot
// be a nullptr. It's the responsibility of the caller to make sure that the
// access from the two sequences are mutually exclusive.
void VideoReceiver2::RegisterExternalDecoder(VideoDecoder* externalDecoder,

@@ -43,7 +43,7 @@ class VideoReceiver2 {

// Notification methods that are used to check our internal state and validate
// threading assumptions. These are called by VideoReceiveStream.
// See |IsDecoderThreadRunning()| for more details.
// See `IsDecoderThreadRunning()` for more details.
void DecoderThreadStarting();
void DecoderThreadStopped();

@@ -62,7 +62,7 @@ class VideoReceiver2 {
VCMDecodedFrameCallback decodedFrameCallback_;

// Callbacks are set before the decoder thread starts.
// Once the decoder thread has been started, usage of |_codecDataBase| moves
// Once the decoder thread has been started, usage of `_codecDataBase` moves
// over to the decoder thread.
VCMDecoderDataBase codecDataBase_;