Make VideoFrameType an enum class, and move to separate file and target
Bug: webrtc:5876, webrtc:6883
Change-Id: I1435cfa9e8e54c4ba2978261048ff3fbb993ce0e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/126225
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27239}
This commit is contained in:
parent 3198fa4956
commit 8f7ce222e7
85 changed files with 685 additions and 589 deletions
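Editor's note: the entire diff below is mechanical fallout from one type change. VideoFrameType stops being an unscoped enum inherited from common_types.h and becomes a scoped enum class in its own header and GN target (api/video/video_frame_type.h, api/video:video_frame_type). Scoped enumerators must be qualified and no longer convert implicitly to int, which is why every call site gains a VideoFrameType:: prefix and why stream insertions and array indexing gain a static_cast. A minimal standalone sketch of the difference, for illustration only (not the WebRTC sources themselves):

#include <iostream>

// Old style, as removed from common_types.h below: an unscoped enum whose
// enumerators leak into the enclosing namespace and convert implicitly to int.
enum FrameTypeDeprecated {
  kEmptyFrame = 0,
  kVideoFrameKey = 3,
  kVideoFrameDelta = 4,
};

// New style, as added in api/video/video_frame_type.h: a scoped enum whose
// enumerators are qualified and have no implicit conversion to int.
enum class VideoFrameType {
  kEmptyFrame = 0,
  kVideoFrameKey = 3,   // Values preserved: the wire format depends on them.
  kVideoFrameDelta = 4,
};

int main() {
  int old_value = kVideoFrameKey;  // OK: implicit conversion to int.
  std::cout << old_value << "\n";

  VideoFrameType type = VideoFrameType::kVideoFrameKey;
  // std::cout << type;  // Would not compile: no implicit conversion.
  std::cout << static_cast<int>(type) << "\n";  // The cast this diff adds.
}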
@@ -350,8 +350,8 @@ rtc_source_set("fec_controller_api") {
   ]
 
   deps = [
-    "..:webrtc_common",
     "../modules:module_fec_api",
+    "video:video_frame_type",
   ]
 }
@@ -480,6 +480,7 @@ if (rtc_include_tests) {
       "..:webrtc_common",
      "../modules/video_coding:video_codec_interface",
      "../rtc_base:stringutils",
+      "video:video_frame_type",
      "video_codecs:video_codecs_api",
    ]
  }
@@ -14,7 +14,7 @@
 #include <memory>
 #include <vector>
 
-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video/video_frame_type.h"
 #include "modules/include/module_fec_types.h"
 
 namespace webrtc {
@@ -24,7 +24,7 @@ std::string VideoCodecTestStats::FrameStatistics::ToString() const {
   ss << " temporal_idx " << temporal_idx;
   ss << " inter_layer_predicted " << inter_layer_predicted;
   ss << " non_ref_for_inter_layer_pred " << non_ref_for_inter_layer_pred;
-  ss << " frame_type " << frame_type;
+  ss << " frame_type " << static_cast<int>(frame_type);
   ss << " length_bytes " << length_bytes;
   ss << " qp " << qp;
   ss << " psnr " << psnr;
@@ -16,7 +16,7 @@
 #include <string>
 #include <vector>
 
-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video/video_frame_type.h"
 
 namespace webrtc {
 namespace test {
@@ -43,7 +43,7 @@ class VideoCodecTestStats {
     size_t encode_time_us = 0;
     size_t target_bitrate_kbps = 0;
     size_t length_bytes = 0;
-    webrtc::VideoFrameType frame_type = kVideoFrameDelta;
+    VideoFrameType frame_type = VideoFrameType::kVideoFrameDelta;
 
     // Layering.
     size_t spatial_idx = 0;
@@ -41,6 +41,13 @@ rtc_source_set("video_frame") {
   ]
 }
 
+rtc_source_set("video_frame_type") {
+  visibility = [ "*" ]
+  sources = [
+    "video_frame_type.h",
+  ]
+}
+
 rtc_source_set("video_frame_i420") {
   visibility = [ "*" ]
   sources = [
@@ -83,6 +90,7 @@ rtc_source_set("encoded_image") {
   deps = [
     ":video_codec_constants",
     ":video_frame",
+    ":video_frame_type",
     "../..:webrtc_common",
     "../../rtc_base:checks",
     "../../rtc_base:rtc_base_approved",
@@ -18,6 +18,7 @@
 #include "api/video/video_codec_constants.h"
 #include "api/video/video_codec_type.h"
 #include "api/video/video_content_type.h"
+#include "api/video/video_frame_type.h"
 #include "api/video/video_rotation.h"
 #include "api/video/video_timing.h"
 #include "common_types.h"  // NOLINT(build/include)
@@ -109,7 +110,7 @@ class RTC_EXPORT EncodedImage {
   // NTP time of the capture time in local timebase in milliseconds.
   int64_t ntp_time_ms_ = 0;
   int64_t capture_time_ms_ = 0;
-  VideoFrameType _frameType = kVideoFrameDelta;
+  VideoFrameType _frameType = VideoFrameType::kVideoFrameDelta;
   VideoRotation rotation_ = kVideoRotation_0;
   VideoContentType content_type_ = VideoContentType::UNSPECIFIED;
   bool _completeFrame = false;
api/video/video_frame_type.h (new file, 26 lines)

@@ -0,0 +1,26 @@
+/*
+ *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_VIDEO_FRAME_TYPE_H_
+#define API_VIDEO_VIDEO_FRAME_TYPE_H_
+
+namespace webrtc {
+
+enum class VideoFrameType {
+  kEmptyFrame = 0,
+  // Wire format for MultiplexEncodedImagePacker seems to depend on numerical
+  // values of these constants.
+  kVideoFrameKey = 3,
+  kVideoFrameDelta = 4,
+};
+
+}  // namespace webrtc
+
+#endif  // API_VIDEO_VIDEO_FRAME_TYPE_H_
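Editor's note: the comment in the new header explains why the enumerators keep the values 0, 3 and 4 instead of being renumbered: MultiplexEncodedImagePacker's wire format appears to depend on them. A hypothetical sketch of the constraint (this is not the packer's actual code): once the frame type is written to the wire as its integer value, that value becomes part of the format and must survive the enum-class conversion unchanged.

#include <cstdint>

#include "api/video/video_frame_type.h"

// Hypothetical packer helper: writes the frame type as one byte on the wire.
uint8_t PackFrameType(webrtc::VideoFrameType type) {
  return static_cast<uint8_t>(type);  // Emits 0, 3, or 4.
}

// Hypothetical unpacker: a real one would validate the byte first; this
// sketch assumes well-formed input.
webrtc::VideoFrameType UnpackFrameType(uint8_t wire_value) {
  return static_cast<webrtc::VideoFrameType>(wire_value);
}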
@@ -88,7 +88,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, InitializesDecoder) {
   EXPECT_EQ(1, fake_decoder_->init_decode_count_);
 
   EncodedImage encoded_image;
-  encoded_image._frameType = kVideoFrameKey;
+  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
   fallback_wrapper_->Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(1, fake_decoder_->init_decode_count_)
       << "Initialized decoder should not be reinitialized.";
@@ -103,7 +103,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
   EXPECT_EQ(1, fake_decoder_->init_decode_count_);
 
   EncodedImage encoded_image;
-  encoded_image._frameType = kVideoFrameKey;
+  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
   fallback_wrapper_->Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(1, fake_decoder_->init_decode_count_)
       << "Should not have attempted reinitializing the fallback decoder on "
@@ -124,7 +124,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, IsSoftwareFallbackSticky) {
   EXPECT_EQ(1, fake_decoder_->decode_count_);
 
   // Software fallback should be sticky, fake_decoder_ shouldn't be used.
-  encoded_image._frameType = kVideoFrameKey;
+  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
   fallback_wrapper_->Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(1, fake_decoder_->decode_count_)
       << "Decoder shouldn't be used after failure.";
@@ -242,7 +242,7 @@ TEST_F(ForcedSoftwareDecoderFallbackTest, UsesForcedFallback) {
   EXPECT_EQ(1, sw_fallback_decoder_->init_decode_count_);
 
   EncodedImage encoded_image;
-  encoded_image._frameType = kVideoFrameKey;
+  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
   fallback_wrapper_->Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(1, sw_fallback_decoder_->init_decode_count_);
   EXPECT_EQ(1, sw_fallback_decoder_->decode_count_);
@@ -180,7 +180,7 @@ void VideoEncoderSoftwareFallbackWrapperTest::EncodeFrame(int expected_ret) {
   rtc::scoped_refptr<I420Buffer> buffer =
       I420Buffer::Create(codec_.width, codec_.height);
   I420Buffer::SetBlack(buffer);
-  std::vector<VideoFrameType> types(1, kVideoFrameKey);
+  std::vector<VideoFrameType> types(1, VideoFrameType::kVideoFrameKey);
 
   frame_ =
       absl::make_unique<VideoFrame>(VideoFrame::Builder()
@@ -292,7 +292,7 @@ TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
   EXPECT_EQ(&callback2, fake_encoder_->encode_complete_callback_);
 
   // Encoding a frame using the fallback should arrive at the new callback.
-  std::vector<VideoFrameType> types(1, kVideoFrameKey);
+  std::vector<VideoFrameType> types(1, VideoFrameType::kVideoFrameKey);
   frame_->set_timestamp(frame_->timestamp() + 1000);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, fallback_wrapper_->Encode(*frame_, &types));
 
@@ -172,7 +172,7 @@ RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader(
           : absl::nullopt;
   SetVideoTiming(image, &rtp_video_header.video_timing);
 
-  const bool is_keyframe = image._frameType == kVideoFrameKey;
+  const bool is_keyframe = image._frameType == VideoFrameType::kVideoFrameKey;
   const bool first_frame_in_picture =
       (codec_specific_info && codec_specific_info->codecType == kVideoCodecVP9)
           ? codec_specific_info->codecSpecific.VP9.first_frame_in_picture
@@ -386,16 +386,16 @@ class RtpPayloadParamsVp8ToGenericTest : public ::testing::Test {
 };
 
 TEST_F(RtpPayloadParamsVp8ToGenericTest, Keyframe) {
-  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {}, 480, 360);
-  ConvertAndCheck(0, 1, kVideoFrameDelta, kNoSync, {0});
-  ConvertAndCheck(0, 2, kVideoFrameKey, kNoSync, {}, 480, 360);
+  ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+  ConvertAndCheck(0, 1, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+  ConvertAndCheck(0, 2, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
 }
 
 TEST_F(RtpPayloadParamsVp8ToGenericTest, TooHighTemporalIndex) {
-  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {}, 480, 360);
+  ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
 
   EncodedImage encoded_image;
-  encoded_image._frameType = kVideoFrameDelta;
+  encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
   CodecSpecificInfo codec_info;
   codec_info.codecType = kVideoCodecVP8;
   codec_info.codecSpecific.VP8.temporalIdx =
@@ -409,27 +409,28 @@ TEST_F(RtpPayloadParamsVp8ToGenericTest, TooHighTemporalIndex) {
 
 TEST_F(RtpPayloadParamsVp8ToGenericTest, LayerSync) {
   // 02120212 pattern
-  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {}, 480, 360);
-  ConvertAndCheck(2, 1, kVideoFrameDelta, kNoSync, {0});
-  ConvertAndCheck(1, 2, kVideoFrameDelta, kNoSync, {0});
-  ConvertAndCheck(2, 3, kVideoFrameDelta, kNoSync, {0, 1, 2});
+  ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+  ConvertAndCheck(2, 1, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+  ConvertAndCheck(1, 2, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+  ConvertAndCheck(2, 3, VideoFrameType::kVideoFrameDelta, kNoSync, {0, 1, 2});
 
-  ConvertAndCheck(0, 4, kVideoFrameDelta, kNoSync, {0});
-  ConvertAndCheck(2, 5, kVideoFrameDelta, kNoSync, {2, 3, 4});
-  ConvertAndCheck(1, 6, kVideoFrameDelta, kSync, {4});  // layer sync
-  ConvertAndCheck(2, 7, kVideoFrameDelta, kNoSync, {4, 5, 6});
+  ConvertAndCheck(0, 4, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+  ConvertAndCheck(2, 5, VideoFrameType::kVideoFrameDelta, kNoSync, {2, 3, 4});
+  ConvertAndCheck(1, 6, VideoFrameType::kVideoFrameDelta, kSync,
+                  {4});  // layer sync
+  ConvertAndCheck(2, 7, VideoFrameType::kVideoFrameDelta, kNoSync, {4, 5, 6});
 }
 
 TEST_F(RtpPayloadParamsVp8ToGenericTest, FrameIdGaps) {
   // 0101 pattern
-  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {}, 480, 360);
-  ConvertAndCheck(1, 1, kVideoFrameDelta, kNoSync, {0});
+  ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+  ConvertAndCheck(1, 1, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
 
-  ConvertAndCheck(0, 5, kVideoFrameDelta, kNoSync, {0});
-  ConvertAndCheck(1, 10, kVideoFrameDelta, kNoSync, {1, 5});
+  ConvertAndCheck(0, 5, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+  ConvertAndCheck(1, 10, VideoFrameType::kVideoFrameDelta, kNoSync, {1, 5});
 
-  ConvertAndCheck(0, 15, kVideoFrameDelta, kNoSync, {5});
-  ConvertAndCheck(1, 20, kVideoFrameDelta, kNoSync, {10, 15});
+  ConvertAndCheck(0, 15, VideoFrameType::kVideoFrameDelta, kNoSync, {5});
+  ConvertAndCheck(1, 20, VideoFrameType::kVideoFrameDelta, kNoSync, {10, 15});
 }
 
 }  // namespace webrtc
@@ -414,7 +414,7 @@ EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
   if (!rtp_streams_[stream_index].rtp_rtcp->OnSendingRtpFrame(
           encoded_image.Timestamp(), encoded_image.capture_time_ms_,
           rtp_config_.payload_type,
-          encoded_image._frameType == kVideoFrameKey)) {
+          encoded_image._frameType == VideoFrameType::kVideoFrameKey)) {
     // The payload router could be active but this module isn't sending.
     return Result(Result::ERROR_SEND_FAILED);
   }
@@ -428,12 +428,12 @@ EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
       expected_retransmission_time_ms);
   if (frame_count_observer_) {
     FrameCounts& counts = frame_counts_[stream_index];
-    if (encoded_image._frameType == kVideoFrameKey) {
+    if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
       ++counts.key_frames;
-    } else if (encoded_image._frameType == kVideoFrameDelta) {
+    } else if (encoded_image._frameType == VideoFrameType::kVideoFrameDelta) {
       ++counts.delta_frames;
     } else {
-      RTC_DCHECK_EQ(encoded_image._frameType, kEmptyFrame);
+      RTC_DCHECK(encoded_image._frameType == VideoFrameType::kEmptyFrame);
     }
     frame_count_observer_->FrameCountUpdated(counts,
                                              rtp_config_.ssrcs[stream_index]);
@@ -140,7 +140,7 @@ TEST(RtpVideoSenderTest, SendOnOneModule) {
   EncodedImage encoded_image;
   encoded_image.SetTimestamp(1);
   encoded_image.capture_time_ms_ = 2;
-  encoded_image._frameType = kVideoFrameKey;
+  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
   encoded_image.Allocate(1);
   encoded_image.data()[0] = kPayload;
   encoded_image.set_size(1);
@@ -171,7 +171,7 @@ TEST(RtpVideoSenderTest, SendSimulcastSetActive) {
   EncodedImage encoded_image_1;
   encoded_image_1.SetTimestamp(1);
   encoded_image_1.capture_time_ms_ = 2;
-  encoded_image_1._frameType = kVideoFrameKey;
+  encoded_image_1._frameType = VideoFrameType::kVideoFrameKey;
   encoded_image_1.Allocate(1);
   encoded_image_1.data()[0] = kPayload;
   encoded_image_1.set_size(1);
@@ -215,7 +215,7 @@ TEST(RtpVideoSenderTest, SendSimulcastSetActiveModules) {
   EncodedImage encoded_image_1;
   encoded_image_1.SetTimestamp(1);
   encoded_image_1.capture_time_ms_ = 2;
-  encoded_image_1._frameType = kVideoFrameKey;
+  encoded_image_1._frameType = VideoFrameType::kVideoFrameKey;
   encoded_image_1.Allocate(1);
   encoded_image_1.data()[0] = kPayload;
   encoded_image_1.set_size(1);
@@ -304,12 +304,12 @@ TEST(RtpVideoSenderTest, FrameCountCallbacks) {
   EncodedImage encoded_image;
   encoded_image.SetTimestamp(1);
   encoded_image.capture_time_ms_ = 2;
-  encoded_image._frameType = kVideoFrameKey;
+  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
   encoded_image.Allocate(1);
   encoded_image.data()[0] = kPayload;
   encoded_image.set_size(1);
 
-  encoded_image._frameType = kVideoFrameKey;
+  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
 
   // No callbacks when not active.
   EXPECT_CALL(callback, FrameCountUpdated).Times(0);
@@ -332,7 +332,7 @@ TEST(RtpVideoSenderTest, FrameCountCallbacks) {
 
   testing::Mock::VerifyAndClearExpectations(&callback);
 
-  encoded_image._frameType = kVideoFrameDelta;
+  encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
   EXPECT_CALL(callback, FrameCountUpdated(_, kSsrc1))
       .WillOnce(SaveArg<0>(&frame_counts));
   EXPECT_EQ(
@@ -28,21 +28,6 @@
 
 namespace webrtc {
 
-// TODO(bugs.webrtc.org/6883): This type should be split into separate types for
-// audio and video, and then moved out of this file.
-enum FrameTypeDeprecated {
-  kEmptyFrame = 0,
-  kAudioFrameSpeech = 1,
-  kAudioFrameCN = 2,
-  kVideoFrameKey = 3,
-  kVideoFrameDelta = 4,
-};
-
-// Can't use RTC_DEPRECATED until Chromium is updated.
-typedef FrameTypeDeprecated FrameType;
-
-using VideoFrameType = FrameTypeDeprecated;
-
 // Statistics for RTCP packet types.
 struct RtcpPacketTypeCounter {
   RtcpPacketTypeCounter()
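Editor's note: deleting the unscoped enum also breaks code that used frame-type enumerators as plain array indices, which is why the Opus DTX test further down wraps AudioFrameType values in static_cast<int>. A small sketch of the pattern, with the enum definition assumed here for illustration (the real AudioFrameType lives elsewhere in the tree and keeps the old FrameTypeDeprecated values):

#include <array>

// Assumed shape of the audio-side type, for illustration only.
enum class AudioFrameType {
  kEmptyFrame = 0,
  kAudioFrameSpeech = 1,
  kAudioFrameCN = 2,
};

int main() {
  std::array<int, 3> expects{};
  // An unscoped enum indexed the array directly; a scoped enum needs the cast.
  expects[static_cast<int>(AudioFrameType::kAudioFrameCN)] = 1;
  return expects[2] == 1 ? 0 : 1;
}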
@@ -353,7 +353,7 @@ int SimulcastEncoderAdapter::Encode(
   bool send_key_frame = false;
   if (frame_types) {
     for (size_t i = 0; i < frame_types->size(); ++i) {
-      if (frame_types->at(i) == kVideoFrameKey) {
+      if (frame_types->at(i) == VideoFrameType::kVideoFrameKey) {
         send_key_frame = true;
         break;
       }
@@ -377,10 +377,10 @@ int SimulcastEncoderAdapter::Encode(
 
     std::vector<VideoFrameType> stream_frame_types;
     if (send_key_frame) {
-      stream_frame_types.push_back(kVideoFrameKey);
+      stream_frame_types.push_back(VideoFrameType::kVideoFrameKey);
       streaminfos_[stream_idx].key_frame_request = false;
     } else {
-      stream_frame_types.push_back(kVideoFrameDelta);
+      stream_frame_types.push_back(VideoFrameType::kVideoFrameDelta);
     }
 
     int dst_width = streaminfos_[stream_idx].width;
@@ -572,7 +572,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
   EXPECT_CALL(*original_encoders[2], Encode(_, _))
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
-  frame_types.resize(3, kVideoFrameKey);
+  frame_types.resize(3, VideoFrameType::kVideoFrameKey);
   EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
   EXPECT_CALL(*original_encoders[0], Release())
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
@@ -597,7 +597,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
   ASSERT_EQ(original_encoders[1], new_encoders[1]);
   EXPECT_CALL(*original_encoders[1], Encode(_, _))
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
-  frame_types.resize(2, kVideoFrameKey);
+  frame_types.resize(2, VideoFrameType::kVideoFrameKey);
   EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
   EXPECT_CALL(*original_encoders[0], Release())
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
@@ -617,7 +617,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
   ASSERT_EQ(original_encoders[0], new_encoders[0]);
   EXPECT_CALL(*original_encoders[0], Encode(_, _))
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
-  frame_types.resize(1, kVideoFrameKey);
+  frame_types.resize(1, VideoFrameType::kVideoFrameKey);
   EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
   EXPECT_CALL(*original_encoders[0], Release())
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
@@ -641,7 +641,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
   EXPECT_CALL(*new_encoders[2], Encode(_, _))
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
-  frame_types.resize(3, kVideoFrameKey);
+  frame_types.resize(3, VideoFrameType::kVideoFrameKey);
   EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
   EXPECT_CALL(*original_encoders[0], Release())
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
@@ -889,7 +889,7 @@ TEST_F(TestSimulcastEncoderAdapterFake,
   // frame and can't otherwise be modified/resized.
   for (MockVideoEncoder* encoder : helper_->factory()->encoders())
     EXPECT_CALL(*encoder, Encode(::testing::Ref(input_frame), _)).Times(1);
-  std::vector<VideoFrameType> frame_types(3, kVideoFrameKey);
+  std::vector<VideoFrameType> frame_types(3, VideoFrameType::kVideoFrameKey);
   EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
 }
 
@@ -915,7 +915,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
           .set_timestamp_us(0)
           .set_rotation(kVideoRotation_0)
          .build();
-  std::vector<VideoFrameType> frame_types(3, kVideoFrameKey);
+  std::vector<VideoFrameType> frame_types(3, VideoFrameType::kVideoFrameKey);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE,
             adapter_->Encode(input_frame, &frame_types));
 }
@@ -1031,7 +1031,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ActivatesCorrectStreamsInInitEncode) {
   EXPECT_CALL(*original_encoders[2], Encode(_, _)).Times(0);
 
   std::vector<VideoFrameType> frame_types;
-  frame_types.resize(3, kVideoFrameKey);
+  frame_types.resize(3, VideoFrameType::kVideoFrameKey);
   EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
 }
 
@@ -52,6 +52,7 @@ rtc_source_set("module_api") {
     ":module_fec_api",
     "..:webrtc_common",
     "../api:rtp_headers",
+    "../api/video:video_frame_type",
    "../modules/rtp_rtcp:rtp_video_header",
    "../rtc_base:safe_conversions",
    "../rtc_base/system:rtc_export",
@@ -226,8 +226,8 @@ void TestOpusDtx::Perform() {
       out_filename, false, expects);
 
   EXPECT_EQ(0, acm_send_->EnableOpusDtx());
-  expects[kEmptyFrame] = 1;
-  expects[kAudioFrameCN] = 1;
+  expects[static_cast<int>(AudioFrameType::kEmptyFrame)] = 1;
+  expects[static_cast<int>(AudioFrameType::kAudioFrameCN)] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"), 32000, 1,
       out_filename, true, expects);
@@ -235,15 +235,15 @@
   out_filename = webrtc::test::OutputPath() + "testOpusDtx_outFile_stereo.pcm";
   RegisterCodec({"opus", 48000, 2, {{"stereo", "1"}}}, absl::nullopt);
   EXPECT_EQ(0, acm_send_->DisableOpusDtx());
-  expects[kEmptyFrame] = 0;
-  expects[kAudioFrameCN] = 0;
+  expects[static_cast<int>(AudioFrameType::kEmptyFrame)] = 0;
+  expects[static_cast<int>(AudioFrameType::kAudioFrameCN)] = 0;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"), 32000,
       2, out_filename, false, expects);
 
   EXPECT_EQ(0, acm_send_->EnableOpusDtx());
 
-  expects[kEmptyFrame] = 1;
-  expects[kAudioFrameCN] = 1;
+  expects[static_cast<int>(AudioFrameType::kEmptyFrame)] = 1;
+  expects[static_cast<int>(AudioFrameType::kAudioFrameCN)] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"), 32000,
       2, out_filename, true, expects);
 }
@@ -16,7 +16,7 @@
 #include <vector>
 
 #include "api/rtp_headers.h"
-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video/video_frame_type.h"
 #include "modules/include/module_common_types_public.h"
 #include "modules/include/module_fec_types.h"
 #include "modules/rtp_rtcp/source/rtp_video_header.h"
@@ -212,8 +212,9 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
     EXPECT_TRUE(rtp_rtcp_module_->OnSendingRtpFrame(timestamp, timestamp / 90,
                                                     kPayloadType, false));
     EXPECT_TRUE(rtp_sender_video_->SendVideo(
-        webrtc::kVideoFrameDelta, kPayloadType, timestamp, timestamp / 90,
-        payload_data, payload_data_length, nullptr, &video_header, 0));
+        VideoFrameType::kVideoFrameDelta, kPayloadType, timestamp,
+        timestamp / 90, payload_data, payload_data_length, nullptr,
+        &video_header, 0));
     // Min required delay until retransmit = 5 + RTT ms (RTT = 0).
     fake_clock.AdvanceTimeMilliseconds(5);
     int length = BuildNackList(nack_list);
@@ -263,8 +264,9 @@ TEST_F(RtpRtcpRtxNackTest, LongNackList) {
   EXPECT_TRUE(rtp_rtcp_module_->OnSendingRtpFrame(timestamp, timestamp / 90,
                                                   kPayloadType, false));
   EXPECT_TRUE(rtp_sender_video_->SendVideo(
-      webrtc::kVideoFrameDelta, kPayloadType, timestamp, timestamp / 90,
-      payload_data, payload_data_length, nullptr, &video_header, 0));
+      VideoFrameType::kVideoFrameDelta, kPayloadType, timestamp,
+      timestamp / 90, payload_data, payload_data_length, nullptr,
+      &video_header, 0));
   // Prepare next frame.
   timestamp += 3000;
   fake_clock.AdvanceTimeMilliseconds(33);
@@ -486,7 +486,7 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
     nalu_start_offsets.push_back(0);
   }
   h264_header.nalu_type = nal_type;
-  parsed_payload->frame_type = kVideoFrameDelta;
+  parsed_payload->frame_type = VideoFrameType::kVideoFrameDelta;
 
   nalu_start_offsets.push_back(length_ + kLengthFieldSize);  // End offset.
   for (size_t i = 0; i < nalu_start_offsets.size() - 1; ++i) {
@@ -572,7 +572,7 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
       } else {
         RTC_LOG(LS_WARNING) << "Failed to parse SPS id from SPS slice.";
       }
-      parsed_payload->frame_type = kVideoFrameKey;
+      parsed_payload->frame_type = VideoFrameType::kVideoFrameKey;
       break;
     }
     case H264::NaluType::kPps: {
@@ -590,7 +590,7 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
       break;
     }
     case H264::NaluType::kIdr:
-      parsed_payload->frame_type = kVideoFrameKey;
+      parsed_payload->frame_type = VideoFrameType::kVideoFrameKey;
       RTC_FALLTHROUGH();
     case H264::NaluType::kSlice: {
       absl::optional<uint32_t> pps_id = PpsParser::ParsePpsIdFromSlice(
@@ -665,9 +665,9 @@ bool RtpDepacketizerH264::ParseFuaNalu(
   }
 
   if (original_nal_type == H264::NaluType::kIdr) {
-    parsed_payload->frame_type = kVideoFrameKey;
+    parsed_payload->frame_type = VideoFrameType::kVideoFrameKey;
   } else {
-    parsed_payload->frame_type = kVideoFrameDelta;
+    parsed_payload->frame_type = VideoFrameType::kVideoFrameDelta;
   }
   parsed_payload->video_header().width = 0;
   parsed_payload->video_header().height = 0;
@@ -608,7 +608,7 @@ TEST_F(RtpDepacketizerH264Test, TestSingleNalu) {
 
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
-  EXPECT_EQ(kVideoFrameKey, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
   EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
   EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
   EXPECT_EQ(kH264SingleNalu, payload.h264().packetization_type);
@@ -623,7 +623,7 @@ TEST_F(RtpDepacketizerH264Test, TestSingleNaluSpsWithResolution) {
 
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
-  EXPECT_EQ(kVideoFrameKey, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
   EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
   EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
   EXPECT_EQ(kH264SingleNalu, payload.h264().packetization_type);
@@ -652,7 +652,7 @@ TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
   H264ParsedPayload payload;
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
-  EXPECT_EQ(kVideoFrameKey, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
   EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
   EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
   const RTPVideoHeaderH264& h264 = payload.h264();
@@ -683,7 +683,7 @@ TEST_F(RtpDepacketizerH264Test, TestStapANaluSpsWithResolution) {
 
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
-  EXPECT_EQ(kVideoFrameKey, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
   EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
   EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
   EXPECT_EQ(kH264StapA, payload.h264().packetization_type);
@@ -810,7 +810,7 @@ TEST_F(RtpDepacketizerH264Test, TestStapADelta) {
 
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
-  EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
   EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
   EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
   EXPECT_EQ(kH264StapA, payload.h264().packetization_type);
@@ -849,7 +849,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
   // has been replaced by the original nal header.
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet1, sizeof(packet1)));
   ExpectPacket(&payload, kExpected1, sizeof(kExpected1));
-  EXPECT_EQ(kVideoFrameKey, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
   EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
   EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
   const RTPVideoHeaderH264& h264 = payload.h264();
@@ -865,7 +865,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
   payload = H264ParsedPayload();
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet2, sizeof(packet2)));
   ExpectPacket(&payload, kExpected2, sizeof(kExpected2));
-  EXPECT_EQ(kVideoFrameKey, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
   EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
   EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
   {
@@ -879,7 +879,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
   payload = H264ParsedPayload();
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet3, sizeof(packet3)));
   ExpectPacket(&payload, kExpected3, sizeof(kExpected3));
-  EXPECT_EQ(kVideoFrameKey, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
   EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
   EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
   {
@@ -936,7 +936,7 @@ TEST_F(RtpDepacketizerH264Test, TestSeiPacket) {
   H264ParsedPayload payload;
   ASSERT_TRUE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
   const RTPVideoHeaderH264& h264 = payload.h264();
-  EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
   EXPECT_EQ(kH264SingleNalu, h264.packetization_type);
   EXPECT_EQ(kSei, h264.nalu_type);
   ASSERT_EQ(1u, h264.nalus_length);
@@ -75,7 +75,7 @@ void RtpPacketizerGeneric::BuildHeader(const RTPVideoHeader& rtp_video_header,
                                        VideoFrameType frame_type) {
   header_size_ = kGenericHeaderLength;
   header_[0] = RtpFormatVideoGeneric::kFirstPacketBit;
-  if (frame_type == kVideoFrameKey) {
+  if (frame_type == VideoFrameType::kVideoFrameKey) {
     header_[0] |= RtpFormatVideoGeneric::kKeyFrameBit;
   }
   if (rtp_video_header.generic.has_value()) {
@@ -105,8 +105,8 @@ bool RtpDepacketizerGeneric::Parse(ParsedPayload* parsed_payload,
 
   parsed_payload->frame_type =
       ((generic_header & RtpFormatVideoGeneric::kKeyFrameBit) != 0)
-          ? kVideoFrameKey
-          : kVideoFrameDelta;
+          ? VideoFrameType::kVideoFrameKey
+          : VideoFrameType::kVideoFrameDelta;
   parsed_payload->video_header().is_first_packet_in_frame =
       (generic_header & RtpFormatVideoGeneric::kFirstPacketBit) != 0;
   parsed_payload->video_header().codec = kVideoCodecGeneric;
@@ -49,7 +49,7 @@ TEST(RtpPacketizerVideoGeneric, RespectsMaxPayloadSize) {
   RtpPacketizer::PayloadSizeLimits limits;
   limits.max_payload_len = 6;
   RtpPacketizerGeneric packetizer(kPayload, limits, RTPVideoHeader(),
-                                  kVideoFrameKey);
+                                  VideoFrameType::kVideoFrameKey);
 
   std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
 
@@ -63,7 +63,7 @@ TEST(RtpPacketizerVideoGeneric, UsesMaxPayloadSize) {
   RtpPacketizer::PayloadSizeLimits limits;
   limits.max_payload_len = 6;
   RtpPacketizerGeneric packetizer(kPayload, limits, RTPVideoHeader(),
-                                  kVideoFrameKey);
+                                  VideoFrameType::kVideoFrameKey);
 
   std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
 
@@ -79,7 +79,7 @@ TEST(RtpPacketizerVideoGeneric, WritesExtendedHeaderWhenPictureIdIsSet) {
   RTPVideoHeader rtp_video_header;
   rtp_video_header.generic.emplace().frame_id = 37;
   RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, rtp_video_header,
-                                  kVideoFrameKey);
+                                  VideoFrameType::kVideoFrameKey);
 
   RtpPacketToSend packet(nullptr);
   ASSERT_TRUE(packetizer.NextPacket(&packet));
@@ -101,7 +101,7 @@ TEST(RtpPacketizerVideoGeneric, RespectsMaxPayloadSizeWithExtendedHeader) {
   RTPVideoHeader rtp_video_header;
   rtp_video_header.generic.emplace().frame_id = 37;
   RtpPacketizerGeneric packetizer(kPayload, limits, rtp_video_header,
-                                  kVideoFrameKey);
+                                  VideoFrameType::kVideoFrameKey);
 
   std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
 
@@ -117,7 +117,7 @@ TEST(RtpPacketizerVideoGeneric, UsesMaxPayloadSizeWithExtendedHeader) {
   RTPVideoHeader rtp_video_header;
   rtp_video_header.generic.emplace().frame_id = 37;
   RtpPacketizerGeneric packetizer(kPayload, limits, rtp_video_header,
-                                  kVideoFrameKey);
+                                  VideoFrameType::kVideoFrameKey);
   std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
 
   // With kPayloadSize > max_payload_len^2, there should be packets that use
@@ -132,7 +132,7 @@ TEST(RtpPacketizerVideoGeneric, FrameIdOver15bitsWrapsAround) {
   RTPVideoHeader rtp_video_header;
   rtp_video_header.generic.emplace().frame_id = 0x8137;
   RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, rtp_video_header,
-                                  kVideoFrameKey);
+                                  VideoFrameType::kVideoFrameKey);
 
   RtpPacketToSend packet(nullptr);
   ASSERT_TRUE(packetizer.NextPacket(&packet));
@@ -149,7 +149,7 @@ TEST(RtpPacketizerVideoGeneric, NoFrameIdDoesNotWriteExtendedHeader) {
   const uint8_t kPayload[kPayloadSize] = {};
 
   RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, RTPVideoHeader(),
-                                  kVideoFrameKey);
+                                  VideoFrameType::kVideoFrameKey);
 
   RtpPacketToSend packet(nullptr);
   ASSERT_TRUE(packetizer.NextPacket(&packet));
@@ -131,7 +131,7 @@ int ParseVP8Extension(RTPVideoHeaderVP8* vp8,
 int ParseVP8FrameSize(RtpDepacketizer::ParsedPayload* parsed_payload,
                       const uint8_t* data,
                       size_t data_length) {
-  if (parsed_payload->frame_type != kVideoFrameKey) {
+  if (parsed_payload->frame_type != VideoFrameType::kVideoFrameKey) {
     // Included in payload header for I-frames.
     return 0;
   }
@@ -357,10 +357,11 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
 
   // Read P bit from payload header (only at beginning of first partition).
   if (beginning_of_partition && partition_id == 0) {
-    parsed_payload->frame_type =
-        (*payload_data & 0x01) ? kVideoFrameDelta : kVideoFrameKey;
+    parsed_payload->frame_type = (*payload_data & 0x01)
+                                     ? VideoFrameType::kVideoFrameDelta
+                                     : VideoFrameType::kVideoFrameKey;
   } else {
-    parsed_payload->frame_type = kVideoFrameDelta;
+    parsed_payload->frame_type = VideoFrameType::kVideoFrameDelta;
   }
 
   if (ParseVP8FrameSize(parsed_payload, payload_data, payload_data_length) !=
@@ -198,7 +198,7 @@ TEST_F(RtpDepacketizerVp8Test, BasicHeader) {
   ExpectPacket(&payload, packet + kHeaderLength,
                sizeof(packet) - kHeaderLength);
 
-  EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
   EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
   VerifyBasicHeader(&payload.video_header(), 0, 1, 4);
   VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx,
@@ -218,7 +218,7 @@ TEST_F(RtpDepacketizerVp8Test, PictureID) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet + kHeaderLength1,
                sizeof(packet) - kHeaderLength1);
-  EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
   EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
   VerifyBasicHeader(&payload.video_header(), 1, 0, 0);
   VerifyExtensions(&payload.video_header(), kPictureId, kNoTl0PicIdx,
@@ -249,7 +249,7 @@ TEST_F(RtpDepacketizerVp8Test, Tl0PicIdx) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet + kHeaderLength,
                sizeof(packet) - kHeaderLength);
-  EXPECT_EQ(kVideoFrameKey, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
   EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
   VerifyBasicHeader(&payload.video_header(), 0, 1, 0);
   VerifyExtensions(&payload.video_header(), kNoPictureId, kTl0PicIdx,
@@ -267,7 +267,7 @@ TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet + kHeaderLength,
                sizeof(packet) - kHeaderLength);
-  EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
   EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
   VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
   VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx, 2,
@@ -289,7 +289,7 @@ TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet + kHeaderLength,
                sizeof(packet) - kHeaderLength);
-  EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
   EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
   VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
   VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx,
@@ -310,7 +310,7 @@ TEST_F(RtpDepacketizerVp8Test, MultipleExtensions) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet + kHeaderLength,
                sizeof(packet) - kHeaderLength);
-  EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
   EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
   VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
   VerifyExtensions(&payload.video_header(), (17 << 8) + 17, 42, 1, 17);
@@ -351,7 +351,7 @@ TEST_F(RtpDepacketizerVp8Test, TestWithPacketizer) {
       depacketizer_->Parse(&payload, rtp_payload.data(), rtp_payload.size()));
   auto vp8_payload = rtp_payload.subview(kHeaderLength);
   ExpectPacket(&payload, vp8_payload.data(), vp8_payload.size());
-  EXPECT_EQ(kVideoFrameKey, payload.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
   EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
   VerifyBasicHeader(&payload.video_header(), 1, 1, 0);
   VerifyExtensions(&payload.video_header(), input_header.pictureId,
@@ -608,7 +608,8 @@ bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload,
   parsed_payload->video_header().simulcastIdx = 0;
   parsed_payload->video_header().codec = kVideoCodecVP9;
 
-  parsed_payload->frame_type = p_bit ? kVideoFrameDelta : kVideoFrameKey;
+  parsed_payload->frame_type =
+      p_bit ? VideoFrameType::kVideoFrameDelta : VideoFrameType::kVideoFrameKey;
 
   auto& vp9_header = parsed_payload->video_header()
                          .video_type_header.emplace<RTPVideoHeaderVP9>();
@@ -749,7 +749,7 @@ TEST_F(RtpDepacketizerVp9Test, ParseFirstPacketInKeyFrame) {
 
   RtpDepacketizer::ParsedPayload parsed;
   ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
-  EXPECT_EQ(kVideoFrameKey, parsed.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, parsed.frame_type);
   EXPECT_TRUE(parsed.video_header().is_first_packet_in_frame);
 }
 
@@ -759,7 +759,7 @@ TEST_F(RtpDepacketizerVp9Test, ParseLastPacketInDeltaFrame) {
 
   RtpDepacketizer::ParsedPayload parsed;
   ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
-  EXPECT_EQ(kVideoFrameDelta, parsed.frame_type);
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, parsed.frame_type);
   EXPECT_FALSE(parsed.video_header().is_first_packet_in_frame);
 }
 
@@ -231,9 +231,9 @@ class RtpRtcpImplTest : public ::testing::Test {
 
     const uint8_t payload[100] = {0};
     EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, codec_.plType, true));
-    EXPECT_TRUE(sender->SendVideo(kVideoFrameKey, codec_.plType, 0, 0, payload,
-                                  sizeof(payload), nullptr, &rtp_video_header,
-                                  0));
+    EXPECT_TRUE(sender->SendVideo(VideoFrameType::kVideoFrameKey, codec_.plType,
+                                  0, 0, payload, sizeof(payload), nullptr,
+                                  &rtp_video_header, 0));
   }
 
   void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
@@ -520,7 +520,7 @@ TEST_P(RtpSenderTestWithoutPacer, OnSendSideDelayUpdated) {
   int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
   fake_clock_.AdvanceTimeMilliseconds(10);
   EXPECT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, kPayloadType,
+      VideoFrameType::kVideoFrameKey, kPayloadType,
       capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
       kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
      kDefaultExpectedRetransmissionTimeMs));
@@ -531,7 +531,7 @@
       .Times(1);
   fake_clock_.AdvanceTimeMilliseconds(10);
   EXPECT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, kPayloadType,
+      VideoFrameType::kVideoFrameKey, kPayloadType,
       capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
       kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
      kDefaultExpectedRetransmissionTimeMs));
@@ -543,7 +543,7 @@
       .Times(1);
   capture_time_ms = fake_clock_.TimeInMilliseconds();
   EXPECT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, kPayloadType,
+      VideoFrameType::kVideoFrameKey, kPayloadType,
       capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
       kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
      kDefaultExpectedRetransmissionTimeMs));
@@ -556,7 +556,7 @@
   EXPECT_CALL(send_side_delay_observer_, SendSideDelayUpdated(1, 1, kSsrc))
       .Times(1);
   EXPECT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, kPayloadType,
+      VideoFrameType::kVideoFrameKey, kPayloadType,
       capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
       kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
      kDefaultExpectedRetransmissionTimeMs));
@@ -1078,8 +1078,9 @@ TEST_P(RtpSenderTestWithoutPacer, SendGenericVideo) {
   // Send keyframe
   RTPVideoHeader video_header;
   ASSERT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
-      nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameKey, payload_type, 1234, 4321, payload,
+      sizeof(payload), nullptr, &video_header,
+      kDefaultExpectedRetransmissionTimeMs));
 
   auto sent_payload = transport_.last_sent_packet().payload();
   uint8_t generic_header = sent_payload[0];
@@ -1093,8 +1094,9 @@ TEST_P(RtpSenderTestWithoutPacer, SendGenericVideo) {
   payload[4] = 13;
 
   ASSERT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
-      nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameDelta, payload_type, 1234, 4321, payload,
+      sizeof(payload), nullptr, &video_header,
+      kDefaultExpectedRetransmissionTimeMs));
 
   sent_payload = transport_.last_sent_packet().payload();
   generic_header = sent_payload[0];
@@ -1148,7 +1150,7 @@ TEST_P(RtpSenderTest, SendFlexfecPackets) {
 
   RTPVideoHeader video_header;
   EXPECT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, kMediaPayloadType, kTimestamp,
+      VideoFrameType::kVideoFrameKey, kMediaPayloadType, kTimestamp,
       fake_clock_.TimeInMilliseconds(), kPayloadData, sizeof(kPayloadData),
       nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
 
@@ -1226,9 +1228,9 @@ TEST_P(RtpSenderTest, NoFlexfecForTimingFrames) {
   RTPVideoHeader video_header;
   video_header.video_timing.flags = VideoSendTiming::kTriggeredByTimer;
   EXPECT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, kMediaPayloadType, kTimestamp, kCaptureTimeMs,
-      kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
-      kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameKey, kMediaPayloadType, kTimestamp,
+      kCaptureTimeMs, kPayloadData, sizeof(kPayloadData), nullptr,
+      &video_header, kDefaultExpectedRetransmissionTimeMs));
 
   EXPECT_CALL(mock_rtc_event_log_,
               LogProxy(SameRtcEventTypeAs(RtcEvent::Type::RtpPacketOutgoing)))
@@ -1252,9 +1254,9 @@
                                kSeqNum + 1, _, _, false));
   video_header.video_timing.flags = VideoSendTiming::kInvalid;
   EXPECT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, kMediaPayloadType, kTimestamp + 1, kCaptureTimeMs + 1,
-      kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
-      kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameKey, kMediaPayloadType, kTimestamp + 1,
+      kCaptureTimeMs + 1, kPayloadData, sizeof(kPayloadData), nullptr,
+      &video_header, kDefaultExpectedRetransmissionTimeMs));
 
   EXPECT_CALL(mock_rtc_event_log_,
               LogProxy(SameRtcEventTypeAs(RtcEvent::Type::RtpPacketOutgoing)))
@@ -1315,7 +1317,7 @@ TEST_P(RtpSenderTestWithoutPacer, SendFlexfecPackets) {
       .Times(2);
   RTPVideoHeader video_header;
   EXPECT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, kMediaPayloadType, kTimestamp,
+      VideoFrameType::kVideoFrameKey, kMediaPayloadType, kTimestamp,
      fake_clock_.TimeInMilliseconds(), kPayloadData, sizeof(kPayloadData),
      nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
 
@@ -1448,7 +1450,7 @@ TEST_P(RtpSenderTest, FecOverheadRate) {
   RTPVideoHeader video_header;
 
   EXPECT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, kMediaPayloadType, kTimestamp,
+      VideoFrameType::kVideoFrameKey, kMediaPayloadType, kTimestamp,
      fake_clock_.TimeInMilliseconds(), kPayloadData, sizeof(kPayloadData),
      nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
 
@@ -1526,8 +1528,9 @@ TEST_P(RtpSenderTest, BitrateCallbacks) {
   RTPVideoHeader video_header;
   for (uint32_t i = 0; i < kNumPackets; ++i) {
     ASSERT_TRUE(rtp_sender_video.SendVideo(
-        kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
-        nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
+        VideoFrameType::kVideoFrameKey, payload_type, 1234, 4321, payload,
+        sizeof(payload), nullptr, &video_header,
+        kDefaultExpectedRetransmissionTimeMs));
     fake_clock_.AdvanceTimeMilliseconds(kPacketInterval);
   }
 
@@ -1598,8 +1601,9 @@ TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) {
   // Send a frame.
   RTPVideoHeader video_header;
   ASSERT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
-      nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameKey, payload_type, 1234, 4321, payload,
+      sizeof(payload), nullptr, &video_header,
+      kDefaultExpectedRetransmissionTimeMs));
   StreamDataCounters expected;
   expected.transmitted.payload_bytes = 6;
   expected.transmitted.header_bytes = 12;
@@ -1640,8 +1644,9 @@
   fec_params.max_fec_frames = 1;
   rtp_sender_video.SetFecParameters(fec_params, fec_params);
   ASSERT_TRUE(rtp_sender_video.SendVideo(
-      kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
-      nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameDelta, payload_type, 1234, 4321, payload,
+      sizeof(payload), nullptr, &video_header,
+      kDefaultExpectedRetransmissionTimeMs));
   expected.transmitted.payload_bytes = 40;
   expected.transmitted.header_bytes = 60;
   expected.transmitted.packets = 5;
@@ -72,7 +72,7 @@ void AddRtpHeaderExtensions(const RTPVideoHeader& video_header,
     packet->SetExtension<VideoOrientation>(video_header.rotation);
 
   // Report content type only for key frames.
-  if (last_packet && frame_type == kVideoFrameKey &&
+  if (last_packet && frame_type == VideoFrameType::kVideoFrameKey &&
       video_header.content_type != VideoContentType::UNSPECIFIED)
     packet->SetExtension<VideoContentTypeExtension>(video_header.content_type);
 
@@ -116,7 +116,7 @@ void AddRtpHeaderExtensions(const RTPVideoHeader& video_header,
 
     generic_descriptor.SetTemporalLayer(video_header.generic->temporal_index);
 
-    if (frame_type == kVideoFrameKey) {
+    if (frame_type == VideoFrameType::kVideoFrameKey) {
       generic_descriptor.SetResolution(video_header.width,
                                        video_header.height);
     }
@@ -168,11 +168,11 @@ bool IsBaseLayer(const RTPVideoHeader& video_header) {
 
 const char* FrameTypeToString(VideoFrameType frame_type) {
   switch (frame_type) {
-    case kEmptyFrame:
+    case VideoFrameType::kEmptyFrame:
       return "empty";
-    case kVideoFrameKey:
+    case VideoFrameType::kVideoFrameKey:
       return "video_key";
-    case kVideoFrameDelta:
+    case VideoFrameType::kVideoFrameDelta:
       return "video_delta";
     default:
       RTC_NOTREACHED();
@@ -429,13 +429,10 @@ bool RTPSenderVideo::SendVideo(VideoFrameType frame_type,
                                const RTPFragmentationHeader* fragmentation,
                                const RTPVideoHeader* video_header,
                                int64_t expected_retransmission_time_ms) {
-  RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta ||
-             frame_type == kEmptyFrame);
-
   TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms, "Send", "type",
                           FrameTypeToString(frame_type));
 
-  if (frame_type == kEmptyFrame)
+  if (frame_type == VideoFrameType::kEmptyFrame)
     return true;
 
   if (payload_size == 0)
@@ -466,7 +463,7 @@
   // value sent.
   // Set rotation when key frame or when changed (to follow standard).
   // Or when different from 0 (to follow current receiver implementation).
-  set_video_rotation = frame_type == kVideoFrameKey ||
+  set_video_rotation = frame_type == VideoFrameType::kVideoFrameKey ||
                        video_header->rotation != last_rotation_ ||
                        video_header->rotation != kVideoRotation_0;
   last_rotation_ = video_header->rotation;
@@ -479,8 +476,8 @@
     set_color_space = true;
     transmit_color_space_next_frame_ = !IsBaseLayer(*video_header);
   } else {
-    set_color_space =
-        frame_type == kVideoFrameKey || transmit_color_space_next_frame_;
+    set_color_space = frame_type == VideoFrameType::kVideoFrameKey ||
+                      transmit_color_space_next_frame_;
     transmit_color_space_next_frame_ = transmit_color_space_next_frame_
                                            ? !IsBaseLayer(*video_header)
                                            : false;
@@ -488,7 +485,8 @@
 
   // FEC settings.
   const FecProtectionParams& fec_params =
-      frame_type == kVideoFrameKey ? key_fec_params_ : delta_fec_params_;
+      frame_type == VideoFrameType::kVideoFrameKey ? key_fec_params_
+                                                   : delta_fec_params_;
   if (flexfec_enabled())
     flexfec_sender_->SetFecParameters(fec_params);
   if (ulpfec_enabled())
@@ -188,9 +188,9 @@ TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) {
 
   RTPVideoHeader hdr;
   hdr.rotation = kVideoRotation_0;
-  rtp_sender_video_.SendVideo(kVideoFrameKey, kPayload, kTimestamp, 0, kFrame,
-                              sizeof(kFrame), nullptr, &hdr,
-                              kDefaultExpectedRetransmissionTimeMs);
+  rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameKey, kPayload,
+                              kTimestamp, 0, kFrame, sizeof(kFrame), nullptr,
+                              &hdr, kDefaultExpectedRetransmissionTimeMs);
 
   VideoRotation rotation;
   EXPECT_TRUE(
@@ -214,9 +214,10 @@ TEST_P(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) {
   hdr.video_timing.encode_finish_delta_ms = kEncodeFinishDeltaMs;
 
   fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs);
-  rtp_sender_video_.SendVideo(
-      kVideoFrameKey, kPayload, kTimestamp, kCaptureTimestamp, kFrame,
-      sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs);
+  rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameKey, kPayload,
+                              kTimestamp, kCaptureTimestamp, kFrame,
+                              sizeof(kFrame), nullptr, &hdr,
+                              kDefaultExpectedRetransmissionTimeMs);
   VideoSendTiming timing;
   EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>(
       &timing));
@@ -233,13 +234,13 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) {
   RTPVideoHeader hdr;
   hdr.rotation = kVideoRotation_90;
   EXPECT_TRUE(rtp_sender_video_.SendVideo(
-      kVideoFrameKey, kPayload, kTimestamp, 0, kFrame, sizeof(kFrame), nullptr,
-      &hdr, kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameKey, kPayload, kTimestamp, 0, kFrame,
+      sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
 
   hdr.rotation = kVideoRotation_0;
   EXPECT_TRUE(rtp_sender_video_.SendVideo(
-      kVideoFrameDelta, kPayload, kTimestamp + 1, 0, kFrame, sizeof(kFrame),
-      nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameDelta, kPayload, kTimestamp + 1, 0, kFrame,
+      sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
 
   VideoRotation rotation;
   EXPECT_TRUE(
@@ -255,12 +256,12 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) {
   RTPVideoHeader hdr;
   hdr.rotation = kVideoRotation_90;
   EXPECT_TRUE(rtp_sender_video_.SendVideo(
-      kVideoFrameKey, kPayload, kTimestamp, 0, kFrame, sizeof(kFrame), nullptr,
-      &hdr, kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameKey, kPayload, kTimestamp, 0, kFrame,
+      sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
 
   EXPECT_TRUE(rtp_sender_video_.SendVideo(
-      kVideoFrameDelta, kPayload, kTimestamp + 1, 0, kFrame, sizeof(kFrame),
-      nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
+      VideoFrameType::kVideoFrameDelta, kPayload, kTimestamp + 1, 0, kFrame,
+      sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
 
   VideoRotation rotation;
   EXPECT_TRUE(
@@ -285,18 +286,18 @@ TEST_P(RtpSenderVideoTest, CheckH264FrameMarking) {
   hdr.frame_marking.temporal_id = kNoTemporalIdx;
   hdr.frame_marking.tl0_pic_idx = 99;
   hdr.frame_marking.base_layer_sync = true;
-  rtp_sender_video_.SendVideo(kVideoFrameDelta, kPayload,
-                              kTimestamp, 0, kFrame, sizeof(kFrame), &frag,
-                              &hdr, kDefaultExpectedRetransmissionTimeMs);
+  rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameDelta, kPayload,
+                              kTimestamp, 0, kFrame, sizeof(kFrame), &frag,
+                              &hdr, kDefaultExpectedRetransmissionTimeMs);
 
   FrameMarking fm;
   EXPECT_FALSE(
       transport_.last_sent_packet().GetExtension<FrameMarkingExtension>(&fm));
 
   hdr.frame_marking.temporal_id = 0;
-  rtp_sender_video_.SendVideo(kVideoFrameDelta, kPayload,
-                              kTimestamp + 1, 0, kFrame, sizeof(kFrame), &frag,
-                              &hdr, kDefaultExpectedRetransmissionTimeMs);
+  rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameDelta, kPayload,
+                              kTimestamp + 1, 0, kFrame, sizeof(kFrame), &frag,
+                              &hdr, kDefaultExpectedRetransmissionTimeMs);
 
   EXPECT_TRUE(
       transport_.last_sent_packet().GetExtension<FrameMarkingExtension>(&fm));
@@ -563,9 +564,9 @@ void RtpSenderVideoTest::PopulateGenericFrameDescriptor(int version) {
   generic.higher_spatial_layers.push_back(4);
   generic.dependencies.push_back(kFrameId - 1);
   generic.dependencies.push_back(kFrameId - 500);
-  rtp_sender_video_.SendVideo(kVideoFrameDelta, kPayload, kTimestamp, 0, kFrame,
-                              sizeof(kFrame), nullptr, &hdr,
-                              kDefaultExpectedRetransmissionTimeMs);
+  rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameDelta, kPayload,
+                              kTimestamp, 0, kFrame, sizeof(kFrame), nullptr,
+                              &hdr, kDefaultExpectedRetransmissionTimeMs);
 
   RtpGenericFrameDescriptor descriptor_wire;
   EXPECT_EQ(1, transport_.packets_sent());
@@ -618,9 +619,9 @@ void RtpSenderVideoTest::
   RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
   generic.frame_id = kFrameId;
   rtp_sender_video_.RegisterPayloadType(kPayload, "vp8");
-  rtp_sender_video_.SendVideo(kVideoFrameDelta, kPayload, kTimestamp, 0, kFrame,
-                              sizeof(kFrame), nullptr, &hdr,
-                              kDefaultExpectedRetransmissionTimeMs);
+  rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameDelta, kPayload,
+                              kTimestamp, 0, kFrame, sizeof(kFrame), nullptr,
+                              &hdr, kDefaultExpectedRetransmissionTimeMs);
 
   ASSERT_EQ(transport_.packets_sent(), 1);
   // Expect only minimal 1-byte vp8 descriptor was generated.
@@ -64,6 +64,7 @@ rtc_static_library("packet") {
     "..:module_api",
     "../../:webrtc_common",
     "../../api:rtp_headers",
+    "../../api/video:video_frame_type",
     "../rtp_rtcp:rtp_rtcp_format",
     "../rtp_rtcp:rtp_video_header",
     "//third_party/abseil-cpp/absl/types:optional",
@@ -67,17 +67,17 @@ int NumberOfThreads(int width, int height, int number_of_cores) {
 VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
   switch (type) {
     case videoFrameTypeIDR:
-      return kVideoFrameKey;
+      return VideoFrameType::kVideoFrameKey;
     case videoFrameTypeSkip:
     case videoFrameTypeI:
     case videoFrameTypeP:
     case videoFrameTypeIPMixed:
-      return kVideoFrameDelta;
+      return VideoFrameType::kVideoFrameDelta;
     case videoFrameTypeInvalid:
       break;
   }
   RTC_NOTREACHED() << "Unexpected/invalid frame type: " << type;
-  return kEmptyFrame;
+  return VideoFrameType::kEmptyFrame;
 }

 }  // namespace
@@ -409,7 +409,8 @@ int32_t H264EncoderImpl::Encode(
   if (!send_key_frame && frame_types) {
     for (size_t i = 0; i < frame_types->size() && i < configurations_.size();
          ++i) {
-      if ((*frame_types)[i] == kVideoFrameKey && configurations_[i].sending) {
+      if ((*frame_types)[i] == VideoFrameType::kVideoFrameKey &&
+          configurations_[i].sending) {
         send_key_frame = true;
         break;
       }
@@ -462,7 +463,7 @@ int32_t H264EncoderImpl::Encode(
     }
     if (frame_types != nullptr) {
       // Skip frame?
-      if ((*frame_types)[i] == kEmptyFrame) {
+      if ((*frame_types)[i] == VideoFrameType::kEmptyFrame) {
         continue;
       }
     }
@@ -70,7 +70,7 @@ TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -97,7 +97,7 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -88,7 +88,8 @@ int PackFrameHeader(uint8_t* buffer,
   ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.codec_type);
   offset += sizeof(uint8_t);

-  ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.frame_type);
+  ByteWriter<uint8_t>::WriteBigEndian(
+      buffer + offset, static_cast<uint8_t>(frame_header.frame_type));
   offset += sizeof(uint8_t);

   RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
@@ -145,9 +145,9 @@ int MultiplexEncoderAdapter::Encode(

   std::vector<VideoFrameType> adjusted_frame_types;
   if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
-    adjusted_frame_types.push_back(kVideoFrameKey);
+    adjusted_frame_types.push_back(VideoFrameType::kVideoFrameKey);
   } else {
-    adjusted_frame_types.push_back(kVideoFrameDelta);
+    adjusted_frame_types.push_back(VideoFrameType::kVideoFrameDelta);
   }
   const bool has_alpha = input_image.video_frame_buffer()->type() ==
                          VideoFrameBuffer::Type::kI420A;
@@ -276,7 +276,7 @@ TEST_P(TestMultiplexAdapter, CheckSingleFrameEncodedBitstream) {
   const MultiplexImageComponent& component = unpacked_frame.image_components[0];
   EXPECT_EQ(0, component.component_index);
   EXPECT_NE(nullptr, component.encoded_image.data());
-  EXPECT_EQ(kVideoFrameKey, component.encoded_image._frameType);
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, component.encoded_image._frameType);
 }

 TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
@@ -299,7 +299,8 @@ TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
         unpacked_frame.image_components[i];
     EXPECT_EQ(i, component.component_index);
     EXPECT_NE(nullptr, component.encoded_image.data());
-    EXPECT_EQ(kVideoFrameKey, component.encoded_image._frameType);
+    EXPECT_EQ(VideoFrameType::kVideoFrameKey,
+              component.encoded_image._frameType);
   }
 }

@@ -314,7 +315,9 @@ TEST_P(TestMultiplexAdapter, ImageIndexIncreases) {
     const MultiplexImage& unpacked_frame =
         MultiplexEncodedImagePacker::Unpack(encoded_frame);
     EXPECT_EQ(i, unpacked_frame.image_index);
-    EXPECT_EQ(i ? kVideoFrameDelta : kVideoFrameKey, encoded_frame._frameType);
+    EXPECT_EQ(
+        i ? VideoFrameType::kVideoFrameDelta : VideoFrameType::kVideoFrameKey,
+        encoded_frame._frameType);
   }
 }

@@ -332,11 +332,11 @@ void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
       contains_idr = true;
     }
   }
-  if (encoded_frame._frameType == kVideoFrameKey) {
+  if (encoded_frame._frameType == VideoFrameType::kVideoFrameKey) {
     EXPECT_TRUE(contains_sps) << "Keyframe should contain SPS.";
     EXPECT_TRUE(contains_pps) << "Keyframe should contain PPS.";
     EXPECT_TRUE(contains_idr) << "Keyframe should contain IDR.";
-  } else if (encoded_frame._frameType == kVideoFrameDelta) {
+  } else if (encoded_frame._frameType == VideoFrameType::kVideoFrameDelta) {
     EXPECT_FALSE(contains_sps) << "Delta frame should not contain SPS.";
     EXPECT_FALSE(contains_pps) << "Delta frame should not contain PPS.";
     EXPECT_FALSE(contains_idr) << "Delta frame should not contain IDR.";
@@ -222,7 +222,7 @@ VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic(
     if (frame_stat.encoding_successful) {
       ++video_stat.num_encoded_frames;

-      if (frame_stat.frame_type == kVideoFrameKey) {
+      if (frame_stat.frame_type == VideoFrameType::kVideoFrameKey) {
        key_frame_size_bytes.AddSample(frame_stat.length_bytes);
        ++video_stat.num_key_frames;
      } else {
@@ -287,8 +287,9 @@ void VideoProcessor::ProcessFrame() {

   // Encode.
   const std::vector<VideoFrameType> frame_types =
-      (frame_number == 0) ? std::vector<VideoFrameType>{kVideoFrameKey}
-                          : std::vector<VideoFrameType>{kVideoFrameDelta};
+      (frame_number == 0)
+          ? std::vector<VideoFrameType>{VideoFrameType::kVideoFrameKey}
+          : std::vector<VideoFrameType>{VideoFrameType::kVideoFrameDelta};
   const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
   for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
     FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
@@ -209,7 +209,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,

   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (input_image._frameType != kVideoFrameKey)
+    if (input_image._frameType != VideoFrameType::kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // We have a key frame - is it complete?
     if (input_image._completeFrame) {
@@ -220,7 +220,8 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   }
   // Restrict error propagation using key frame requests.
   // Reset on a key frame refresh.
-  if (input_image._frameType == kVideoFrameKey && input_image._completeFrame) {
+  if (input_image._frameType == VideoFrameType::kVideoFrameKey &&
+      input_image._completeFrame) {
     propagation_cnt_ = -1;
     // Start count on first loss.
   } else if ((!input_image._completeFrame || missing_frames) &&
@@ -756,7 +756,8 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
   if (!send_key_frame && frame_types) {
     for (size_t i = 0; i < frame_types->size() && i < send_stream_.size();
          ++i) {
-      if ((*frame_types)[i] == kVideoFrameKey && send_stream_[i]) {
+      if ((*frame_types)[i] == VideoFrameType::kVideoFrameKey &&
+          send_stream_[i]) {
         send_key_frame = true;
         break;
       }
@@ -925,7 +926,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
        ++encoder_idx, --stream_idx) {
     vpx_codec_iter_t iter = NULL;
     encoded_images_[encoder_idx].set_size(0);
-    encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
+    encoded_images_[encoder_idx]._frameType = VideoFrameType::kVideoFrameDelta;
     CodecSpecificInfo codec_specific;
     const vpx_codec_cx_pkt_t* pkt = NULL;
     while ((pkt = libvpx_->codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
@@ -947,7 +948,8 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
       if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
         // check if encoded frame is a key frame
         if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
-          encoded_images_[encoder_idx]._frameType = kVideoFrameKey;
+          encoded_images_[encoder_idx]._frameType =
+              VideoFrameType::kVideoFrameKey;
         }
         encoded_images_[encoder_idx].SetSpatialIndex(stream_idx);
         PopulateCodecSpecific(&codec_specific, *pkt, stream_idx, encoder_idx,
@@ -209,7 +209,7 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
   EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);

   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -323,7 +323,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);

   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
@@ -354,12 +354,12 @@ TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
   // Setting complete back to true. Forcing a delta frame.
-  encoded_frame._frameType = kVideoFrameDelta;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameDelta;
   encoded_frame._completeFrame = true;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
   // Now setting a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -484,7 +484,8 @@ TEST_F(TestVp8Impl, KeepsTimestampOnReencode) {
       .Times(2)
       .WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK));

-  auto delta_frame = std::vector<VideoFrameType>{kVideoFrameDelta};
+  auto delta_frame =
+      std::vector<VideoFrameType>{VideoFrameType::kVideoFrameDelta};
   encoder.Encode(*NextInputFrame(), &delta_frame);
 }

@@ -127,7 +127,7 @@ TEST_F(TestVp9Impl, EncodeDecode) {
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -227,7 +227,7 @@ TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -566,15 +566,19 @@ TEST_F(TestVp9Impl,
       const bool is_first_upper_layer_frame = (sl_idx > 0 && frame_num == 0);
       if (is_first_upper_layer_frame) {
         if (inter_layer_pred == InterLayerPredMode::kOn) {
-          EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+          EXPECT_EQ(encoded_frame[0]._frameType,
+                    VideoFrameType::kVideoFrameDelta);
         } else {
-          EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameKey);
+          EXPECT_EQ(encoded_frame[0]._frameType,
+                    VideoFrameType::kVideoFrameKey);
         }
       } else if (sl_idx == 0 && frame_num == 0) {
-        EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameKey);
+        EXPECT_EQ(encoded_frame[0]._frameType,
+                  VideoFrameType::kVideoFrameKey);
       } else {
         for (size_t i = 0; i <= sl_idx; ++i) {
-          EXPECT_EQ(encoded_frame[i]._frameType, kVideoFrameDelta);
+          EXPECT_EQ(encoded_frame[i]._frameType,
+                    VideoFrameType::kVideoFrameDelta);
         }
       }
     }
@@ -623,7 +627,7 @@ TEST_F(TestVp9Impl,

       for (size_t i = 0; i <= sl_idx; ++i) {
         const bool is_keyframe =
-            encoded_frame[0]._frameType == kVideoFrameKey;
+            encoded_frame[0]._frameType == VideoFrameType::kVideoFrameKey;
         const bool is_first_upper_layer_frame =
             (i == sl_idx && frame_num == 0);
         // Interframe references are there, unless it's a keyframe,
@@ -693,7 +697,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
             encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 1u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);

@@ -712,7 +716,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
             encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 2u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
   EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted, true);
@@ -772,7 +776,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
             encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 1u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1 - i % 2);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted,
             true);
@@ -793,7 +797,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
             encoder_->Encode(*NextInputFrame(), nullptr));
   ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
   ASSERT_EQ(codec_specific_info.size(), 2u);
-  EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
+  EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameDelta);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0);
   EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.inter_pic_predicted, true);
   EXPECT_EQ(codec_specific_info[1].codecSpecific.VP9.inter_pic_predicted,
@@ -1442,7 +1446,7 @@ TEST_F(TestVp9ImplProfile2, EncodeDecode) {
   CodecSpecificInfo codec_specific_info;
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   // First frame should be a key frame.
-  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame, false, nullptr, 0));
   std::unique_ptr<VideoFrame> decoded_frame;
@@ -727,7 +727,7 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,

   // We only support one stream at the moment.
   if (frame_types && !frame_types->empty()) {
-    if ((*frame_types)[0] == kVideoFrameKey) {
+    if ((*frame_types)[0] == VideoFrameType::kVideoFrameKey) {
       force_key_frame_ = true;
     }
   }
@@ -1324,9 +1324,9 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   RTC_DCHECK(is_key_frame || !force_key_frame_);

   // Check if encoded frame is a key frame.
-  encoded_image_._frameType = kVideoFrameDelta;
+  encoded_image_._frameType = VideoFrameType::kVideoFrameDelta;
   if (is_key_frame) {
-    encoded_image_._frameType = kVideoFrameKey;
+    encoded_image_._frameType = VideoFrameType::kVideoFrameKey;
     force_key_frame_ = false;
   }
   RTC_DCHECK_LE(encoded_image_.size(), encoded_image_.capacity());
@@ -1539,7 +1539,7 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   }
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (input_image._frameType != kVideoFrameKey)
+    if (input_image._frameType != VideoFrameType::kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // We have a key frame - is it complete?
     if (input_image._completeFrame) {
@@ -100,7 +100,7 @@ void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
   uint16_t frame_index = picture_id_ % kFrameDecodedLength;
   if (in_initial_state_) {
     frame_decoded_cleared_to_ = frame_index;
-  } else if (frame->FrameType() == kVideoFrameKey) {
+  } else if (frame->FrameType() == VideoFrameType::kVideoFrameKey) {
     memset(frame_decoded_, 0, sizeof(frame_decoded_));
     frame_decoded_cleared_to_ = frame_index;
   } else {
@@ -176,7 +176,8 @@ void VCMDecodingState::UpdateSyncState(const VCMFrameBuffer* frame) {
   if (frame->TemporalId() == kNoTemporalIdx ||
       frame->Tl0PicId() == kNoTl0PicIdx) {
     full_sync_ = true;
-  } else if (frame->FrameType() == kVideoFrameKey || frame->LayerSync()) {
+  } else if (frame->FrameType() == VideoFrameType::kVideoFrameKey ||
+             frame->LayerSync()) {
     full_sync_ = true;
   } else if (full_sync_) {
     // Verify that we are still in sync.
@@ -207,7 +208,7 @@ bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const {
   // A key frame is always considered continuous as it doesn't refer to any
   // frames and therefore won't introduce any errors even if prior frames are
   // missing.
-  if (frame->FrameType() == kVideoFrameKey &&
+  if (frame->FrameType() == VideoFrameType::kVideoFrameKey &&
       HaveSpsAndPps(frame->GetNaluInfos())) {
     return true;
   }
@@ -38,7 +38,7 @@ TEST(TestDecodingState, FrameContinuity) {
   packet.video_header.is_first_packet_in_frame = true;
   packet.timestamp = 1;
   packet.seqNum = 0xffff;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.codec = kVideoCodecVP8;
   auto& vp8_header =
       packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
@@ -50,12 +50,12 @@ TEST(TestDecodingState, FrameContinuity) {
   // Always start with a key frame.
   dec_state.Reset();
   EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame_key.InsertPacket(packet, 0, frame_data));
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame_key));
   dec_state.SetState(&frame);
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   // Use pictureId
   packet.video_header.is_first_packet_in_frame = false;
   vp8_header.pictureId = 0x0002;
@@ -171,7 +171,7 @@ TEST(TestDecodingState, UpdateOldPacket) {
   VCMPacket packet;
   packet.timestamp = 1;
   packet.seqNum = 1;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   FrameData frame_data;
   frame_data.rtt_ms = 0;
   frame_data.rolling_average_packets_per_frame = -1;
@@ -186,14 +186,14 @@ TEST(TestDecodingState, UpdateOldPacket) {
   // Now insert empty packet belonging to the same frame.
   packet.timestamp = 1;
   packet.seqNum = 2;
-  packet.frameType = kEmptyFrame;
+  packet.frameType = VideoFrameType::kEmptyFrame;
   packet.sizeBytes = 0;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 2);
   // Now insert delta packet belonging to the same frame.
   packet.timestamp = 1;
   packet.seqNum = 3;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.sizeBytes = 1400;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 3);
@@ -201,7 +201,7 @@ TEST(TestDecodingState, UpdateOldPacket) {
   // sequence number.
   packet.timestamp = 0;
   packet.seqNum = 4;
-  packet.frameType = kEmptyFrame;
+  packet.frameType = VideoFrameType::kEmptyFrame;
   packet.sizeBytes = 0;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 3);
@@ -215,7 +215,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
   // tl0PicIdx 0, temporal id 0.
   VCMFrameBuffer frame;
   VCMPacket packet;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.codec = kVideoCodecVP8;
   packet.timestamp = 0;
   packet.seqNum = 0;
@@ -266,7 +266,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
   // Insert key frame - should update sync value.
   // A key frame is always a base layer.
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   packet.video_header.is_first_packet_in_frame = true;
   packet.timestamp = 5;
   packet.seqNum = 5;
@@ -280,7 +280,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
   // After sync, a continuous PictureId is required
   // (continuous base layer is not enough )
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.timestamp = 6;
   packet.seqNum = 6;
   vp8_header.tl0PicIdx = 3;
@@ -290,7 +290,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
   EXPECT_TRUE(dec_state.full_sync());
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.timestamp = 8;
   packet.seqNum = 8;
@@ -305,7 +305,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {

   // Insert a non-ref frame - should update sync value.
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.timestamp = 9;
   packet.seqNum = 9;
@@ -325,7 +325,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
   // Base layer.
   frame.Reset();
   dec_state.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.markerBit = 1;
   packet.timestamp = 0;
@@ -339,7 +339,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
   EXPECT_TRUE(dec_state.full_sync());
   // Layer 2 - 2 packets (insert one, lose one).
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.markerBit = 0;
   packet.timestamp = 1;
@@ -352,7 +352,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
   // Layer 1
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.is_first_packet_in_frame = true;
   packet.markerBit = 1;
   packet.timestamp = 2;
@@ -371,7 +371,7 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
   VCMFrameBuffer frame;
   VCMPacket packet;
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   packet.video_header.codec = kVideoCodecVP8;
   packet.timestamp = 0;
   packet.seqNum = 0;
@@ -390,7 +390,7 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
   // Continuous sequence number but discontinuous picture id. This implies a
   // a loss and we have to fall back to only decoding the base layer.
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.timestamp += 3000;
   ++packet.seqNum;
   vp8_header.temporalIdx = 1;
@@ -426,7 +426,7 @@ TEST(TestDecodingState, PictureIdRepeat) {
   VCMDecodingState dec_state;
   VCMFrameBuffer frame;
   VCMPacket packet;
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   packet.video_header.codec = kVideoCodecVP8;
   packet.timestamp = 0;
   packet.seqNum = 0;
@@ -479,7 +479,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
   frame_data.rolling_average_packets_per_frame = -1;

   // Key frame as first frame
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
   dec_state.SetState(&frame);
@@ -493,7 +493,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {

   // Ref to 11, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.picture_id = 12;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.pid_diff[0] = 1;
@@ -523,14 +523,14 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
   frame_data.rolling_average_packets_per_frame = -1;

   // Key frame as first frame
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
   dec_state.SetState(&frame);

   // Ref to 10, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.picture_id = 15;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.pid_diff[0] = 5;
@@ -579,23 +579,23 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
   frame_data.rolling_average_packets_per_frame = -1;

   // Key frame as first frame
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   EXPECT_TRUE(dec_state.ContinuousFrame(&frame));

   // Delta frame as first frame
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   EXPECT_FALSE(dec_state.ContinuousFrame(&frame));

   // Key frame then delta frame
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
   dec_state.SetState(&frame);
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.picture_id = 15;
   vp9_hdr.pid_diff[0] = 5;
@@ -639,7 +639,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {

   // Key Frame, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 2;
   vp9_hdr.num_ref_pics = 0;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
@@ -648,7 +648,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {

   // Frame at last index, ref to KF, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 1;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.pid_diff[0] = 1;
@@ -684,7 +684,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {

   // Key frame, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameKey;
+  packet.frameType = VideoFrameType::kVideoFrameKey;
   vp9_hdr.picture_id = 25;
   vp9_hdr.num_ref_pics = 0;
   EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
@@ -693,7 +693,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {

   // Ref to KF, continuous
   frame.Reset();
-  packet.frameType = kVideoFrameDelta;
+  packet.frameType = VideoFrameType::kVideoFrameDelta;
   vp9_hdr.picture_id = 26;
   vp9_hdr.num_ref_pics = 1;
   vp9_hdr.pid_diff[0] = 1;
@@ -39,7 +39,7 @@ void VCMEncodedFrame::Reset() {
   SetSpatialIndex(absl::nullopt);
   _renderTimeMs = -1;
   _payloadType = 0;
-  _frameType = kVideoFrameDelta;
+  _frameType = VideoFrameType::kVideoFrameDelta;
   _encodedWidth = 0;
   _encodedHeight = 0;
   _completeFrame = false;
@@ -181,7 +181,8 @@ void FecControllerDefault::UpdateWithEncodedData(
   const size_t encoded_length = encoded_image_length;
   CritScope lock(&crit_sect_);
   if (encoded_length > 0) {
-    const bool delta_frame = encoded_image_frametype != kVideoFrameKey;
+    const bool delta_frame =
+        encoded_image_frametype != VideoFrameType::kVideoFrameKey;
     if (max_payload_size_ > 0 && encoded_length > 0) {
       const float min_packets_per_frame =
           encoded_length / static_cast<float>(max_payload_size_);
@@ -94,7 +94,7 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
     // We only take the ntp timestamp of the first packet of a frame.
     ntp_time_ms_ = packet.ntp_time_ms_;
     _codec = packet.codec();
-    if (packet.frameType != kEmptyFrame) {
+    if (packet.frameType != VideoFrameType::kEmptyFrame) {
       // first media packet
       SetState(kStateIncomplete);
     }
@@ -220,7 +220,7 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
   // Set correctly only for key frames. Thus, use latest key frame
   // content type. If the corresponding key frame was lost, decode will fail
   // and content type will be ignored.
-  if (frame.FrameType() == kVideoFrameKey) {
+  if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
     _frameInfos[_nextFrameInfoIdx].content_type = frame.contentType();
     _last_keyframe_content_type = frame.contentType();
   } else {
@@ -45,7 +45,7 @@ static const int64_t kMaxDiscontinuousFramesTime = 1000;
 typedef std::pair<uint32_t, VCMFrameBuffer*> FrameListPair;

 bool IsKeyFrame(FrameListPair pair) {
-  return pair.second->FrameType() == kVideoFrameKey;
+  return pair.second->FrameType() == VideoFrameType::kVideoFrameKey;
 }

 bool HasNonEmptyState(FrameListPair pair) {
@@ -83,7 +83,8 @@ int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
     free_frames->push_back(it->second);
     erase(it++);
     ++drop_count;
-    if (it != end() && it->second->FrameType() == kVideoFrameKey) {
+    if (it != end() &&
+        it->second->FrameType() == VideoFrameType::kVideoFrameKey) {
       *key_frame_it = it;
       return drop_count;
     }
@@ -651,7 +652,7 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,

   // Empty packets may bias the jitter estimate (lacking size component),
   // therefore don't let empty packet trigger the following updates:
-  if (packet.frameType != kEmptyFrame) {
+  if (packet.frameType != VideoFrameType::kEmptyFrame) {
     if (waiting_for_completion_.timestamp == packet.timestamp) {
       // This can get bad if we have a lot of duplicate packets,
       // we will then count some packet multiple times.
@@ -690,7 +691,7 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
       frame->IncrementNackCount();
     }
     if (!UpdateNackList(packet.seqNum) &&
-        packet.frameType != kVideoFrameKey) {
+        packet.frameType != VideoFrameType::kVideoFrameKey) {
       buffer_state = kFlushIndicator;
     }

@@ -926,9 +927,10 @@ std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
   }
   if (last_decoded_state_.in_initial_state()) {
     VCMFrameBuffer* next_frame = NextFrame();
-    const bool first_frame_is_key = next_frame &&
-                                    next_frame->FrameType() == kVideoFrameKey &&
-                                    next_frame->HaveFirstPacket();
+    const bool first_frame_is_key =
+        next_frame &&
+        next_frame->FrameType() == VideoFrameType::kVideoFrameKey &&
+        next_frame->HaveFirstPacket();
     if (!first_frame_is_key) {
       bool have_non_empty_frame =
           decodable_frames_.end() != find_if(decodable_frames_.begin(),
@@ -1131,7 +1133,7 @@ bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
 void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
   incoming_frame_count_++;

-  if (frame.FrameType() == kVideoFrameKey) {
+  if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
     TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
                             "KeyComplete");
   } else {
@@ -1142,7 +1144,7 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
   // Update receive statistics. We count all layers, thus when you use layers
   // adding all key and delta frames might differ from frame count.
   if (frame.IsSessionComplete()) {
-    if (frame.FrameType() == kVideoFrameKey) {
+    if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
       ++receive_statistics_.key_frames;
       if (receive_statistics_.key_frames == 1) {
         RTC_LOG(LS_INFO) << "Received first complete key frame";
@@ -47,7 +47,7 @@ class Vp9SsMapTest : public ::testing::Test {
     packet_.seqNum = 1234;
     packet_.timestamp = 1;
    packet_.markerBit = true;
-    packet_.frameType = kVideoFrameKey;
+    packet_.frameType = VideoFrameType::kVideoFrameKey;
    packet_.video_header.codec = kVideoCodecVP9;
    vp9_header.flexible_mode = false;
@ -248,7 +248,8 @@ class TestBasicJitterBuffer : public ::testing::TestWithParam<std::string>,
|
|||
video_header.codec = kVideoCodecGeneric;
|
||||
video_header.is_first_packet_in_frame = true;
|
||||
packet_.reset(new VCMPacket(data_, size_, rtp_header, video_header,
|
||||
kVideoFrameDelta, /*ntp_time_ms=*/0));
|
||||
VideoFrameType::kVideoFrameDelta,
|
||||
/*ntp_time_ms=*/0));
|
||||
}
|
||||
|
||||
VCMEncodedFrame* DecodeCompleteFrame() {
|
||||
|
@ -364,8 +365,9 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam<std::string>,
|
|||
|
||||
VCMFrameBufferEnum InsertFrame(VideoFrameType frame_type) {
|
||||
stream_generator_->GenerateFrame(
|
||||
frame_type, (frame_type != kEmptyFrame) ? 1 : 0,
|
||||
(frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
|
||||
frame_type, (frame_type != VideoFrameType::kEmptyFrame) ? 1 : 0,
|
||||
(frame_type == VideoFrameType::kEmptyFrame) ? 1 : 0,
|
||||
clock_->TimeInMilliseconds());
|
||||
VCMFrameBufferEnum ret = InsertPacketAndPop(0);
|
||||
clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
|
||||
return ret;
|
||||
|
@ -385,7 +387,8 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam<std::string>,
|
|||
}
|
||||
|
||||
void DropFrame(int num_packets) {
|
||||
stream_generator_->GenerateFrame(kVideoFrameDelta, num_packets, 0,
|
||||
stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta,
|
||||
num_packets, 0,
|
||||
clock_->TimeInMilliseconds());
|
||||
for (int i = 0; i < num_packets; ++i)
|
||||
stream_generator_->DropLastPacket();
|
||||
|
@ -434,7 +437,7 @@ TEST_F(TestBasicJitterBuffer, StopRunning) {
|
|||
|
||||
TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
|
||||
// Always start with a complete key frame when not allowing errors.
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = true;
|
||||
packet_->timestamp += 123 * 90;
|
||||
|
@ -445,14 +448,14 @@ TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
|
|||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, VerifyHistogramStats) {
|
||||
metrics::Reset();
|
||||
// Always start with a complete key frame when not allowing errors.
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = true;
|
||||
packet_->timestamp += 123 * 90;
|
||||
|
@ -463,7 +466,7 @@ TEST_F(TestBasicJitterBuffer, VerifyHistogramStats) {
|
|||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
// Verify that histograms are updated when the jitter buffer is stopped.
|
||||
|
@ -487,7 +490,7 @@ TEST_F(TestBasicJitterBuffer, VerifyHistogramStats) {
|
|||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = false;
|
||||
|
||||
|
@ -509,12 +512,12 @@ TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
|
|||
frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = false;
|
||||
|
||||
|
@ -552,13 +555,13 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
|
|||
frame_out = DecodeCompleteFrame();
|
||||
|
||||
CheckOutFrame(frame_out, 100 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
|
||||
// Always start with a complete key frame.
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = true;
|
||||
|
||||
|
@ -572,7 +575,7 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
|
|||
++seq_num_;
|
||||
packet_->seqNum = seq_num_;
|
||||
packet_->markerBit = false;
|
||||
packet_->frameType = kVideoFrameDelta;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameDelta;
|
||||
packet_->timestamp += 33 * 90;
|
||||
|
||||
EXPECT_EQ(kIncomplete,
|
||||
|
@ -608,14 +611,14 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
|
|||
frame_out = DecodeCompleteFrame();
|
||||
|
||||
CheckOutFrame(frame_out, 100 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
|
||||
// Insert the "first" packet last.
|
||||
seq_num_ += 100;
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = false;
|
||||
packet_->markerBit = true;
|
||||
packet_->seqNum = seq_num_;
|
||||
|
@ -655,12 +658,12 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
|
|||
|
||||
CheckOutFrame(frame_out, 100 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
|
||||
packet_->frameType = kVideoFrameDelta;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameDelta;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = false;
|
||||
|
||||
|
@ -686,7 +689,7 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
|
|||
|
||||
seq_num_ -= 3;
|
||||
timestamp_ -= 33 * 90;
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = false;
|
||||
packet_->seqNum = seq_num_;
|
||||
|
@ -710,17 +713,17 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
|
|||
|
||||
frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
frame_out = DecodeCompleteFrame();
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = true;
|
||||
|
||||
|
@ -734,7 +737,7 @@ TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
|
|||
|
||||
// Now send in a complete delta frame (Frame C), but with a sequence number
|
||||
// gap. No pic index either, so no temporal scalability cheating :)
|
||||
packet_->frameType = kVideoFrameDelta;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameDelta;
|
||||
// Leave a gap of 2 sequence numbers and two frames.
|
||||
packet_->seqNum = seq_num_ + 3;
|
||||
packet_->timestamp = timestamp_ + (66 * 90);
|
||||
|
@ -784,7 +787,7 @@ TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
|
|||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = false;
|
||||
packet_->seqNum = seq_num_;
|
||||
|
@ -820,14 +823,14 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
|
|||
ASSERT_TRUE(frame_out != NULL);
|
||||
CheckOutFrame(frame_out, 2 * size_, false);
|
||||
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(3, jitter_buffer_->num_packets());
|
||||
EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
packet_->video_header.is_first_packet_in_frame = true;
|
||||
packet_->markerBit = true;
|
||||
packet_->seqNum = seq_num_;
|
||||
|
@ -843,14 +846,14 @@ TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
|
|||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
ASSERT_TRUE(frame_out != NULL);
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
// Insert 3 delta frames.
|
||||
for (uint16_t i = 1; i <= 3; ++i) {
|
||||
packet_->seqNum = seq_num_ + i;
|
||||
packet_->timestamp = timestamp_ + (i * 33) * 90;
|
||||
packet_->frameType = kVideoFrameDelta;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameDelta;
|
||||
EXPECT_EQ(kCompleteSession,
|
||||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||
EXPECT_EQ(i + 1, jitter_buffer_->num_packets());
|
||||
|
@ -872,7 +875,7 @@ TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
|
|||
frame_out = DecodeCompleteFrame();
|
||||
ASSERT_TRUE(frame_out != NULL);
|
||||
CheckOutFrame(frame_out, size_, false);
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
}
|
||||
|
@ -902,7 +905,7 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
|
|||
|
||||
packet_->seqNum = 65485;
|
||||
packet_->timestamp = 1000;
|
||||
packet_->frameType = kVideoFrameKey;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameKey;
|
||||
vp9_header.picture_id = 5;
|
||||
vp9_header.tl0_pic_idx = 200;
|
||||
vp9_header.temporal_idx = 0;
|
||||
|
@ -914,7 +917,7 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
|
|||
// Insert next temporal layer 0.
|
||||
packet_->seqNum = 65489;
|
||||
packet_->timestamp = 13000;
|
||||
packet_->frameType = kVideoFrameDelta;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameDelta;
|
||||
vp9_header.picture_id = 9;
|
||||
vp9_header.tl0_pic_idx = 201;
|
||||
vp9_header.temporal_idx = 0;
|
||||
|
@ -923,12 +926,12 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
|
|||
|
||||
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
|
||||
EXPECT_EQ(1000U, frame_out->Timestamp());
|
||||
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
|
||||
frame_out = DecodeCompleteFrame();
|
||||
EXPECT_EQ(13000U, frame_out->Timestamp());
|
||||
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
|
||||
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
|
||||
jitter_buffer_->ReleaseFrame(frame_out);
|
||||
}
|
||||
|
||||
|
@ -957,7 +960,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
|
|||
|
||||
packet_->seqNum = 65486;
|
||||
packet_->timestamp = 6000;
|
||||
packet_->frameType = kVideoFrameDelta;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameDelta;
|
||||
vp9_header.picture_id = 6;
|
||||
vp9_header.temporal_idx = 2;
|
||||
vp9_header.temporal_up_switch = true;
|
||||
|
@ -965,7 +968,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
|
|||
|
||||
packet_->seqNum = 65487;
|
||||
packet_->timestamp = 9000;
|
||||
packet_->frameType = kVideoFrameDelta;
|
||||
packet_->frameType = VideoFrameType::kVideoFrameDelta;
|
||||
vp9_header.picture_id = 7;
|
||||
vp9_header.temporal_idx = 1;
|
||||
vp9_header.temporal_up_switch = true;
|
||||
|
@ -974,7 +977,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
|
|||
// Insert first frame with SS data.
|
||||
packet_->seqNum = 65485;
|
||||
packet_->timestamp = 3000;
|
||||
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.width = 352;
   packet_->video_header.height = 288;
   vp9_header.picture_id = 5;

@@ -987,7 +990,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {

   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(3000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_FALSE(
       frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);

@@ -995,14 +998,14 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {

   frame_out = DecodeCompleteFrame();
   EXPECT_EQ(6000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(2, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
   jitter_buffer_->ReleaseFrame(frame_out);

   frame_out = DecodeCompleteFrame();
   EXPECT_EQ(9000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
   jitter_buffer_->ReleaseFrame(frame_out);

@@ -1034,7 +1037,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
   packet_->markerBit = false;
   packet_->seqNum = 65486;
   packet_->timestamp = 6000;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   vp9_header.spatial_idx = 0;
   vp9_header.picture_id = 6;
   vp9_header.temporal_idx = 1;

@@ -1044,7 +1047,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
   packet_->video_header.is_first_packet_in_frame = false;
   packet_->markerBit = true;
   packet_->seqNum = 65487;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   vp9_header.spatial_idx = 1;
   vp9_header.picture_id = 6;
   vp9_header.temporal_idx = 1;

@@ -1055,7 +1058,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
   packet_->markerBit = true;
   packet_->seqNum = 65485;
   packet_->timestamp = 3000;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   vp9_header.spatial_idx = 1;
   vp9_header.picture_id = 5;
   vp9_header.temporal_idx = 0;

@@ -1066,7 +1069,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = 65484;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.width = 352;
   packet_->video_header.height = 288;
   vp9_header.spatial_idx = 0;

@@ -1080,7 +1083,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {

   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(3000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_FALSE(
       frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);

@@ -1088,14 +1091,14 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {

   frame_out = DecodeCompleteFrame();
   EXPECT_EQ(6000U, frame_out->Timestamp());
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
   jitter_buffer_->ReleaseFrame(frame_out);
 }

 TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;

@@ -1121,7 +1124,7 @@ TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {

   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }

@@ -1129,7 +1132,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
   auto& h264_header =
       packet_->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
   packet_->timestamp = timestamp_;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->video_header.codec = kVideoCodecH264;

@@ -1148,7 +1151,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
   packet_->timestamp = timestamp_;
   ++seq_num_;
   packet_->seqNum = seq_num_;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->video_header.codec = kVideoCodecH264;

@@ -1166,7 +1169,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {

   ++seq_num_;
   packet_->seqNum = seq_num_;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = false;
   packet_->markerBit = true;
   packet_->video_header.codec = kVideoCodecH264;

@@ -1187,7 +1190,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
   packet_->timestamp = timestamp_;
   ++seq_num_;
   packet_->seqNum = seq_num_;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->video_header.codec = kVideoCodecH264;

@@ -1206,7 +1209,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {

 TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
   seq_num_ = 0xfff0;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;

@@ -1249,14 +1252,14 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {

   CheckOutFrame(frame_out, 100 * size_, false);

-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }

 TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
   // Insert "first" packet last seqnum.
   seq_num_ = 10;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = false;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;

@@ -1298,7 +1301,7 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {

   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 100 * size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }

@@ -1309,7 +1312,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
   // t = 3000 t = 2000
   seq_num_ = 2;
   timestamp_ = 3000;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->timestamp = timestamp_;

@@ -1322,12 +1325,12 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(3000u, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);

   seq_num_--;
   timestamp_ = 2000;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;

@@ -1344,7 +1347,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {

   seq_num_ = 2;
   timestamp_ = 3000;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;

@@ -1359,13 +1362,13 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {

   CheckOutFrame(frame_out, size_, false);

-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());

   jitter_buffer_->ReleaseFrame(frame_out);

   seq_num_--;
   timestamp_ = 0xffffff00;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;

@@ -1382,7 +1385,7 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
   // t = 0xffffff00 t = 33*90

   timestamp_ = 0xffffff00;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;

@@ -1409,7 +1412,7 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {

   seq_num_++;
   timestamp_ += 33 * 90;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;

@@ -1431,7 +1434,7 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {

   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 2 * size_, false);
-  EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }

@@ -1442,7 +1445,7 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
   // t = 0xffffff00 t = 2700

   timestamp_ = 0xffffff00;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->timestamp = timestamp_;

@@ -1455,7 +1458,7 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
   // Insert next frame.
   seq_num_++;
   timestamp_ = 2700;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;

@@ -1467,13 +1470,13 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(0xffffff00, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);

   VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
   EXPECT_EQ(2700u, frame_out2->Timestamp());
   CheckOutFrame(frame_out2, size_, false);
-  EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out2->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out2);
 }

@@ -1485,7 +1488,7 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {

   seq_num_ = 2;
   timestamp_ = 2700;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;

@@ -1498,7 +1501,7 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
   // Insert second frame
   seq_num_--;
   timestamp_ = 0xffffff00;
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;

@@ -1510,13 +1513,13 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(0xffffff00, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);

   VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
   EXPECT_EQ(2700u, frame_out2->Timestamp());
   CheckOutFrame(frame_out2, size_, false);
-  EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameDelta, frame_out2->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out2);
 }

@@ -1584,7 +1587,7 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {

   if (loop == 50) {
     first_key_frame_timestamp = packet_->timestamp;
-    packet_->frameType = kVideoFrameKey;
+    packet_->frameType = VideoFrameType::kVideoFrameKey;
   }

   // Insert frame.

@@ -1611,7 +1614,7 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(first_key_frame_timestamp, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
-  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }

@@ -1628,7 +1631,7 @@ TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
-  packet_->frameType = kEmptyFrame;
+  packet_->frameType = VideoFrameType::kEmptyFrame;

   EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 }

@@ -1639,7 +1642,7 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
  // received the marker bit, unless we have received a packet from a later
  // timestamp.
   // Start with a complete key frame - insert and decode.
-  packet_->frameType = kVideoFrameKey;
+  packet_->frameType = VideoFrameType::kVideoFrameKey;
   packet_->video_header.is_first_packet_in_frame = true;
   packet_->markerBit = true;
   bool retransmitted = false;

@@ -1652,7 +1655,7 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {

   packet_->seqNum += 2;
   packet_->timestamp += 33 * 90;
-  packet_->frameType = kVideoFrameDelta;
+  packet_->frameType = VideoFrameType::kVideoFrameDelta;
   packet_->video_header.is_first_packet_in_frame = false;
   packet_->markerBit = false;

@@ -1673,23 +1676,24 @@ TEST_F(TestRunningJitterBuffer, Full) {
   jitter_buffer_->SetNackMode(kNack, -1, -1);
   jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
   // Insert a key frame and decode it.
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());
   DropFrame(1);
   // Fill the jitter buffer.
-  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, VideoFrameType::kVideoFrameDelta),
+            kNoError);
   // Make sure we can't decode these frames.
   EXPECT_FALSE(DecodeCompleteFrame());
   // This frame will make the jitter buffer recycle frames until a key frame.
   // Since none is found it will have to wait until the next key frame before
   // decoding.
-  EXPECT_EQ(kFlushIndicator, InsertFrame(kVideoFrameDelta));
+  EXPECT_EQ(kFlushIndicator, InsertFrame(VideoFrameType::kVideoFrameDelta));
   EXPECT_FALSE(DecodeCompleteFrame());
 }

 TEST_F(TestRunningJitterBuffer, EmptyPackets) {
   // Make sure a frame can get complete even though empty packets are missing.
-  stream_generator_->GenerateFrame(kVideoFrameKey, 3, 3,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 3,
                                    clock_->TimeInMilliseconds());
   bool request_key_frame = false;
   // Insert empty packet.

@@ -1719,11 +1723,11 @@ TEST_F(TestRunningJitterBuffer, StatisticsTest) {
   EXPECT_EQ(0u, bitrate);

   // Insert a couple of key and delta frames.
-  InsertFrame(kVideoFrameKey);
-  InsertFrame(kVideoFrameDelta);
-  InsertFrame(kVideoFrameDelta);
-  InsertFrame(kVideoFrameKey);
-  InsertFrame(kVideoFrameDelta);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameDelta);
+  InsertFrame(VideoFrameType::kVideoFrameDelta);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameDelta);
   // Decode some of them to make sure the statistics doesn't depend on frames
   // being decoded.
   EXPECT_TRUE(DecodeCompleteFrame());

@@ -1735,7 +1739,7 @@ TEST_F(TestRunningJitterBuffer, StatisticsTest) {
   // Insert 20 more frames to get estimates of bitrate and framerate over
   // 1 second.
   for (int i = 0; i < 20; ++i) {
-    InsertFrame(kVideoFrameDelta);
+    InsertFrame(VideoFrameType::kVideoFrameDelta);
   }
   jitter_buffer_->IncomingRateStatistics(&framerate, &bitrate);
   // TODO(holmer): The current implementation returns the average of the last

@@ -1746,7 +1750,7 @@ TEST_F(TestRunningJitterBuffer, StatisticsTest) {
   // Insert 25 more frames to get estimates of bitrate and framerate over
   // 2 seconds.
   for (int i = 0; i < 25; ++i) {
-    InsertFrame(kVideoFrameDelta);
+    InsertFrame(VideoFrameType::kVideoFrameDelta);
   }
   jitter_buffer_->IncomingRateStatistics(&framerate, &bitrate);
   EXPECT_EQ(kDefaultFrameRate, framerate);

@@ -1755,45 +1759,48 @@ TEST_F(TestRunningJitterBuffer, StatisticsTest) {

 TEST_F(TestRunningJitterBuffer, SkipToKeyFrame) {
   // Insert delta frames.
-  EXPECT_GE(InsertFrames(5, kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrames(5, VideoFrameType::kVideoFrameDelta), kNoError);
   // Can't decode without a key frame.
   EXPECT_FALSE(DecodeCompleteFrame());
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   // Skip to the next key frame.
   EXPECT_TRUE(DecodeCompleteFrame());
 }

 TEST_F(TestRunningJitterBuffer, DontSkipToKeyFrameIfDecodable) {
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
   const int kNumDeltaFrames = 5;
-  EXPECT_GE(InsertFrames(kNumDeltaFrames, kVideoFrameDelta), kNoError);
-  InsertFrame(kVideoFrameKey);
+  EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+            kNoError);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   for (int i = 0; i < kNumDeltaFrames + 1; ++i) {
     EXPECT_TRUE(DecodeCompleteFrame());
   }
 }

 TEST_F(TestRunningJitterBuffer, KeyDeltaKeyDelta) {
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
   const int kNumDeltaFrames = 5;
-  EXPECT_GE(InsertFrames(kNumDeltaFrames, kVideoFrameDelta), kNoError);
-  InsertFrame(kVideoFrameKey);
-  EXPECT_GE(InsertFrames(kNumDeltaFrames, kVideoFrameDelta), kNoError);
-  InsertFrame(kVideoFrameKey);
+  EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+            kNoError);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
+  EXPECT_GE(InsertFrames(kNumDeltaFrames, VideoFrameType::kVideoFrameDelta),
+            kNoError);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   for (int i = 0; i < 2 * (kNumDeltaFrames + 1); ++i) {
     EXPECT_TRUE(DecodeCompleteFrame());
   }
 }

 TEST_F(TestRunningJitterBuffer, TwoPacketsNonContinuous) {
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
                                    clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 2, 0,
                                    clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
   EXPECT_EQ(kCompleteSession, InsertPacketAndPop(1));

@@ -1806,22 +1813,23 @@ TEST_F(TestRunningJitterBuffer, TwoPacketsNonContinuous) {
 TEST_F(TestJitterBufferNack, EmptyPackets) {
   // Make sure empty packets doesn't clog the jitter buffer.
   jitter_buffer_->SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
-  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kEmptyFrame), kNoError);
-  InsertFrame(kVideoFrameKey);
+  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, VideoFrameType::kEmptyFrame),
+            kNoError);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
 }

 TEST_F(TestJitterBufferNack, NackTooOldPackets) {
   // Insert a key frame and decode it.
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());

   // Drop one frame and insert |kNackHistoryLength| to trigger NACKing a too
   // old packet.
   DropFrame(1);
   // Insert a frame which should trigger a recycle until the next key frame.
-  EXPECT_EQ(kFlushIndicator,
-            InsertFrames(oldest_packet_to_nack_ + 1, kVideoFrameDelta));
+  EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_ + 1,
+                                          VideoFrameType::kVideoFrameDelta));
   EXPECT_FALSE(DecodeCompleteFrame());

   bool request_key_frame = false;

@@ -1831,25 +1839,27 @@ TEST_F(TestJitterBufferNack, NackTooOldPackets) {
   EXPECT_FALSE(request_key_frame);
   EXPECT_EQ(0u, nack_list.size());

-  EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
   // Waiting for a key frame.
   EXPECT_FALSE(DecodeCompleteFrame());

   // The next complete continuous frame isn't a key frame, but we're waiting
   // for one.
   EXPECT_FALSE(DecodeCompleteFrame());
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   // Skipping ahead to the key frame.
   EXPECT_TRUE(DecodeCompleteFrame());
 }

 TEST_F(TestJitterBufferNack, NackLargeJitterBuffer) {
   // Insert a key frame and decode it.
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());

   // Insert a frame which should trigger a recycle until the next key frame.
-  EXPECT_GE(InsertFrames(oldest_packet_to_nack_, kVideoFrameDelta), kNoError);
+  EXPECT_GE(
+      InsertFrames(oldest_packet_to_nack_, VideoFrameType::kVideoFrameDelta),
+      kNoError);

   bool request_key_frame = false;
   std::vector<uint16_t> nack_list =

@@ -1864,13 +1874,13 @@ TEST_F(TestJitterBufferNack, NackLargeJitterBuffer) {

 TEST_F(TestJitterBufferNack, NackListFull) {
   // Insert a key frame and decode it.
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());

   // Generate and drop |kNackHistoryLength| packets to fill the NACK list.
   DropFrame(max_nack_list_size_ + 1);
   // Insert a frame which should trigger a recycle until the next key frame.
-  EXPECT_EQ(kFlushIndicator, InsertFrame(kVideoFrameDelta));
+  EXPECT_EQ(kFlushIndicator, InsertFrame(VideoFrameType::kVideoFrameDelta));
   EXPECT_FALSE(DecodeCompleteFrame());

   bool request_key_frame = false;

@@ -1879,7 +1889,7 @@ TEST_F(TestJitterBufferNack, NackListFull) {
   // packet.
   EXPECT_FALSE(request_key_frame);

-  EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
   // Now we have a packet in the jitter buffer, a key frame will be requested
   // since it's not a key frame.
   jitter_buffer_->GetNackList(&request_key_frame);

@@ -1889,7 +1899,7 @@ TEST_F(TestJitterBufferNack, NackListFull) {
   // The next complete continuous frame isn't a key frame, but we're waiting
   // for one.
   EXPECT_FALSE(DecodeCompleteFrame());
-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   // Skipping ahead to the key frame.
   EXPECT_TRUE(DecodeCompleteFrame());
 }

@@ -1897,7 +1907,7 @@ TEST_F(TestJitterBufferNack, NackListFull) {
 TEST_F(TestJitterBufferNack, NoNackListReturnedBeforeFirstDecode) {
   DropFrame(10);
   // Insert a frame and try to generate a NACK list. Shouldn't get one.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta), kNoError);
   bool request_key_frame = false;
   std::vector<uint16_t> nack_list =
       jitter_buffer_->GetNackList(&request_key_frame);

@@ -1908,8 +1918,8 @@ TEST_F(TestJitterBufferNack, NoNackListReturnedBeforeFirstDecode) {

 TEST_F(TestJitterBufferNack, NackListBuiltBeforeFirstDecode) {
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
-  InsertFrame(kVideoFrameKey);
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
+  InsertFrame(VideoFrameType::kVideoFrameKey);
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 2, 0,
                                    clock_->TimeInMilliseconds());
   stream_generator_->NextPacket(NULL);  // Drop packet.
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));

@@ -1921,7 +1931,7 @@ TEST_F(TestJitterBufferNack, NackListBuiltBeforeFirstDecode) {

 TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
-  stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
                                    clock_->TimeInMilliseconds());
   VCMPacket packet;
   stream_generator_->PopPacket(&packet, 0);

@@ -1948,7 +1958,7 @@ TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {

 TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrame) {
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
-  stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
                                    clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   // Drop second packet.

@@ -1968,14 +1978,14 @@ TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrameSecondInQueue) {
   VCMPacket packet;
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
   // First frame is delta.
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 3, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 3, 0,
                                    clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   // Drop second packet in frame.
   ASSERT_TRUE(stream_generator_->PopPacket(&packet, 0));
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   // Second frame is key.
-  stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 0,
                                    clock_->TimeInMilliseconds() + 10);
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   // Drop second packet in frame.

@@ -1993,13 +2003,13 @@ TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrameSecondInQueue) {
 TEST_F(TestJitterBufferNack, NormalOperation) {
   EXPECT_EQ(kNack, jitter_buffer_->nack_mode());

-  EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());

   // ----------------------------------------------------------------
   // | 1 | 2 | .. | 8 | 9 | x | 11 | 12 | .. | 19 | x | 21 | .. | 100 |
   // ----------------------------------------------------------------
-  stream_generator_->GenerateFrame(kVideoFrameKey, 100, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 100, 0,
                                    clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));

@@ -2032,10 +2042,10 @@ TEST_F(TestJitterBufferNack, NormalOperationWrap) {
   // | 65532 | | 65533 | 65534 | 65535 | x | 1 | .. | 9 | x | 11 |.....| 96 |
   // ------- ------------------------------------------------------------
   stream_generator_->Init(65532, clock_->TimeInMilliseconds());
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_FALSE(request_key_frame);
   EXPECT_TRUE(DecodeCompleteFrame());
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 100, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 100, 0,
                                    clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   while (stream_generator_->PacketsRemaining() > 1) {

@@ -2066,10 +2076,10 @@ TEST_F(TestJitterBufferNack, NormalOperationWrap2) {
   // | 65532 | 65533 | 65534 | x | 0 | 1 |
   // -----------------------------------
   stream_generator_->Init(65532, clock_->TimeInMilliseconds());
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_FALSE(request_key_frame);
   EXPECT_TRUE(DecodeCompleteFrame());
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
                                    clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
   for (int i = 0; i < 5; ++i) {

@@ -2079,7 +2089,7 @@ TEST_F(TestJitterBufferNack, NormalOperationWrap2) {
   } else {
     stream_generator_->NextPacket(NULL);  // Drop packet
   }
-  stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
+  stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameDelta, 1, 0,
                                    clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
 }

@@ -2094,7 +2104,7 @@ TEST_F(TestJitterBufferNack, NormalOperationWrap2) {

 TEST_F(TestJitterBufferNack, ResetByFutureKeyFrameDoesntError) {
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
   bool extended = false;
   std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);

@@ -2105,14 +2115,14 @@ TEST_F(TestJitterBufferNack, ResetByFutureKeyFrameDoesntError) {
   // a keyframe, even if all of the nack list needs to be flushed.
   stream_generator_->Init(10000, clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
-  InsertFrame(kVideoFrameKey);
+  InsertFrame(VideoFrameType::kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
   nack_list = jitter_buffer_->GetNackList(&extended);
   EXPECT_EQ(0u, nack_list.size());

   // Stream should be decodable from this point.
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
-  InsertFrame(kVideoFrameDelta);
+  InsertFrame(VideoFrameType::kVideoFrameDelta);
   EXPECT_TRUE(DecodeCompleteFrame());
   nack_list = jitter_buffer_->GetNackList(&extended);
   EXPECT_EQ(0u, nack_list.size());

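Every call site above and below now has to spell out the scoped enumerator, so it helps to keep the new type in view. A minimal sketch of what the api/video/video_frame_type.h header introduced by this change presumably contains, inferred from the enumerators used throughout the diff (the explicit numeric values are an assumption, chosen to stay compatible with the old unscoped constants):

    // Sketch only, not the verbatim header.
    namespace webrtc {

    enum class VideoFrameType {
      kEmptyFrame = 0,       // assumed value
      kVideoFrameKey = 3,    // assumed value
      kVideoFrameDelta = 4,  // assumed value
    };

    }  // namespace webrtc
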
@@ -23,7 +23,7 @@ VCMPacket::VCMPacket()
       sizeBytes(0),
       markerBit(false),
       timesNacked(-1),
-      frameType(kEmptyFrame),
+      frameType(VideoFrameType::kEmptyFrame),
       completeNALU(kNaluUnset),
       insertStartCode(false),
       video_header(),
@@ -16,7 +16,7 @@

 #include "absl/types/optional.h"
 #include "api/rtp_headers.h"
-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video/video_frame_type.h"
 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
 #include "modules/rtp_rtcp/source/rtp_video_header.h"

@@ -126,7 +126,7 @@ bool PacketBuffer::InsertPacket(VCMPacket* packet) {

   int64_t now_ms = clock_->TimeInMilliseconds();
   last_received_packet_ms_ = now_ms;
-  if (packet->frameType == kVideoFrameKey)
+  if (packet->frameType == VideoFrameType::kVideoFrameKey)
     last_received_keyframe_packet_ms_ = now_ms;

   found_frames = FindFrames(seq_num);

@@ -378,9 +378,11 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
   const size_t first_packet_index = start_seq_num % size_;
   RTC_CHECK_LT(first_packet_index, size_);
   if (is_h264_keyframe) {
-    data_buffer_[first_packet_index].frameType = kVideoFrameKey;
+    data_buffer_[first_packet_index].frameType =
+        VideoFrameType::kVideoFrameKey;
   } else {
-    data_buffer_[first_packet_index].frameType = kVideoFrameDelta;
+    data_buffer_[first_packet_index].frameType =
+        VideoFrameType::kVideoFrameDelta;
   }

   // If this is not a keyframe, make sure there are no gaps in the

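The mechanical shape of every hunk in this commit falls out of C++ scoped-enum rules: enumerators of an enum class neither leak into the enclosing scope nor convert to int implicitly. A standalone illustration of both effects (illustrative snippet, not code from the tree):

    enum class VideoFrameType { kEmptyFrame, kVideoFrameKey, kVideoFrameDelta };

    void Demo() {
      VideoFrameType t = VideoFrameType::kVideoFrameKey;  // qualification is now mandatory
      // int n = t;                 // no longer compiles: no implicit conversion
      int n = static_cast<int>(t);  // explicit cast, e.g. for logging or stats
      bool is_key = (t == VideoFrameType::kVideoFrameKey);  // comparisons are unaffected
      (void)n;
      (void)is_key;
    }
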
@@ -59,8 +59,10 @@ class TestVCMReceiver : public ::testing::Test {
   int32_t InsertFrame(VideoFrameType frame_type, bool complete) {
     int num_of_packets = complete ? 1 : 2;
     stream_generator_->GenerateFrame(
-        frame_type, (frame_type != kEmptyFrame) ? num_of_packets : 0,
-        (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
+        frame_type,
+        (frame_type != VideoFrameType::kEmptyFrame) ? num_of_packets : 0,
+        (frame_type == VideoFrameType::kEmptyFrame) ? 1 : 0,
+        clock_->TimeInMilliseconds());
     int32_t ret = InsertPacketAndPop(0);
     if (!complete) {
       // Drop the second packet.

@@ -94,7 +96,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_Empty) {
   const int kMinDelayMs = 500;
   receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
                             kMaxNonDecodableDuration);
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs);
   EXPECT_TRUE(DecodeNextFrame());

@@ -113,7 +115,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoKeyFrame) {
                             kMaxNonDecodableDuration);
   const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000;
   for (int i = 0; i < kNumFrames; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
   bool request_key_frame = false;
   std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);

@@ -133,12 +135,12 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_OneIncomplete) {
                             kMaxNonDecodableDuration);
   timing_.set_min_playout_delay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Insert an incomplete frame.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
   // Insert enough frames to have too long non-decodable sequence.
   for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -

@@ -163,13 +165,13 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
                             kMaxNonDecodableDuration);
   timing_.set_min_playout_delay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Insert an incomplete frame.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
   // Insert all but one frame to not trigger a key frame request due to
   // too long duration of non-decodable frames.
   for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -

@@ -195,14 +197,14 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger2) {
                             kMaxNonDecodableDuration);
   timing_.set_min_playout_delay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Insert enough frames to have too long non-decodable sequence, except that
   // we don't have any losses.
   for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
   // Insert an incomplete frame.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
                                   key_frame_inserted);

@@ -227,14 +229,14 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_KeyFrameAfterIncompleteFrames) {
                             kMaxNonDecodableDuration);
   timing_.set_min_playout_delay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Insert an incomplete frame.
-  EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError);
   // Insert enough frames to have too long non-decodable sequence.
   for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
-    EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+    EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError);
   }
-  EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+  EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError);
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
                                   key_frame_inserted);

@@ -195,12 +195,13 @@ RtpFrameReferenceFinder::ManageFramePidOrSeqNum(RtpFrameObject* frame,
   // otherwise we use sequence number.
   if (picture_id != kNoPictureId) {
     frame->id.picture_id = unwrapper_.Unwrap(picture_id);
-    frame->num_references = frame->frame_type() == kVideoFrameKey ? 0 : 1;
+    frame->num_references =
+        frame->frame_type() == VideoFrameType::kVideoFrameKey ? 0 : 1;
     frame->references[0] = frame->id.picture_id - 1;
     return kHandOff;
   }

-  if (frame->frame_type() == kVideoFrameKey) {
+  if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
     last_seq_num_gop_.insert(std::make_pair(
         frame->last_seq_num(),
         std::make_pair(frame->last_seq_num(), frame->last_seq_num())));

@@ -234,7 +235,7 @@ RtpFrameReferenceFinder::ManageFramePidOrSeqNum(RtpFrameObject* frame,
   // this frame.
   uint16_t last_picture_id_gop = seq_num_it->second.first;
   uint16_t last_picture_id_with_padding_gop = seq_num_it->second.second;
-  if (frame->frame_type() == kVideoFrameDelta) {
+  if (frame->frame_type() == VideoFrameType::kVideoFrameDelta) {
     uint16_t prev_seq_num = frame->first_seq_num() - 1;

     if (prev_seq_num != last_picture_id_with_padding_gop)

@@ -246,7 +247,8 @@ RtpFrameReferenceFinder::ManageFramePidOrSeqNum(RtpFrameObject* frame,
   // Since keyframes can cause reordering we can't simply assign the
   // picture id according to some incrementing counter.
   frame->id.picture_id = frame->last_seq_num();
-  frame->num_references = frame->frame_type() == kVideoFrameDelta;
+  frame->num_references =
+      frame->frame_type() == VideoFrameType::kVideoFrameDelta;
   frame->references[0] = rtp_seq_num_unwrapper_.Unwrap(last_picture_id_gop);
   if (AheadOf<uint16_t>(frame->id.picture_id, last_picture_id_gop)) {
     seq_num_it->second.first = frame->id.picture_id;

@@ -306,7 +308,7 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp8(
   not_yet_received_frames_.erase(not_yet_received_frames_.begin(),
                                  clean_frames_to);

-  if (frame->frame_type() == kVideoFrameKey) {
+  if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
     frame->num_references = 0;
     layer_info_[unwrapped_tl0].fill(-1);
     UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx);

@@ -483,13 +485,13 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp9(

     info = &gof_info_it->second;

-    if (frame->frame_type() == kVideoFrameKey) {
+    if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
       frame->num_references = 0;
       FrameReceivedVp9(frame->id.picture_id, info);
       UnwrapPictureIds(frame);
       return kHandOff;
     }
-  } else if (frame->frame_type() == kVideoFrameKey) {
+  } else if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
     if (frame->id.spatial_layer == 0) {
       RTC_LOG(LS_WARNING) << "Received keyframe without scalability structure";
       return kDrop;

@@ -500,7 +502,7 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp9(

     info = &gof_info_it->second;

-    if (frame->frame_type() == kVideoFrameKey) {
+    if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
       frame->num_references = 0;
       FrameReceivedVp9(frame->id.picture_id, info);
       UnwrapPictureIds(frame);

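One line in the reference-finder hunks above deserves a second look: frame->num_references = frame->frame_type() == VideoFrameType::kVideoFrameDelta; keeps compiling after the migration because the implicit conversion happens on the comparison result (a bool), not on the enum itself. A self-contained restatement of that idiom (assumed simplified types, not from the tree):

    #include <cstddef>

    enum class VideoFrameType { kVideoFrameKey, kVideoFrameDelta };

    // bool converts to 0 or 1: a key frame has no references, a delta frame one.
    std::size_t NumReferences(VideoFrameType type) {
      return type == VideoFrameType::kVideoFrameDelta;
    }
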
@@ -82,7 +82,8 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
     VCMPacket packet;
     packet.video_header.codec = kVideoCodecGeneric;
     packet.seqNum = seq_num_start;
-    packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
+    packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
+                                : VideoFrameType::kVideoFrameDelta;
     ref_packet_buffer_->InsertPacket(&packet);

     packet.seqNum = seq_num_end;

@@ -106,7 +107,8 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
     packet.seqNum = seq_num_start;
     packet.video_header.is_last_packet_in_frame =
         (seq_num_start == seq_num_end);
-    packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
+    packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
+                                : VideoFrameType::kVideoFrameDelta;
     auto& vp8_header =
         packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
     vp8_header.pictureId = pid % (1 << 15);

@@ -144,7 +146,8 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
     packet.seqNum = seq_num_start;
     packet.video_header.is_last_packet_in_frame =
         (seq_num_start == seq_num_end);
-    packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
+    packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
+                                : VideoFrameType::kVideoFrameDelta;
     vp9_header.flexible_mode = false;
     vp9_header.picture_id = pid % (1 << 15);
     vp9_header.temporal_idx = tid;

@@ -186,7 +189,8 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
     packet.seqNum = seq_num_start;
     packet.video_header.is_last_packet_in_frame =
         (seq_num_start == seq_num_end);
-    packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
+    packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
+                                : VideoFrameType::kVideoFrameDelta;
     vp9_header.inter_layer_predicted = inter;
     vp9_header.flexible_mode = true;
     vp9_header.picture_id = pid % (1 << 15);

@@ -36,7 +36,7 @@ uint16_t BufferToUWord16(const uint8_t* dataBuffer) {

 VCMSessionInfo::VCMSessionInfo()
     : complete_(false),
-      frame_type_(kVideoFrameDelta),
+      frame_type_(VideoFrameType::kVideoFrameDelta),
      packets_(),
      empty_seq_num_low_(-1),
      empty_seq_num_high_(-1),

@@ -172,7 +172,7 @@ void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {

 void VCMSessionInfo::Reset() {
   complete_ = false;
-  frame_type_ = kVideoFrameDelta;
+  frame_type_ = VideoFrameType::kVideoFrameDelta;
   packets_.clear();
   empty_seq_num_low_ = -1;
   empty_seq_num_high_ = -1;

@@ -426,7 +426,7 @@ bool VCMSessionInfo::HaveLastPacket() const {
 int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
                                  uint8_t* frame_buffer,
                                  const FrameData& frame_data) {
-  if (packet.frameType == kEmptyFrame) {
+  if (packet.frameType == VideoFrameType::kEmptyFrame) {
     // Update sequence number of an empty packet.
     // Only media packets are inserted into the packet list.
     InformOfEmptyPacket(packet.seqNum);

@@ -479,7 +479,8 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
                << "Received packet with a sequence number which is out "
                   "of frame boundaries";
     return -3;
-  } else if (frame_type_ == kEmptyFrame && packet.frameType != kEmptyFrame) {
+  } else if (frame_type_ == VideoFrameType::kEmptyFrame &&
+             packet.frameType != VideoFrameType::kEmptyFrame) {
     // Update the frame type with the type of the first media packet.
     // TODO(mikhal): Can this trigger?
     frame_type_ = packet.frameType;

@@ -23,7 +23,7 @@ class TestSessionInfo : public ::testing::Test {
     memset(packet_buffer_, 0, sizeof(packet_buffer_));
     memset(frame_buffer_, 0, sizeof(frame_buffer_));
     session_.Reset();
-    packet_.frameType = kVideoFrameDelta;
+    packet_.frameType = VideoFrameType::kVideoFrameDelta;
     packet_.sizeBytes = packet_buffer_size();
     packet_.dataPtr = packet_buffer_;
     packet_.seqNum = 0;

@@ -116,12 +116,12 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
   packet_.video_header.is_first_packet_in_frame = true;
   packet_.seqNum = 0xFFFE;
   packet_.sizeBytes = packet_buffer_size();
-  packet_.frameType = kVideoFrameKey;
+  packet_.frameType = VideoFrameType::kVideoFrameKey;
   FillPacket(0);
   EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
                                       packet_, frame_buffer_, frame_data)));
   EXPECT_FALSE(session_.HaveLastPacket());
-  EXPECT_EQ(kVideoFrameKey, session_.FrameType());
+  EXPECT_EQ(VideoFrameType::kVideoFrameKey, session_.FrameType());

   packet_.video_header.is_first_packet_in_frame = false;
   packet_.markerBit = true;

@@ -138,7 +138,7 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
   packet_.markerBit = true;
   packet_.seqNum = 2;
   packet_.sizeBytes = 0;
-  packet_.frameType = kEmptyFrame;
+  packet_.frameType = VideoFrameType::kEmptyFrame;
   EXPECT_EQ(0, session_.InsertPacket(packet_, frame_buffer_, frame_data));
   EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
 }

@@ -309,7 +309,7 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
 TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
   packet_.video_header.is_first_packet_in_frame = false;
   packet_.completeNALU = kNaluComplete;
-  packet_.frameType = kEmptyFrame;
+  packet_.frameType = VideoFrameType::kEmptyFrame;
   packet_.sizeBytes = 0;
   packet_.seqNum = 0;
   packet_.markerBit = false;

@@ -44,7 +44,7 @@ void StreamGenerator::GenerateFrame(VideoFrameType type,
   }
   for (int i = 0; i < num_empty_packets; ++i) {
     packets_.push_back(GeneratePacket(sequence_number_, timestamp, 0, false,
-                                      false, kEmptyFrame));
+                                      false, VideoFrameType::kEmptyFrame));
     ++sequence_number_;
   }
 }

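For orientation, the generator above is invoked throughout the jitter buffer tests as GenerateFrame(type, number of media packets, number of empty packets, timestamp). A condensed usage sketch based on calls visible in this diff:

    // One key frame split across three media packets, followed by three empty
    // packets; the generator advances its sequence number for both kinds.
    stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey, 3, 3,
                                     clock_->TimeInMilliseconds());
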
@@ -76,11 +76,11 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
    bool is_vp8 = (codec_specific_info->codecType == kVideoCodecVP8);
    // Only store the base layer.
    if (encoded_image.SpatialIndex().value_or(0) == 0) {
-      if (encoded_image._frameType == kVideoFrameKey) {
+      if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
        // TODO(nisse): Why not size() ?
        encoded_key_frame_.Allocate(encoded_image.capacity());
        encoded_key_frame_.set_size(encoded_image.size());
-        encoded_key_frame_._frameType = kVideoFrameKey;
+        encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey;
        encoded_key_frame_._completeFrame = encoded_image._completeFrame;
        memcpy(encoded_key_frame_.data(), encoded_image.data(),
               encoded_image.size());

@@ -295,17 +295,17 @@ void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
 void SimulcastTestFixtureImpl::RunActiveStreamsTest(
     const std::vector<bool> active_streams) {
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
+                                          VideoFrameType::kVideoFrameDelta);
   UpdateActiveStreams(active_streams);
   // Set sufficient bitrate for all streams so we can test active without
   // bitrate being an issue.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);

-  ExpectStreams(kVideoFrameKey, active_streams);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, active_streams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  ExpectStreams(kVideoFrameDelta, active_streams);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, active_streams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }

@@ -397,33 +397,36 @@ void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
 void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  frame_types[0] = kVideoFrameKey;
-  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+  frame_types[0] = VideoFrameType::kVideoFrameKey;
+  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
-  frame_types[1] = kVideoFrameKey;
-  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+  std::fill(frame_types.begin(), frame_types.end(),
+            VideoFrameType::kVideoFrameDelta);
+  frame_types[1] = VideoFrameType::kVideoFrameKey;
+  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
-  frame_types[2] = kVideoFrameKey;
-  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+  std::fill(frame_types.begin(), frame_types.end(),
+            VideoFrameType::kVideoFrameDelta);
+  frame_types[2] = VideoFrameType::kVideoFrameKey;
+  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
-  ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
+  std::fill(frame_types.begin(), frame_types.end(),
+            VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }

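As the hunk above shows, a caller requests a key frame on a single simulcast stream by flipping one slot of the frame-type vector passed to Encode(); the fixture then expects key frames on all streams, since the encoder upgrades the request across the bundle. A condensed sketch reusing the fixture's members (not a new test):

    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                            VideoFrameType::kVideoFrameDelta);
    frame_types[1] = VideoFrameType::kVideoFrameKey;  // ask for stream 1 only
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
    // Per the expectations above, all three streams produce key frames.
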
@@ -432,11 +435,11 @@ void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
   // We should always encode the base layer.
   SetRates(kMinBitrates[0] - 1, 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 1);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  ExpectStreams(kVideoFrameDelta, 1);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }

@@ -445,11 +448,11 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
   // We have just enough to get only the first stream and padding for two.
   SetRates(kMinBitrates[0], 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 1);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  ExpectStreams(kVideoFrameDelta, 1);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }

@@ -459,11 +462,11 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
   // the first stream maxed out (at |maxBitrate|), and padding for two.
   SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 1);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  ExpectStreams(kVideoFrameDelta, 1);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }

@@ -472,11 +475,11 @@ void SimulcastTestFixtureImpl::TestPaddingOneStream() {
   // We have just enough to send two streams, so padding for one stream.
   SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 2);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  ExpectStreams(kVideoFrameDelta, 2);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }

@@ -486,11 +489,11 @@ void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
   // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 2);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  ExpectStreams(kVideoFrameDelta, 2);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }

@@ -499,11 +502,11 @@ void SimulcastTestFixtureImpl::TestSendAllStreams() {
   // We have just enough to send all streams.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                          kVideoFrameDelta);
-  ExpectStreams(kVideoFrameKey, 3);
+                                          VideoFrameType::kVideoFrameDelta);
+  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

-  ExpectStreams(kVideoFrameDelta, 3);
+  ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 }

@ -512,44 +515,44 @@ void SimulcastTestFixtureImpl::TestDisablingStreams() {
|
|||
// We should get three media streams.
|
||||
SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
|
||||
std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
|
||||
kVideoFrameDelta);
|
||||
ExpectStreams(kVideoFrameKey, 3);
|
||||
VideoFrameType::kVideoFrameDelta);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
ExpectStreams(kVideoFrameDelta, 3);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
// We should only get two streams and padding for one.
|
||||
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
|
||||
ExpectStreams(kVideoFrameDelta, 2);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
// We should only get the first stream and padding for two.
|
||||
SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
|
||||
ExpectStreams(kVideoFrameDelta, 1);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
// We don't have enough bitrate for the thumbnail stream, but we should get
|
||||
// it anyway with current configuration.
|
||||
SetRates(kTargetBitrates[0] - 1, 30);
|
||||
ExpectStreams(kVideoFrameDelta, 1);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
// We should only get two streams and padding for one.
|
||||
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
|
||||
// We get a key frame because a new stream is being enabled.
|
||||
ExpectStreams(kVideoFrameKey, 2);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
|
||||
// We should get all three streams.
|
||||
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
|
||||
// We get a key frame because a new stream is being enabled.
|
||||
ExpectStreams(kVideoFrameKey, 3);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
|
||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
|
||||
}
|
||||
|
@ -618,10 +621,11 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
|
|||
// Encode one frame and verify.
|
||||
SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
|
||||
std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
|
||||
kVideoFrameDelta);
|
||||
VideoFrameType::kVideoFrameDelta);
|
||||
EXPECT_CALL(
|
||||
encoder_callback_,
|
||||
OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
|
||||
OnEncodedImage(AllOf(Field(&EncodedImage::_frameType,
|
||||
VideoFrameType::kVideoFrameKey),
|
||||
Field(&EncodedImage::_encodedWidth, width),
|
||||
Field(&EncodedImage::_encodedHeight, height)),
|
||||
_, _))
|
||||
|
@ -637,7 +641,7 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
|
|||
SetUpRateAllocator();
|
||||
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
|
||||
SetRates(settings_.startBitrate, 30);
|
||||
ExpectStreams(kVideoFrameKey, 1);
|
||||
ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
|
||||
// Resize |input_frame_| to the new resolution.
|
||||
input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
|
||||
input_buffer_->InitializeData();
|
||||
|
@ -853,7 +857,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
|
|||
testing::Invoke([&](const EncodedImage& encoded_image,
|
||||
const CodecSpecificInfo* codec_specific_info,
|
||||
const RTPFragmentationHeader* fragmentation) {
|
||||
EXPECT_EQ(encoded_image._frameType, kVideoFrameKey);
|
||||
EXPECT_EQ(encoded_image._frameType, VideoFrameType::kVideoFrameKey);
|
||||
|
||||
size_t index = encoded_image.SpatialIndex().value_or(0);
|
||||
// TODO(nisse): Why not size()
|
||||
|
|
|
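All of the test-fixture edits above follow one mechanical pattern: enumerators that used to sit at namespace scope (kVideoFrameKey, kVideoFrameDelta, kEmptyFrame) must now be qualified with the VideoFrameType:: scope. For context, here is a minimal sketch of what the new api/video/video_frame_type.h plausibly contains; the enumerator set is taken from this diff, but the numeric values and header-guard spelling are assumptions, not a quote of the real file:

// Sketch only: mirrors the enumerators visible in this diff.
#ifndef API_VIDEO_VIDEO_FRAME_TYPE_H_
#define API_VIDEO_VIDEO_FRAME_TYPE_H_

namespace webrtc {

// A scoped enum neither leaks its enumerators into namespace webrtc
// nor converts implicitly to int, which is why call sites in this
// change gain VideoFrameType:: qualifiers and static_cast<int> at
// logging and JNI boundaries.
enum class VideoFrameType {
  kEmptyFrame = 0,
  kVideoFrameKey = 3,   // Assumed values; kept distinct on purpose.
  kVideoFrameDelta = 4,
};

}  // namespace webrtc

#endif  // API_VIDEO_VIDEO_FRAME_TYPE_H_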
@ -65,8 +65,8 @@ class TestPacketBuffer : public ::testing::Test,
packet.video_header.codec = kVideoCodecGeneric;
packet.timestamp = timestamp;
packet.seqNum = seq_num;
packet.frameType =
keyframe == kKeyFrame ? kVideoFrameKey : kVideoFrameDelta;
packet.frameType = keyframe == kKeyFrame ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.video_header.is_first_packet_in_frame = first == kFirst;
packet.video_header.is_last_packet_in_frame = last == kLast;
packet.sizeBytes = data_size;

@ -163,7 +163,7 @@ TEST_F(TestPacketBuffer, NackCount) {
VCMPacket packet;
packet.video_header.codec = kVideoCodecGeneric;
packet.seqNum = seq_num;
packet.frameType = kVideoFrameKey;
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.is_first_packet_in_frame = true;
packet.video_header.is_last_packet_in_frame = false;
packet.timesNacked = 0;

@ -788,7 +788,7 @@ TEST_F(TestPacketBuffer, IncomingCodecChange) {
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
packet.timestamp = 1;
packet.seqNum = 1;
packet.frameType = kVideoFrameKey;
packet.frameType = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));

packet.video_header.codec = kVideoCodecH264;

@ -803,7 +803,7 @@ TEST_F(TestPacketBuffer, IncomingCodecChange) {
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
packet.timestamp = 2;
packet.seqNum = 2;
packet.frameType = kVideoFrameDelta;
packet.frameType = VideoFrameType::kVideoFrameDelta;

EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));

@ -815,7 +815,7 @@ TEST_F(TestPacketBuffer, TooManyNalusInPacket) {
packet.video_header.codec = kVideoCodecH264;
packet.timestamp = 1;
packet.seqNum = 1;
packet.frameType = kVideoFrameKey;
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.is_first_packet_in_frame = true;
packet.video_header.is_last_packet_in_frame = true;
auto& h264_header =

@ -922,7 +922,8 @@ TEST_F(TestPacketBufferH264IdrIsKeyframe, IdrIsKeyframe) {
packet_buffer_->InsertPacket(&packet_);

ASSERT_EQ(1u, frames_from_callback_.size());
EXPECT_EQ(kVideoFrameKey, frames_from_callback_[kSeqNum]->frame_type());
EXPECT_EQ(VideoFrameType::kVideoFrameKey,
frames_from_callback_[kSeqNum]->frame_type());
}

TEST_F(TestPacketBufferH264IdrIsKeyframe, SpsPpsIdrIsKeyframe) {

@ -936,7 +937,8 @@ TEST_F(TestPacketBufferH264IdrIsKeyframe, SpsPpsIdrIsKeyframe) {
packet_buffer_->InsertPacket(&packet_);

ASSERT_EQ(1u, frames_from_callback_.size());
EXPECT_EQ(kVideoFrameKey, frames_from_callback_[kSeqNum]->frame_type());
EXPECT_EQ(VideoFrameType::kVideoFrameKey,
frames_from_callback_[kSeqNum]->frame_type());
}

class TestPacketBufferH264SpsPpsIdrIsKeyframe

@ -955,7 +957,8 @@ TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, IdrIsNotKeyframe) {
packet_buffer_->InsertPacket(&packet_);

ASSERT_EQ(1u, frames_from_callback_.size());
EXPECT_EQ(kVideoFrameDelta, frames_from_callback_[5]->frame_type());
EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
frames_from_callback_[5]->frame_type());
}

TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIsNotKeyframe) {

@ -968,7 +971,8 @@ TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIsNotKeyframe) {
packet_buffer_->InsertPacket(&packet_);

ASSERT_EQ(1u, frames_from_callback_.size());
EXPECT_EQ(kVideoFrameDelta, frames_from_callback_[kSeqNum]->frame_type());
EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
frames_from_callback_[kSeqNum]->frame_type());
}

TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIdrIsKeyframe) {

@ -982,7 +986,8 @@ TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIdrIsKeyframe) {
packet_buffer_->InsertPacket(&packet_);

ASSERT_EQ(1u, frames_from_callback_.size());
EXPECT_EQ(kVideoFrameKey, frames_from_callback_[kSeqNum]->frame_type());
EXPECT_EQ(VideoFrameType::kVideoFrameKey,
frames_from_callback_[kSeqNum]->frame_type());
}

} // namespace video_coding
@ -288,7 +288,7 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
if (drop_frames_until_keyframe_) {
// Still getting delta frames, schedule another keyframe request as if
// decode failed.
if (frame->FrameType() != kVideoFrameKey) {
if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
drop_frame = true;
_scheduleKeyRequest = true;
// TODO(tommi): Consider if we could instead post a task to the module

@ -384,7 +384,7 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) {
RTC_DCHECK_RUN_ON(&module_thread_checker_);
if (rtpInfo.frameType == kVideoFrameKey) {
if (rtpInfo.frameType == VideoFrameType::kVideoFrameKey) {
TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame", "seqnum",
rtpInfo.header.sequenceNumber);
}
@ -58,7 +58,7 @@ class TestVideoReceiver : public ::testing::Test {

WebRtcRTPHeader GetDefaultVp8Header() const {
WebRtcRTPHeader header = {};
header.frameType = kEmptyFrame;
header.frameType = VideoFrameType::kEmptyFrame;
header.header.markerBit = false;
header.header.payloadType = kUnusedPayloadType;
header.header.ssrc = 1;

@ -122,14 +122,14 @@ TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
header.video_header().video_type_header.emplace<RTPVideoHeaderVP8>();

// Insert one video frame to get one frame decoded.
header.frameType = kVideoFrameKey;
header.frameType = VideoFrameType::kVideoFrameKey;
header.video_header().is_first_packet_in_frame = true;
header.header.markerBit = true;
InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header);

clock_.AdvanceTimeMilliseconds(33);
header.header.timestamp += 3000;
header.frameType = kEmptyFrame;
header.frameType = VideoFrameType::kEmptyFrame;
header.video_header().is_first_packet_in_frame = false;
header.header.markerBit = false;
// Insert padding frames.

@ -172,9 +172,9 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
// Insert 2 video frames.
for (int j = 0; j < 2; ++j) {
if (i == 0 && j == 0) // First frame should be a key frame.
header.frameType = kVideoFrameKey;
header.frameType = VideoFrameType::kVideoFrameKey;
else
header.frameType = kVideoFrameDelta;
header.frameType = VideoFrameType::kVideoFrameDelta;
header.video_header().is_first_packet_in_frame = true;
header.header.markerBit = true;
InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header);

@ -183,7 +183,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
}

// Insert 2 padding only frames.
header.frameType = kEmptyFrame;
header.frameType = VideoFrameType::kEmptyFrame;
header.video_header().is_first_packet_in_frame = false;
header.header.markerBit = false;
for (int j = 0; j < 2; ++j) {
@ -686,6 +686,7 @@ if (is_android) {
"../../api/task_queue",
"../../api/video:encoded_image",
"../../api/video:video_frame",
"../../api/video:video_frame_type",
"../../api/video_codecs:rtc_software_fallback_wrappers",
"../../api/video_codecs:video_codecs_api",
"../../common_video",
@ -398,7 +398,7 @@ int32_t MediaCodecVideoDecoder::Decode(

// Always start with a complete key frame.
if (key_frame_required_) {
if (inputImage._frameType != kVideoFrameKey) {
if (inputImage._frameType != VideoFrameType::kVideoFrameKey) {
ALOGE << "Decode() - key frame is required";
return WEBRTC_VIDEO_CODEC_ERROR;
}

@ -487,8 +487,8 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(

if (frames_decoded_ < frames_decoded_logged_) {
ALOGD << "Decoder frame in # " << frames_received_
<< ". Type: " << inputImage._frameType << ". Buffer # "
<< j_input_buffer_index
<< ". Type: " << static_cast<int>(inputImage._frameType)
<< ". Buffer # " << j_input_buffer_index
<< ". TS: " << presentation_timestamp_us / 1000
<< ". Size: " << inputImage.size();
}
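The static_cast<int> added to the log statement above is not cosmetic: an unscoped enum streams to a logger through integral promotion, but a scoped enum has no implicit conversion, so the old expression stops compiling. A self-contained illustration in standard C++ (not WebRTC code; the enumerator value is assumed):

#include <iostream>

enum LegacyFrameType { kLegacyKey = 3 };           // Unscoped: converts to int.
enum class VideoFrameType { kVideoFrameKey = 3 };  // Scoped: no conversion.

int main() {
  std::cout << kLegacyKey << "\n";  // OK: prints 3 via integral promotion.
  // std::cout << VideoFrameType::kVideoFrameKey;  // Error: no operator<<.
  std::cout << static_cast<int>(VideoFrameType::kVideoFrameKey) << "\n";  // OK.
}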
@ -686,7 +686,8 @@ int32_t MediaCodecVideoEncoder::Encode(
}

const bool key_frame =
frame_types->front() != kVideoFrameDelta || send_key_frame;
frame_types->front() != VideoFrameType::kVideoFrameDelta ||
send_key_frame;
bool encode_status = true;

int j_input_buffer_index = -1;

@ -1009,7 +1010,8 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
? VideoContentType::SCREENSHARE
: VideoContentType::UNSPECIFIED;
image->timing_.flags = VideoSendTiming::kInvalid;
image->_frameType = (key_frame ? kVideoFrameKey : kVideoFrameDelta);
image->_frameType = (key_frame ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta);
image->_completeFrame = true;
CodecSpecificInfo info;
memset(&info, 0, sizeof(info));
@ -21,7 +21,7 @@ namespace jni {

ScopedJavaLocalRef<jobject> NativeToJavaFrameType(JNIEnv* env,
VideoFrameType frame_type) {
return Java_FrameType_fromNativeIndex(env, frame_type);
return Java_FrameType_fromNativeIndex(env, static_cast<int>(frame_type));
}

ScopedJavaLocalRef<jobject> NativeToJavaEncodedImage(
@ -14,7 +14,7 @@
#include <jni.h>
#include <vector>

#include "common_types.h" // NOLINT(build/include)
#include "api/video/video_frame_type.h"

#include "sdk/android/native_api/jni/scoped_java_ref.h"
@ -383,7 +383,7 @@ int VideoEncoderWrapper::ParseQp(const std::vector<uint8_t>& buffer) {

CodecSpecificInfo VideoEncoderWrapper::ParseCodecSpecificInfo(
const EncodedImage& frame) {
const bool key_frame = frame._frameType == kVideoFrameKey;
const bool key_frame = frame._frameType == VideoFrameType::kVideoFrameKey;

CodecSpecificInfo info;
info.codecType = codec_settings_.codecType;
@ -51,7 +51,7 @@ int32_t ConfigurableFrameSizeEncoder::Encode(
encodedImage._completeFrame = true;
encodedImage._encodedHeight = inputImage.height();
encodedImage._encodedWidth = inputImage.width();
encodedImage._frameType = kVideoFrameKey;
encodedImage._frameType = VideoFrameType::kVideoFrameKey;
encodedImage.SetTimestamp(inputImage.timestamp());
encodedImage.capture_time_ms_ = inputImage.render_time_ms();
RTPFragmentationHeader* fragmentation = NULL;
@ -132,8 +132,8 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image,
WriteCounter(encoded.data() + frame_info.layers[i].size - 4, counter);
encoded.SetTimestamp(input_image.timestamp());
encoded.capture_time_ms_ = input_image.render_time_ms();
encoded._frameType =
frame_info.keyframe ? kVideoFrameKey : kVideoFrameDelta;
encoded._frameType = frame_info.keyframe ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
encoded._encodedWidth = simulcast_streams[i].width;
encoded._encodedHeight = simulcast_streams[i].height;
encoded.rotation_ = input_image.rotation();

@ -172,7 +172,7 @@ FakeEncoder::FrameInfo FakeEncoder::NextFrame(

if (frame_types) {
for (VideoFrameType frame_type : *frame_types) {
if (frame_type == kVideoFrameKey) {
if (frame_type == VideoFrameType::kVideoFrameKey) {
frame_info.keyframe = true;
break;
}
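The loop in FakeEncoder::NextFrame() above is simply asking "did any stream request a key frame?". With the scoped enum the same scan can be written with std::any_of; the helper below is our restatement for illustration, not code from this change:

#include <algorithm>
#include <vector>

enum class VideoFrameType { kEmptyFrame, kVideoFrameKey, kVideoFrameDelta };

// Returns true if any simulcast stream asked for a key frame. A null
// pointer means "no preference", which counts as no key-frame request.
bool WantsKeyFrame(const std::vector<VideoFrameType>* frame_types) {
  return frame_types != nullptr &&
         std::any_of(frame_types->begin(), frame_types->end(),
                     [](VideoFrameType type) {
                       return type == VideoFrameType::kVideoFrameKey;
                     });
}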
@ -80,9 +80,9 @@ void FakeVP8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
codec_specific->codecType = kVideoCodecVP8;
codec_specific->codecSpecific.VP8.keyIdx = kNoKeyIdx;
codec_specific->codecSpecific.VP8.nonReference = false;
frame_buffer_controller_->OnEncodeDone(stream_idx, timestamp, size_bytes,
frame_type == kVideoFrameKey, -1,
codec_specific);
frame_buffer_controller_->OnEncodeDone(
stream_idx, timestamp, size_bytes,
frame_type == VideoFrameType::kVideoFrameKey, -1, codec_specific);
}

std::unique_ptr<RTPFragmentationHeader> FakeVP8Encoder::EncodeHook(

@ -100,7 +100,7 @@ std::unique_ptr<RTPFragmentationHeader> FakeVP8Encoder::EncodeHook(
// does.
WriteFakeVp8(encoded_image->data(), encoded_image->_encodedWidth,
encoded_image->_encodedHeight,
encoded_image->_frameType == kVideoFrameKey);
encoded_image->_frameType == VideoFrameType::kVideoFrameKey);
return nullptr;
}
@ -86,7 +86,8 @@ class BufferedFrameDecryptorTest
VCMPacket packet;
packet.video_header.codec = kVideoCodecGeneric;
packet.seqNum = seq_num_;
packet.frameType = key_frame ? kVideoFrameKey : kVideoFrameDelta;
packet.frameType = key_frame ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.generic_descriptor = RtpGenericFrameDescriptor();
fake_packet_buffer_->InsertPacket(&packet);
packet.seqNum = seq_num_;
@ -146,7 +146,7 @@ class PictureIdObserver : public test::RtpRtcpObserver {
if (diff > 1) {
// If the VideoSendStream is destroyed, any frames still in queue is lost.
// Gaps only possible for first frame after a recreation, i.e. key frames.
EXPECT_EQ(kVideoFrameKey, current.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, current.frame_type);
EXPECT_LE(diff - 1, max_expected_picture_id_gap_);
}
}

@ -172,7 +172,7 @@ class PictureIdObserver : public test::RtpRtcpObserver {
if (diff > 1) {
// If the VideoSendStream is destroyed, any frames still in queue is lost.
// Gaps only possible for first frame after a recreation, i.e. key frames.
EXPECT_EQ(kVideoFrameKey, current.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, current.frame_type);
EXPECT_LE(diff - 1, max_expected_tl0_idx_gap_);
}
}
@ -228,7 +228,7 @@ int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
size_t payload_size,
const RTPHeader& rtp_header,
const RTPVideoHeader& video_header,
FrameType frame_type,
VideoFrameType frame_type,
const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
bool is_recovered) {
VCMPacket packet(payload_data, payload_size, rtp_header, video_header,

@ -236,8 +236,8 @@ int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
packet.generic_descriptor = generic_descriptor;

if (nack_module_) {
const bool is_keyframe =
video_header.is_first_packet_in_frame && frame_type == kVideoFrameKey;
const bool is_keyframe = video_header.is_first_packet_in_frame &&
frame_type == VideoFrameType::kVideoFrameKey;

packet.timesNacked = nack_module_->OnReceivedPacket(
rtp_header.sequenceNumber, is_keyframe, is_recovered);

@ -417,7 +417,7 @@ void RtpVideoStreamReceiver::OnAssembledFrame(
descriptor->FrameDependenciesDiffs());
} else if (!has_received_frame_) {
// Request a key frame as soon as possible.
if (frame->FrameType() != kVideoFrameKey) {
if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
keyframe_request_sender_->RequestKeyFrame();
}
}

@ -541,7 +541,8 @@ void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);

video_header.color_space = packet.GetExtension<ColorSpaceExtension>();
if (video_header.color_space || parsed_payload.frame_type == kVideoFrameKey) {
if (video_header.color_space ||
parsed_payload.frame_type == VideoFrameType::kVideoFrameKey) {
// Store color space since it's only transmitted when changed or for key
// frames. Color space will be cleared if a key frame is transmitted without
// color space information.

@ -580,8 +581,8 @@ void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
if (generic_descriptor_wire->FirstPacketInSubFrame()) {
parsed_payload.frame_type =
generic_descriptor_wire->FrameDependenciesDiffs().empty()
? kVideoFrameKey
: kVideoFrameDelta;
? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
}

video_header.width = generic_descriptor_wire->Width();
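The last hunk above encodes a simple classification rule: under the generic frame descriptor, a frame that reports no dependency diffs references nothing earlier, so it is treated as a key frame; anything else is a delta frame. A stand-alone restatement of that rule, where the helper name and the plain std::vector parameter are ours for illustration:

#include <cstdint>
#include <vector>

enum class VideoFrameType { kEmptyFrame, kVideoFrameKey, kVideoFrameDelta };

// A frame with an empty dependency list is independently decodable,
// hence a key frame; any listed dependency makes it a delta frame.
VideoFrameType FrameTypeFromDependencies(
    const std::vector<uint16_t>& frame_dependency_diffs) {
  return frame_dependency_diffs.empty() ? VideoFrameType::kVideoFrameKey
                                        : VideoFrameType::kVideoFrameDelta;
}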
|
@ -111,7 +111,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
|
|||
size_t payload_size,
|
||||
const RTPHeader& rtp_header,
|
||||
const RTPVideoHeader& video_header,
|
||||
FrameType frame_type,
|
||||
VideoFrameType frame_type,
|
||||
const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
|
||||
bool is_recovered);
|
||||
|
||||
|
|
|
@ -213,8 +213,8 @@ TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrame) {
|
|||
data.size());
|
||||
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
data.data(), data.size(), rtp_header, video_header, kVideoFrameKey,
|
||||
absl::nullopt, false);
|
||||
data.data(), data.size(), rtp_header, video_header,
|
||||
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
|
||||
}
|
||||
|
||||
TEST_F(RtpVideoStreamReceiverTest, NoInfiniteRecursionOnEncapsulatedRedPacket) {
|
||||
|
@ -273,8 +273,8 @@ TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
|
|||
EXPECT_CALL(mock_on_complete_frame_callback_,
|
||||
DoOnCompleteFrameFailBitstream(_));
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
data.data(), data.size(), rtp_header, video_header, kVideoFrameKey,
|
||||
absl::nullopt, false);
|
||||
data.data(), data.size(), rtp_header, video_header,
|
||||
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
|
||||
}
|
||||
|
||||
class RtpVideoStreamReceiverTestH264
|
||||
|
@ -301,7 +301,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
|
|||
sps_data.size());
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
sps_data.data(), sps_data.size(), rtp_header, sps_video_header,
|
||||
kEmptyFrame, absl::nullopt, false);
|
||||
VideoFrameType::kEmptyFrame, absl::nullopt, false);
|
||||
|
||||
std::vector<uint8_t> pps_data;
|
||||
RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
|
||||
|
@ -314,7 +314,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
|
|||
pps_data.size());
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
pps_data.data(), pps_data.size(), rtp_header, pps_video_header,
|
||||
kEmptyFrame, absl::nullopt, false);
|
||||
VideoFrameType::kEmptyFrame, absl::nullopt, false);
|
||||
|
||||
std::vector<uint8_t> idr_data;
|
||||
RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
|
||||
|
@ -330,7 +330,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
|
|||
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
idr_data.data(), idr_data.size(), rtp_header, idr_video_header,
|
||||
kVideoFrameKey, absl::nullopt, false);
|
||||
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
|
||||
}
|
||||
|
||||
TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
|
||||
|
@ -371,8 +371,8 @@ TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
|
|||
data.size());
|
||||
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
data.data(), data.size(), rtp_header, video_header, kVideoFrameKey,
|
||||
absl::nullopt, false);
|
||||
data.data(), data.size(), rtp_header, video_header,
|
||||
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
|
||||
}
|
||||
|
||||
TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
|
||||
|
@ -390,30 +390,30 @@ TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
|
|||
|
||||
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
data.data(), data.size(), rtp_header, video_header, kVideoFrameKey,
|
||||
absl::nullopt, false);
|
||||
data.data(), data.size(), rtp_header, video_header,
|
||||
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
|
||||
|
||||
rtp_header.sequenceNumber = 3;
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
nullptr, 0, rtp_header, video_header, kVideoFrameKey, absl::nullopt,
|
||||
false);
|
||||
nullptr, 0, rtp_header, video_header, VideoFrameType::kVideoFrameKey,
|
||||
absl::nullopt, false);
|
||||
|
||||
rtp_header.sequenceNumber = 4;
|
||||
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
data.data(), data.size(), rtp_header, video_header, kVideoFrameDelta,
|
||||
absl::nullopt, false);
|
||||
data.data(), data.size(), rtp_header, video_header,
|
||||
VideoFrameType::kVideoFrameDelta, absl::nullopt, false);
|
||||
|
||||
rtp_header.sequenceNumber = 6;
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
data.data(), data.size(), rtp_header, video_header, kVideoFrameDelta,
|
||||
absl::nullopt, false);
|
||||
data.data(), data.size(), rtp_header, video_header,
|
||||
VideoFrameType::kVideoFrameDelta, absl::nullopt, false);
|
||||
|
||||
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
|
||||
rtp_header.sequenceNumber = 5;
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
nullptr, 0, rtp_header, video_header, kVideoFrameDelta, absl::nullopt,
|
||||
false);
|
||||
nullptr, 0, rtp_header, video_header, VideoFrameType::kVideoFrameDelta,
|
||||
absl::nullopt, false);
|
||||
}
|
||||
|
||||
TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
|
||||
|
@ -427,8 +427,8 @@ TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
|
|||
|
||||
EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
|
||||
rtp_video_stream_receiver_->OnReceivedPayloadData(
|
||||
data.data(), data.size(), rtp_header, video_header, kVideoFrameDelta,
|
||||
absl::nullopt, false);
|
||||
data.data(), data.size(), rtp_header, video_header,
|
||||
VideoFrameType::kVideoFrameDelta, absl::nullopt, false);
|
||||
}
|
||||
|
||||
TEST_F(RtpVideoStreamReceiverTest, SecondarySinksGetRtpNotifications) {
|
||||
|
|
|
@ -927,7 +927,7 @@ void SendStatisticsProxy::OnSendEncodedImage(
}

uma_container_->key_frame_counter_.Add(encoded_image._frameType ==
kVideoFrameKey);
VideoFrameType::kVideoFrameKey);

if (encoded_image.qp_ != -1) {
if (!stats_.qp_sum)
@ -598,7 +598,7 @@ EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage(
// will need to do some translation to produce reference info using frame
// ids.
std::vector<int64_t> referenced_frame_ids;
if (encoded_image._frameType != kVideoFrameKey) {
if (encoded_image._frameType != VideoFrameType::kVideoFrameKey) {
RTC_DCHECK_GT(frame_id, 0);
referenced_frame_ids.push_back(frame_id - 1);
}
@ -463,7 +463,7 @@ VideoStreamEncoder::VideoStreamEncoder(
force_disable_frame_dropper_(false),
input_framerate_(kFrameRateAvergingWindowSizeMs, 1000),
pending_frame_drops_(0),
next_frame_types_(1, kVideoFrameDelta),
next_frame_types_(1, VideoFrameType::kVideoFrameDelta),
frame_encoder_timer_(this),
experiment_groups_(GetExperimentGroups()),
encoder_queue_(task_queue_factory->CreateTaskQueue(

@ -728,7 +728,7 @@ void VideoStreamEncoder::ReconfigureEncoder() {
next_frame_types_.clear();
next_frame_types_.resize(
std::max(static_cast<int>(codec.numberOfSimulcastStreams), 1),
kVideoFrameKey);
VideoFrameType::kVideoFrameKey);
RTC_LOG(LS_VERBOSE) << " max bitrate " << codec.maxBitrate
<< " start bitrate " << codec.startBitrate
<< " max frame rate " << codec.maxFramerate

@ -1291,7 +1291,7 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
}

for (auto& it : next_frame_types_) {
it = kVideoFrameDelta;
it = VideoFrameType::kVideoFrameDelta;
}
}

@ -1303,7 +1303,7 @@ void VideoStreamEncoder::SendKeyFrame() {
RTC_DCHECK_RUN_ON(&encoder_queue_);
TRACE_EVENT0("webrtc", "OnKeyFrameRequest");
RTC_DCHECK(!next_frame_types_.empty());
next_frame_types_[0] = kVideoFrameKey;
next_frame_types_[0] = VideoFrameType::kVideoFrameKey;
if (HasInternalSource()) {
// Try to request the frame if we have an external encoder with
// internal source since AddVideoFrame never will be called.

@ -1322,7 +1322,7 @@ void VideoStreamEncoder::SendKeyFrame() {
.build(),
&next_frame_types_) == WEBRTC_VIDEO_CODEC_OK) {
// Try to remove just-performed keyframe request, if stream still exists.
next_frame_types_[0] = kVideoFrameDelta;
next_frame_types_[0] = VideoFrameType::kVideoFrameDelta;
}
}
}
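Taken together, the VideoStreamEncoder hunks above implement a small per-stream state machine: next_frame_types_ holds one entry per simulcast stream, reconfiguration resets every entry to key, SendKeyFrame() flips the first entry to key, and a successful encode resets every entry back to delta. A compressed sketch of that bookkeeping under stated assumptions (simplified; the real class guards this state with its encoder task queue):

#include <algorithm>
#include <cstddef>
#include <vector>

enum class VideoFrameType { kEmptyFrame, kVideoFrameKey, kVideoFrameDelta };

// Hypothetical class for illustration, not the real VideoStreamEncoder.
class FrameTypeTracker {
 public:
  explicit FrameTypeTracker(size_t num_streams)
      : next_frame_types_(num_streams, VideoFrameType::kVideoFrameDelta) {}

  // Mirrors ReconfigureEncoder(): a reconfigured encoder starts with
  // key frames on all streams.
  void OnReconfigure(size_t num_streams) {
    next_frame_types_.assign(std::max<size_t>(num_streams, 1),
                             VideoFrameType::kVideoFrameKey);
  }

  // Mirrors SendKeyFrame(): only the first stream is forced to key.
  void RequestKeyFrame() {
    next_frame_types_[0] = VideoFrameType::kVideoFrameKey;
  }

  // Mirrors the reset in EncodeVideoFrame() after a successful encode.
  void OnFrameEncoded() {
    std::fill(next_frame_types_.begin(), next_frame_types_.end(),
              VideoFrameType::kVideoFrameDelta);
  }

  const std::vector<VideoFrameType>& next_frame_types() const {
    return next_frame_types_;
  }

 private:
  std::vector<VideoFrameType> next_frame_types_;
};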
@ -612,7 +612,8 @@ class VideoStreamEncoderTest : public ::testing::Test {

void InjectFrame(const VideoFrame& input_image, bool keyframe) {
const std::vector<VideoFrameType> frame_type = {
keyframe ? kVideoFrameKey : kVideoFrameDelta};
keyframe ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta};
{
rtc::CritScope lock(&local_crit_sect_);
last_frame_types_ = frame_type;

@ -3597,21 +3598,24 @@ TEST_F(VideoStreamEncoderTest, SetsFrameTypes) {
// First frame is always keyframe.
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
WaitForEncodedFrame(1);
EXPECT_THAT(fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{kVideoFrameKey}));
EXPECT_THAT(
fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{VideoFrameType::kVideoFrameKey}));

// Insert delta frame.
video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
WaitForEncodedFrame(2);
EXPECT_THAT(fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{kVideoFrameDelta}));
EXPECT_THAT(
fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{VideoFrameType::kVideoFrameDelta}));

// Request next frame be a key-frame.
video_stream_encoder_->SendKeyFrame();
video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
WaitForEncodedFrame(3);
EXPECT_THAT(fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{kVideoFrameKey}));
EXPECT_THAT(
fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{VideoFrameType::kVideoFrameKey}));

video_stream_encoder_->Stop();
}

@ -3628,15 +3632,17 @@ TEST_F(VideoStreamEncoderTest, SetsFrameTypesSimulcast) {
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
WaitForEncodedFrame(1);
EXPECT_THAT(fake_encoder_.LastFrameTypes(),
testing::ElementsAreArray(
{kVideoFrameKey, kVideoFrameKey, kVideoFrameKey}));
testing::ElementsAreArray({VideoFrameType::kVideoFrameKey,
VideoFrameType::kVideoFrameKey,
VideoFrameType::kVideoFrameKey}));

// Insert delta frame.
video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
WaitForEncodedFrame(2);
EXPECT_THAT(fake_encoder_.LastFrameTypes(),
testing::ElementsAreArray(
{kVideoFrameDelta, kVideoFrameDelta, kVideoFrameDelta}));
testing::ElementsAreArray({VideoFrameType::kVideoFrameDelta,
VideoFrameType::kVideoFrameDelta,
VideoFrameType::kVideoFrameDelta}));

// Request next frame be a key-frame.
// Only first stream is configured to produce key-frame.

@ -3644,8 +3650,9 @@ TEST_F(VideoStreamEncoderTest, SetsFrameTypesSimulcast) {
video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
WaitForEncodedFrame(3);
EXPECT_THAT(fake_encoder_.LastFrameTypes(),
testing::ElementsAreArray(
{kVideoFrameKey, kVideoFrameDelta, kVideoFrameDelta}));
testing::ElementsAreArray({VideoFrameType::kVideoFrameKey,
VideoFrameType::kVideoFrameDelta,
VideoFrameType::kVideoFrameDelta}));

video_stream_encoder_->Stop();
}

@ -3661,24 +3668,28 @@ TEST_F(VideoStreamEncoderTest, RequestKeyframeInternalSource) {
// callback in VideoStreamEncoder is called despite no OnFrame().
fake_encoder_.InjectFrame(CreateFrame(1, nullptr), true);
EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs));
EXPECT_THAT(fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{kVideoFrameKey}));
EXPECT_THAT(
fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{VideoFrameType::kVideoFrameKey}));

const std::vector<VideoFrameType> kDeltaFrame = {kVideoFrameDelta};
const std::vector<VideoFrameType> kDeltaFrame = {
VideoFrameType::kVideoFrameDelta};
// Need to set timestamp manually since manually for injected frame.
VideoFrame frame = CreateFrame(101, nullptr);
frame.set_timestamp(101);
fake_encoder_.InjectFrame(frame, false);
EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs));
EXPECT_THAT(fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{kVideoFrameDelta}));
EXPECT_THAT(
fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{VideoFrameType::kVideoFrameDelta}));

// Request key-frame. The forces a dummy frame down into the encoder.
fake_encoder_.ExpectNullFrame();
video_stream_encoder_->SendKeyFrame();
EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs));
EXPECT_THAT(fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{kVideoFrameKey}));
EXPECT_THAT(
fake_encoder_.LastFrameTypes(),
testing::ElementsAre(VideoFrameType{VideoFrameType::kVideoFrameKey}));

video_stream_encoder_->Stop();
}