Remove RTPVideoHeader::vp8() accessors.

Bug: none
Change-Id: Ia7d65148fb36a8f26647bee8a876ce7217ff8a68
Reviewed-on: https://webrtc-review.googlesource.com/93321
Reviewed-by: Niels Möller <nisse@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24626}
This commit is contained in:
philipel 2018-09-06 13:20:09 +02:00 committed by Commit Bot
parent 5e007b77f1
commit af7afc6642
19 changed files with 237 additions and 185 deletions

View file

@ -27,11 +27,12 @@ void PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info,
rtp->codec = info.codecType;
switch (info.codecType) {
case kVideoCodecVP8: {
rtp->vp8().InitRTPVideoHeaderVP8();
rtp->vp8().nonReference = info.codecSpecific.VP8.nonReference;
rtp->vp8().temporalIdx = info.codecSpecific.VP8.temporalIdx;
rtp->vp8().layerSync = info.codecSpecific.VP8.layerSync;
rtp->vp8().keyIdx = info.codecSpecific.VP8.keyIdx;
auto& vp8_header = rtp->video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.InitRTPVideoHeaderVP8();
vp8_header.nonReference = info.codecSpecific.VP8.nonReference;
vp8_header.temporalIdx = info.codecSpecific.VP8.temporalIdx;
vp8_header.layerSync = info.codecSpecific.VP8.layerSync;
vp8_header.keyIdx = info.codecSpecific.VP8.keyIdx;
rtp->simulcastIdx = spatial_index.value_or(0);
return;
}
@ -171,13 +172,15 @@ void RtpPayloadParams::SetCodecSpecific(RTPVideoHeader* rtp_video_header,
state_.picture_id = (static_cast<uint16_t>(state_.picture_id) + 1) & 0x7FFF;
}
if (rtp_video_header->codec == kVideoCodecVP8) {
rtp_video_header->vp8().pictureId = state_.picture_id;
auto& vp8_header =
absl::get<RTPVideoHeaderVP8>(rtp_video_header->video_type_header);
vp8_header.pictureId = state_.picture_id;
if (rtp_video_header->vp8().temporalIdx != kNoTemporalIdx) {
if (rtp_video_header->vp8().temporalIdx == 0) {
if (vp8_header.temporalIdx != kNoTemporalIdx) {
if (vp8_header.temporalIdx == 0) {
++state_.tl0_pic_idx;
}
rtp_video_header->vp8().tl0PicIdx = state_.tl0_pic_idx;
vp8_header.tl0PicIdx = state_.tl0_pic_idx;
}
}
if (rtp_video_header->codec == kVideoCodecVP9) {

View file

@ -61,12 +61,14 @@ TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp8) {
EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
EXPECT_EQ(1, header.simulcastIdx);
EXPECT_EQ(kVideoCodecVP8, header.codec);
EXPECT_EQ(kPictureId + 2, header.vp8().pictureId);
EXPECT_EQ(kTemporalIdx, header.vp8().temporalIdx);
EXPECT_EQ(kTl0PicIdx + 1, header.vp8().tl0PicIdx);
EXPECT_EQ(kNoKeyIdx, header.vp8().keyIdx);
EXPECT_TRUE(header.vp8().layerSync);
EXPECT_TRUE(header.vp8().nonReference);
const auto& vp8_header =
absl::get<RTPVideoHeaderVP8>(header.video_type_header);
EXPECT_EQ(kPictureId + 2, vp8_header.pictureId);
EXPECT_EQ(kTemporalIdx, vp8_header.temporalIdx);
EXPECT_EQ(kTl0PicIdx + 1, vp8_header.tl0PicIdx);
EXPECT_EQ(kNoKeyIdx, vp8_header.keyIdx);
EXPECT_TRUE(vp8_header.layerSync);
EXPECT_TRUE(vp8_header.nonReference);
}
TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) {
@ -157,7 +159,8 @@ TEST(RtpPayloadParamsTest, PictureIdIsSetForVp8) {
RTPVideoHeader header =
params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
EXPECT_EQ(kVideoCodecVP8, header.codec);
EXPECT_EQ(kInitialPictureId1 + 1, header.vp8().pictureId);
EXPECT_EQ(kInitialPictureId1 + 1,
absl::get<RTPVideoHeaderVP8>(header.video_type_header).pictureId);
// State should hold latest used picture id and tl0_pic_idx.
state = params.state();
@ -180,7 +183,8 @@ TEST(RtpPayloadParamsTest, PictureIdWraps) {
RTPVideoHeader header =
params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
EXPECT_EQ(kVideoCodecVP8, header.codec);
EXPECT_EQ(0, header.vp8().pictureId);
EXPECT_EQ(0,
absl::get<RTPVideoHeaderVP8>(header.video_type_header).pictureId);
// State should hold latest used picture id and tl0_pic_idx.
EXPECT_EQ(0, params.state().picture_id); // Wrapped.
@ -205,16 +209,18 @@ TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp8) {
params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
EXPECT_EQ(kVideoCodecVP8, header.codec);
EXPECT_EQ(kInitialPictureId1 + 1, header.vp8().pictureId);
EXPECT_EQ(kInitialTl0PicIdx1, header.vp8().tl0PicIdx);
const auto& vp8_header =
absl::get<RTPVideoHeaderVP8>(header.video_type_header);
EXPECT_EQ(kInitialPictureId1 + 1, vp8_header.pictureId);
EXPECT_EQ(kInitialTl0PicIdx1, vp8_header.tl0PicIdx);
// OnEncodedImage, temporalIdx: 0.
codec_info.codecSpecific.VP8.temporalIdx = 0;
header = params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
EXPECT_EQ(kVideoCodecVP8, header.codec);
EXPECT_EQ(kInitialPictureId1 + 2, header.vp8().pictureId);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, header.vp8().tl0PicIdx);
EXPECT_EQ(kInitialPictureId1 + 2, vp8_header.pictureId);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, vp8_header.tl0PicIdx);
// State should hold latest used picture id and tl0_pic_idx.
EXPECT_EQ(kInitialPictureId1 + 2, params.state().picture_id);

View file

@ -318,16 +318,16 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
beginning_of_partition && (partition_id == 0);
parsed_payload->video_header().simulcastIdx = 0;
parsed_payload->video_header().codec = kVideoCodecVP8;
parsed_payload->video_header().vp8().nonReference =
(*payload_data & 0x20) ? true : false; // N bit
parsed_payload->video_header().vp8().partitionId = partition_id;
parsed_payload->video_header().vp8().beginningOfPartition =
beginning_of_partition;
parsed_payload->video_header().vp8().pictureId = kNoPictureId;
parsed_payload->video_header().vp8().tl0PicIdx = kNoTl0PicIdx;
parsed_payload->video_header().vp8().temporalIdx = kNoTemporalIdx;
parsed_payload->video_header().vp8().layerSync = false;
parsed_payload->video_header().vp8().keyIdx = kNoKeyIdx;
auto& vp8_header = parsed_payload->video_header()
.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.nonReference = (*payload_data & 0x20) ? true : false; // N bit
vp8_header.partitionId = partition_id;
vp8_header.beginningOfPartition = beginning_of_partition;
vp8_header.pictureId = kNoPictureId;
vp8_header.tl0PicIdx = kNoTl0PicIdx;
vp8_header.temporalIdx = kNoTemporalIdx;
vp8_header.layerSync = false;
vp8_header.keyIdx = kNoKeyIdx;
if (partition_id > 8) {
// Weak check for corrupt payload_data: PartID MUST NOT be larger than 8.
@ -344,8 +344,7 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
if (extension) {
const int parsed_bytes =
ParseVP8Extension(&parsed_payload->video_header().vp8(), payload_data,
payload_data_length);
ParseVP8Extension(&vp8_header, payload_data, payload_data_length);
if (parsed_bytes < 0)
return false;
payload_data += parsed_bytes;

View file

@ -57,9 +57,11 @@ constexpr RtpPacketizer::PayloadSizeLimits kNoSizeLimits;
// +-+-+-+-+-+-+-+-+
void VerifyBasicHeader(RTPVideoHeader* header, bool N, bool S, int part_id) {
ASSERT_TRUE(header != NULL);
EXPECT_EQ(N, header->vp8().nonReference);
EXPECT_EQ(S, header->vp8().beginningOfPartition);
EXPECT_EQ(part_id, header->vp8().partitionId);
const auto& vp8_header =
absl::get<RTPVideoHeaderVP8>(header->video_type_header);
EXPECT_EQ(N, vp8_header.nonReference);
EXPECT_EQ(S, vp8_header.beginningOfPartition);
EXPECT_EQ(part_id, vp8_header.partitionId);
}
void VerifyExtensions(RTPVideoHeader* header,
@ -68,10 +70,12 @@ void VerifyExtensions(RTPVideoHeader* header,
uint8_t temporal_idx, /* T */
int key_idx /* K */) {
ASSERT_TRUE(header != NULL);
EXPECT_EQ(picture_id, header->vp8().pictureId);
EXPECT_EQ(tl0_pic_idx, header->vp8().tl0PicIdx);
EXPECT_EQ(temporal_idx, header->vp8().temporalIdx);
EXPECT_EQ(key_idx, header->vp8().keyIdx);
const auto& vp8_header =
absl::get<RTPVideoHeaderVP8>(header->video_type_header);
EXPECT_EQ(picture_id, vp8_header.pictureId);
EXPECT_EQ(tl0_pic_idx, vp8_header.tl0PicIdx);
EXPECT_EQ(temporal_idx, vp8_header.temporalIdx);
EXPECT_EQ(key_idx, vp8_header.keyIdx);
}
} // namespace
@ -268,7 +272,9 @@ TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx, 2,
kNoKeyIdx);
EXPECT_FALSE(payload.video_header().vp8().layerSync);
EXPECT_FALSE(
absl::get<RTPVideoHeaderVP8>(payload.video_header().video_type_header)
.layerSync);
}
TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
@ -351,7 +357,10 @@ TEST_F(RtpDepacketizerVp8Test, TestWithPacketizer) {
VerifyExtensions(&payload.video_header(), input_header.pictureId,
input_header.tl0PicIdx, input_header.temporalIdx,
input_header.keyIdx);
EXPECT_EQ(payload.video_header().vp8().layerSync, input_header.layerSync);
EXPECT_EQ(
absl::get<RTPVideoHeaderVP8>(payload.video_header().video_type_header)
.layerSync,
input_header.layerSync);
}
TEST_F(RtpDepacketizerVp8Test, TestEmptyPayload) {

View file

@ -240,7 +240,7 @@ class RtpRtcpImplTest : public ::testing::Test {
rtp_video_header.is_first_packet_in_frame = true;
rtp_video_header.simulcastIdx = 0;
rtp_video_header.codec = kVideoCodecVP8;
rtp_video_header.vp8() = vp8_header;
rtp_video_header.video_type_header = vp8_header;
rtp_video_header.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};
const uint8_t payload[100] = {0};

View file

@ -1842,7 +1842,8 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8BaseLayer) {
RTPVideoHeader header;
header.codec = kVideoCodecVP8;
header.vp8().temporalIdx = 0;
auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.temporalIdx = 0;
EXPECT_EQ(kDontRetransmit,
rtp_sender_video_->GetStorageType(
@ -1874,8 +1875,9 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8HigherLayers) {
RTPVideoHeader header;
header.codec = kVideoCodecVP8;
auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
header.vp8().temporalIdx = tid;
vp8_header.temporalIdx = tid;
EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kRetransmitOff,
@ -1938,8 +1940,9 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
(RTPSenderVideo::kTLRateWindowSizeMs + (kFrameIntervalMs / 2)) /
kFrameIntervalMs;
constexpr int kPattern[] = {0, 2, 1, 2};
auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
header.vp8().temporalIdx = kPattern[i % arraysize(kPattern)];
vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)];
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs);
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
}
@ -1948,7 +1951,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
// right now. We will wait at most one expected retransmission time before
// acknowledging that it did not arrive, which means this frame and the next
// will not be retransmitted.
header.vp8().temporalIdx = 1;
vp8_header.temporalIdx = 1;
EXPECT_EQ(StorageType::kDontRetransmit,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
@ -1964,7 +1967,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
// Insert a frame for TL2. We just had frame in TL1, so the next one there is
// in three frames away. TL0 is still too far in the past. So, allow
// retransmission.
header.vp8().temporalIdx = 2;
vp8_header.temporalIdx = 2;
EXPECT_EQ(StorageType::kAllowRetransmission,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
@ -1995,8 +1998,9 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
(RTPSenderVideo::kTLRateWindowSizeMs + (kFrameIntervalMs / 2)) /
kFrameIntervalMs;
constexpr int kPattern[] = {0, 2, 2, 2};
auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
header.vp8().temporalIdx = kPattern[i % arraysize(kPattern)];
vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)];
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs);
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
@ -2007,7 +2011,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
// we don't store for retransmission because we expect a frame in a lower
// layer, but that last frame in TL1 was a long time ago in absolute terms,
// so allow retransmission anyway.
header.vp8().temporalIdx = 1;
vp8_header.temporalIdx = 1;
EXPECT_EQ(StorageType::kAllowRetransmission,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
}

View file

@ -466,15 +466,15 @@ StorageType RTPSenderVideo::GetStorageType(
}
uint8_t RTPSenderVideo::GetTemporalId(const RTPVideoHeader& header) {
switch (header.codec) {
case kVideoCodecVP8:
return header.vp8().temporalIdx;
case kVideoCodecVP9:
return absl::get<RTPVideoHeaderVP9>(header.video_type_header)
.temporal_idx;
default:
return kNoTemporalIdx;
}
struct TemporalIdGetter {
uint8_t operator()(const RTPVideoHeaderVP8& vp8) { return vp8.temporalIdx; }
uint8_t operator()(const RTPVideoHeaderVP9& vp9) {
return vp9.temporal_idx;
}
uint8_t operator()(const RTPVideoHeaderH264&) { return kNoTemporalIdx; }
uint8_t operator()(const absl::monostate&) { return kNoTemporalIdx; }
};
return absl::visit(TemporalIdGetter(), header.video_type_header);
}
bool RTPSenderVideo::UpdateConditionalRetransmit(

View file

@ -21,8 +21,10 @@
#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
namespace webrtc {
using RTPVideoTypeHeader =
absl::variant<RTPVideoHeaderVP8, RTPVideoHeaderVP9, RTPVideoHeaderH264>;
using RTPVideoTypeHeader = absl::variant<absl::monostate,
RTPVideoHeaderVP8,
RTPVideoHeaderVP9,
RTPVideoHeaderH264>;
struct RTPVideoHeader {
struct GenericDescriptorInfo {
@ -42,21 +44,6 @@ struct RTPVideoHeader {
~RTPVideoHeader();
// TODO(philipel): Remove when downstream projects have been updated.
RTPVideoHeaderVP8& vp8() {
if (!absl::holds_alternative<RTPVideoHeaderVP8>(video_type_header))
video_type_header.emplace<RTPVideoHeaderVP8>();
return absl::get<RTPVideoHeaderVP8>(video_type_header);
}
// TODO(philipel): Remove when downstream projects have been updated.
const RTPVideoHeaderVP8& vp8() const {
if (!absl::holds_alternative<RTPVideoHeaderVP8>(video_type_header))
video_type_header.emplace<RTPVideoHeaderVP8>();
return absl::get<RTPVideoHeaderVP8>(video_type_header);
}
absl::optional<GenericDescriptorInfo> generic;
uint16_t width = 0;
@ -69,8 +56,7 @@ struct RTPVideoHeader {
PlayoutDelay playout_delay;
VideoSendTiming video_timing;
// TODO(philipel): remove mutable when downstream projects have been updated.
mutable RTPVideoTypeHeader video_type_header;
RTPVideoTypeHeader video_type_header;
};
} // namespace webrtc

View file

@ -37,7 +37,9 @@ TEST(TestDecodingState, FrameContinuity) {
packet.seqNum = 0xffff;
packet.frameType = kVideoFrameDelta;
packet.video_header.codec = kVideoCodecVP8;
packet.video_header.vp8().pictureId = 0x007F;
auto& vp8_header =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.pictureId = 0x007F;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -53,17 +55,17 @@ TEST(TestDecodingState, FrameContinuity) {
packet.frameType = kVideoFrameDelta;
// Use pictureId
packet.is_first_packet_in_frame = false;
packet.video_header.vp8().pictureId = 0x0002;
vp8_header.pictureId = 0x0002;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
frame.Reset();
packet.video_header.vp8().pictureId = 0;
vp8_header.pictureId = 0;
packet.seqNum = 10;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Use sequence numbers.
packet.video_header.vp8().pictureId = kNoPictureId;
vp8_header.pictureId = kNoPictureId;
frame.Reset();
packet.seqNum = dec_state.sequence_num() - 1u;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -82,9 +84,9 @@ TEST(TestDecodingState, FrameContinuity) {
// Insert packet with temporal info.
dec_state.Reset();
frame.Reset();
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 0;
packet.seqNum = 1;
packet.timestamp = 1;
EXPECT_TRUE(dec_state.full_sync());
@ -93,9 +95,9 @@ TEST(TestDecodingState, FrameContinuity) {
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
// 1 layer up - still good.
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 1;
packet.video_header.vp8().pictureId = 1;
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 1;
vp8_header.pictureId = 1;
packet.seqNum = 2;
packet.timestamp = 2;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -104,18 +106,18 @@ TEST(TestDecodingState, FrameContinuity) {
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
// Lost non-base layer packet => should update sync parameter.
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 3;
packet.video_header.vp8().pictureId = 3;
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 3;
vp8_header.pictureId = 3;
packet.seqNum = 4;
packet.timestamp = 4;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
// Now insert the next non-base layer (belonging to a next tl0PicId).
frame.Reset();
packet.video_header.vp8().tl0PicIdx = 1;
packet.video_header.vp8().temporalIdx = 2;
packet.video_header.vp8().pictureId = 4;
vp8_header.tl0PicIdx = 1;
vp8_header.temporalIdx = 2;
vp8_header.pictureId = 4;
packet.seqNum = 5;
packet.timestamp = 5;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -125,9 +127,9 @@ TEST(TestDecodingState, FrameContinuity) {
EXPECT_TRUE(dec_state.full_sync());
// Next base layer (dropped interim non-base layers) - should update sync.
frame.Reset();
packet.video_header.vp8().tl0PicIdx = 1;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 5;
vp8_header.tl0PicIdx = 1;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 5;
packet.seqNum = 6;
packet.timestamp = 6;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -137,18 +139,18 @@ TEST(TestDecodingState, FrameContinuity) {
// Check wrap for temporal layers.
frame.Reset();
packet.video_header.vp8().tl0PicIdx = 0x00FF;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 6;
vp8_header.tl0PicIdx = 0x00FF;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 6;
packet.seqNum = 7;
packet.timestamp = 7;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_FALSE(dec_state.full_sync());
frame.Reset();
packet.video_header.vp8().tl0PicIdx = 0x0000;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 7;
vp8_header.tl0PicIdx = 0x0000;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 7;
packet.seqNum = 8;
packet.timestamp = 8;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -214,9 +216,11 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.video_header.codec = kVideoCodecVP8;
packet.timestamp = 0;
packet.seqNum = 0;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
auto& vp8_header =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -226,9 +230,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
frame.Reset();
packet.timestamp = 1;
packet.seqNum = 1;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 1;
packet.video_header.vp8().pictureId = 1;
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 1;
vp8_header.pictureId = 1;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -238,9 +242,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
frame.Reset();
packet.timestamp = 3;
packet.seqNum = 3;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 3;
packet.video_header.vp8().pictureId = 3;
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 3;
vp8_header.pictureId = 3;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -249,9 +253,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
frame.Reset();
packet.timestamp = 4;
packet.seqNum = 4;
packet.video_header.vp8().tl0PicIdx = 1;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 4;
vp8_header.tl0PicIdx = 1;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 4;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -263,9 +267,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.is_first_packet_in_frame = 1;
packet.timestamp = 5;
packet.seqNum = 5;
packet.video_header.vp8().tl0PicIdx = 2;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 5;
vp8_header.tl0PicIdx = 2;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 5;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -276,9 +280,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.frameType = kVideoFrameDelta;
packet.timestamp = 6;
packet.seqNum = 6;
packet.video_header.vp8().tl0PicIdx = 3;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 6;
vp8_header.tl0PicIdx = 3;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 6;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -287,9 +291,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.is_first_packet_in_frame = 1;
packet.timestamp = 8;
packet.seqNum = 8;
packet.video_header.vp8().tl0PicIdx = 4;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 8;
vp8_header.tl0PicIdx = 4;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 8;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -302,10 +306,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.is_first_packet_in_frame = 1;
packet.timestamp = 9;
packet.seqNum = 9;
packet.video_header.vp8().tl0PicIdx = 4;
packet.video_header.vp8().temporalIdx = 2;
packet.video_header.vp8().pictureId = 9;
packet.video_header.vp8().layerSync = true;
vp8_header.tl0PicIdx = 4;
vp8_header.temporalIdx = 2;
vp8_header.pictureId = 9;
vp8_header.layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -323,10 +327,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.markerBit = 1;
packet.timestamp = 0;
packet.seqNum = 0;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
packet.video_header.vp8().layerSync = false;
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 0;
vp8_header.layerSync = false;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -337,10 +341,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.markerBit = 0;
packet.timestamp = 1;
packet.seqNum = 1;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 2;
packet.video_header.vp8().pictureId = 1;
packet.video_header.vp8().layerSync = true;
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 2;
vp8_header.pictureId = 1;
vp8_header.layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Layer 1
@ -350,10 +354,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.markerBit = 1;
packet.timestamp = 2;
packet.seqNum = 3;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 1;
packet.video_header.vp8().pictureId = 2;
packet.video_header.vp8().layerSync = true;
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 1;
vp8_header.pictureId = 2;
vp8_header.layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -368,9 +372,11 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
packet.video_header.codec = kVideoCodecVP8;
packet.timestamp = 0;
packet.seqNum = 0;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
auto& vp8_header =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -384,8 +390,8 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
packet.frameType = kVideoFrameDelta;
packet.timestamp += 3000;
++packet.seqNum;
packet.video_header.vp8().temporalIdx = 1;
packet.video_header.vp8().pictureId = 2;
vp8_header.temporalIdx = 1;
vp8_header.pictureId = 2;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -421,9 +427,11 @@ TEST(TestDecodingState, PictureIdRepeat) {
packet.video_header.codec = kVideoCodecVP8;
packet.timestamp = 0;
packet.seqNum = 0;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
auto& vp8_header =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.tl0PicIdx = 0;
vp8_header.temporalIdx = 0;
vp8_header.pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -433,15 +441,15 @@ TEST(TestDecodingState, PictureIdRepeat) {
frame.Reset();
++packet.timestamp;
++packet.seqNum;
packet.video_header.vp8().temporalIdx++;
packet.video_header.vp8().pictureId++;
vp8_header.temporalIdx++;
vp8_header.pictureId++;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
frame.Reset();
// Testing only gap in tl0PicIdx when tl0PicIdx in continuous.
packet.video_header.vp8().tl0PicIdx += 3;
packet.video_header.vp8().temporalIdx++;
packet.video_header.vp8().tl0PicIdx = 1;
vp8_header.tl0PicIdx += 3;
vp8_header.temporalIdx++;
vp8_header.tl0PicIdx = 1;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
}

View file

@ -56,6 +56,8 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
if (header) {
switch (header->codec) {
case kVideoCodecVP8: {
const auto& vp8_header =
absl::get<RTPVideoHeaderVP8>(header->video_type_header);
if (_codecSpecificInfo.codecType != kVideoCodecVP8) {
// This is the first packet for this frame.
_codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
@ -64,15 +66,14 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
_codecSpecificInfo.codecType = kVideoCodecVP8;
}
_codecSpecificInfo.codecSpecific.VP8.nonReference =
header->vp8().nonReference;
if (header->vp8().temporalIdx != kNoTemporalIdx) {
vp8_header.nonReference;
if (vp8_header.temporalIdx != kNoTemporalIdx) {
_codecSpecificInfo.codecSpecific.VP8.temporalIdx =
header->vp8().temporalIdx;
_codecSpecificInfo.codecSpecific.VP8.layerSync =
header->vp8().layerSync;
vp8_header.temporalIdx;
_codecSpecificInfo.codecSpecific.VP8.layerSync = vp8_header.layerSync;
}
if (header->vp8().keyIdx != kNoKeyIdx) {
_codecSpecificInfo.codecSpecific.VP8.keyIdx = header->vp8().keyIdx;
if (vp8_header.keyIdx != kNoKeyIdx) {
_codecSpecificInfo.codecSpecific.VP8.keyIdx = vp8_header.keyIdx;
}
break;
}

View file

@ -108,10 +108,12 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
packet.seqNum = seq_num_start;
packet.markerBit = (seq_num_start == seq_num_end);
packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
packet.video_header.vp8().pictureId = pid % (1 << 15);
packet.video_header.vp8().temporalIdx = tid;
packet.video_header.vp8().tl0PicIdx = tl0;
packet.video_header.vp8().layerSync = sync;
auto& vp8_header =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.pictureId = pid % (1 << 15);
vp8_header.temporalIdx = tid;
vp8_header.tl0PicIdx = tl0;
vp8_header.layerSync = sync;
ref_packet_buffer_->InsertPacket(&packet);
if (seq_num_start != seq_num_end) {

View file

@ -62,7 +62,9 @@ int VCMSessionInfo::PictureId() const {
if (packets_.empty())
return kNoPictureId;
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.vp8().pictureId;
return absl::get<RTPVideoHeaderVP8>(
packets_.front().video_header.video_type_header)
.pictureId;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
@ -76,7 +78,9 @@ int VCMSessionInfo::TemporalId() const {
if (packets_.empty())
return kNoTemporalIdx;
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.vp8().temporalIdx;
return absl::get<RTPVideoHeaderVP8>(
packets_.front().video_header.video_type_header)
.temporalIdx;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
@ -90,7 +94,9 @@ bool VCMSessionInfo::LayerSync() const {
if (packets_.empty())
return false;
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.vp8().layerSync;
return absl::get<RTPVideoHeaderVP8>(
packets_.front().video_header.video_type_header)
.layerSync;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
@ -104,7 +110,9 @@ int VCMSessionInfo::Tl0PicId() const {
if (packets_.empty())
return kNoTl0PicIdx;
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.vp8().tl0PicIdx;
return absl::get<RTPVideoHeaderVP8>(
packets_.front().video_header.video_type_header)
.tl0PicIdx;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return absl::get<RTPVideoHeaderVP9>(
packets_.front().video_header.video_type_header)
@ -347,7 +355,8 @@ size_t VCMSessionInfo::DeletePacketData(PacketIterator start,
VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning(
PacketIterator it) const {
while (it != packets_.end()) {
if ((*it).video_header.vp8().beginningOfPartition) {
if (absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
.beginningOfPartition) {
return it;
}
++it;
@ -359,10 +368,16 @@ VCMSessionInfo::PacketIterator VCMSessionInfo::FindPartitionEnd(
PacketIterator it) const {
assert((*it).codec == kVideoCodecVP8);
PacketIterator prev_it = it;
const int partition_id = (*it).video_header.vp8().partitionId;
const int partition_id =
absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
.partitionId;
while (it != packets_.end()) {
bool beginning = (*it).video_header.vp8().beginningOfPartition;
int current_partition_id = (*it).video_header.vp8().partitionId;
bool beginning =
absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
.beginningOfPartition;
int current_partition_id =
absl::get<RTPVideoHeaderVP8>((*it).video_header.video_type_header)
.partitionId;
bool packet_loss_found = (!beginning && !InSequence(it, prev_it));
if (packet_loss_found ||
(beginning && current_partition_id != partition_id)) {

View file

@ -129,6 +129,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
header.header.ssrc = 1;
header.header.headerLength = 12;
header.video_header().codec = kVideoCodecVP8;
header.video_header().video_type_header.emplace<RTPVideoHeaderVP8>();
// Insert one video frame to get one frame decoded.
header.frameType = kVideoFrameKey;
header.video_header().is_first_packet_in_frame = true;
@ -180,8 +181,10 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
header.header.ssrc = 1;
header.header.headerLength = 12;
header.video_header().codec = kVideoCodecVP8;
header.video_header().vp8().pictureId = -1;
header.video_header().vp8().tl0PicIdx = -1;
auto& vp8_header =
header.video.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.pictureId = -1;
vp8_header.tl0PicIdx = -1;
for (int i = 0; i < 3; ++i) {
// Insert 2 video frames.
for (int j = 0; j < 2; ++j) {

View file

@ -24,7 +24,8 @@ ConfigurableFrameSizeEncoder::ConfigurableFrameSizeEncoder(
: callback_(NULL),
max_frame_size_(max_frame_size),
current_frame_size_(max_frame_size),
buffer_(new uint8_t[max_frame_size]) {
buffer_(new uint8_t[max_frame_size]),
codec_type_(kVideoCodecGeneric) {
memset(buffer_.get(), 0, max_frame_size);
}
@ -50,8 +51,8 @@ int32_t ConfigurableFrameSizeEncoder::Encode(
encodedImage.SetTimestamp(inputImage.timestamp());
encodedImage.capture_time_ms_ = inputImage.render_time_ms();
RTPFragmentationHeader* fragmentation = NULL;
CodecSpecificInfo specific;
memset(&specific, 0, sizeof(specific));
CodecSpecificInfo specific{};
specific.codecType = codec_type_;
callback_->OnEncodedImage(encodedImage, &specific, fragmentation);
return WEBRTC_VIDEO_CODEC_OK;
@ -84,5 +85,9 @@ int32_t ConfigurableFrameSizeEncoder::SetFrameSize(size_t size) {
return WEBRTC_VIDEO_CODEC_OK;
}
// Sets the codec type that will be stamped on the CodecSpecificInfo
// (specific.codecType) handed to the EncodedImageCallback on subsequent
// Encode() calls. Lets tests exercise codec-specific packetization paths
// (e.g. VP8) instead of the generic default.
void ConfigurableFrameSizeEncoder::SetCodecType(VideoCodecType codec_type) {
  codec_type_ = codec_type;
}
} // namespace test
} // namespace webrtc

View file

@ -44,11 +44,14 @@ class ConfigurableFrameSizeEncoder : public VideoEncoder {
int32_t SetFrameSize(size_t size);
void SetCodecType(VideoCodecType codec_type_);
private:
EncodedImageCallback* callback_;
const size_t max_frame_size_;
size_t current_frame_size_;
std::unique_ptr<uint8_t[]> buffer_;
VideoCodecType codec_type_;
};
} // namespace test

View file

@ -103,7 +103,9 @@ bool LayerFilteringTransport::SendRtp(const uint8_t* packet,
bool end_of_frame;
if (is_vp8) {
temporal_idx = parsed_payload.video_header().vp8().temporalIdx;
temporal_idx = absl::get<RTPVideoHeaderVP8>(
parsed_payload.video_header().video_type_header)
.temporalIdx;
spatial_idx = kNoSpatialIdx;
num_active_spatial_layers_ = 1;
non_ref_for_inter_layer_pred = false;

View file

@ -100,11 +100,14 @@ class PictureIdObserver : public test::RtpRtcpObserver {
&parsed_payload, &packet[header.headerLength], payload_length));
switch (codec_type_) {
case kVideoCodecVP8:
parsed->picture_id = parsed_payload.video_header().vp8().pictureId;
parsed->tl0_pic_idx = parsed_payload.video_header().vp8().tl0PicIdx;
parsed->temporal_idx = parsed_payload.video_header().vp8().temporalIdx;
case kVideoCodecVP8: {
const auto& vp8_header = absl::get<RTPVideoHeaderVP8>(
parsed_payload.video_header().video_type_header);
parsed->picture_id = vp8_header.pictureId;
parsed->tl0_pic_idx = vp8_header.tl0PicIdx;
parsed->temporal_idx = vp8_header.temporalIdx;
break;
}
case kVideoCodecVP9: {
const auto& vp9_header = absl::get<RTPVideoHeaderVP9>(
parsed_payload.video_header().video_type_header);

View file

@ -420,7 +420,9 @@ bool VideoAnalyzer::IsInSelectedSpatialAndTemporalLayer(
int temporal_idx;
int spatial_idx;
if (is_vp8) {
temporal_idx = parsed_payload.video_header().vp8().temporalIdx;
temporal_idx = absl::get<RTPVideoHeaderVP8>(
parsed_payload.video_header().video_type_header)
.temporalIdx;
spatial_idx = kNoTemporalIdx;
} else {
const auto& vp9_header = absl::get<RTPVideoHeaderVP9>(

View file

@ -991,6 +991,8 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
// Fragmentation required, this test doesn't make sense without it.
encoder_.SetFrameSize(start_size);
RTC_DCHECK_GT(stop_size, max_packet_size);
if (!test_generic_packetization_)
encoder_.SetCodecType(kVideoCodecVP8);
}
private:
@ -1127,6 +1129,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
if (!test_generic_packetization_)
send_config->rtp.payload_name = "VP8";
send_config->encoder_settings.encoder_factory = &encoder_factory_;
send_config->rtp.max_packet_size = kMaxPacketSize;
send_config->post_encode_callback = this;
@ -2176,8 +2179,6 @@ TEST_P(VideoSendStreamTest, VideoSendStreamUpdateActiveSimulcastLayers) {
sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
GetVideoSendConfig()->encoder_settings.encoder_factory = &encoder_factory;
GetVideoSendConfig()->rtp.payload_name = "VP8";
CreateVideoStreams();
// Inject a frame, to force encoder creation.