Add plumbing of RtpPacketInfos to each VideoFrame as input for SourceTracker.

This change adds the plumbing of RtpPacketInfo from RtpVideoStreamReceiver::OnRtpPacket() to VideoReceiveStream::OnFrame() for video. It is a step towards replacing the non-spec-compliant ContributingSources, which updates itself at packet-receive time, with the spec-compliant SourceTracker, which will update itself at frame-delivery-to-track time.

Bug: webrtc:10668
Change-Id: Ib97d430530c5a8487d3b129936c7c51e118889bd
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/139891
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Niels Möller <nisse@webrtc.org>
Commit-Queue: Chen Xing <chxg@google.com>
Cr-Commit-Position: refs/heads/master@{#28332}
This commit is contained in:
Chen Xing 2019-06-20 10:05:55 +02:00 committed by Commit Bot
parent 7953ad5dab
commit f00bf42d1c
30 changed files with 187 additions and 29 deletions

View file

@ -13,8 +13,10 @@
#include <stdint.h>
#include <map>
#include <utility>
#include "absl/types/optional.h"
#include "api/rtp_packet_infos.h"
#include "api/scoped_refptr.h"
#include "api/video/color_space.h"
#include "api/video/video_codec_constants.h"
@ -114,6 +116,11 @@ class RTC_EXPORT EncodedImage {
color_space_ = color_space;
}
const RtpPacketInfos& PacketInfos() const { return packet_infos_; }
void SetPacketInfos(RtpPacketInfos packet_infos) {
packet_infos_ = std::move(packet_infos);
}
bool RetransmissionAllowed() const { return retransmission_allowed_; }
void SetRetransmissionAllowed(bool retransmission_allowed) {
retransmission_allowed_ = retransmission_allowed;
@ -210,6 +217,11 @@ class RTC_EXPORT EncodedImage {
absl::optional<int> spatial_index_;
std::map<int, size_t> spatial_layer_frame_size_bytes_;
absl::optional<webrtc::ColorSpace> color_space_;
// Information about packets used to assemble this video frame. This is needed
// by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
// MediaStreamTrack, in order to implement getContributingSources(). See:
// https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
RtpPacketInfos packet_infos_;
bool retransmission_allowed_ = true;
};

View file

@ -11,6 +11,7 @@
#include "api/video/video_frame.h"
#include <algorithm>
#include <utility>
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
@ -66,7 +67,8 @@ VideoFrame::Builder::~Builder() = default;
VideoFrame VideoFrame::Builder::build() {
RTC_CHECK(video_frame_buffer_ != nullptr);
return VideoFrame(id_, video_frame_buffer_, timestamp_us_, timestamp_rtp_,
ntp_time_ms_, rotation_, color_space_, update_rect_);
ntp_time_ms_, rotation_, color_space_, update_rect_,
packet_infos_);
}
VideoFrame::Builder& VideoFrame::Builder::set_video_frame_buffer(
@ -127,6 +129,12 @@ VideoFrame::Builder& VideoFrame::Builder::set_update_rect(
return *this;
}
VideoFrame::Builder& VideoFrame::Builder::set_packet_infos(
RtpPacketInfos packet_infos) {
packet_infos_ = std::move(packet_infos);
return *this;
}
VideoFrame::VideoFrame(const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
webrtc::VideoRotation rotation,
int64_t timestamp_us)
@ -157,7 +165,8 @@ VideoFrame::VideoFrame(uint16_t id,
int64_t ntp_time_ms,
VideoRotation rotation,
const absl::optional<ColorSpace>& color_space,
const absl::optional<UpdateRect>& update_rect)
const absl::optional<UpdateRect>& update_rect,
RtpPacketInfos packet_infos)
: id_(id),
video_frame_buffer_(buffer),
timestamp_rtp_(timestamp_rtp),
@ -166,7 +175,8 @@ VideoFrame::VideoFrame(uint16_t id,
rotation_(rotation),
color_space_(color_space),
update_rect_(update_rect.value_or(UpdateRect{
0, 0, video_frame_buffer_->width(), video_frame_buffer_->height()})) {
0, 0, video_frame_buffer_->width(), video_frame_buffer_->height()})),
packet_infos_(std::move(packet_infos)) {
RTC_DCHECK_GE(update_rect_.offset_x, 0);
RTC_DCHECK_GE(update_rect_.offset_y, 0);
RTC_DCHECK_LE(update_rect_.offset_x + update_rect_.width, width());

View file

@ -12,8 +12,10 @@
#define API_VIDEO_VIDEO_FRAME_H_
#include <stdint.h>
#include <utility>
#include "absl/types/optional.h"
#include "api/rtp_packet_infos.h"
#include "api/scoped_refptr.h"
#include "api/video/color_space.h"
#include "api/video/hdr_metadata.h"
@ -62,6 +64,7 @@ class RTC_EXPORT VideoFrame {
Builder& set_color_space(const ColorSpace* color_space);
Builder& set_id(uint16_t id);
Builder& set_update_rect(const UpdateRect& update_rect);
Builder& set_packet_infos(RtpPacketInfos packet_infos);
private:
uint16_t id_ = 0;
@ -72,6 +75,7 @@ class RTC_EXPORT VideoFrame {
VideoRotation rotation_ = kVideoRotation_0;
absl::optional<ColorSpace> color_space_;
absl::optional<UpdateRect> update_rect_;
RtpPacketInfos packet_infos_;
};
// To be deprecated. Migrate all use to Builder.
@ -181,6 +185,13 @@ class RTC_EXPORT VideoFrame {
update_rect_ = update_rect;
}
// Get information about packets used to assemble this video frame. Might be
// empty if the information isn't available.
const RtpPacketInfos& packet_infos() const { return packet_infos_; }
void set_packet_infos(RtpPacketInfos value) {
packet_infos_ = std::move(value);
}
private:
VideoFrame(uint16_t id,
const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
@ -189,7 +200,8 @@ class RTC_EXPORT VideoFrame {
int64_t ntp_time_ms,
VideoRotation rotation,
const absl::optional<ColorSpace>& color_space,
const absl::optional<UpdateRect>& update_rect);
const absl::optional<UpdateRect>& update_rect,
RtpPacketInfos packet_infos);
uint16_t id_;
// An opaque reference counted handle that stores the pixel data.
@ -202,6 +214,11 @@ class RTC_EXPORT VideoFrame {
// Updated since the last frame area. Unless set explicitly, will always be
// a full frame rectangle.
UpdateRect update_rect_;
// Information about packets used to assemble this video frame. This is needed
// by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
// MediaStreamTrack, in order to implement getContributingSources(). See:
// https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
RtpPacketInfos packet_infos_;
};
} // namespace webrtc

View file

@ -9,6 +9,8 @@
*/
#include "common_video/test/utilities.h"
#include <utility>
namespace webrtc {
HdrMetadata CreateTestHdrMetadata() {
@ -39,4 +41,8 @@ ColorSpace CreateTestColorSpace(bool with_hdr_metadata) {
with_hdr_metadata ? &hdr_metadata : nullptr);
}
RtpPacketInfos CreatePacketInfos(size_t count) {
return RtpPacketInfos(RtpPacketInfos::vector_type(count));
}
} // namespace webrtc

View file

@ -11,12 +11,16 @@
#ifndef COMMON_VIDEO_TEST_UTILITIES_H_
#define COMMON_VIDEO_TEST_UTILITIES_H_
#include <initializer_list>
#include "api/rtp_packet_infos.h"
#include "api/video/color_space.h"
namespace webrtc {
HdrMetadata CreateTestHdrMetadata();
ColorSpace CreateTestColorSpace(bool with_hdr_metadata);
RtpPacketInfos CreatePacketInfos(size_t count);
} // namespace webrtc
#endif // COMMON_VIDEO_TEST_UTILITIES_H_

View file

@ -30,6 +30,7 @@ void FakeVideoRenderer::OnFrame(const webrtc::VideoFrame& frame) {
timestamp_us_ = frame.timestamp_us();
ntp_timestamp_ms_ = frame.ntp_time_ms();
color_space_ = frame.color_space();
packet_infos_ = frame.packet_infos();
frame_rendered_event_.Set();
}

View file

@ -71,6 +71,11 @@ class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
return color_space_;
}
webrtc::RtpPacketInfos packet_infos() const {
rtc::CritScope cs(&crit_);
return packet_infos_;
}
bool WaitForRenderedFrame(int64_t timeout_ms);
private:
@ -138,6 +143,7 @@ class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
rtc::CriticalSection crit_;
rtc::Event frame_rendered_event_;
absl::optional<webrtc::ColorSpace> color_space_;
webrtc::RtpPacketInfos packet_infos_;
};
} // namespace cricket

View file

@ -262,6 +262,7 @@ void MultiplexDecoderAdapter::MergeAlphaImages(
.set_timestamp_us(0)
.set_rotation(decoded_image->rotation())
.set_id(decoded_image->id())
.set_packet_infos(decoded_image->packet_infos())
.build();
decoded_complete_callback_->Decoded(merged_image, decode_time_ms, qp);
}

View file

@ -205,6 +205,7 @@ int MultiplexEncoderAdapter::Encode(
.set_timestamp_ms(input_image.render_time_ms())
.set_rotation(input_image.rotation())
.set_id(input_image.id())
.set_packet_infos(input_image.packet_infos())
.build();
rv = encoders_[kAXXStream]->Encode(alpha_image, &adjusted_frame_types);
return rv;

View file

@ -54,9 +54,11 @@ class VCMEncodedFrame : protected EncodedImage {
using EncodedImage::ColorSpace;
using EncodedImage::data;
using EncodedImage::PacketInfos;
using EncodedImage::set_size;
using EncodedImage::SetColorSpace;
using EncodedImage::SetEncodedData;
using EncodedImage::SetPacketInfos;
using EncodedImage::SetSpatialIndex;
using EncodedImage::SetSpatialLayerFrameSize;
using EncodedImage::SetTimestamp;

View file

@ -11,6 +11,7 @@
#include "modules/video_coding/frame_object.h"
#include <string.h>
#include <utility>
#include "api/video/encoded_image.h"
#include "api/video/video_timing.h"
@ -28,7 +29,8 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
size_t frame_size,
int times_nacked,
int64_t first_packet_received_time,
int64_t last_packet_received_time)
int64_t last_packet_received_time,
RtpPacketInfos packet_infos)
: packet_buffer_(packet_buffer),
first_seq_num_(first_seq_num),
last_seq_num_(last_seq_num),
@ -63,6 +65,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
// EncodedFrame members
SetTimestamp(first_packet->timestamp);
SetPacketInfos(std::move(packet_infos));
VCMPacket* last_packet = packet_buffer_->GetPacket(last_seq_num);
RTC_CHECK(last_packet);

View file

@ -29,7 +29,8 @@ class RtpFrameObject : public EncodedFrame {
size_t frame_size,
int times_nacked,
int64_t first_packet_received_time,
int64_t last_packet_received_time);
int64_t last_packet_received_time,
RtpPacketInfos packet_infos);
~RtpFrameObject() override;
uint16_t first_seq_num() const;

View file

@ -84,6 +84,7 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
if (frameInfo->color_space) {
decodedImage.set_color_space(frameInfo->color_space);
}
decodedImage.set_packet_infos(frameInfo->packet_infos);
decodedImage.set_rotation(frameInfo->rotation);
const int64_t now_ms = _clock->TimeInMilliseconds();
@ -211,6 +212,7 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
} else {
_frameInfos[_nextFrameInfoIdx].color_space = absl::nullopt;
}
_frameInfos[_nextFrameInfoIdx].packet_infos = frame.PacketInfos();
// Set correctly only for key frames. Thus, use latest key frame
// content type. If the corresponding key frame was lost, decode will fail

View file

@ -36,6 +36,7 @@ struct VCMFrameInformation {
EncodedImage::Timing timing;
int64_t ntp_time_ms;
absl::optional<ColorSpace> color_space;
RtpPacketInfos packet_infos;
};
class VCMDecodedFrameCallback : public DecodedImageCallback {

View file

@ -122,5 +122,31 @@ TEST_F(GenericDecoderTest, PassesColorSpaceForDelayedDecoders) {
EXPECT_EQ(*decoded_color_space, color_space);
}
TEST_F(GenericDecoderTest, PassesPacketInfos) {
RtpPacketInfos packet_infos = CreatePacketInfos(3);
VCMEncodedFrame encoded_frame;
encoded_frame.SetPacketInfos(packet_infos);
generic_decoder_.Decode(encoded_frame, clock_.TimeInMilliseconds());
absl::optional<VideoFrame> decoded_frame = user_callback_.WaitForFrame(10);
ASSERT_TRUE(decoded_frame.has_value());
EXPECT_EQ(decoded_frame->packet_infos().size(), 3U);
}
TEST_F(GenericDecoderTest, PassesPacketInfosForDelayedDecoders) {
RtpPacketInfos packet_infos = CreatePacketInfos(3);
decoder_.SetDelayedDecoding(100);
{
// Ensure the original frame is destroyed before the decoding is completed.
VCMEncodedFrame encoded_frame;
encoded_frame.SetPacketInfos(packet_infos);
generic_decoder_.Decode(encoded_frame, clock_.TimeInMilliseconds());
}
absl::optional<VideoFrame> decoded_frame = user_callback_.WaitForFrame(200);
ASSERT_TRUE(decoded_frame.has_value());
EXPECT_EQ(decoded_frame->packet_infos().size(), 3U);
}
} // namespace video_coding
} // namespace webrtc

View file

@ -67,7 +67,8 @@ class TestBasicJitterBuffer : public ::testing::Test {
video_header.is_first_packet_in_frame = true;
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_.reset(new VCMPacket(data_, size_, rtp_header, video_header,
/*ntp_time_ms=*/0));
/*ntp_time_ms=*/0,
clock_->TimeInMilliseconds()));
}
VCMEncodedFrame* DecodeCompleteFrame() {
@ -542,7 +543,7 @@ TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
video_header.codec = kVideoCodecGeneric;
video_header.frame_type = VideoFrameType::kEmptyFrame;
VCMPacket empty_packet(data_, 0, rtp_header, video_header,
/*ntp_time_ms=*/0);
/*ntp_time_ms=*/0, clock_->TimeInMilliseconds());
EXPECT_EQ(kOldPacket,
jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
empty_packet.seqNum += 1;

View file

@ -25,8 +25,7 @@ VCMPacket::VCMPacket()
timesNacked(-1),
completeNALU(kNaluUnset),
insertStartCode(false),
video_header(),
receive_time_ms(0) {
video_header() {
video_header.playout_delay = {-1, -1};
}
@ -34,7 +33,8 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
size_t size,
const RTPHeader& rtp_header,
const RTPVideoHeader& videoHeader,
int64_t ntp_time_ms)
int64_t ntp_time_ms,
int64_t receive_time_ms)
: payloadType(rtp_header.payloadType),
timestamp(rtp_header.timestamp),
ntp_time_ms_(ntp_time_ms),
@ -46,7 +46,8 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
completeNALU(kNaluIncomplete),
insertStartCode(videoHeader.codec == kVideoCodecH264 &&
videoHeader.is_first_packet_in_frame),
video_header(videoHeader) {
video_header(videoHeader),
packet_info(rtp_header, receive_time_ms) {
if (is_first_packet_in_frame() && markerBit) {
completeNALU = kNaluComplete;
} else if (is_first_packet_in_frame()) {

View file

@ -16,6 +16,7 @@
#include "absl/types/optional.h"
#include "api/rtp_headers.h"
#include "api/rtp_packet_info.h"
#include "api/video/video_frame_type.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"
@ -39,7 +40,8 @@ class VCMPacket {
size_t size,
const RTPHeader& rtp_header,
const RTPVideoHeader& video_header,
int64_t ntp_time_ms);
int64_t ntp_time_ms,
int64_t receive_time_ms);
~VCMPacket();
@ -70,7 +72,7 @@ class VCMPacket {
RTPVideoHeader video_header;
absl::optional<RtpGenericFrameDescriptor> generic_descriptor;
int64_t receive_time_ms;
RtpPacketInfo packet_info;
};
} // namespace webrtc

View file

@ -286,8 +286,9 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
size_t frame_size = 0;
int max_nack_count = -1;
uint16_t start_seq_num = seq_num;
int64_t min_recv_time = data_buffer_[index].receive_time_ms;
int64_t max_recv_time = data_buffer_[index].receive_time_ms;
int64_t min_recv_time = data_buffer_[index].packet_info.receive_time_ms();
int64_t max_recv_time = data_buffer_[index].packet_info.receive_time_ms();
RtpPacketInfos::vector_type packet_infos;
// Find the start index by searching backward until the packet with
// the |frame_begin| flag is set.
@ -310,9 +311,16 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
sequence_buffer_[start_index].frame_created = true;
min_recv_time =
std::min(min_recv_time, data_buffer_[start_index].receive_time_ms);
std::min(min_recv_time,
data_buffer_[start_index].packet_info.receive_time_ms());
max_recv_time =
std::max(max_recv_time, data_buffer_[start_index].receive_time_ms);
std::max(max_recv_time,
data_buffer_[start_index].packet_info.receive_time_ms());
// Should use |push_front()| since the loop traverses backwards. But
// it's too inefficient to do so on a vector so we'll instead fix the
// order afterwards.
packet_infos.push_back(data_buffer_[start_index].packet_info);
if (!is_h264 && sequence_buffer_[start_index].frame_begin)
break;
@ -359,6 +367,9 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
--start_seq_num;
}
// Fix the order since the packet-finding loop traverses backwards.
std::reverse(packet_infos.begin(), packet_infos.end());
if (is_h264) {
// Warn if this is an unsafe frame.
if (has_h264_idr && (!has_h264_sps || !has_h264_pps)) {
@ -406,7 +417,8 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
found_frames.emplace_back(
new RtpFrameObject(this, start_seq_num, seq_num, frame_size,
max_nack_count, min_recv_time, max_recv_time));
max_nack_count, min_recv_time, max_recv_time,
RtpPacketInfos(std::move(packet_infos))));
}
++seq_num;
}

View file

@ -92,7 +92,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
ref_packet_buffer_->InsertPacket(&packet);
std::unique_ptr<RtpFrameObject> frame(new RtpFrameObject(
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0));
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0, {}));
reference_finder_->ManageFrame(std::move(frame));
}
@ -126,7 +126,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
}
std::unique_ptr<RtpFrameObject> frame(new RtpFrameObject(
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0));
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0, {}));
reference_finder_->ManageFrame(std::move(frame));
}
@ -172,7 +172,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
}
std::unique_ptr<RtpFrameObject> frame(new RtpFrameObject(
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0));
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0, {}));
reference_finder_->ManageFrame(std::move(frame));
}
@ -213,7 +213,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
}
std::unique_ptr<RtpFrameObject> frame(new RtpFrameObject(
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0));
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0, {}));
reference_finder_->ManageFrame(std::move(frame));
}
@ -243,7 +243,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
}
std::unique_ptr<RtpFrameObject> frame(new RtpFrameObject(
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0));
ref_packet_buffer_, seq_num_start, seq_num_end, 0, 0, 0, 0, {}));
reference_finder_->ManageFrame(std::move(frame));
}

View file

@ -338,7 +338,8 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
}
// Callers don't provide any ntp time.
const VCMPacket packet(incomingPayload, payloadLength, rtp_header,
video_header, /*ntp_time_ms=*/0);
video_header, /*ntp_time_ms=*/0,
clock_->TimeInMilliseconds());
int32_t ret = _receiver.InsertPacket(packet);
// TODO(holmer): Investigate if this somehow should use the key frame

View file

@ -551,6 +551,7 @@ webrtc_fuzzer_test("rtp_frame_reference_finder_fuzzer") {
"rtp_frame_reference_finder_fuzzer.cc",
]
deps = [
"../../api:rtp_packet_info",
"../../api:scoped_refptr",
"../../modules/video_coding/",
"../../system_wrappers",

View file

@ -11,6 +11,7 @@
#include "modules/video_coding/rtp_frame_reference_finder.h"
#include "absl/memory/memory.h"
#include "api/rtp_packet_infos.h"
#include "modules/video_coding/frame_object.h"
#include "modules/video_coding/packet_buffer.h"
#include "system_wrappers/include/clock.h"
@ -124,7 +125,8 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
while (reader.MoreToRead()) {
auto frame = absl::make_unique<video_coding::RtpFrameObject>(
pb, reader.GetNum<uint16_t>(), reader.GetNum<uint16_t>(), 0, 0, 0, 0);
pb, reader.GetNum<uint16_t>(), reader.GetNum<uint16_t>(), 0, 0, 0, 0,
RtpPacketInfos());
reference_finder.ManageFrame(std::move(frame));
}
}

View file

@ -107,7 +107,7 @@ class BufferedFrameDecryptorTest
return std::unique_ptr<video_coding::RtpFrameObject>(
new video_coding::RtpFrameObject(fake_packet_buffer_.get(), seq_num_,
seq_num_, 0, 0, 0, 0));
seq_num_, 0, 0, 0, 0, {}));
}
protected:

View file

@ -97,6 +97,7 @@ void FrameEncodeMetadataWriter::OnEncodeStarted(const VideoFrame& frame) {
metadata.timestamp_us = frame.timestamp_us();
metadata.rotation = frame.rotation();
metadata.color_space = frame.color_space();
metadata.packet_infos = frame.packet_infos();
for (size_t si = 0; si < num_spatial_layers; ++si) {
RTC_DCHECK(timing_frames_info_[si].frames.empty() ||
rtc::TimeDiff(
@ -278,6 +279,7 @@ FrameEncodeMetadataWriter::ExtractEncodeStartTimeAndFillMetadata(
encoded_image->ntp_time_ms_ = metadata_list->front().ntp_time_ms;
encoded_image->rotation_ = metadata_list->front().rotation;
encoded_image->SetColorSpace(metadata_list->front().color_space);
encoded_image->SetPacketInfos(metadata_list->front().packet_infos);
metadata_list->pop_front();
} else {
++reordered_frames_logged_messages_;

View file

@ -60,6 +60,7 @@ class FrameEncodeMetadataWriter {
int64_t timestamp_us = 0;
VideoRotation rotation = kVideoRotation_0;
absl::optional<ColorSpace> color_space;
RtpPacketInfos packet_infos;
};
struct TimingFramesLayerInfo {
TimingFramesLayerInfo();

View file

@ -435,6 +435,31 @@ TEST(FrameEncodeMetadataWriterTest, CopiesColorSpace) {
EXPECT_EQ(color_space, *image.ColorSpace());
}
TEST(FrameEncodeMetadataWriterTest, CopiesPacketInfos) {
EncodedImage image;
const int64_t kTimestampMs = 123456;
FakeEncodedImageCallback sink;
FrameEncodeMetadataWriter encode_timer(&sink);
encode_timer.OnEncoderInit(VideoCodec(), false);
// Any non-zero bitrate needed to be set before the first frame.
VideoBitrateAllocation bitrate_allocation;
bitrate_allocation.SetBitrate(0, 0, 500000);
encode_timer.OnSetRates(bitrate_allocation, 30);
RtpPacketInfos packet_infos = CreatePacketInfos(3);
image.SetTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
VideoFrame frame = VideoFrame::Builder()
.set_timestamp_ms(kTimestampMs)
.set_timestamp_rtp(kTimestampMs * 90)
.set_packet_infos(packet_infos)
.set_video_frame_buffer(kFrameBuffer)
.build();
encode_timer.OnEncodeStarted(frame);
encode_timer.FillTimingInfo(0, &image);
EXPECT_EQ(image.PacketInfos().size(), 3U);
}
TEST(FrameEncodeMetadataWriterTest, DoesNotRewriteBitstreamWithoutCodecInfo) {
uint8_t buffer[] = {1, 2, 3};
EncodedImage image(buffer, sizeof(buffer), sizeof(buffer));

View file

@ -327,7 +327,8 @@ int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
bool is_recovered) {
VCMPacket packet(payload_data, payload_size, rtp_header, video_header,
ntp_estimator_.Estimate(rtp_header.timestamp));
ntp_estimator_.Estimate(rtp_header.timestamp),
clock_->TimeInMilliseconds());
packet.generic_descriptor = generic_descriptor;
if (loss_notification_controller_) {
@ -350,7 +351,6 @@ int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
} else {
packet.timesNacked = -1;
}
packet.receive_time_ms = clock_->TimeInMilliseconds();
if (packet.sizeBytes == 0) {
NotifyReceiverOfEmptyPacket(packet.seqNum);

View file

@ -36,6 +36,7 @@
using ::testing::_;
using ::testing::Invoke;
using ::testing::SizeIs;
using ::testing::Values;
namespace webrtc {
@ -721,6 +722,7 @@ TEST_P(RtpVideoStreamReceiverGenericDescriptorTest,
EXPECT_EQ(frame->references[0], frame->id.picture_id - 90);
EXPECT_EQ(frame->references[1], frame->id.picture_id - 80);
EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
EXPECT_THAT(frame->PacketInfos(), SizeIs(1));
}));
rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
@ -785,6 +787,7 @@ TEST_P(RtpVideoStreamReceiverGenericDescriptorTest,
EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u);
EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u);
EXPECT_THAT(frame->PacketInfos(), SizeIs(2));
}));
rtp_video_stream_receiver_->OnRtpPacket(second_packet);

View file

@ -309,4 +309,18 @@ TEST_F(VideoReceiveStreamTestWithFakeDecoder, PassesColorSpace) {
EXPECT_EQ(color_space, *fake_renderer_.color_space());
}
TEST_F(VideoReceiveStreamTestWithFakeDecoder, PassesPacketInfos) {
auto test_frame = absl::make_unique<FrameObjectFake>();
test_frame->SetPayloadType(99);
test_frame->id.picture_id = 0;
RtpPacketInfos packet_infos = CreatePacketInfos(3);
test_frame->SetPacketInfos(packet_infos);
video_receive_stream_->Start();
video_receive_stream_->OnCompleteFrame(std::move(test_frame));
EXPECT_TRUE(fake_renderer_.WaitForRenderedFrame(kDefaultTimeOutMs));
EXPECT_EQ(fake_renderer_.packet_infos().size(), 3U);
}
} // namespace webrtc