Refactor to free up PacketBuffer as soon as possible

The packets belonging to a frame were kept in the PacketBuffer
until the frame was decoded. This CL removes the dependency of an
existing RtpFrameObject on the PacketBuffer, so that the buffer
slots can be freed as soon as the RtpFrameObject is created.

Bug: none
Change-Id: Ic939be91815519ae1d1c67ada82006417b2d26a3
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/149818
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Johannes Kron <kron@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28977}
Johannes Kron committed on 2019-08-26 16:37:11 +02:00 (via Commit Bot)
parent caef51e25a, commit a370556270
9 changed files with 80 additions and 107 deletions
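
As a rough sketch of the new ownership model (illustrative names, not the
actual WebRTC classes): instead of keeping a pointer back into the
PacketBuffer and releasing the packet slots only when the frame object is
destroyed, the frame copies the header data it needs while it is being
constructed, and the buffer clears the covered sequence-number interval as
soon as the frame has been assembled (the new ClearInterval() call in
PacketBuffer::FindFrames() in the diffs below).

// Minimal sketch, assuming the frame only needs copies of the header data
// and the assembled bitstream. All types here are illustrative stand-ins.
#include <cstdint>
#include <utility>
#include <vector>

struct HeaderCopy {    // stands in for RTPVideoHeader plus the optional
  int frame_type = 0;  // generic frame descriptor that RtpFrameObject
};                     // now stores by value

class FrameSketch {    // stands in for RtpFrameObject
 public:
  // The packet store is only consulted during construction; no pointer to it
  // is kept, so its slots can be recycled as soon as this object exists.
  FrameSketch(HeaderCopy header, std::vector<uint8_t> bitstream)
      : header_(header), bitstream_(std::move(bitstream)) {}

  // Accessors return references to data the frame owns itself, valid for the
  // frame's lifetime, with no lock on the packet store.
  const HeaderCopy& header() const { return header_; }
  const std::vector<uint8_t>& bitstream() const { return bitstream_; }

 private:
  HeaderCopy header_;               // copied from the first packet
  std::vector<uint8_t> bitstream_;  // copied from the buffered packets
};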

--- changed file 1 of 9 ---

@@ -32,16 +32,17 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
int64_t first_packet_received_time,
int64_t last_packet_received_time,
RtpPacketInfos packet_infos)
: packet_buffer_(packet_buffer),
first_seq_num_(first_seq_num),
: first_seq_num_(first_seq_num),
last_seq_num_(last_seq_num),
last_packet_received_time_(last_packet_received_time),
times_nacked_(times_nacked) {
VCMPacket* first_packet = packet_buffer_->GetPacket(first_seq_num);
VCMPacket* first_packet = packet_buffer->GetPacket(first_seq_num);
RTC_CHECK(first_packet);
rtp_video_header_ = first_packet->video_header;
rtp_generic_frame_descriptor_ = first_packet->generic_descriptor;
// EncodedFrame members
frame_type_ = first_packet->video_header.frame_type;
codec_type_ = first_packet->codec();
// TODO(philipel): Remove when encoded image is replaced by EncodedFrame.
@@ -59,7 +60,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
// TODO(nisse): Change GetBitstream to return the buffer?
SetEncodedData(EncodedImageBuffer::Create(frame_size));
bool bitstream_copied = packet_buffer_->GetBitstream(*this, data());
bool bitstream_copied = packet_buffer->GetBitstream(*this, data());
RTC_DCHECK(bitstream_copied);
_encodedWidth = first_packet->width();
_encodedHeight = first_packet->height();
@@ -68,7 +69,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
SetTimestamp(first_packet->timestamp);
SetPacketInfos(std::move(packet_infos));
VCMPacket* last_packet = packet_buffer_->GetPacket(last_seq_num);
VCMPacket* last_packet = packet_buffer->GetPacket(last_seq_num);
RTC_CHECK(last_packet);
RTC_CHECK(last_packet->is_last_packet_in_frame());
// http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
@@ -111,7 +112,6 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
}
RtpFrameObject::~RtpFrameObject() {
packet_buffer_->ReturnFrame(this);
}
uint16_t RtpFrameObject::first_seq_num() const {
@@ -127,7 +127,7 @@ int RtpFrameObject::times_nacked() const {
}
VideoFrameType RtpFrameObject::frame_type() const {
return frame_type_;
return rtp_video_header_.frame_type;
}
VideoCodecType RtpFrameObject::codec_type() const {
@@ -146,29 +146,17 @@ bool RtpFrameObject::delayed_by_retransmission() const {
return times_nacked() > 0;
}
absl::optional<RTPVideoHeader> RtpFrameObject::GetRtpVideoHeader() const {
rtc::CritScope lock(&packet_buffer_->crit_);
VCMPacket* packet = packet_buffer_->GetPacket(first_seq_num_);
if (!packet)
return absl::nullopt;
return packet->video_header;
const RTPVideoHeader& RtpFrameObject::GetRtpVideoHeader() const {
return rtp_video_header_;
}
absl::optional<RtpGenericFrameDescriptor>
const absl::optional<RtpGenericFrameDescriptor>&
RtpFrameObject::GetGenericFrameDescriptor() const {
rtc::CritScope lock(&packet_buffer_->crit_);
VCMPacket* packet = packet_buffer_->GetPacket(first_seq_num_);
if (!packet)
return absl::nullopt;
return packet->generic_descriptor;
return rtp_generic_frame_descriptor_;
}
absl::optional<FrameMarking> RtpFrameObject::GetFrameMarking() const {
rtc::CritScope lock(&packet_buffer_->crit_);
VCMPacket* packet = packet_buffer_->GetPacket(first_seq_num_);
if (!packet)
return absl::nullopt;
return packet->video_header.frame_marking;
const FrameMarking& RtpFrameObject::GetFrameMarking() const {
return rtp_video_header_.frame_marking;
}
} // namespace video_coding

--- changed file 2 of 9 ---

@@ -41,13 +41,14 @@ class RtpFrameObject : public EncodedFrame {
int64_t ReceivedTime() const override;
int64_t RenderTime() const override;
bool delayed_by_retransmission() const override;
absl::optional<RTPVideoHeader> GetRtpVideoHeader() const;
absl::optional<RtpGenericFrameDescriptor> GetGenericFrameDescriptor() const;
absl::optional<FrameMarking> GetFrameMarking() const;
const RTPVideoHeader& GetRtpVideoHeader() const;
const absl::optional<RtpGenericFrameDescriptor>& GetGenericFrameDescriptor()
const;
const FrameMarking& GetFrameMarking() const;
private:
rtc::scoped_refptr<PacketBuffer> packet_buffer_;
VideoFrameType frame_type_;
RTPVideoHeader rtp_video_header_;
absl::optional<RtpGenericFrameDescriptor> rtp_generic_frame_descriptor_;
VideoCodecType codec_type_;
uint16_t first_seq_num_;
uint16_t last_seq_num_;

--- changed file 3 of 9 ---

@@ -182,6 +182,23 @@ void PacketBuffer::ClearTo(uint16_t seq_num) {
}
}
void PacketBuffer::ClearInterval(uint16_t start_seq_num,
uint16_t stop_seq_num) {
size_t iterations = ForwardDiff<uint16_t>(start_seq_num, stop_seq_num + 1);
RTC_DCHECK_LE(iterations, size_);
uint16_t seq_num = start_seq_num;
for (size_t i = 0; i < iterations; ++i) {
size_t index = seq_num % size_;
RTC_DCHECK_EQ(sequence_buffer_[index].seq_num, seq_num);
RTC_DCHECK_EQ(sequence_buffer_[index].seq_num, data_buffer_[index].seqNum);
delete[] data_buffer_[index].dataPtr;
data_buffer_[index].dataPtr = nullptr;
sequence_buffer_[index].used = false;
++seq_num;
}
}
void PacketBuffer::Clear() {
rtc::CritScope lock(&crit_);
for (size_t i = 0; i < size_; ++i) {
@@ -423,33 +440,13 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
new RtpFrameObject(this, start_seq_num, seq_num, frame_size,
max_nack_count, min_recv_time, max_recv_time,
RtpPacketInfos(std::move(packet_infos))));
ClearInterval(start_seq_num, seq_num);
}
++seq_num;
}
return found_frames;
}
void PacketBuffer::ReturnFrame(RtpFrameObject* frame) {
rtc::CritScope lock(&crit_);
size_t index = frame->first_seq_num() % size_;
size_t end = (frame->last_seq_num() + 1) % size_;
uint16_t seq_num = frame->first_seq_num();
uint32_t timestamp = frame->Timestamp();
while (index != end) {
// Check both seq_num and timestamp to handle the case when seq_num wraps
// around too quickly for high packet rates.
if (sequence_buffer_[index].seq_num == seq_num &&
data_buffer_[index].timestamp == timestamp) {
delete[] data_buffer_[index].dataPtr;
data_buffer_[index].dataPtr = nullptr;
sequence_buffer_[index].used = false;
}
index = (index + 1) % size_;
++seq_num;
}
}
bool PacketBuffer::GetBitstream(const RtpFrameObject& frame,
uint8_t* destination) {
rtc::CritScope lock(&crit_);

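The new PacketBuffer::ClearInterval() above walks the ring buffer over an
inclusive sequence-number range, and the iteration count comes from a
wraparound-safe forward difference, since sequence numbers are uint16_t and
wrap around. A small self-contained illustration of that arithmetic (a local
helper, not the actual ForwardDiff used by WebRTC):

#include <cstdint>
#include <cstdio>

// Wraparound-safe forward distance between two uint16_t sequence numbers:
// unsigned subtraction is modulo 2^16, so an interval that straddles the
// wrap still yields a small positive count.
uint16_t ForwardDiffSketch(uint16_t from, uint16_t to) {
  return static_cast<uint16_t>(to - from);
}

int main() {
  const uint16_t start_seq_num = 65533;
  const uint16_t stop_seq_num = 2;  // interval 65533..2 covers 6 packets
  const uint16_t iterations =
      ForwardDiffSketch(start_seq_num, static_cast<uint16_t>(stop_seq_num + 1));
  std::printf("%u slots to clear\n", static_cast<unsigned>(iterations));  // 6
  // ClearInterval() then visits each slot as (seq_num % buffer_size) and
  // marks it unused, exactly once per packet that made up the frame.
  return 0;
}
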
--- changed file 4 of 9 ---

@@ -123,9 +123,10 @@ class PacketBuffer {
virtual VCMPacket* GetPacket(uint16_t seq_num)
RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
// Mark all slots used by |frame| as not used.
// Virtual for testing.
virtual void ReturnFrame(RtpFrameObject* frame);
// Clears the packet buffer from |start_seq_num| to |stop_seq_num| where the
// endpoints are inclusive.
void ClearInterval(uint16_t start_seq_num, uint16_t stop_seq_num)
RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
void UpdateMissingPackets(uint16_t seq_num)
RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);

--- changed file 5 of 9 ---

@@ -99,10 +99,10 @@ RtpFrameReferenceFinder::ManageFrameInternal(RtpFrameObject* frame) {
return ManageFrameH264(frame);
default: {
// Use 15 first bits of frame ID as picture ID if available.
absl::optional<RTPVideoHeader> video_header = frame->GetRtpVideoHeader();
const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
int picture_id = kNoPictureId;
if (video_header && video_header->generic)
picture_id = video_header->generic->frame_id & 0x7fff;
if (video_header.generic)
picture_id = video_header.generic->frame_id & 0x7fff;
return ManageFramePidOrSeqNum(frame, picture_id);
}
@@ -265,13 +265,8 @@ RtpFrameReferenceFinder::ManageFramePidOrSeqNum(RtpFrameObject* frame,
RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp8(
RtpFrameObject* frame) {
absl::optional<RTPVideoHeader> video_header = frame->GetRtpVideoHeader();
if (!video_header) {
RTC_LOG(LS_WARNING)
<< "Failed to get codec header from frame, dropping frame.";
return kDrop;
}
RTPVideoTypeHeader rtp_codec_header = video_header->video_type_header;
const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
RTPVideoTypeHeader rtp_codec_header = video_header.video_type_header;
const RTPVideoHeaderVP8& codec_header =
absl::get<RTPVideoHeaderVP8>(rtp_codec_header);
@@ -415,13 +410,8 @@ void RtpFrameReferenceFinder::UpdateLayerInfoVp8(RtpFrameObject* frame,
RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp9(
RtpFrameObject* frame) {
absl::optional<RTPVideoHeader> video_header = frame->GetRtpVideoHeader();
if (!video_header) {
RTC_LOG(LS_WARNING)
<< "Failed to get codec header from frame, dropping frame.";
return kDrop;
}
RTPVideoTypeHeader rtp_codec_header = video_header->video_type_header;
const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
RTPVideoTypeHeader rtp_codec_header = video_header.video_type_header;
const RTPVideoHeaderVP9& codec_header =
absl::get<RTPVideoHeaderVP9>(rtp_codec_header);
@@ -675,13 +665,10 @@ void RtpFrameReferenceFinder::UnwrapPictureIds(RtpFrameObject* frame) {
RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameH264(
RtpFrameObject* frame) {
absl::optional<FrameMarking> rtp_frame_marking = frame->GetFrameMarking();
if (!rtp_frame_marking) {
return ManageFramePidOrSeqNum(std::move(frame), kNoPictureId);
}
const FrameMarking& rtp_frame_marking = frame->GetFrameMarking();
uint8_t tid = rtp_frame_marking->temporal_id;
bool blSync = rtp_frame_marking->base_layer_sync;
uint8_t tid = rtp_frame_marking.temporal_id;
bool blSync = rtp_frame_marking.base_layer_sync;
if (tid == kNoTemporalIdx)
return ManageFramePidOrSeqNum(std::move(frame), kNoPictureId);
@@ -712,7 +699,7 @@ RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameH264(
}
}
int64_t unwrapped_tl0 = tl0_unwrapper_.Unwrap(rtp_frame_marking->tl0_pic_idx);
int64_t unwrapped_tl0 = tl0_unwrapper_.Unwrap(rtp_frame_marking.tl0_pic_idx);
// Clean up info for base layers that are too old.
int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxLayerInfo;

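Since GetRtpVideoHeader() now returns a reference, the picture-ID derivation
in ManageFrameInternal() above no longer needs an optional check; the
reference finder simply masks the generic frame id down to its 15 first bits.
A tiny worked example of that masking (the frame id value here is made up):

#include <cstdint>
#include <cstdio>

int main() {
  const int kNoPictureId = -1;       // sentinel, as in the reference finder (value assumed here)
  const int64_t frame_id = 0x1BEEF;  // hypothetical generic-descriptor frame id
  const bool has_generic = true;     // i.e. video_header.generic is present
  int picture_id = kNoPictureId;
  if (has_generic)
    picture_id = static_cast<int>(frame_id & 0x7fff);  // keep the 15 first bits
  std::printf("picture_id = %d (0x%x)\n", picture_id,
              static_cast<unsigned>(picture_id));  // 16111 (0x3eef)
  return 0;
}
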
--- changed file 6 of 9 ---

@@ -43,10 +43,6 @@ class FakePacketBuffer : public PacketBuffer {
return true;
}
void ReturnFrame(RtpFrameObject* frame) override {
packets_.erase(frame->first_seq_num());
}
private:
std::map<uint16_t, VCMPacket> packets_;
};

--- changed file 7 of 9 ---

@@ -117,8 +117,9 @@ TEST_F(TestPacketBuffer, InsertMultiplePackets) {
TEST_F(TestPacketBuffer, InsertDuplicatePacket) {
const uint16_t seq_num = Rand();
EXPECT_TRUE(Insert(seq_num, kKeyFrame, kFirst, kLast));
EXPECT_TRUE(Insert(seq_num, kKeyFrame, kFirst, kLast));
EXPECT_TRUE(Insert(seq_num, kKeyFrame, kFirst, kNotLast));
EXPECT_TRUE(Insert(seq_num, kKeyFrame, kFirst, kNotLast));
EXPECT_TRUE(Insert(seq_num + 1, kKeyFrame, kNotFirst, kLast));
}
TEST_F(TestPacketBuffer, SeqNumWrapOneFrame) {
@@ -266,9 +267,13 @@ TEST_F(TestPacketBuffer, HasHistoryOfUniqueFrames) {
TEST_F(TestPacketBuffer, ExpandBuffer) {
const uint16_t seq_num = Rand();
for (int i = 0; i < kStartSize + 1; ++i) {
EXPECT_TRUE(Insert(seq_num + i, kKeyFrame, kFirst, kLast));
}
EXPECT_TRUE(Insert(seq_num, kKeyFrame, kFirst, kNotLast));
for (int i = 1; i < kStartSize; ++i)
EXPECT_TRUE(Insert(seq_num + i, kKeyFrame, kNotFirst, kNotLast));
// Already inserted kStartSize number of packets, inserting the last packet
// should increase the buffer size and also result in an assembled frame.
EXPECT_TRUE(Insert(seq_num + kStartSize, kKeyFrame, kNotFirst, kLast));
}
TEST_F(TestPacketBuffer, SingleFrameExpandsBuffer) {
@@ -286,9 +291,13 @@ TEST_F(TestPacketBuffer, SingleFrameExpandsBuffer) {
TEST_F(TestPacketBuffer, ExpandBufferOverflow) {
const uint16_t seq_num = Rand();
for (int i = 0; i < kMaxSize; ++i)
EXPECT_TRUE(Insert(seq_num + i, kKeyFrame, kFirst, kLast));
EXPECT_FALSE(Insert(seq_num + kMaxSize + 1, kKeyFrame, kFirst, kLast));
EXPECT_TRUE(Insert(seq_num, kKeyFrame, kFirst, kNotLast));
for (int i = 1; i < kMaxSize; ++i)
EXPECT_TRUE(Insert(seq_num + i, kKeyFrame, kNotFirst, kNotLast));
// Already inserted kMaxSize number of packets, inserting the last packet
// should overflow the buffer and result in false being returned.
EXPECT_FALSE(Insert(seq_num + kMaxSize, kKeyFrame, kNotFirst, kLast));
}
TEST_F(TestPacketBuffer, OnePacketOneFrame) {
@@ -467,8 +476,8 @@ TEST_F(TestPacketBuffer, GetBitstreamOneFrameOnePacket) {
ASSERT_EQ(1UL, frames_from_callback_.size());
CheckFrame(0);
EXPECT_EQ(frames_from_callback_[0]->size(), sizeof(bitstream_data));
EXPECT_EQ(
memcmp(frames_from_callback_[0]->data(), data, sizeof(bitstream_data)),
EXPECT_EQ(memcmp(frames_from_callback_[0]->data(), bitstream_data,
sizeof(bitstream_data)),
0);
}
@@ -647,12 +656,12 @@ TEST_P(TestPacketBufferH264Parameterized, GetBitstreamBufferPadding) {
sizeof(data_data));
EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage().capacity(),
sizeof(data_data));
EXPECT_EQ(
memcmp(frames_from_callback_[seq_num]->data(), data, sizeof(data_data)),
EXPECT_EQ(memcmp(frames_from_callback_[seq_num]->data(), data_data,
sizeof(data_data)),
0);
}
TEST_F(TestPacketBuffer, FreeSlotsOnFrameDestruction) {
TEST_F(TestPacketBuffer, FreeSlotsOnFrameCreation) {
const uint16_t seq_num = Rand();
EXPECT_TRUE(Insert(seq_num, kKeyFrame, kFirst, kNotLast));
@@ -661,15 +670,15 @@ TEST_F(TestPacketBuffer, FreeSlotsOnFrameDestruction) {
EXPECT_EQ(1UL, frames_from_callback_.size());
CheckFrame(seq_num);
frames_from_callback_.clear();
// Insert frame that fills the whole buffer.
EXPECT_TRUE(Insert(seq_num + 3, kKeyFrame, kFirst, kNotLast));
for (int i = 0; i < kMaxSize - 2; ++i)
EXPECT_TRUE(Insert(seq_num + i + 4, kDeltaFrame, kNotFirst, kNotLast));
EXPECT_TRUE(Insert(seq_num + kMaxSize + 2, kKeyFrame, kNotFirst, kLast));
EXPECT_EQ(1UL, frames_from_callback_.size());
EXPECT_EQ(2UL, frames_from_callback_.size());
CheckFrame(seq_num + 3);
frames_from_callback_.clear();
}
TEST_F(TestPacketBuffer, Clear) {

--- changed file 8 of 9 ---

@@ -104,8 +104,6 @@ class FuzzyPacketBuffer : public video_coding::PacketBuffer {
return true;
}
void ReturnFrame(video_coding::RtpFrameObject* frame) override {}
private:
std::map<uint16_t, VCMPacket> packets;
VideoCodecType codec;

--- changed file 9 of 9 ---

@@ -47,10 +47,6 @@ class FakePacketBuffer : public video_coding::PacketBuffer {
return true;
}
void ReturnFrame(video_coding::RtpFrameObject* frame) override {
packets_.erase(frame->first_seq_num());
}
private:
std::map<uint16_t, VCMPacket> packets_;
};