Add Timestamp accessor methods to the EncodedImage class.

Bug: webrtc:9378
Change-Id: I59bf14f631f92f0f4e05f60d4af25641a23a53f9
Reviewed-on: https://webrtc-review.googlesource.com/82100
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Rasmus Brandt <brandtr@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23734}
parent f7789c6e89
commit f34d467b03

15 changed files with 70 additions and 82 deletions
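Summary of the change, for readers skimming the hunks below: the 90 kHz RTP timestamp previously lived in three places with three spellings: a public `timestamp` field on video_coding::EncodedFrame, a `TimeStamp()` accessor on VCMEncodedFrame, and a `timestamp_` shadow field with a `Timestamp()` override on RtpFrameObject. This commit adds one `Timestamp()`/`SetTimestamp()` pair on the shared EncodedImage base class and converts every call site to it. A minimal sketch of the new accessor pair, reduced to what the diff shows (all other EncodedImage members elided, and the visibility of the legacy `_timeStamp` member assumed for brevity):

    class EncodedImage {
     public:
      // Set frame timestamp (90kHz).
      void SetTimestamp(uint32_t timestamp) { _timeStamp = timestamp; }
      // Get frame timestamp (90kHz).
      uint32_t Timestamp() const { return _timeStamp; }

      uint32_t _timeStamp = 0;  // 90 kHz RTP timestamp (legacy-style member)
    };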
@@ -17,13 +17,5 @@ bool EncodedFrame::delayed_by_retransmission() const {
   return 0;
 }
 
-uint32_t EncodedFrame::Timestamp() const {
-  return timestamp;
-}
-
-void EncodedFrame::SetTimestamp(uint32_t rtp_timestamp) {
-  timestamp = rtp_timestamp;
-}
-
 }  // namespace video_coding
 }  // namespace webrtc
@@ -58,10 +58,6 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {
 
   virtual bool GetBitstream(uint8_t* destination) const = 0;
 
-  // The capture timestamp of this frame, using the 90 kHz RTP clock.
-  virtual uint32_t Timestamp() const;
-  virtual void SetTimestamp(uint32_t rtp_timestamp);
-
   // When this frame was received.
   virtual int64_t ReceivedTime() const = 0;
 

@@ -78,7 +74,6 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {
   bool is_keyframe() const { return num_references == 0; }
 
   VideoLayerFrameId id;
-  uint32_t timestamp = 0;
 
   // TODO(philipel): Add simple modify/access functions to prevent adding too
   // many |references|.
@@ -37,6 +37,14 @@ class EncodedImage {
   EncodedImage(const EncodedImage&);
   EncodedImage(uint8_t* buffer, size_t length, size_t size);
 
+  // TODO(nisse): Change style to timestamp(), set_timestamp(), for consistency
+  // with the VideoFrame class.
+  // Set frame timestamp (90kHz).
+  void SetTimestamp(uint32_t timestamp) { _timeStamp = timestamp; }
+
+  // Get frame timestamp (90kHz).
+  uint32_t Timestamp() const { return _timeStamp; }
+
   void SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms);
 
   uint32_t _encodedWidth = 0;
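Taken together with the deletions above, these accessors give callers a single spelling regardless of which frame type they hold. An illustrative call-site sketch (the helper function and its names are hypothetical, not part of the commit):

    // Hypothetical helper showing the post-commit call pattern.
    void CopyRtpTimestamp(const webrtc::EncodedImage& src,
                          webrtc::EncodedImage* dst) {
      // One accessor pair replaces the old timestamp field, TimeStamp(),
      // and RtpFrameObject::timestamp_ spellings.
      dst->SetTimestamp(src.Timestamp());
    }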
@@ -58,7 +58,7 @@ bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const {
   assert(frame != NULL);
   if (in_initial_state_)
     return false;
-  return !IsNewerTimestamp(frame->TimeStamp(), time_stamp_);
+  return !IsNewerTimestamp(frame->Timestamp(), time_stamp_);
 }
 
 bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {

@@ -73,7 +73,7 @@ void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
   if (!UsingFlexibleMode(frame))
     UpdateSyncState(frame);
   sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
-  time_stamp_ = frame->TimeStamp();
+  time_stamp_ = frame->Timestamp();
   picture_id_ = frame->PictureId();
   temporal_id_ = frame->TemporalId();
   tl0_pic_id_ = frame->Tl0PicId();

@@ -143,7 +143,7 @@ bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
     // Continuous empty packets or continuous frames can be dropped if we
     // advance the sequence number.
     sequence_num_ = frame->GetHighSeqNum();
-    time_stamp_ = frame->TimeStamp();
+    time_stamp_ = frame->Timestamp();
     return true;
   }
   return false;
@@ -65,10 +65,12 @@ class VCMEncodedFrame : protected EncodedImage {
    * Get frame length
    */
   size_t Length() const { return _length; }
 
   /**
-   * Get frame timestamp (90kHz)
+   * Frame RTP timestamp (90kHz)
    */
-  uint32_t TimeStamp() const { return _timeStamp; }
+  using EncodedImage::Timestamp;
+  using EncodedImage::SetTimestamp;
+
   /**
    * Get render time in milliseconds
    */
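Note the mechanism in this hunk: VCMEncodedFrame derives from EncodedImage with protected inheritance, so the new base accessors are not public by default; the using-declarations lift them into the derived class's public interface without forwarding wrappers. A self-contained sketch of the pattern (Base/Derived names are illustrative, not from the commit):

    #include <cstdint>

    class Base {
     public:
      void SetValue(uint32_t v) { value_ = v; }
      uint32_t Value() const { return value_; }

     private:
      uint32_t value_ = 0;
    };

    class Derived : protected Base {
     public:
      using Base::SetValue;  // re-exposed as public members of Derived
      using Base::Value;
    };

    int main() {
      Derived d;
      d.SetValue(90000);  // OK: public via the using-declaration
      return d.Value() == 90000 ? 0 : 1;
    }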
@@ -117,7 +117,8 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
 
       next_frame_it_ = frame_it;
       if (frame->RenderTime() == -1)
-        frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
+        frame->SetRenderTime(
+            timing_->RenderTimeMs(frame->Timestamp(), now_ms));
       wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms);
 
       // This will cause the frame buffer to prefer high framerate rather

@@ -146,7 +147,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
     if (!frame->delayed_by_retransmission()) {
       int64_t frame_delay;
 
-      if (inter_frame_delay_.CalculateDelay(frame->timestamp, &frame_delay,
+      if (inter_frame_delay_.CalculateDelay(frame->Timestamp(), &frame_delay,
                                             frame->ReceivedTime())) {
         jitter_estimator_->UpdateEstimate(frame_delay, frame->size());
       }

@@ -163,7 +164,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
     if (HasBadRenderTiming(*frame, now_ms)) {
       jitter_estimator_->Reset();
       timing_->Reset();
-      frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
+      frame->SetRenderTime(timing_->RenderTimeMs(frame->Timestamp(), now_ms));
     }
 
     UpdateJitterDelay();

@@ -177,17 +178,17 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
       const VideoLayerFrameId& frame_key = next_frame_it_->first;
 
       const bool frame_is_higher_spatial_layer_of_last_decoded_frame =
-          last_decoded_frame_timestamp_ == frame->timestamp &&
+          last_decoded_frame_timestamp_ == frame->Timestamp() &&
           last_decoded_frame_key.picture_id == frame_key.picture_id &&
           last_decoded_frame_key.spatial_layer < frame_key.spatial_layer;
 
-      if (AheadOrAt(last_decoded_frame_timestamp_, frame->timestamp) &&
+      if (AheadOrAt(last_decoded_frame_timestamp_, frame->Timestamp()) &&
          !frame_is_higher_spatial_layer_of_last_decoded_frame) {
        // TODO(brandtr): Consider clearing the entire buffer when we hit
        // these conditions.
        RTC_LOG(LS_WARNING)
            << "Frame with (timestamp:picture_id:spatial_id) ("
-            << frame->timestamp << ":" << frame->id.picture_id << ":"
+            << frame->Timestamp() << ":" << frame->id.picture_id << ":"
            << static_cast<int>(frame->id.spatial_layer) << ")"
            << " sent to decoder after frame with"
            << " (timestamp:picture_id:spatial_id) ("

@@ -198,7 +199,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
     }
 
     AdvanceLastDecodedFrame(next_frame_it_);
-    last_decoded_frame_timestamp_ = frame->timestamp;
+    last_decoded_frame_timestamp_ = frame->Timestamp();
     *frame_out = std::move(frame);
     return kFrameFound;
   }

@@ -297,7 +298,7 @@ void FrameBuffer::UpdatePlayoutDelays(const EncodedFrame& frame) {
     timing_->set_max_playout_delay(playout_delay.max_ms);
 
   if (!frame.delayed_by_retransmission())
-    timing_->IncomingTimestamp(frame.timestamp, frame.ReceivedTime());
+    timing_->IncomingTimestamp(frame.Timestamp(), frame.ReceivedTime());
 }
 
 int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {

@@ -343,7 +344,7 @@ int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
 
   if (last_decoded_frame_it_ != frames_.end() &&
       id <= last_decoded_frame_it_->first) {
-    if (AheadOf(frame->timestamp, last_decoded_frame_timestamp_) &&
+    if (AheadOf(frame->Timestamp(), last_decoded_frame_timestamp_) &&
        frame->is_keyframe()) {
       // If this frame has a newer timestamp but an earlier picture id then we
       // assume there has been a jump in the picture id due to some encoder
@@ -90,8 +90,6 @@ class FrameObjectFake : public EncodedFrame {
  public:
   bool GetBitstream(uint8_t* destination) const override { return true; }
 
-  uint32_t Timestamp() const override { return timestamp; }
-
   int64_t ReceivedTime() const override { return 0; }
 
   int64_t RenderTime() const override { return _renderTimeMs; }

@@ -165,7 +163,7 @@ class TestFrameBuffer2 : public ::testing::Test {
     std::unique_ptr<FrameObjectFake> frame(new FrameObjectFake());
     frame->id.picture_id = picture_id;
     frame->id.spatial_layer = spatial_layer;
-    frame->timestamp = ts_ms * 90;
+    frame->SetTimestamp(ts_ms * 90);
    frame->num_references = references.size();
    frame->inter_layer_predicted = inter_layer_predicted;
    for (size_t r = 0; r < references.size(); ++r)

@@ -520,7 +518,7 @@ TEST_F(TestFrameBuffer2, StatsCallback) {
   frame->SetSize(kFrameSize);
   frame->id.picture_id = pid;
   frame->id.spatial_layer = 0;
-  frame->timestamp = ts;
+  frame->SetTimestamp(ts);
   frame->num_references = 0;
   frame->inter_layer_predicted = false;
 
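Note the ts_ms * 90 conversion that the rewritten test line preserves: the RTP video clock runs at 90 kHz, so one millisecond equals 90 ticks. A self-contained sketch of that conversion (the helper name is illustrative, not from the commit):

    #include <cstdint>

    // 1 ms == 90 ticks of the 90 kHz RTP video clock; truncation to uint32_t
    // deliberately wraps modulo 2^32, matching RTP timestamp arithmetic.
    uint32_t MsToRtp90kHz(int64_t ts_ms) {
      return static_cast<uint32_t>(ts_ms * 90);
    }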
@@ -26,7 +26,6 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
     : packet_buffer_(packet_buffer),
       first_seq_num_(first_seq_num),
       last_seq_num_(last_seq_num),
-      timestamp_(0),
       received_time_(received_time),
       times_nacked_(times_nacked) {
   VCMPacket* first_packet = packet_buffer_->GetPacket(first_seq_num);

@@ -69,7 +68,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
   _encodedHeight = first_packet->height;
 
   // EncodedFrame members
-  timestamp = first_packet->timestamp;
+  SetTimestamp(first_packet->timestamp);
 
   VCMPacket* last_packet = packet_buffer_->GetPacket(last_seq_num);
   RTC_CHECK(last_packet);

@@ -140,10 +139,6 @@ bool RtpFrameObject::GetBitstream(uint8_t* destination) const {
   return packet_buffer_->GetBitstream(*this, destination);
 }
 
-uint32_t RtpFrameObject::Timestamp() const {
-  return timestamp_;
-}
-
 int64_t RtpFrameObject::ReceivedTime() const {
   return received_time_;
 }

@@ -37,7 +37,6 @@ class RtpFrameObject : public EncodedFrame {
   enum FrameType frame_type() const;
   VideoCodecType codec_type() const;
   bool GetBitstream(uint8_t* destination) const override;
-  uint32_t Timestamp() const override;
   int64_t ReceivedTime() const override;
   int64_t RenderTime() const override;
   bool delayed_by_retransmission() const override;

@@ -49,7 +48,6 @@ class RtpFrameObject : public EncodedFrame {
   VideoCodecType codec_type_;
   uint16_t first_seq_num_;
   uint16_t last_seq_num_;
-  uint32_t timestamp_;
   int64_t received_time_;
 
   // Equal to times nacked of the packet with the highet times nacked
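These RtpFrameObject hunks remove duplicate storage: the derived class had kept its own timestamp_ alongside the base-class value. A hedged, self-contained sketch (names illustrative, not from the commit) of the hazard that one inherited accessor eliminates:

    #include <cassert>
    #include <cstdint>

    struct BaseImage {
      void SetTimestamp(uint32_t ts) { timestamp_ = ts; }
      uint32_t Timestamp() const { return timestamp_; }
     private:
      uint32_t timestamp_ = 0;
    };

    struct ShadowingFrame : BaseImage {
      uint32_t shadow_timestamp = 0;  // pre-commit pattern: duplicate storage
    };

    int main() {
      ShadowingFrame f;
      f.SetTimestamp(3000);
      f.shadow_timestamp = 6000;  // the two copies can now silently disagree
      assert(f.Timestamp() != f.shadow_timestamp);
      return 0;
    }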
@@ -225,7 +225,7 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
   } else {
     _frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
   }
-  _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
+  _callback->Map(frame.Timestamp(), &_frameInfos[_nextFrameInfoIdx]);
 
   _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
   int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),

@@ -234,13 +234,13 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
   _callback->OnDecoderImplementationName(decoder_->ImplementationName());
   if (ret < WEBRTC_VIDEO_CODEC_OK) {
     RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
-                        << frame.TimeStamp() << ", error code: " << ret;
-    _callback->Pop(frame.TimeStamp());
+                        << frame.Timestamp() << ", error code: " << ret;
+    _callback->Pop(frame.Timestamp());
     return ret;
   } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
              ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
     // No output
-    _callback->Pop(frame.TimeStamp());
+    _callback->Pop(frame.Timestamp());
   }
   return ret;
 }
@@ -54,7 +54,7 @@ bool HasNonEmptyState(FrameListPair pair) {
 }
 
 void FrameList::InsertFrame(VCMFrameBuffer* frame) {
-  insert(rbegin().base(), FrameListPair(frame->TimeStamp(), frame));
+  insert(rbegin().base(), FrameListPair(frame->Timestamp(), frame));
 }
 
 VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {

@@ -110,7 +110,7 @@ void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
     }
     free_frames->push_back(oldest_frame);
     TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp",
-                         oldest_frame->TimeStamp());
+                         oldest_frame->Timestamp());
     erase(begin());
   }
 }

@@ -206,7 +206,7 @@ void Vp9SsMap::UpdateFrames(FrameList* frames) {
       continue;
     }
     SsMap::iterator ss_it;
-    if (Find(frame_it.second->TimeStamp(), &ss_it)) {
+    if (Find(frame_it.second->Timestamp(), &ss_it)) {
       if (gof_idx >= ss_it->second.num_frames_in_gof) {
         continue;  // Assume corresponding SS not yet received.
       }

@@ -522,7 +522,7 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
     }
   }
 
-  *timestamp = oldest_frame->TimeStamp();
+  *timestamp = oldest_frame->Timestamp();
   return true;
 }
 

@@ -558,7 +558,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
       // Wait for this one to get complete.
       waiting_for_completion_.frame_size = frame->Length();
       waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
-      waiting_for_completion_.timestamp = frame->TimeStamp();
+      waiting_for_completion_.timestamp = frame->Timestamp();
     }
   }
 

@@ -709,8 +709,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
       frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
 
   if (previous_state != kStateComplete) {
-    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
-                             frame->TimeStamp());
+    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->Timestamp(), "timestamp",
+                             frame->Timestamp());
   }
 
   if (buffer_state > 0) {

@@ -825,7 +825,7 @@ bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
   for (FrameList::const_iterator it = decodable_frames_.begin();
        it != decodable_frames_.end(); ++it) {
     VCMFrameBuffer* decodable_frame = it->second;
-    if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
+    if (IsNewerTimestamp(decodable_frame->Timestamp(), frame.Timestamp())) {
       break;
     }
     decoding_state.SetState(decodable_frame);

@@ -859,7 +859,7 @@ void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
        it != incomplete_frames_.end();) {
     VCMFrameBuffer* frame = it->second;
     if (IsNewerTimestamp(original_decoded_state.time_stamp(),
-                         frame->TimeStamp())) {
+                         frame->Timestamp())) {
       ++it;
       continue;
     }

@@ -941,11 +941,11 @@ int VCMJitterBuffer::NonContinuousOrIncompleteDuration() {
   if (incomplete_frames_.empty()) {
     return 0;
   }
-  uint32_t start_timestamp = incomplete_frames_.Front()->TimeStamp();
+  uint32_t start_timestamp = incomplete_frames_.Front()->Timestamp();
   if (!decodable_frames_.empty()) {
-    start_timestamp = decodable_frames_.Back()->TimeStamp();
+    start_timestamp = decodable_frames_.Back()->Timestamp();
   }
-  return incomplete_frames_.Back()->TimeStamp() - start_timestamp;
+  return incomplete_frames_.Back()->Timestamp() - start_timestamp;
 }
 
 uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber(

@@ -1178,10 +1178,10 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
   incoming_frame_count_++;
 
   if (frame.FrameType() == kVideoFrameKey) {
-    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
                             "KeyComplete");
   } else {
-    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
                             "DeltaComplete");
   }
 

@@ -1257,7 +1257,7 @@ void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
   }
   // No retransmitted frames should be a part of the jitter
   // estimate.
-  UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.TimeStamp(),
+  UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.Timestamp(),
                        frame.Length(), incomplete_frame);
 }
 
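The jitter-buffer hunks above lean on IsNewerTimestamp(), which orders RTP timestamps modulo 2^32 rather than numerically. A simplified, runnable sketch of that comparison (the real WebRTC helper also breaks the exact 0x80000000 tie; the example values come from the timestamp-wrap tests below, where 0xffffff00 must decode before 2700):

    #include <cstdint>
    #include <iostream>

    // Newer iff the forward distance is non-zero and under half the range.
    bool IsNewerTimestampSketch(uint32_t timestamp, uint32_t prev_timestamp) {
      return timestamp != prev_timestamp &&
             static_cast<uint32_t>(timestamp - prev_timestamp) < 0x80000000u;
    }

    int main() {
      std::cout << IsNewerTimestampSketch(2700u, 0xffffff00u);  // 1: wrapped forward
      std::cout << IsNewerTimestampSketch(0xffffff00u, 2700u);  // 0: older
    }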
@@ -250,7 +250,7 @@ class TestBasicJitterBuffer : public ::testing::TestWithParam<std::string>,
     VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(10);
     if (!found_frame)
       return nullptr;
-    return jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
+    return jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
   }
 
   VCMEncodedFrame* DecodeIncompleteFrame() {

@@ -405,7 +405,7 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam<std::string>,
       return false;
 
     VCMEncodedFrame* frame =
-        jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
+        jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
     bool ret = (frame != NULL);
     jitter_buffer_->ReleaseFrame(frame);
     return ret;

@@ -952,12 +952,12 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
   EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(1000U, frame_out->TimeStamp());
+  EXPECT_EQ(1000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(13000U, frame_out->TimeStamp());
+  EXPECT_EQ(13000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }

@@ -1014,7 +1014,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
   EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(3000U, frame_out->TimeStamp());
+  EXPECT_EQ(3000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_FALSE(

@@ -1022,14 +1022,14 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(6000U, frame_out->TimeStamp());
+  EXPECT_EQ(6000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(2, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(9000U, frame_out->TimeStamp());
+  EXPECT_EQ(9000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);

@@ -1105,7 +1105,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
   EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(3000U, frame_out->TimeStamp());
+  EXPECT_EQ(3000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_FALSE(

@@ -1113,7 +1113,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(6000U, frame_out->TimeStamp());
+  EXPECT_EQ(6000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);

@@ -1461,8 +1461,8 @@ TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
   uint32_t next_timestamp;
   VCMEncodedFrame* frame = jitter_buffer_->NextCompleteFrame(0);
   EXPECT_NE(frame, nullptr);
-  EXPECT_EQ(packet_->timestamp, frame->TimeStamp());
-  frame = jitter_buffer_->ExtractAndSetDecode(frame->TimeStamp());
+  EXPECT_EQ(packet_->timestamp, frame->Timestamp());
+  frame = jitter_buffer_->ExtractAndSetDecode(frame->Timestamp());
   EXPECT_TRUE(frame != NULL);
   jitter_buffer_->ReleaseFrame(frame);
 

@@ -1708,7 +1708,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(3000u, frame_out->TimeStamp());
+  EXPECT_EQ(3000u, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);

@@ -1743,7 +1743,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(timestamp_, frame_out->TimeStamp());
+  EXPECT_EQ(timestamp_, frame_out->Timestamp());
 
   CheckOutFrame(frame_out, size_, false);
 

@@ -1853,13 +1853,13 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
+  EXPECT_EQ(0xffffff00, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
-  EXPECT_EQ(2700u, frame_out2->TimeStamp());
+  EXPECT_EQ(2700u, frame_out2->Timestamp());
   CheckOutFrame(frame_out2, size_, false);
   EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out2);

@@ -1896,13 +1896,13 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
+  EXPECT_EQ(0xffffff00, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
-  EXPECT_EQ(2700u, frame_out2->TimeStamp());
+  EXPECT_EQ(2700u, frame_out2->Timestamp());
   CheckOutFrame(frame_out2, size_, false);
   EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out2);

@@ -1997,7 +1997,7 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(first_key_frame_timestamp, frame_out->TimeStamp());
+  EXPECT_EQ(first_key_frame_timestamp, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);

@@ -2023,7 +2023,7 @@ TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
     VCMEncodedFrame* testFrame = DecodeIncompleteFrame();
     // Timestamp should never be the last TS inserted.
     if (testFrame != NULL) {
-      EXPECT_TRUE(testFrame->TimeStamp() < timestamp_);
+      EXPECT_TRUE(testFrame->Timestamp() < timestamp_);
       jitter_buffer_->ReleaseFrame(testFrame);
     }
   }
@@ -140,7 +140,7 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
       jitter_buffer_.NextCompleteFrame(max_wait_time_ms);
 
   if (found_frame) {
-    frame_timestamp = found_frame->TimeStamp();
+    frame_timestamp = found_frame->Timestamp();
     min_playout_delay_ms = found_frame->EncodedImage().playout_delay_.min_ms;
     max_playout_delay_ms = found_frame->EncodedImage().playout_delay_.max_ms;
   } else {

@@ -212,7 +212,7 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
     return NULL;
   }
   frame->SetRenderTime(render_time_ms);
-  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), "SetRenderTS",
+  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->Timestamp(), "SetRenderTS",
                           "render_time", frame->RenderTimeMs());
   if (!frame->Complete()) {
     // Update stats for incomplete frames.
@@ -57,7 +57,6 @@ class FuzzyFrameObject : public video_coding::EncodedFrame {
   ~FuzzyFrameObject() {}
 
   bool GetBitstream(uint8_t* destination) const override { return false; }
-  uint32_t Timestamp() const override { return timestamp; }
   int64_t ReceivedTime() const override { return 0; }
   int64_t RenderTime() const override { return _renderTimeMs; }
 };

@@ -76,7 +75,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
     std::unique_ptr<FuzzyFrameObject> frame(new FuzzyFrameObject());
     frame->id.picture_id = reader.GetNum<int64_t>();
     frame->id.spatial_layer = reader.GetNum<uint8_t>();
-    frame->timestamp = reader.GetNum<uint32_t>();
+    frame->SetTimestamp(reader.GetNum<uint32_t>());
     frame->num_references = reader.GetNum<uint8_t>() %
                             video_coding::EncodedFrame::kMaxFrameReferences;
 
@@ -186,7 +186,7 @@ VideoStreamDecoderImpl::DecodeResult VideoStreamDecoderImpl::DecodeNextFrame(
   }
 
   int64_t decode_start_time_ms = rtc::TimeMillis();
-  int64_t timestamp = frame->timestamp;
+  int64_t timestamp = frame->Timestamp();
   int64_t render_time_us = frame->RenderTimeMs() * 1000;
   bookkeeping_queue_.PostTask(
       [this, decode_start_time_ms, timestamp, render_time_us]() {