/*
 *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/video_coding/frame_object.h"

#include "common_video/h264/h264_common.h"
#include "modules/video_coding/packet_buffer.h"
#include "rtc_base/checks.h"

namespace webrtc {
namespace video_coding {

FrameObject::FrameObject()
    : picture_id(0),
      spatial_layer(0),
      timestamp(0),
      num_references(0),
      inter_layer_predicted(false) {}

RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
                               uint16_t first_seq_num,
                               uint16_t last_seq_num,
                               size_t frame_size,
                               int times_nacked,
                               int64_t received_time)
    : packet_buffer_(packet_buffer),
      first_seq_num_(first_seq_num),
      last_seq_num_(last_seq_num),
      timestamp_(0),
      received_time_(received_time),
      times_nacked_(times_nacked) {
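  // The frame spans the packets [first_seq_num, last_seq_num], which remain
  // owned by |packet_buffer_|; the first packet seeds most of the frame-level
  // metadata below.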
  VCMPacket* first_packet = packet_buffer_->GetPacket(first_seq_num);
  RTC_CHECK(first_packet);

  // RtpFrameObject members
  frame_type_ = first_packet->frameType;
  codec_type_ = first_packet->codec;

  // TODO(philipel): Remove when encoded image is replaced by FrameObject.
  // VCMEncodedFrame members
  CopyCodecSpecific(&first_packet->video_header);
  _completeFrame = true;
  _payloadType = first_packet->payloadType;
  _timeStamp = first_packet->timestamp;
  ntp_time_ms_ = first_packet->ntp_time_ms_;

  // Set the frame's playout delay to the same value as the first packet's.
  SetPlayoutDelay(first_packet->video_header.playout_delay);

  // Since FFmpeg uses an optimized bitstream reader that reads in chunks of
  // 32/64 bits we have to add at least that much padding to the buffer
  // to make sure the decoder doesn't read out of bounds.
  // NOTE! EncodedImage::_size is the size of the buffer (think capacity() of
  // an std::vector) and EncodedImage::_length is the actual size of
  // the bitstream (think size() of an std::vector).
  if (codec_type_ == kVideoCodecH264)
    _size = frame_size + EncodedImage::kBufferPaddingBytesH264;
  else
    _size = frame_size;

  _buffer = new uint8_t[_size];
  _length = frame_size;

  // For H264 frames we can't determine the frame type by just looking at the
  // first packet. Instead we consider the frame to be a keyframe if it
  // contains an IDR NALU.
  if (codec_type_ == kVideoCodecH264) {
    _frameType = kVideoFrameDelta;
    frame_type_ = kVideoFrameDelta;
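    // Walk every packet in the frame; the uint16_t cast keeps the end-of-range
    // test correct across RTP sequence number wrap-around.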
    for (uint16_t seq_num = first_seq_num;
         seq_num != static_cast<uint16_t>(last_seq_num + 1) &&
         _frameType == kVideoFrameDelta;
         ++seq_num) {
      VCMPacket* packet = packet_buffer_->GetPacket(seq_num);
      RTC_CHECK(packet);
      const RTPVideoHeaderH264& header = packet->video_header.codecHeader.H264;
      for (size_t i = 0; i < header.nalus_length; ++i) {
        if (header.nalus[i].type == H264::NaluType::kIdr) {
          _frameType = kVideoFrameKey;
          frame_type_ = kVideoFrameKey;
          break;
        }
      }
    }
  } else {
    _frameType = first_packet->frameType;
    frame_type_ = first_packet->frameType;
  }

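  // Copy the reassembled bitstream for [first_seq_num, last_seq_num] out of
  // the packet buffer into the buffer owned by this frame.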
  bool bitstream_copied = GetBitstream(_buffer);
  RTC_DCHECK(bitstream_copied);
  _encodedWidth = first_packet->width;
  _encodedHeight = first_packet->height;

  // FrameObject members
  timestamp = first_packet->timestamp;

  VCMPacket* last_packet = packet_buffer_->GetPacket(last_seq_num);
  RTC_CHECK(last_packet);
  RTC_CHECK(last_packet->markerBit);
  // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
  // ts_126114v120700p.pdf Section 7.4.5.
  // The MTSI client shall add the payload bytes as defined in this clause
  // onto the last RTP packet in each group of packets which make up a key
  // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
  // (HEVC)).
  rotation_ = last_packet->video_header.rotation;
  _rotation_set = true;
  content_type_ = last_packet->video_header.content_type;
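  // The video timing extension carries per-frame deltas relative to capture
  // time; absolute timestamps are reconstructed by adding them to the frame's
  // estimated capture NTP time (ntp_time_ms_).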
  if (last_packet->video_header.video_timing.flags !=
      TimingFrameFlags::kInvalid) {
    // ntp_time_ms_ may be -1 if not estimated yet. This is not a problem,
    // as this will be dealt with at the time of reporting.
    timing_.encode_start_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.encode_start_delta_ms;
    timing_.encode_finish_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.encode_finish_delta_ms;
    timing_.packetization_finish_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.packetization_finish_delta_ms;
    timing_.pacer_exit_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.pacer_exit_delta_ms;
    timing_.network_timestamp_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.network_timstamp_delta_ms;
    timing_.network2_timestamp_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.network2_timstamp_delta_ms;

    timing_.receive_start_ms = first_packet->receive_time_ms;
    timing_.receive_finish_ms = last_packet->receive_time_ms;
  }
  timing_.flags = last_packet->video_header.video_timing.flags;
}

RtpFrameObject::~RtpFrameObject() {
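  // Give the packets that back this frame back to the packet buffer so their
  // slots can be reused.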
  packet_buffer_->ReturnFrame(this);
}

uint16_t RtpFrameObject::first_seq_num() const {
  return first_seq_num_;
}

uint16_t RtpFrameObject::last_seq_num() const {
  return last_seq_num_;
}

int RtpFrameObject::times_nacked() const {
  return times_nacked_;
}

FrameType RtpFrameObject::frame_type() const {
  return frame_type_;
}

VideoCodecType RtpFrameObject::codec_type() const {
  return codec_type_;
}

bool RtpFrameObject::GetBitstream(uint8_t* destination) const {
  return packet_buffer_->GetBitstream(*this, destination);
}

uint32_t RtpFrameObject::Timestamp() const {
  return timestamp_;
}

int64_t RtpFrameObject::ReceivedTime() const {
  return received_time_;
}

int64_t RtpFrameObject::RenderTime() const {
  return _renderTimeMs;
}

bool RtpFrameObject::delayed_by_retransmission() const {
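  // A frame counts as delayed by retransmission if any of its packets had to
  // be NACKed at least once.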
  return times_nacked() > 0;
}

rtc::Optional<RTPVideoTypeHeader> RtpFrameObject::GetCodecHeader() const {
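  // Hold the packet buffer's lock while inspecting the packet; the buffer may
  // be modified concurrently, and the packet may already have been cleared,
  // in which case no codec header is returned.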
  rtc::CritScope lock(&packet_buffer_->crit_);
  VCMPacket* packet = packet_buffer_->GetPacket(first_seq_num_);
  if (!packet)
    return rtc::Optional<RTPVideoTypeHeader>();
  return rtc::Optional<RTPVideoTypeHeader>(packet->video_header.codecHeader);
}

}  // namespace video_coding
}  // namespace webrtc