Mirror of https://github.com/mollyim/webrtc.git
Propagate spatial index to EncodedImage.
Set spatial index of assembled VP9 picture equal to spatial index of its
top spatial layer frame.

Bug: webrtc:10151
Change-Id: Iae40505864b14b01cc6787f8da99a9e3fe283956
Reviewed-on: https://webrtc-review.googlesource.com/c/115280
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Sergey Silkin <ssilkin@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26075}
Parent: 76fd7b402e
Commit: 61832dd018
4 changed files with 29 additions and 19 deletions
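Note: the rule in the commit message, as a worked example. For a VP9 superframe assembled from spatial layers S0, S1, S2, the combined EncodedImage reports spatial index 2, the top layer. The helper below is a minimal illustrative sketch, not the WebRTC API.

#include <vector>

// Illustrative only: the assembled picture takes the index of its top
// (last) spatial layer.
int CombinedSpatialIndex(const std::vector<int>& layer_indices) {
  return layer_indices.back();
}

// CombinedSpatialIndex({0, 1, 2}) == 2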
@@ -44,6 +44,7 @@ void VCMEncodedFrame::Free() {
 
 void VCMEncodedFrame::Reset() {
   SetTimestamp(0);
+  SetSpatialIndex(absl::nullopt);
   _renderTimeMs = -1;
   _payloadType = 0;
   _frameType = kVideoFrameDelta;
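Note: a minimal sketch of what the added Reset() line relies on, assuming EncodedImage stores the spatial index as an absl::optional<int> that SetSpatialIndex(absl::nullopt) returns to the unset state. The class below is illustrative, not the real EncodedImage.

#include "absl/types/optional.h"

class EncodedImageSketch {
 public:
  void SetSpatialIndex(absl::optional<int> idx) { spatial_index_ = idx; }
  absl::optional<int> SpatialIndex() const { return spatial_index_; }
  void Reset() { SetSpatialIndex(absl::nullopt); }  // back to "no layer info"

 private:
  absl::optional<int> spatial_index_;
};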
@@ -116,6 +117,7 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
         if (vp9_header.spatial_idx != kNoSpatialIdx) {
           _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted =
               vp9_header.inter_layer_predicted;
+          SetSpatialIndex(vp9_header.spatial_idx);
         }
         if (vp9_header.gof_idx != kNoGofIdx) {
           _codecSpecificInfo.codecSpecific.VP9.gof_idx = vp9_header.gof_idx;
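Note: a sketch of the propagation step above, assuming kNoSpatialIdx is the depacketizer's sentinel for "no spatial index signalled". The types are simplified stand-ins for RTPVideoHeaderVP9 and VCMEncodedFrame, not the real ones.

#include <cstdint>
#include "absl/types/optional.h"

constexpr uint8_t kNoSpatialIdxSketch = 0xFF;  // illustrative sentinel value

struct Vp9HeaderSketch {
  uint8_t spatial_idx = kNoSpatialIdxSketch;
};

struct FrameSketch {
  absl::optional<int> spatial_index;
};

// Copy the spatial index only when the packetizer actually signalled one,
// so frames without layer information keep an unset index.
void CopySpatialIndex(const Vp9HeaderSketch& vp9_header, FrameSketch* frame) {
  if (vp9_header.spatial_idx != kNoSpatialIdxSketch)
    frame->spatial_index = vp9_header.spatial_idx;
}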
@@ -68,6 +68,7 @@ class VCMEncodedFrame : protected EncodedImage {
    *   Frame RTP timestamp (90kHz)
    */
   using EncodedImage::set_size;
+  using EncodedImage::SetSpatialIndex;
   using EncodedImage::SetTimestamp;
   using EncodedImage::size;
   using EncodedImage::Timestamp;
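Note: the header change follows the pattern already used in this class. VCMEncodedFrame inherits protectedly from EncodedImage and re-exposes individual members with using-declarations. A generic illustration of the technique, with illustrative names:

class Base {
 public:
  void SetSpatialIndex(int /*idx*/) {}
  void SetTimestamp(long /*ts*/) {}
};

class Derived : protected Base {
 public:
  using Base::SetSpatialIndex;  // selectively made public again
  // Base::SetTimestamp remains protected here and is not callable
  // through a Derived object from outside the class hierarchy.
};

// Derived d;
// d.SetSpatialIndex(1);  // OK
// d.SetTimestamp(0);     // would not compile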
@@ -668,28 +668,34 @@ void FrameBuffer::ClearFramesAndHistory() {
 EncodedFrame* FrameBuffer::CombineAndDeleteFrames(
     const std::vector<EncodedFrame*>& frames) const {
   RTC_DCHECK(!frames.empty());
-  EncodedFrame* frame = frames[0];
+  EncodedFrame* first_frame = frames[0];
+  EncodedFrame* last_frame = frames.back();
   size_t total_length = 0;
   for (size_t i = 0; i < frames.size(); ++i) {
     total_length += frames[i]->size();
   }
-  frame->VerifyAndAllocate(total_length);
-  uint8_t* buffer = frame->MutableBuffer();
+  first_frame->VerifyAndAllocate(total_length);
+
+  // Spatial index of combined frame is set equal to spatial index of its top
+  // spatial layer.
+  first_frame->SetSpatialIndex(last_frame->id.spatial_layer);
+  first_frame->id.spatial_layer = last_frame->id.spatial_layer;
+
+  first_frame->video_timing_mutable()->network2_timestamp_ms =
+      last_frame->video_timing().network2_timestamp_ms;
+  first_frame->video_timing_mutable()->receive_finish_ms =
+      last_frame->video_timing().receive_finish_ms;
+
   // Append all remaining frames to the first one.
-  size_t used_buffer_bytes = frame->size();
+  uint8_t* buffer = first_frame->MutableBuffer() + first_frame->size();
   for (size_t i = 1; i < frames.size(); ++i) {
-    EncodedFrame* frame_to_append = frames[i];
-    memcpy(buffer + used_buffer_bytes, frame_to_append->Buffer(),
-           frame_to_append->size());
-    used_buffer_bytes += frame_to_append->size();
-    frame->video_timing_mutable()->network2_timestamp_ms =
-        frame_to_append->video_timing().network2_timestamp_ms;
-    frame->video_timing_mutable()->receive_finish_ms =
-        frame_to_append->video_timing().receive_finish_ms;
-    delete frame_to_append;
+    EncodedFrame* next_frame = frames[i];
+    memcpy(buffer, next_frame->Buffer(), next_frame->size());
+    buffer += next_frame->size();
+    delete next_frame;
   }
-  frame->set_size(total_length);
-  return frame;
+  first_frame->set_size(total_length);
+  return first_frame;
 }
 
 FrameBuffer::FrameInfo::FrameInfo() = default;
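Note: a self-contained sketch of the rewritten append loop above, using only standard containers instead of EncodedFrame buffers; rather than tracking used_buffer_bytes, the write pointer itself advances past each appended layer.

#include <cstdint>
#include <cstring>
#include <vector>

// Append the payloads of the higher spatial layers behind the base layer.
void AppendLayers(std::vector<uint8_t>* combined,
                  const std::vector<std::vector<uint8_t>>& higher_layers) {
  size_t total = combined->size();
  for (const auto& layer : higher_layers)
    total += layer.size();

  const size_t base_size = combined->size();
  combined->resize(total);
  uint8_t* write_ptr = combined->data() + base_size;  // start after base layer
  for (const auto& layer : higher_layers) {
    if (layer.empty())
      continue;
    std::memcpy(write_ptr, layer.data(), layer.size());
    write_ptr += layer.size();  // advance past the layer just appended
  }
}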
@@ -161,6 +161,7 @@ class TestFrameBuffer2 : public ::testing::Test {
     std::unique_ptr<FrameObjectFake> frame(new FrameObjectFake());
     frame->id.picture_id = picture_id;
     frame->id.spatial_layer = spatial_layer;
+    frame->SetSpatialIndex(spatial_layer);
     frame->SetTimestamp(ts_ms * 90);
     frame->num_references = references.size();
     frame->inter_layer_predicted = inter_layer_predicted;
@@ -271,7 +272,7 @@ TEST_F(TestFrameBuffer2, OneSuperFrame) {
   InsertFrame(pid, 1, ts, true, true);
   ExtractFrame();
 
-  CheckFrame(0, pid, 0);
+  CheckFrame(0, pid, 1);
 }
 
 TEST_F(TestFrameBuffer2, SetPlayoutDelay) {
@@ -599,7 +600,7 @@ TEST_F(TestFrameBuffer2, CombineFramesToSuperframe) {
   InsertFrame(pid, 1, ts, true, true);
   ExtractFrame(0);
   ExtractFrame(0);
-  CheckFrame(0, pid, 0);
+  CheckFrame(0, pid, 1);
   CheckNoFrame(1);
   // Two frames should be combined and returned together.
   CheckFrameSize(0, kFrameSize * 2);
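Note: a hedged reading of the expectation changes in these tests: CheckFrame's third argument appears to be the spatial layer reported by the extracted frame, so a superframe combined from layers {0, 1} is now expected to report 1, its top layer. A minimal gtest-style sketch of that property; the helper is illustrative, not the fixture's API.

#include <vector>
#include <gtest/gtest.h>

namespace {
int TopSpatialLayer(const std::vector<int>& layers) { return layers.back(); }
}  // namespace

TEST(SuperFrameSketch, CombinedFrameReportsTopSpatialLayer) {
  EXPECT_EQ(1, TopSpatialLayer({0, 1}));
}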
@@ -613,7 +614,7 @@ TEST_F(TestFrameBuffer2, HigherSpatialLayerNonDecodable) {
   InsertFrame(pid, 1, ts, true, true);
 
   ExtractFrame(0);
-  CheckFrame(0, pid, 0);
+  CheckFrame(0, pid, 1);
 
   InsertFrame(pid + 1, 1, ts + kFps20, false, true, pid);
   InsertFrame(pid + 2, 0, ts + kFps10, false, false, pid);
@@ -627,7 +628,7 @@ TEST_F(TestFrameBuffer2, HigherSpatialLayerNonDecodable) {
   ExtractFrame();
   ExtractFrame();
   CheckFrame(1, pid + 1, 1);
-  CheckFrame(2, pid + 2, 0);
+  CheckFrame(2, pid + 2, 1);
 }
 
 }  // namespace video_coding