Remove deprecated VideoStreamDecoderInterface and FrameBuffer2.

Bug: webrtc:14875
Change-Id: I46ea21d9ed46283ad3f6c9005ad05ec116d841f2
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/291701
Reviewed-by: Rasmus Brandt <brandtr@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#39304}
This commit is contained in:
philipel 2023-02-03 14:42:32 +01:00 committed by WebRTC LUCI CQ
parent 831664294c
commit 04e9354557
16 changed files with 1 addition and 2300 deletions

View file

@ -1437,7 +1437,6 @@ if (rtc_include_tests) {
"units:units_unittests",
"video:frame_buffer_unittest",
"video:rtp_video_frame_assembler_unittests",
"video:video_unittests",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",

View file

@ -259,37 +259,6 @@ rtc_source_set("video_bitrate_allocator_factory") {
]
}
rtc_source_set("video_stream_decoder") {
visibility = [ "*" ]
sources = [ "video_stream_decoder.h" ]
deps = [
":encoded_frame",
":video_frame",
":video_rtp_headers",
"../task_queue",
"../units:time_delta",
"../video_codecs:video_codecs_api",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("video_stream_decoder_create") {
visibility = [ "*" ]
sources = [
"video_stream_decoder_create.cc",
"video_stream_decoder_create.h",
]
deps = [
":video_stream_decoder",
"../../api:field_trials_view",
"../../video:video_stream_decoder_impl",
"../task_queue",
"../video_codecs:video_codecs_api",
]
}
rtc_library("video_adaptation") {
visibility = [ "*" ]
sources = [
@ -403,19 +372,3 @@ rtc_library("frame_buffer_unittest") {
"../../test:test_support",
]
}
if (rtc_include_tests) {
rtc_library("video_unittests") {
testonly = true
sources = [ "video_stream_decoder_create_unittest.cc" ]
deps = [
":video_frame_metadata",
":video_frame_type",
":video_stream_decoder_create",
"../../modules/rtp_rtcp:rtp_video_header",
"../../test:test_support",
"../task_queue:default_task_queue_factory",
"../video_codecs:builtin_video_decoder_factory",
]
}
}

View file

@ -1,57 +0,0 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_STREAM_DECODER_H_
#define API_VIDEO_VIDEO_STREAM_DECODER_H_
#include <map>
#include <memory>
#include <utility>
#include "api/units/time_delta.h"
#include "api/video/encoded_frame.h"
#include "api/video/video_content_type.h"
#include "api/video/video_frame.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder_factory.h"
namespace webrtc {
// NOTE: This class is still under development and may change without notice.
class VideoStreamDecoderInterface {
public:
class Callbacks {
public:
virtual ~Callbacks() = default;
struct FrameInfo {
absl::optional<int> qp;
VideoContentType content_type;
};
// Called when the VideoStreamDecoder enters a non-decodable state.
virtual void OnNonDecodableState() = 0;
virtual void OnContinuousUntil(int64_t frame_id) {}
virtual void OnDecodedFrame(VideoFrame frame,
const FrameInfo& frame_info) = 0;
};
virtual ~VideoStreamDecoderInterface() = default;
virtual void OnFrame(std::unique_ptr<EncodedFrame> frame) = 0;
virtual void SetMinPlayoutDelay(TimeDelta min_delay) = 0;
virtual void SetMaxPlayoutDelay(TimeDelta max_delay) = 0;
};
} // namespace webrtc
#endif // API_VIDEO_VIDEO_STREAM_DECODER_H_

View file

@ -1,32 +0,0 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/video_stream_decoder_create.h"
#include <memory>
#include "video/video_stream_decoder_impl.h"
namespace webrtc {
std::unique_ptr<VideoStreamDecoderInterface> CreateVideoStreamDecoder(
VideoStreamDecoderInterface::Callbacks* callbacks,
VideoDecoderFactory* decoder_factory,
TaskQueueFactory* task_queue_factory,
std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings,
// TODO(jonaso, webrtc:10335): Consider what to do with factories
// vs. field trials.
const FieldTrialsView* field_trials) {
return std::make_unique<VideoStreamDecoderImpl>(
callbacks, decoder_factory, task_queue_factory,
std::move(decoder_settings), field_trials);
}
} // namespace webrtc

View file

@ -1,37 +0,0 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_STREAM_DECODER_CREATE_H_
#define API_VIDEO_VIDEO_STREAM_DECODER_CREATE_H_
#include <map>
#include <memory>
#include <utility>
#include "api/field_trials_view.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/video/video_stream_decoder.h"
#include "api/video_codecs/sdp_video_format.h"
namespace webrtc {
// The `decoder_settings` parameter is a map between:
// <payload type> --> <<video format>, <number of cores>>.
// The video format is used when instantiating a decoder, and
// the number of cores is used when initializing the decoder.
std::unique_ptr<VideoStreamDecoderInterface> CreateVideoStreamDecoder(
VideoStreamDecoderInterface::Callbacks* callbacks,
VideoDecoderFactory* decoder_factory,
TaskQueueFactory* task_queue_factory,
std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings,
const FieldTrialsView* field_trials = nullptr);
} // namespace webrtc
#endif // API_VIDEO_VIDEO_STREAM_DECODER_CREATE_H_

View file

@ -1,46 +0,0 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/video_stream_decoder_create.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "api/video_codecs/builtin_video_decoder_factory.h"
#include "test/gtest.h"
namespace webrtc {
namespace {
class NullCallbacks : public VideoStreamDecoderInterface::Callbacks {
public:
~NullCallbacks() override = default;
void OnNonDecodableState() override {}
void OnDecodedFrame(VideoFrame frame,
const VideoStreamDecoderInterface::Callbacks::FrameInfo&
frame_info) override {}
};
TEST(VideoStreamDecoderCreate, CreateVideoStreamDecoder) {
std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings = {
{/*payload_type=*/111, {SdpVideoFormat("VP8"), /*number_of_cores=*/2}}};
NullCallbacks callbacks;
std::unique_ptr<VideoDecoderFactory> decoder_factory =
CreateBuiltinVideoDecoderFactory();
std::unique_ptr<TaskQueueFactory> task_queue_factory =
CreateDefaultTaskQueueFactory();
std::unique_ptr<VideoStreamDecoderInterface> decoder =
CreateVideoStreamDecoder(&callbacks, decoder_factory.get(),
task_queue_factory.get(), decoder_settings);
EXPECT_TRUE(decoder);
}
} // namespace
} // namespace webrtc

View file

@ -163,42 +163,6 @@ rtc_library("frame_helpers") {
absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
}
rtc_library("frame_buffer2") {
sources = [
"frame_buffer2.cc",
"frame_buffer2.h",
]
deps = [
":frame_helpers",
":video_codec_interface",
":video_coding_utility",
"../../api:field_trials_view",
"../../api:sequence_checker",
"../../api/task_queue",
"../../api/units:data_size",
"../../api/units:time_delta",
"../../api/video:encoded_frame",
"../../api/video:encoded_image",
"../../api/video:video_rtp_headers",
"../../rtc_base:checks",
"../../rtc_base:event_tracer",
"../../rtc_base:logging",
"../../rtc_base:macromagic",
"../../rtc_base:rtc_event",
"../../rtc_base:rtc_numerics",
"../../rtc_base/experiments:field_trial_parser",
"../../rtc_base/experiments:rtt_mult_experiment",
"../../rtc_base/synchronization:mutex",
"../../rtc_base/system:no_unique_address",
"../../rtc_base/task_utils:repeating_task",
"../../system_wrappers",
"timing:inter_frame_delay",
"timing:jitter_estimator",
"timing:timing_module",
]
absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
}
rtc_library("video_coding") {
visibility = [ "*" ]
sources = [
@ -1179,7 +1143,6 @@ if (rtc_include_tests) {
"decoder_database_unittest.cc",
"decoding_state_unittest.cc",
"fec_controller_unittest.cc",
"frame_buffer2_unittest.cc",
"frame_dependencies_calculator_unittest.cc",
"frame_helpers_unittest.cc",
"generic_decoder_unittest.cc",
@ -1223,7 +1186,6 @@ if (rtc_include_tests) {
":chain_diff_calculator",
":codec_globals_headers",
":encoded_frame",
":frame_buffer2",
":frame_dependencies_calculator",
":frame_helpers",
":h264_packet_buffer",

View file

@ -1,600 +0,0 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/frame_buffer2.h"
#include <algorithm>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <queue>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "api/units/data_size.h"
#include "api/units/time_delta.h"
#include "api/video/encoded_image.h"
#include "api/video/video_timing.h"
#include "modules/video_coding/frame_helpers.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "modules/video_coding/timing/jitter_estimator.h"
#include "modules/video_coding/timing/timing.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/rtt_mult_experiment.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/sequence_number_util.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
namespace video_coding {
namespace {
// Max number of frames the buffer will hold.
constexpr size_t kMaxFramesBuffered = 800;
// Default value for the maximum decode queue size that is used when the
// low-latency renderer is used.
constexpr size_t kZeroPlayoutDelayDefaultMaxDecodeQueueSize = 8;
// Max number of decoded frame info that will be saved.
constexpr int kMaxFramesHistory = 1 << 13;
// The time it's allowed for a frame to be late to its rendering prediction and
// still be rendered.
constexpr int kMaxAllowedFrameDelayMs = 5;
constexpr int64_t kLogNonDecodedIntervalMs = 5000;
} // namespace
FrameBuffer::FrameBuffer(Clock* clock,
VCMTiming* timing,
const FieldTrialsView& field_trials)
: decoded_frames_history_(kMaxFramesHistory),
clock_(clock),
callback_queue_(nullptr),
jitter_estimator_(clock, field_trials),
timing_(timing),
stopped_(false),
protection_mode_(kProtectionNack),
last_log_non_decoded_ms_(-kLogNonDecodedIntervalMs),
rtt_mult_settings_(RttMultExperiment::GetRttMultValue()),
zero_playout_delay_max_decode_queue_size_(
"max_decode_queue_size",
kZeroPlayoutDelayDefaultMaxDecodeQueueSize) {
ParseFieldTrial({&zero_playout_delay_max_decode_queue_size_},
field_trials.Lookup("WebRTC-ZeroPlayoutDelay"));
callback_checker_.Detach();
}
FrameBuffer::~FrameBuffer() {
RTC_DCHECK_RUN_ON(&construction_checker_);
}
void FrameBuffer::NextFrame(int64_t max_wait_time_ms,
bool keyframe_required,
TaskQueueBase* callback_queue,
NextFrameCallback handler) {
RTC_DCHECK_RUN_ON(&callback_checker_);
RTC_DCHECK(callback_queue->IsCurrent());
TRACE_EVENT0("webrtc", "FrameBuffer::NextFrame");
int64_t latest_return_time_ms =
clock_->TimeInMilliseconds() + max_wait_time_ms;
MutexLock lock(&mutex_);
if (stopped_) {
return;
}
latest_return_time_ms_ = latest_return_time_ms;
keyframe_required_ = keyframe_required;
frame_handler_ = handler;
callback_queue_ = callback_queue;
StartWaitForNextFrameOnQueue();
}
void FrameBuffer::StartWaitForNextFrameOnQueue() {
RTC_DCHECK(callback_queue_);
RTC_DCHECK(!callback_task_.Running());
int64_t wait_ms = FindNextFrame(clock_->CurrentTime());
callback_task_ = RepeatingTaskHandle::DelayedStart(
callback_queue_, TimeDelta::Millis(wait_ms),
[this] {
RTC_DCHECK_RUN_ON(&callback_checker_);
// If this task has not been cancelled, we did not get any new frames
// while waiting. Continue with frame delivery.
std::unique_ptr<EncodedFrame> frame;
NextFrameCallback frame_handler;
{
MutexLock lock(&mutex_);
if (!frames_to_decode_.empty()) {
// We have frames, deliver!
frame = GetNextFrame();
timing_->SetLastDecodeScheduledTimestamp(clock_->CurrentTime());
} else if (clock_->TimeInMilliseconds() < latest_return_time_ms_) {
// If there's no frames to decode and there is still time left, it
// means that the frame buffer was cleared between creation and
// execution of this task. Continue waiting for the remaining time.
int64_t wait_ms = FindNextFrame(clock_->CurrentTime());
return TimeDelta::Millis(wait_ms);
}
frame_handler = std::move(frame_handler_);
CancelCallback();
}
// Deliver frame, if any. Otherwise signal timeout.
frame_handler(std::move(frame));
return TimeDelta::Zero(); // Ignored.
},
TaskQueueBase::DelayPrecision::kHigh);
}
int64_t FrameBuffer::FindNextFrame(Timestamp now) {
int64_t wait_ms = latest_return_time_ms_ - now.ms();
frames_to_decode_.clear();
// `last_continuous_frame_` may be empty below, but nullopt is smaller
// than everything else and loop will immediately terminate as expected.
for (auto frame_it = frames_.begin();
frame_it != frames_.end() && frame_it->first <= last_continuous_frame_;
++frame_it) {
if (!frame_it->second.continuous ||
frame_it->second.num_missing_decodable > 0) {
continue;
}
EncodedFrame* frame = frame_it->second.frame.get();
if (keyframe_required_ && !frame->is_keyframe())
continue;
auto last_decoded_frame_timestamp =
decoded_frames_history_.GetLastDecodedFrameTimestamp();
// TODO(https://bugs.webrtc.org/9974): consider removing this check
// as it may make a stream undecodable after a very long delay between
// frames.
if (last_decoded_frame_timestamp &&
AheadOf(*last_decoded_frame_timestamp, frame->Timestamp())) {
continue;
}
// Gather all remaining frames for the same superframe.
std::vector<FrameMap::iterator> current_superframe;
current_superframe.push_back(frame_it);
bool last_layer_completed = frame_it->second.frame->is_last_spatial_layer;
FrameMap::iterator next_frame_it = frame_it;
while (!last_layer_completed) {
++next_frame_it;
if (next_frame_it == frames_.end() || !next_frame_it->second.frame) {
break;
}
if (next_frame_it->second.frame->Timestamp() != frame->Timestamp() ||
!next_frame_it->second.continuous) {
break;
}
if (next_frame_it->second.num_missing_decodable > 0) {
bool has_inter_layer_dependency = false;
for (size_t i = 0; i < EncodedFrame::kMaxFrameReferences &&
i < next_frame_it->second.frame->num_references;
++i) {
if (next_frame_it->second.frame->references[i] >= frame_it->first) {
has_inter_layer_dependency = true;
break;
}
}
// If the frame has an undecoded dependency that is not within the same
// temporal unit then this frame is not yet ready to be decoded. If it
// is within the same temporal unit then the not yet decoded dependency
// is just a lower spatial frame, which is ok.
if (!has_inter_layer_dependency ||
next_frame_it->second.num_missing_decodable > 1) {
break;
}
}
current_superframe.push_back(next_frame_it);
last_layer_completed = next_frame_it->second.frame->is_last_spatial_layer;
}
// Check if the current superframe is complete.
// TODO(bugs.webrtc.org/10064): consider returning all available to
// decode frames even if the superframe is not complete yet.
if (!last_layer_completed) {
continue;
}
frames_to_decode_ = std::move(current_superframe);
absl::optional<Timestamp> render_time = frame->RenderTimestamp();
if (!render_time) {
render_time = timing_->RenderTime(frame->Timestamp(), now);
frame->SetRenderTime(render_time->ms());
}
bool too_many_frames_queued =
frames_.size() > zero_playout_delay_max_decode_queue_size_ ? true
: false;
wait_ms =
timing_->MaxWaitingTime(*render_time, now, too_many_frames_queued).ms();
// This will cause the frame buffer to prefer high framerate rather
// than high resolution in the case of the decoder not decoding fast
// enough and the stream has multiple spatial and temporal layers.
// For multiple temporal layers it may cause non-base layer frames to be
// skipped if they are late.
if (wait_ms < -kMaxAllowedFrameDelayMs)
continue;
break;
}
wait_ms = std::min<int64_t>(wait_ms, latest_return_time_ms_ - now.ms());
wait_ms = std::max<int64_t>(wait_ms, 0);
return wait_ms;
}
std::unique_ptr<EncodedFrame> FrameBuffer::GetNextFrame() {
RTC_DCHECK_RUN_ON(&callback_checker_);
Timestamp now = clock_->CurrentTime();
// TODO(ilnik): remove `frames_out` use frames_to_decode_ directly.
std::vector<std::unique_ptr<EncodedFrame>> frames_out;
RTC_DCHECK(!frames_to_decode_.empty());
bool superframe_delayed_by_retransmission = false;
DataSize superframe_size = DataSize::Zero();
const EncodedFrame& first_frame = *frames_to_decode_[0]->second.frame;
absl::optional<Timestamp> render_time = first_frame.RenderTimestamp();
int64_t receive_time_ms = first_frame.ReceivedTime();
// Gracefully handle bad RTP timestamps and render time issues.
if (!render_time || FrameHasBadRenderTiming(*render_time, now) ||
TargetVideoDelayIsTooLarge(timing_->TargetVideoDelay())) {
RTC_LOG(LS_WARNING) << "Resetting jitter estimator and timing module due "
"to bad render timing for rtp_timestamp="
<< first_frame.Timestamp();
jitter_estimator_.Reset();
timing_->Reset();
render_time = timing_->RenderTime(first_frame.Timestamp(), now);
}
for (FrameMap::iterator& frame_it : frames_to_decode_) {
RTC_DCHECK(frame_it != frames_.end());
std::unique_ptr<EncodedFrame> frame = std::move(frame_it->second.frame);
frame->SetRenderTime(render_time->ms());
superframe_delayed_by_retransmission |= frame->delayed_by_retransmission();
receive_time_ms = std::max(receive_time_ms, frame->ReceivedTime());
superframe_size += DataSize::Bytes(frame->size());
PropagateDecodability(frame_it->second);
decoded_frames_history_.InsertDecoded(frame_it->first, frame->Timestamp());
frames_.erase(frames_.begin(), ++frame_it);
frames_out.emplace_back(std::move(frame));
}
if (!superframe_delayed_by_retransmission) {
auto frame_delay = inter_frame_delay_.CalculateDelay(
first_frame.Timestamp(), Timestamp::Millis(receive_time_ms));
if (frame_delay) {
jitter_estimator_.UpdateEstimate(*frame_delay, superframe_size);
}
float rtt_mult = protection_mode_ == kProtectionNackFEC ? 0.0 : 1.0;
absl::optional<TimeDelta> rtt_mult_add_cap_ms = absl::nullopt;
if (rtt_mult_settings_.has_value()) {
rtt_mult = rtt_mult_settings_->rtt_mult_setting;
rtt_mult_add_cap_ms =
TimeDelta::Millis(rtt_mult_settings_->rtt_mult_add_cap_ms);
}
timing_->SetJitterDelay(
jitter_estimator_.GetJitterEstimate(rtt_mult, rtt_mult_add_cap_ms));
timing_->UpdateCurrentDelay(*render_time, now);
} else {
if (RttMultExperiment::RttMultEnabled())
jitter_estimator_.FrameNacked();
}
if (frames_out.size() == 1) {
return std::move(frames_out[0]);
} else {
return CombineAndDeleteFrames(std::move(frames_out));
}
}
void FrameBuffer::SetProtectionMode(VCMVideoProtection mode) {
TRACE_EVENT0("webrtc", "FrameBuffer::SetProtectionMode");
MutexLock lock(&mutex_);
protection_mode_ = mode;
}
void FrameBuffer::Stop() {
TRACE_EVENT0("webrtc", "FrameBuffer::Stop");
MutexLock lock(&mutex_);
if (stopped_)
return;
stopped_ = true;
CancelCallback();
}
void FrameBuffer::Clear() {
MutexLock lock(&mutex_);
ClearFramesAndHistory();
}
int FrameBuffer::Size() {
MutexLock lock(&mutex_);
return frames_.size();
}
void FrameBuffer::UpdateRtt(int64_t rtt_ms) {
MutexLock lock(&mutex_);
jitter_estimator_.UpdateRtt(TimeDelta::Millis(rtt_ms));
}
bool FrameBuffer::ValidReferences(const EncodedFrame& frame) const {
for (size_t i = 0; i < frame.num_references; ++i) {
if (frame.references[i] >= frame.Id())
return false;
for (size_t j = i + 1; j < frame.num_references; ++j) {
if (frame.references[i] == frame.references[j])
return false;
}
}
return true;
}
void FrameBuffer::CancelCallback() {
// Called from the callback queue or from within Stop().
frame_handler_ = {};
callback_task_.Stop();
callback_queue_ = nullptr;
callback_checker_.Detach();
}
int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
TRACE_EVENT0("webrtc", "FrameBuffer::InsertFrame");
RTC_DCHECK(frame);
MutexLock lock(&mutex_);
int64_t last_continuous_frame_id = last_continuous_frame_.value_or(-1);
if (!ValidReferences(*frame)) {
RTC_LOG(LS_WARNING) << "Frame " << frame->Id()
<< " has invalid frame references, dropping frame.";
return last_continuous_frame_id;
}
if (frames_.size() >= kMaxFramesBuffered) {
if (frame->is_keyframe()) {
RTC_LOG(LS_WARNING) << "Inserting keyframe " << frame->Id()
<< " but buffer is full, clearing"
" buffer and inserting the frame.";
ClearFramesAndHistory();
} else {
RTC_LOG(LS_WARNING) << "Frame " << frame->Id()
<< " could not be inserted due to the frame "
"buffer being full, dropping frame.";
return last_continuous_frame_id;
}
}
auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
auto last_decoded_frame_timestamp =
decoded_frames_history_.GetLastDecodedFrameTimestamp();
if (last_decoded_frame && frame->Id() <= *last_decoded_frame) {
if (AheadOf(frame->Timestamp(), *last_decoded_frame_timestamp) &&
frame->is_keyframe()) {
// If this frame has a newer timestamp but an earlier frame id then we
// assume there has been a jump in the frame id due to some encoder
// reconfiguration or some other reason. Even though this is not according
// to spec we can still continue to decode from this frame if it is a
// keyframe.
RTC_LOG(LS_WARNING)
<< "A jump in frame id was detected, clearing buffer.";
ClearFramesAndHistory();
last_continuous_frame_id = -1;
} else {
RTC_LOG(LS_WARNING) << "Frame " << frame->Id() << " inserted after frame "
<< *last_decoded_frame
<< " was handed off for decoding, dropping frame.";
return last_continuous_frame_id;
}
}
// Test if inserting this frame would cause the order of the frames to become
// ambiguous (covering more than half the interval of 2^16). This can happen
// when the frame id make large jumps mid stream.
if (!frames_.empty() && frame->Id() < frames_.begin()->first &&
frames_.rbegin()->first < frame->Id()) {
RTC_LOG(LS_WARNING) << "A jump in frame id was detected, clearing buffer.";
ClearFramesAndHistory();
last_continuous_frame_id = -1;
}
auto info = frames_.emplace(frame->Id(), FrameInfo()).first;
if (info->second.frame) {
return last_continuous_frame_id;
}
if (!UpdateFrameInfoWithIncomingFrame(*frame, info))
return last_continuous_frame_id;
// If ReceiveTime is negative then it is not a valid timestamp.
if (!frame->delayed_by_retransmission() && frame->ReceivedTime() >= 0)
timing_->IncomingTimestamp(frame->Timestamp(),
Timestamp::Millis(frame->ReceivedTime()));
// It can happen that a frame will be reported as fully received even if a
// lower spatial layer frame is missing.
info->second.frame = std::move(frame);
if (info->second.num_missing_continuous == 0) {
info->second.continuous = true;
PropagateContinuity(info);
last_continuous_frame_id = *last_continuous_frame_;
// Since we now have new continuous frames there might be a better frame
// to return from NextFrame.
if (callback_queue_) {
callback_queue_->PostTask([this] {
MutexLock lock(&mutex_);
if (!callback_task_.Running())
return;
RTC_CHECK(frame_handler_);
callback_task_.Stop();
StartWaitForNextFrameOnQueue();
});
}
}
return last_continuous_frame_id;
}
void FrameBuffer::PropagateContinuity(FrameMap::iterator start) {
TRACE_EVENT0("webrtc", "FrameBuffer::PropagateContinuity");
RTC_DCHECK(start->second.continuous);
std::queue<FrameMap::iterator> continuous_frames;
continuous_frames.push(start);
// A simple BFS to traverse continuous frames.
while (!continuous_frames.empty()) {
auto frame = continuous_frames.front();
continuous_frames.pop();
if (!last_continuous_frame_ || *last_continuous_frame_ < frame->first) {
last_continuous_frame_ = frame->first;
}
// Loop through all dependent frames, and if that frame no longer has
// any unfulfilled dependencies then that frame is continuous as well.
for (size_t d = 0; d < frame->second.dependent_frames.size(); ++d) {
auto frame_ref = frames_.find(frame->second.dependent_frames[d]);
RTC_DCHECK(frame_ref != frames_.end());
// TODO(philipel): Look into why we've seen this happen.
if (frame_ref != frames_.end()) {
--frame_ref->second.num_missing_continuous;
if (frame_ref->second.num_missing_continuous == 0) {
frame_ref->second.continuous = true;
continuous_frames.push(frame_ref);
}
}
}
}
}
void FrameBuffer::PropagateDecodability(const FrameInfo& info) {
TRACE_EVENT0("webrtc", "FrameBuffer::PropagateDecodability");
for (size_t d = 0; d < info.dependent_frames.size(); ++d) {
auto ref_info = frames_.find(info.dependent_frames[d]);
RTC_DCHECK(ref_info != frames_.end());
// TODO(philipel): Look into why we've seen this happen.
if (ref_info != frames_.end()) {
RTC_DCHECK_GT(ref_info->second.num_missing_decodable, 0U);
--ref_info->second.num_missing_decodable;
}
}
}
bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
FrameMap::iterator info) {
TRACE_EVENT0("webrtc", "FrameBuffer::UpdateFrameInfoWithIncomingFrame");
auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId();
RTC_DCHECK(!last_decoded_frame || *last_decoded_frame < info->first);
// In this function we determine how many missing dependencies this `frame`
// has to become continuous/decodable. If a frame that this `frame` depend
// on has already been decoded then we can ignore that dependency since it has
// already been fulfilled.
//
// For all other frames we will register a backwards reference to this `frame`
// so that `num_missing_continuous` and `num_missing_decodable` can be
// decremented as frames become continuous/are decoded.
struct Dependency {
int64_t frame_id;
bool continuous;
};
std::vector<Dependency> not_yet_fulfilled_dependencies;
// Find all dependencies that have not yet been fulfilled.
for (size_t i = 0; i < frame.num_references; ++i) {
// Does `frame` depend on a frame earlier than the last decoded one?
if (last_decoded_frame && frame.references[i] <= *last_decoded_frame) {
// Was that frame decoded? If not, this `frame` will never become
// decodable.
if (!decoded_frames_history_.WasDecoded(frame.references[i])) {
int64_t now_ms = clock_->TimeInMilliseconds();
if (last_log_non_decoded_ms_ + kLogNonDecodedIntervalMs < now_ms) {
RTC_LOG(LS_WARNING)
<< "Frame " << frame.Id()
<< " depends on a non-decoded frame more previous than the last "
"decoded frame, dropping frame.";
last_log_non_decoded_ms_ = now_ms;
}
return false;
}
} else {
auto ref_info = frames_.find(frame.references[i]);
bool ref_continuous =
ref_info != frames_.end() && ref_info->second.continuous;
not_yet_fulfilled_dependencies.push_back(
{frame.references[i], ref_continuous});
}
}
info->second.num_missing_continuous = not_yet_fulfilled_dependencies.size();
info->second.num_missing_decodable = not_yet_fulfilled_dependencies.size();
for (const Dependency& dep : not_yet_fulfilled_dependencies) {
if (dep.continuous)
--info->second.num_missing_continuous;
frames_[dep.frame_id].dependent_frames.push_back(frame.Id());
}
return true;
}
void FrameBuffer::ClearFramesAndHistory() {
TRACE_EVENT0("webrtc", "FrameBuffer::ClearFramesAndHistory");
frames_.clear();
last_continuous_frame_.reset();
frames_to_decode_.clear();
decoded_frames_history_.Clear();
}
// TODO(philipel): Avoid the concatenation of frames here, by replacing
// NextFrame and GetNextFrame with methods returning multiple frames.
std::unique_ptr<EncodedFrame> FrameBuffer::CombineAndDeleteFrames(
std::vector<std::unique_ptr<EncodedFrame>> frames) const {
RTC_DCHECK(!frames.empty());
absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> inlined;
for (auto& frame : frames) {
inlined.push_back(std::move(frame));
}
return webrtc::CombineAndDeleteFrames(std::move(inlined));
}
FrameBuffer::FrameInfo::FrameInfo() = default;
FrameBuffer::FrameInfo::FrameInfo(FrameInfo&&) = default;
FrameBuffer::FrameInfo::~FrameInfo() = default;
} // namespace video_coding
} // namespace webrtc

View file

@ -1,193 +0,0 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
#define MODULES_VIDEO_CODING_FRAME_BUFFER2_H_
#include <array>
#include <map>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "api/field_trials_view.h"
#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "api/video/encoded_frame.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "modules/video_coding/timing/inter_frame_delay.h"
#include "modules/video_coding/timing/jitter_estimator.h"
#include "modules/video_coding/utility/decoded_frames_history.h"
#include "rtc_base/event.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/experiments/rtt_mult_experiment.h"
#include "rtc_base/numerics/sequence_number_util.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
class Clock;
class VCMReceiveStatisticsCallback;
class JitterEstimator;
class VCMTiming;
namespace video_coding {
// Legacy jitter/frame buffer ("FrameBuffer2"): orders complete encoded
// frames, waits until their decode time, and hands them to a callback on a
// task queue.
// NOTE(review): deprecated in favor of webrtc::FrameBuffer
// (api/video/frame_buffer.h); see bug webrtc:14875.
class FrameBuffer {
 public:
  FrameBuffer(Clock* clock,
              VCMTiming* timing,
              const FieldTrialsView& field_trials);

  FrameBuffer() = delete;
  FrameBuffer(const FrameBuffer&) = delete;
  FrameBuffer& operator=(const FrameBuffer&) = delete;

  virtual ~FrameBuffer();

  // Insert a frame into the frame buffer. Returns the picture id
  // of the last continuous frame or -1 if there is no continuous frame.
  int64_t InsertFrame(std::unique_ptr<EncodedFrame> frame);

  using NextFrameCallback = std::function<void(std::unique_ptr<EncodedFrame>)>;
  // Get the next frame for decoding. `handler` is invoked with the next frame
  // or with nullptr if no frame is ready for decoding after `max_wait_time_ms`.
  void NextFrame(int64_t max_wait_time_ms,
                 bool keyframe_required,
                 TaskQueueBase* callback_queue,
                 NextFrameCallback handler);

  // Tells the FrameBuffer which protection mode that is in use. Affects
  // the frame timing.
  // TODO(philipel): Remove this when new timing calculations has been
  // implemented.
  void SetProtectionMode(VCMVideoProtection mode);

  // Stop the frame buffer, causing any sleeping thread in NextFrame to
  // return immediately.
  void Stop();

  // Updates the RTT for jitter buffer estimation.
  void UpdateRtt(int64_t rtt_ms);

  // Clears the FrameBuffer, removing all the buffered frames.
  void Clear();

  // Returns the number of frames currently held in the buffer.
  int Size();

 private:
  // Per-frame bookkeeping for continuity/decodability propagation.
  struct FrameInfo {
    FrameInfo();
    FrameInfo(FrameInfo&&);
    ~FrameInfo();

    // Which other frames that have direct unfulfilled dependencies
    // on this frame.
    absl::InlinedVector<int64_t, 8> dependent_frames;

    // A frame is continuous if it has all its referenced/indirectly
    // referenced frames.
    //
    // How many unfulfilled frames this frame has until it becomes continuous.
    size_t num_missing_continuous = 0;

    // A frame is decodable if all its referenced frames have been decoded.
    //
    // How many unfulfilled frames this frame has until it becomes decodable.
    size_t num_missing_decodable = 0;

    // If this frame is continuous or not.
    bool continuous = false;

    // The actual EncodedFrame.
    std::unique_ptr<EncodedFrame> frame;
  };

  // Keyed by frame id; map keeps frames ordered by id.
  using FrameMap = std::map<int64_t, FrameInfo>;

  // Check that the references of `frame` are valid.
  bool ValidReferences(const EncodedFrame& frame) const;

  int64_t FindNextFrame(Timestamp now) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  std::unique_ptr<EncodedFrame> GetNextFrame()
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void StartWaitForNextFrameOnQueue() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void CancelCallback() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Update all directly dependent and indirectly dependent frames and mark
  // them as continuous if all their references have been fulfilled.
  void PropagateContinuity(FrameMap::iterator start)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Marks the frame as decoded and updates all directly dependent frames.
  void PropagateDecodability(const FrameInfo& info)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Update the corresponding FrameInfo of `frame` and all FrameInfos that
  // `frame` references.
  // Return false if `frame` will never be decodable, true otherwise.
  bool UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame,
                                        FrameMap::iterator info)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void ClearFramesAndHistory() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // The cleaner solution would be to have the NextFrame function return a
  // vector of frames, but until the decoding pipeline can support decoding
  // multiple frames at the same time we combine all frames to one frame and
  // return it. See bugs.webrtc.org/10064
  std::unique_ptr<EncodedFrame> CombineAndDeleteFrames(
      std::vector<std::unique_ptr<EncodedFrame>> frames) const;

  RTC_NO_UNIQUE_ADDRESS SequenceChecker construction_checker_;
  RTC_NO_UNIQUE_ADDRESS SequenceChecker callback_checker_;

  // Stores only undecoded frames.
  FrameMap frames_ RTC_GUARDED_BY(mutex_);
  DecodedFramesHistory decoded_frames_history_ RTC_GUARDED_BY(mutex_);

  Mutex mutex_;
  Clock* const clock_;

  // NextFrame() callback plumbing; all guarded by `mutex_`.
  TaskQueueBase* callback_queue_ RTC_GUARDED_BY(mutex_);
  RepeatingTaskHandle callback_task_ RTC_GUARDED_BY(mutex_);
  NextFrameCallback frame_handler_ RTC_GUARDED_BY(mutex_);
  int64_t latest_return_time_ms_ RTC_GUARDED_BY(mutex_);
  bool keyframe_required_ RTC_GUARDED_BY(mutex_);

  JitterEstimator jitter_estimator_ RTC_GUARDED_BY(mutex_);
  VCMTiming* const timing_ RTC_GUARDED_BY(mutex_);
  InterFrameDelay inter_frame_delay_ RTC_GUARDED_BY(mutex_);
  absl::optional<int64_t> last_continuous_frame_ RTC_GUARDED_BY(mutex_);
  std::vector<FrameMap::iterator> frames_to_decode_ RTC_GUARDED_BY(mutex_);
  bool stopped_ RTC_GUARDED_BY(mutex_);
  VCMVideoProtection protection_mode_ RTC_GUARDED_BY(mutex_);
  int64_t last_log_non_decoded_ms_ RTC_GUARDED_BY(mutex_);

  // rtt_mult experiment settings.
  const absl::optional<RttMultExperiment::Settings> rtt_mult_settings_;

  // Maximum number of frames in the decode queue to allow pacing. If the
  // queue grows beyond the max limit, pacing will be disabled and frames will
  // be pushed to the decoder as soon as possible. This only has an effect
  // when the low-latency rendering path is active, which is indicated by
  // the frame's render time == 0.
  FieldTrialParameter<unsigned> zero_playout_delay_max_decode_queue_size_;
};
} // namespace video_coding
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_FRAME_BUFFER2_H_

View file

@ -1,665 +0,0 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/frame_buffer2.h"
#include <algorithm>
#include <cstring>
#include <limits>
#include <memory>
#include <vector>
#include "api/task_queue/task_queue_base.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "modules/rtp_rtcp/source/frame_object.h"
#include "modules/video_coding/timing/jitter_estimator.h"
#include "modules/video_coding/timing/timing.h"
#include "rtc_base/numerics/sequence_number_util.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/random.h"
#include "system_wrappers/include/clock.h"
#include "test/field_trial.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/scoped_key_value_config.h"
#include "test/time_controller/simulated_time_controller.h"
using ::testing::_;
using ::testing::IsEmpty;
using ::testing::Return;
using ::testing::SizeIs;
namespace webrtc {
namespace video_coding {
// Fake timing: the first frame is scheduled to render kDelay after `now`,
// and subsequent render times move by the 90 kHz RTP timestamp delta
// converted to milliseconds (forwards or backwards with timestamp order).
class VCMTimingFake : public VCMTiming {
 public:
  explicit VCMTimingFake(Clock* clock, const FieldTrialsView& field_trials)
      : VCMTiming(clock, field_trials) {}

  Timestamp RenderTime(uint32_t frame_timestamp, Timestamp now) const override {
    // First call: anchor the render schedule kDelay after `now`.
    if (last_render_time_.IsMinusInfinity()) {
      last_render_time_ = now + kDelay;
      last_timestamp_ = frame_timestamp;
    }

    // 90 kHz ticks -> milliseconds.
    auto diff = MinDiff(frame_timestamp, last_timestamp_);
    auto timeDiff = TimeDelta::Millis(diff / 90);
    if (AheadOf(frame_timestamp, last_timestamp_))
      last_render_time_ += timeDiff;
    else
      last_render_time_ -= timeDiff;

    last_timestamp_ = frame_timestamp;
    return last_render_time_;
  }

  TimeDelta MaxWaitingTime(Timestamp render_time,
                           Timestamp now,
                           bool too_many_frames_queued) const override {
    // Leave kDecodeTime of headroom before the render deadline.
    return render_time - now - kDecodeTime;
  }

  TimeDelta GetCurrentJitter() {
    return VCMTiming::GetTimings().jitter_buffer_delay;
  }

 private:
  static constexpr TimeDelta kDelay = TimeDelta::Millis(50);
  const TimeDelta kDecodeTime = kDelay / 2;
  // RenderTime() is const in the interface, so the schedule bookkeeping must
  // be mutable.
  mutable uint32_t last_timestamp_ = 0;
  mutable Timestamp last_render_time_ = Timestamp::MinusInfinity();
};
// Minimal EncodedFrame for tests: reports a zero receive time and lets the
// test toggle the delayed-by-retransmission flag.
class FrameObjectFake : public EncodedFrame {
 public:
  void set_delayed_by_retransmission(bool delayed) {
    delayed_by_retransmission_ = delayed;
  }

  bool delayed_by_retransmission() const override {
    return delayed_by_retransmission_;
  }

  int64_t ReceivedTime() const override { return 0; }

  int64_t RenderTime() const override { return _renderTimeMs; }

 private:
  bool delayed_by_retransmission_{false};
};
// gMock of the receive-statistics callback, used to verify the frame
// buffer's bookkeeping notifications (complete frames, drops, timing
// updates).
class VCMReceiveStatisticsCallbackMock : public VCMReceiveStatisticsCallback {
 public:
  MOCK_METHOD(void,
              OnCompleteFrame,
              (bool is_keyframe,
               size_t size_bytes,
               VideoContentType content_type),
              (override));
  MOCK_METHOD(void, OnDroppedFrames, (uint32_t frames_dropped), (override));
  MOCK_METHOD(void,
              OnFrameBufferTimingsUpdated,
              (int max_decode,
               int current_delay,
               int target_delay,
               int jitter_buffer,
               int min_playout_delay,
               int render_delay),
              (override));
  MOCK_METHOD(void,
              OnTimingFrameInfoUpdated,
              (const TimingFrameInfo& info),
              (override));
};
// Fixture driving FrameBuffer under simulated time. Frames are created via
// CreateFrame()/InsertFrame(); extracted frames (or nullptr on timeout) are
// appended to `frames_` and inspected with the Check*() helpers.
class TestFrameBuffer2 : public ::testing::Test {
 protected:
  static constexpr int kMaxReferences = 5;
  // Timestamps in these tests are in milliseconds; kFps* are frame
  // intervals in ms (1 fps / 10 fps / 20 fps).
  static constexpr int kFps1 = 1000;
  static constexpr int kFps10 = kFps1 / 10;
  static constexpr int kFps20 = kFps1 / 20;
  static constexpr size_t kFrameSize = 10;

  TestFrameBuffer2()
      : time_controller_(Timestamp::Seconds(0)),
        time_task_queue_(
            time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
                "extract queue",
                TaskQueueFactory::Priority::NORMAL)),
        timing_(time_controller_.GetClock(), field_trials_),
        buffer_(new FrameBuffer(time_controller_.GetClock(),
                                &timing_,
                                field_trials_)),
        rand_(0x34678213) {}

  // Builds a FrameObjectFake with `refs...` as its referenced picture ids.
  template <typename... T>
  std::unique_ptr<FrameObjectFake> CreateFrame(uint16_t picture_id,
                                               uint8_t spatial_layer,
                                               int64_t ts_ms,
                                               bool last_spatial_layer,
                                               size_t frame_size_bytes,
                                               T... refs) {
    static_assert(sizeof...(refs) <= kMaxReferences,
                  "To many references specified for EncodedFrame.");
    std::array<uint16_t, sizeof...(refs)> references = {
        {rtc::checked_cast<uint16_t>(refs)...}};

    auto frame = std::make_unique<FrameObjectFake>();
    frame->SetId(picture_id);
    frame->SetSpatialIndex(spatial_layer);
    frame->SetTimestamp(ts_ms * 90);  // ms -> 90 kHz RTP timestamp.
    frame->num_references = references.size();
    frame->is_last_spatial_layer = last_spatial_layer;
    // Add some data to buffer.
    frame->SetEncodedData(EncodedImageBuffer::Create(frame_size_bytes));
    for (size_t r = 0; r < references.size(); ++r)
      frame->references[r] = references[r];

    return frame;
  }

  // Inserts a frame; returns the buffer's last continuous picture id
  // (or -1, see FrameBuffer::InsertFrame).
  template <typename... T>
  int InsertFrame(uint16_t picture_id,
                  uint8_t spatial_layer,
                  int64_t ts_ms,
                  bool last_spatial_layer,
                  size_t frame_size_bytes,
                  T... refs) {
    return buffer_->InsertFrame(CreateFrame(picture_id, spatial_layer, ts_ms,
                                            last_spatial_layer,
                                            frame_size_bytes, refs...));
  }

  // Inserts a frame flagged as delayed by retransmission (NACK recovery).
  int InsertNackedFrame(uint16_t picture_id, int64_t ts_ms) {
    std::unique_ptr<FrameObjectFake> frame =
        CreateFrame(picture_id, 0, ts_ms, true, kFrameSize);
    frame->set_delayed_by_retransmission(true);
    return buffer_->InsertFrame(std::move(frame));
  }

  // Posts a NextFrame request on the task queue; the delivered frame
  // (possibly nullptr on timeout) is appended to `frames_`.
  void ExtractFrame(int64_t max_wait_time = 0, bool keyframe_required = false) {
    time_task_queue_->PostTask([this, max_wait_time, keyframe_required]() {
      buffer_->NextFrame(max_wait_time, keyframe_required,
                         time_task_queue_.get(),
                         [this](std::unique_ptr<EncodedFrame> frame) {
                           frames_.emplace_back(std::move(frame));
                         });
    });
    // No wait requested: run the posted task immediately.
    if (max_wait_time == 0) {
      time_controller_.AdvanceTime(TimeDelta::Zero());
    }
  }

  // Asserts that extraction `index` delivered frame `picture_id` at
  // `spatial_layer`.
  void CheckFrame(size_t index, int picture_id, int spatial_layer) {
    ASSERT_LT(index, frames_.size());
    ASSERT_TRUE(frames_[index]);
    ASSERT_EQ(picture_id, frames_[index]->Id());
    ASSERT_EQ(spatial_layer, frames_[index]->SpatialIndex().value_or(0));
  }

  void CheckFrameSize(size_t index, size_t size) {
    ASSERT_LT(index, frames_.size());
    ASSERT_TRUE(frames_[index]);
    ASSERT_EQ(frames_[index]->size(), size);
  }

  // Asserts that extraction `index` timed out (nullptr delivered).
  void CheckNoFrame(size_t index) {
    ASSERT_LT(index, frames_.size());
    ASSERT_FALSE(frames_[index]);
  }

  uint32_t Rand() { return rand_.Rand<uint32_t>(); }

  test::ScopedKeyValueConfig field_trials_;
  webrtc::GlobalSimulatedTimeController time_controller_;
  std::unique_ptr<TaskQueueBase, TaskQueueDeleter> time_task_queue_;
  VCMTimingFake timing_;
  std::unique_ptr<FrameBuffer> buffer_;
  std::vector<std::unique_ptr<EncodedFrame>> frames_;
  Random rand_;
};
// From https://en.cppreference.com/w/cpp/language/static: "If ... a constexpr
// static data member (since C++11) is odr-used, a definition at namespace scope
// is still required... This definition is deprecated for constexpr data members
// since C++17."
// kFrameSize is odr-used since it is passed by reference to EXPECT_EQ().
#if __cplusplus < 201703L
constexpr size_t TestFrameBuffer2::kFrameSize;
#endif

// A NextFrame request issued before the frame arrives is fulfilled once the
// frame is inserted and the wait elapses.
TEST_F(TestFrameBuffer2, WaitForFrame) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  ExtractFrame(50);
  InsertFrame(pid, 0, ts, true, kFrameSize);
  time_controller_.AdvanceTime(TimeDelta::Millis(50));
  CheckFrame(0, pid, 0);
}

TEST_F(TestFrameBuffer2, ClearWhileWaitingForFrame) {
  const uint16_t pid = Rand();

  // Insert a frame and wait for it for max 100ms.
  InsertFrame(pid, 0, 25, true, kFrameSize);
  ExtractFrame(100);
  // After 10ms, clear the buffer.
  time_controller_.AdvanceTime(TimeDelta::Millis(10));
  buffer_->Clear();
  // Confirm that the frame was not sent for rendering.
  time_controller_.AdvanceTime(TimeDelta::Millis(15));
  EXPECT_THAT(frames_, IsEmpty());

  // We are still waiting for a frame, since 100ms has not passed. Insert a new
  // frame. This new frame should be the one that is returned as the old frame
  // was cleared.
  const uint16_t new_pid = pid + 1;
  InsertFrame(new_pid, 0, 50, true, kFrameSize);
  time_controller_.AdvanceTime(TimeDelta::Millis(25));
  ASSERT_THAT(frames_, SizeIs(1));
  CheckFrame(0, new_pid, 0);
}

// Both spatial layers of a superframe are delivered as one combined frame.
TEST_F(TestFrameBuffer2, OneSuperFrame) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, false, kFrameSize);
  InsertFrame(pid + 1, 1, ts, true, kFrameSize);
  ExtractFrame();

  CheckFrame(0, pid, 1);
}

// A {0, 0} playout delay selects the low-latency path: render time is 0.
TEST_F(TestFrameBuffer2, ZeroPlayoutDelay) {
  test::ScopedKeyValueConfig field_trials;
  VCMTiming timing(time_controller_.GetClock(), field_trials);
  buffer_ = std::make_unique<FrameBuffer>(time_controller_.GetClock(), &timing,
                                          field_trials);
  const VideoPlayoutDelay kPlayoutDelayMs = {0, 0};
  std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
  test_frame->SetId(0);
  test_frame->SetPlayoutDelay(kPlayoutDelayMs);
  buffer_->InsertFrame(std::move(test_frame));
  ExtractFrame(0, false);
  CheckFrame(0, 0, 0);
  EXPECT_EQ(0, frames_[0]->RenderTimeMs());
}
// Flaky test, see bugs.webrtc.org/7068.
// Spatial layers arriving out of order (layer 1 before layer 0) should still
// both be delivered.
TEST_F(TestFrameBuffer2, DISABLED_OneUnorderedSuperFrame) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  ExtractFrame(50);
  InsertFrame(pid, 1, ts, true, kFrameSize);
  InsertFrame(pid, 0, ts, false, kFrameSize);
  time_controller_.AdvanceTime(TimeDelta::Zero());

  CheckFrame(0, pid, 0);
  CheckFrame(1, pid, 1);
}
// Reordered one-layer stream: frames inserted out of order are still
// delivered in decode order.
TEST_F(TestFrameBuffer2, DISABLED_OneLayerStreamReordered) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  // Insert the keyframe. The previous code passed an extra bool argument
  // (`false, true, kFrameSize`), which silently made `last_spatial_layer`
  // false, the frame size 1 byte, and consumed kFrameSize (10) as a bogus
  // frame *reference* via the variadic refs parameter.
  InsertFrame(pid, 0, ts, true, kFrameSize);
  ExtractFrame();
  CheckFrame(0, pid, 0);
  for (int i = 1; i < 10; i += 2) {
    // Insert frame i+1 before frame i; both should come out in order.
    ExtractFrame(50);
    InsertFrame(pid + i + 1, 0, ts + (i + 1) * kFps10, true, kFrameSize,
                pid + i);
    time_controller_.AdvanceTime(TimeDelta::Millis(kFps10));
    InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1);
    time_controller_.AdvanceTime(TimeDelta::Millis(kFps10));
    ExtractFrame();
    CheckFrame(i, pid + i, 0);
    CheckFrame(i + 1, pid + i + 1, 0);
  }
}
// Extracting from an empty buffer delivers nullptr.
TEST_F(TestFrameBuffer2, ExtractFromEmptyBuffer) {
  ExtractFrame();
  CheckNoFrame(0);
}

// A frame whose reference never arrives (pid + 1) is never delivered.
TEST_F(TestFrameBuffer2, MissingFrame) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, true, kFrameSize);
  InsertFrame(pid + 2, 0, ts, true, kFrameSize, pid);
  InsertFrame(pid + 3, 0, ts, true, kFrameSize, pid + 1, pid + 2);
  ExtractFrame();
  ExtractFrame();
  ExtractFrame();

  CheckFrame(0, pid, 0);
  CheckFrame(1, pid + 2, 0);
  CheckNoFrame(2);
}

// In-order single-layer stream is delivered frame by frame.
TEST_F(TestFrameBuffer2, OneLayerStream) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, true, kFrameSize);
  ExtractFrame();
  CheckFrame(0, pid, 0);
  for (int i = 1; i < 10; ++i) {
    InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1);
    ExtractFrame();
    time_controller_.AdvanceTime(TimeDelta::Millis(kFps10));
    CheckFrame(i, pid + i, 0);
  }
}

// When extraction takes 70 ms per 50 ms frame interval, the enhancement
// temporal-layer frames (odd pids after the first pair) are dropped.
TEST_F(TestFrameBuffer2, DropTemporalLayerSlowDecoder) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, true, kFrameSize);
  InsertFrame(pid + 1, 0, ts + kFps20, true, kFrameSize, pid);
  for (int i = 2; i < 10; i += 2) {
    uint32_t ts_tl0 = ts + i / 2 * kFps10;
    InsertFrame(pid + i, 0, ts_tl0, true, kFrameSize, pid + i - 2);
    InsertFrame(pid + i + 1, 0, ts_tl0 + kFps20, true, kFrameSize, pid + i,
                pid + i - 1);
  }

  for (int i = 0; i < 10; ++i) {
    ExtractFrame();
    time_controller_.AdvanceTime(TimeDelta::Millis(70));
  }

  CheckFrame(0, pid, 0);
  CheckFrame(1, pid + 1, 0);
  CheckFrame(2, pid + 2, 0);
  CheckFrame(3, pid + 4, 0);
  CheckFrame(4, pid + 6, 0);
  CheckFrame(5, pid + 8, 0);
  CheckNoFrame(6);
  CheckNoFrame(7);
  CheckNoFrame(8);
  CheckNoFrame(9);
}

TEST_F(TestFrameBuffer2, DropFramesIfSystemIsStalled) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, true, kFrameSize);
  InsertFrame(pid + 1, 0, ts + 1 * kFps10, true, kFrameSize, pid);
  InsertFrame(pid + 2, 0, ts + 2 * kFps10, true, kFrameSize, pid + 1);
  InsertFrame(pid + 3, 0, ts + 3 * kFps10, true, kFrameSize);

  ExtractFrame();
  // Jump forward in time, simulating the system being stalled for some reason.
  time_controller_.AdvanceTime(TimeDelta::Millis(3) * kFps10);
  // Extract one more frame, expect second and third frame to be dropped.
  ExtractFrame();

  CheckFrame(0, pid + 0, 0);
  CheckFrame(1, pid + 3, 0);
}
// Undelivered frames are dropped when Clear() is called.
TEST_F(TestFrameBuffer2, DroppedFramesCountedOnClear) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, true, kFrameSize);
  for (int i = 1; i < 5; ++i) {
    InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1);
  }

  // All frames should be dropped when Clear is called.
  buffer_->Clear();
}

// A frame older than already-delivered frames (pid + 1 after pid + 2) is
// not delivered out of order.
TEST_F(TestFrameBuffer2, InsertLateFrame) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, true, kFrameSize);
  ExtractFrame();
  InsertFrame(pid + 2, 0, ts, true, kFrameSize);
  ExtractFrame();
  InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid);
  ExtractFrame();

  CheckFrame(0, pid, 0);
  CheckFrame(1, pid + 2, 0);
  CheckNoFrame(2);
}

// Retransmitted frames under NackFEC protection keep the jitter estimate
// below the configured RTT.
TEST_F(TestFrameBuffer2, ProtectionModeNackFEC) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();
  constexpr int64_t kRttMs = 200;
  buffer_->UpdateRtt(kRttMs);

  // Jitter estimate unaffected by RTT in this protection mode.
  buffer_->SetProtectionMode(kProtectionNackFEC);
  InsertNackedFrame(pid, ts);
  InsertNackedFrame(pid + 1, ts + 100);
  InsertNackedFrame(pid + 2, ts + 200);
  InsertFrame(pid + 3, 0, ts + 300, true, kFrameSize);
  ExtractFrame();
  ExtractFrame();
  ExtractFrame();
  ExtractFrame();
  ASSERT_EQ(4u, frames_.size());
  EXPECT_LT(timing_.GetCurrentJitter().ms(), kRttMs);
}
// A frame with an unfulfilled reference yields no continuous frame (-1).
TEST_F(TestFrameBuffer2, NoContinuousFrame) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  EXPECT_EQ(-1, InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid));
}

// InsertFrame() reports the last continuous picture id as gaps get filled.
TEST_F(TestFrameBuffer2, LastContinuousFrameSingleLayer) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  EXPECT_EQ(pid, InsertFrame(pid, 0, ts, true, kFrameSize));
  EXPECT_EQ(pid, InsertFrame(pid + 2, 0, ts, true, kFrameSize, pid + 1));
  EXPECT_EQ(pid + 2, InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid));
  EXPECT_EQ(pid + 2, InsertFrame(pid + 4, 0, ts, true, kFrameSize, pid + 3));
  EXPECT_EQ(pid + 5, InsertFrame(pid + 5, 0, ts, true, kFrameSize));
}

// Same as above but with two spatial layers referencing across layers.
TEST_F(TestFrameBuffer2, LastContinuousFrameTwoLayers) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  EXPECT_EQ(pid, InsertFrame(pid, 0, ts, false, kFrameSize));
  EXPECT_EQ(pid + 1, InsertFrame(pid + 1, 1, ts, true, kFrameSize));
  EXPECT_EQ(pid + 1,
            InsertFrame(pid + 3, 1, ts, true, kFrameSize, pid + 1, pid + 2));
  EXPECT_EQ(pid + 1, InsertFrame(pid + 4, 0, ts, false, kFrameSize, pid + 2));
  EXPECT_EQ(pid + 1,
            InsertFrame(pid + 5, 1, ts, true, kFrameSize, pid + 3, pid + 4));
  EXPECT_EQ(pid + 1, InsertFrame(pid + 6, 0, ts, false, kFrameSize, pid + 4));
  EXPECT_EQ(pid + 6, InsertFrame(pid + 2, 0, ts, false, kFrameSize, pid));
  EXPECT_EQ(pid + 7,
            InsertFrame(pid + 7, 1, ts, true, kFrameSize, pid + 5, pid + 6));
}

TEST_F(TestFrameBuffer2, PictureIdJumpBack) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  EXPECT_EQ(pid, InsertFrame(pid, 0, ts, true, kFrameSize));
  EXPECT_EQ(pid + 1, InsertFrame(pid + 1, 0, ts + 1, true, kFrameSize, pid));
  ExtractFrame();
  CheckFrame(0, pid, 0);

  // Jump back in pid but increase ts.
  EXPECT_EQ(pid - 1, InsertFrame(pid - 1, 0, ts + 2, true, kFrameSize));
  ExtractFrame();
  ExtractFrame();
  CheckFrame(1, pid - 1, 0);
  CheckNoFrame(2);
}

// Frames with large forward jumps in picture id and no references become
// continuous immediately.
TEST_F(TestFrameBuffer2, ForwardJumps) {
  EXPECT_EQ(5453, InsertFrame(5453, 0, 1, true, kFrameSize));
  ExtractFrame();
  EXPECT_EQ(5454, InsertFrame(5454, 0, 1, true, kFrameSize, 5453));
  ExtractFrame();
  EXPECT_EQ(15670, InsertFrame(15670, 0, 1, true, kFrameSize));
  ExtractFrame();
  EXPECT_EQ(29804, InsertFrame(29804, 0, 1, true, kFrameSize));
  ExtractFrame();
  EXPECT_EQ(29805, InsertFrame(29805, 0, 1, true, kFrameSize, 29804));
  ExtractFrame();
  EXPECT_EQ(29806, InsertFrame(29806, 0, 1, true, kFrameSize, 29805));
  ExtractFrame();
  EXPECT_EQ(33819, InsertFrame(33819, 0, 1, true, kFrameSize));
  ExtractFrame();
  EXPECT_EQ(41248, InsertFrame(41248, 0, 1, true, kFrameSize));
  ExtractFrame();
}

// Re-inserting an already delivered frame id does not change the last
// continuous frame.
TEST_F(TestFrameBuffer2, DuplicateFrames) {
  EXPECT_EQ(22256, InsertFrame(22256, 0, 1, true, kFrameSize));
  ExtractFrame();
  EXPECT_EQ(22256, InsertFrame(22256, 0, 1, true, kFrameSize));
}

// TODO(philipel): implement more unittests related to invalid references.
TEST_F(TestFrameBuffer2, InvalidReferences) {
  EXPECT_EQ(-1, InsertFrame(0, 0, 1000, true, kFrameSize, 2));
  EXPECT_EQ(1, InsertFrame(1, 0, 2000, true, kFrameSize));
  ExtractFrame();
  EXPECT_EQ(2, InsertFrame(2, 0, 3000, true, kFrameSize, 1));
}
// With keyframe_required, delta frame 2 is skipped and keyframe 3 delivered.
TEST_F(TestFrameBuffer2, KeyframeRequired) {
  EXPECT_EQ(1, InsertFrame(1, 0, 1000, true, kFrameSize));
  EXPECT_EQ(2, InsertFrame(2, 0, 2000, true, kFrameSize, 1));
  EXPECT_EQ(3, InsertFrame(3, 0, 3000, true, kFrameSize));
  ExtractFrame();
  ExtractFrame(0, true);
  ExtractFrame();

  CheckFrame(0, 1, 0);
  CheckFrame(1, 3, 0);
  CheckNoFrame(2);
}

// When the buffer is full of non-continuous frames, inserting a keyframe
// clears it and the keyframe becomes continuous.
TEST_F(TestFrameBuffer2, KeyframeClearsFullBuffer) {
  const int kMaxBufferSize = 600;

  for (int i = 1; i <= kMaxBufferSize; ++i)
    EXPECT_EQ(-1, InsertFrame(i, 0, i * 1000, true, kFrameSize, i - 1));
  ExtractFrame();
  CheckNoFrame(0);

  EXPECT_EQ(kMaxBufferSize + 1,
            InsertFrame(kMaxBufferSize + 1, 0, (kMaxBufferSize + 1) * 1000,
                        true, kFrameSize));
  ExtractFrame();
  CheckFrame(1, kMaxBufferSize + 1, 0);
}

// A frame with a missing reference (the first insert of id 3 references the
// absent id 2) should not corrupt the decodability bookkeeping.
TEST_F(TestFrameBuffer2, DontUpdateOnUndecodableFrame) {
  InsertFrame(1, 0, 0, true, kFrameSize);
  ExtractFrame(0, true);
  InsertFrame(3, 0, 0, true, kFrameSize, 2, 0);
  InsertFrame(3, 0, 0, true, kFrameSize, 0);
  InsertFrame(2, 0, 0, true, kFrameSize);
  ExtractFrame(0, true);
  ExtractFrame(0, true);
}

// Frames whose RTP timestamp is older than an already delivered frame are
// not delivered, regardless of picture id order.
TEST_F(TestFrameBuffer2, DontDecodeOlderTimestamp) {
  InsertFrame(2, 0, 1, true, kFrameSize);
  InsertFrame(1, 0, 2, true,
              kFrameSize);  // Older picture id but newer timestamp.
  ExtractFrame(0);
  ExtractFrame(0);
  CheckFrame(0, 1, 0);
  CheckNoFrame(1);

  InsertFrame(3, 0, 4, true, kFrameSize);
  InsertFrame(4, 0, 3, true,
              kFrameSize);  // Newer picture id but older timestamp.
  ExtractFrame(0);
  ExtractFrame(0);
  CheckFrame(2, 3, 0);
  CheckNoFrame(3);
}

// Spatial layers of a superframe are concatenated into a single frame whose
// size is the sum of the layer sizes.
TEST_F(TestFrameBuffer2, CombineFramesToSuperframe) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, false, kFrameSize);
  InsertFrame(pid + 1, 1, ts, true, 2 * kFrameSize, pid);
  ExtractFrame(0);
  ExtractFrame(0);
  CheckFrame(0, pid, 1);
  CheckNoFrame(1);
  // Two frames should be combined and returned together.
  CheckFrameSize(0, 3 * kFrameSize);
  EXPECT_EQ(frames_[0]->SpatialIndex(), 1);
  EXPECT_EQ(frames_[0]->SpatialLayerFrameSize(0), kFrameSize);
  EXPECT_EQ(frames_[0]->SpatialLayerFrameSize(1), 2 * kFrameSize);
}

TEST_F(TestFrameBuffer2, HigherSpatialLayerNonDecodable) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, false, kFrameSize);
  InsertFrame(pid + 1, 1, ts, true, kFrameSize, pid);

  ExtractFrame(0);
  CheckFrame(0, pid, 1);

  InsertFrame(pid + 3, 1, ts + kFps20, true, kFrameSize, pid);
  InsertFrame(pid + 4, 0, ts + kFps10, false, kFrameSize, pid);
  InsertFrame(pid + 5, 1, ts + kFps10, true, kFrameSize, pid + 3, pid + 4);

  time_controller_.AdvanceTime(TimeDelta::Millis(1000));
  // Frame pid+3 is decodable but too late.
  // In superframe pid+4 is decodable, but frame pid+5 is not.
  // Incorrect implementation might skip pid+2 frame and output undecodable
  // pid+5 instead.
  ExtractFrame();
  ExtractFrame();
  CheckFrame(1, pid + 3, 1);
  CheckFrame(2, pid + 4, 1);
}

// Stop() makes both the pending and any subsequent NextFrame requests return
// without delivering frames.
TEST_F(TestFrameBuffer2, StopWhileWaitingForFrame) {
  uint16_t pid = Rand();
  uint32_t ts = Rand();

  InsertFrame(pid, 0, ts, true, kFrameSize);
  ExtractFrame(10);
  buffer_->Stop();
  time_controller_.AdvanceTime(TimeDelta::Millis(10));
  EXPECT_THAT(frames_, IsEmpty());

  // A new frame request should exit immediately and return no new frame.
  ExtractFrame(0);
  EXPECT_THAT(frames_, IsEmpty());
}
} // namespace video_coding
} // namespace webrtc

View file

@ -173,5 +173,5 @@ performance of different video codec implementations.
[videocodecinitializer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/include/video_codec_initializer.h?q=VideoCodecInitializer
[packetbuffer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/packet_buffer.h?q=PacketBuffer
[rtpframereferencefinder]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/rtp_frame_reference_finder.h?q=RtpFrameReferenceFinder
[framebuffer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/video_coding/frame_buffer2.h?q=FrameBuffer
[framebuffer]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/video/frame_buffer.h
[quantization-wiki]: https://en.wikipedia.org/wiki/Quantization_(signal_processing)

View file

@ -587,17 +587,6 @@ webrtc_fuzzer_test("rtp_frame_reference_finder_fuzzer") {
]
}
webrtc_fuzzer_test("frame_buffer2_fuzzer") {
sources = [ "frame_buffer2_fuzzer.cc" ]
deps = [
"../../api/task_queue",
"../../modules/video_coding:frame_buffer2",
"../../modules/video_coding/timing:timing_module",
"../../test:scoped_key_value_config",
"../time_controller:time_controller",
]
}
webrtc_fuzzer_test("frame_buffer_fuzzer") {
sources = [ "frame_buffer_fuzzer.cc" ]
deps = [

View file

@ -1,116 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include "api/task_queue/task_queue_base.h"
#include "modules/video_coding/frame_buffer2.h"
#include "modules/video_coding/timing/timing.h"
#include "test/scoped_key_value_config.h"
#include "test/time_controller/simulated_time_controller.h"
namespace webrtc {
namespace {
// When DataReader runs out of data provided in the constructor it will
// just set/return 0 instead.
// Reads typed values out of the fuzzer input. When the input is exhausted
// the reader zero-fills / returns 0 instead of failing.
struct DataReader {
  DataReader(const uint8_t* data, size_t size) : data_(data), size_(size) {}

  // Copies `dest_size` bytes into `destination`; bytes beyond the remaining
  // input are zero-filled.
  void CopyTo(void* destination, size_t dest_size) {
    memset(destination, 0, dest_size);
    size_t bytes_to_copy = std::min(size_ - offset_, dest_size);
    memcpy(destination, data_ + offset_, bytes_to_copy);
    offset_ += bytes_to_copy;
  }

  // Reads one T from the input, or returns T(0) (and consumes the remainder)
  // if fewer than sizeof(T) bytes remain.
  template <typename T>
  T GetNum() {
    T res;
    // `<=` so a read that consumes exactly the remaining bytes still
    // succeeds; the previous `<` dropped the final sizeof(T) bytes of the
    // input.
    if (offset_ + sizeof(res) <= size_) {
      memcpy(&res, data_ + offset_, sizeof(res));
      offset_ += sizeof(res);
      return res;
    }

    offset_ = size_;
    return T(0);
  }

  bool MoreToRead() { return offset_ < size_; }

  const uint8_t* const data_;
  size_t size_;
  size_t offset_ = 0;
};
// Minimal EncodedFrame implementation for the fuzzer; reports a zero receive
// time and the stored render time.
class FuzzyFrameObject : public EncodedFrame {
 public:
  FuzzyFrameObject() {}
  ~FuzzyFrameObject() {}

  int64_t ReceivedTime() const override { return 0; }
  int64_t RenderTime() const override { return _renderTimeMs; }
};
} // namespace
// Fuzzes FrameBuffer by interleaving random frame insertions with NextFrame
// requests, driven entirely by simulated time.
void FuzzOneInput(const uint8_t* data, size_t size) {
  // Cap the input so a single fuzz run stays bounded.
  if (size > 10000) {
    return;
  }
  DataReader reader(data, size);
  GlobalSimulatedTimeController time_controller(Timestamp::Seconds(0));
  std::unique_ptr<TaskQueueBase, TaskQueueDeleter> task_queue =
      time_controller.GetTaskQueueFactory()->CreateTaskQueue(
          "time_tq", TaskQueueFactory::Priority::NORMAL);
  test::ScopedKeyValueConfig field_trials;
  VCMTiming timing(time_controller.GetClock(), field_trials);
  video_coding::FrameBuffer frame_buffer(time_controller.GetClock(), &timing,
                                         field_trials);

  // Only one NextFrame request is kept outstanding at a time; the callback
  // resets this flag. Captured by reference: safe because AdvanceTime() runs
  // the posted tasks before this function returns.
  bool next_frame_task_running = false;

  while (reader.MoreToRead()) {
    if (reader.GetNum<uint8_t>() % 2) {
      // Insert a frame with fuzzed id, spatial index, timestamp and
      // references.
      std::unique_ptr<FuzzyFrameObject> frame(new FuzzyFrameObject());
      frame->SetId(reader.GetNum<int64_t>());
      frame->SetSpatialIndex(reader.GetNum<uint8_t>() % 5);
      frame->SetTimestamp(reader.GetNum<uint32_t>());
      frame->num_references =
          reader.GetNum<uint8_t>() % EncodedFrame::kMaxFrameReferences;

      for (size_t r = 0; r < frame->num_references; ++r)
        frame->references[r] = reader.GetNum<int64_t>();

      frame_buffer.InsertFrame(std::move(frame));
    } else {
      // Request the next frame with a fuzzed wait time / keyframe flag.
      if (!next_frame_task_running) {
        next_frame_task_running = true;
        bool keyframe_required = reader.GetNum<uint8_t>() % 2;
        int max_wait_time_ms = reader.GetNum<uint8_t>();
        task_queue->PostTask([&task_queue, &frame_buffer,
                              &next_frame_task_running, keyframe_required,
                              max_wait_time_ms] {
          frame_buffer.NextFrame(
              max_wait_time_ms, keyframe_required, task_queue.get(),
              [&next_frame_task_running](std::unique_ptr<EncodedFrame> frame) {
                next_frame_task_running = false;
              });
        });
      }
    }

    // Let the posted tasks and timeouts run under simulated time.
    time_controller.AdvanceTime(TimeDelta::Millis(reader.GetNum<uint8_t>()));
  }
}
} // namespace webrtc

View file

@ -178,39 +178,6 @@ rtc_library("video") {
}
}
rtc_library("video_stream_decoder_impl") {
visibility = [ "*" ]
sources = [
"video_stream_decoder_impl.cc",
"video_stream_decoder_impl.h",
]
deps = [
"../api:field_trials_view",
"../api:sequence_checker",
"../api/task_queue",
"../api/transport:field_trial_based_config",
"../api/video:encoded_frame",
"../api/video:video_frame",
"../api/video:video_rtp_headers",
"../api/video:video_stream_decoder",
"../api/video_codecs:video_codecs_api",
"../modules/video_coding",
"../modules/video_coding:frame_buffer2",
"../modules/video_coding/timing:timing_module",
"../rtc_base:logging",
"../rtc_base:mod_ops",
"../rtc_base:platform_thread",
"../rtc_base:rtc_task_queue",
"../rtc_base:timeutils",
"../rtc_base/memory:always_valid_pointer",
"../rtc_base/synchronization:mutex",
"../system_wrappers",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("frame_dumping_decoder") {
visibility = [ "*" ]
@ -801,7 +768,6 @@ if (rtc_include_tests) {
"video_send_stream_tests.cc",
"video_source_sink_controller_unittest.cc",
"video_stream_buffer_controller_unittest.cc",
"video_stream_decoder_impl_unittest.cc",
"video_stream_encoder_unittest.cc",
]
deps = [
@ -815,7 +781,6 @@ if (rtc_include_tests) {
":video_mocks",
":video_receive_stream_timeout_tracker",
":video_stream_buffer_controller",
":video_stream_decoder_impl",
":video_stream_encoder_impl",
":video_stream_encoder_interface",
"../api:create_frame_generator",

View file

@ -1,293 +0,0 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "video/video_stream_decoder_impl.h"
#include <memory>
#include "rtc_base/logging.h"
#include "rtc_base/numerics/mod_ops.h"
#include "rtc_base/time_utils.h"
namespace webrtc {
// Constructs the decoder. State is split over two task queues:
// `bookkeeping_queue_` (frame buffer and callback bookkeeping) and
// `decode_queue_` (actual decoding). The first NextFrame request is kicked
// off on the bookkeeping queue.
VideoStreamDecoderImpl::VideoStreamDecoderImpl(
    VideoStreamDecoderInterface::Callbacks* callbacks,
    VideoDecoderFactory* decoder_factory,
    TaskQueueFactory* task_queue_factory,
    std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings,
    const FieldTrialsView* field_trials)
    : field_trials_(field_trials),
      timing_(Clock::GetRealTimeClock(), *field_trials_),
      decode_callbacks_(this),
      next_frame_info_index_(0),
      callbacks_(callbacks),
      keyframe_required_(true),  // Decoding must start with a keyframe.
      decoder_factory_(decoder_factory),
      decoder_settings_(std::move(decoder_settings)),
      shut_down_(false),
      frame_buffer_(Clock::GetRealTimeClock(), &timing_, *field_trials_),
      bookkeeping_queue_(task_queue_factory->CreateTaskQueue(
          "video_stream_decoder_bookkeeping_queue",
          TaskQueueFactory::Priority::NORMAL)),
      decode_queue_(task_queue_factory->CreateTaskQueue(
          "video_stream_decoder_decode_queue",
          TaskQueueFactory::Priority::NORMAL)) {
  bookkeeping_queue_.PostTask([this]() {
    RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
    StartNextDecode();
  });
}
VideoStreamDecoderImpl::~VideoStreamDecoderImpl() {
  // Flag shutdown under the lock so concurrent readers of `shut_down_`
  // observe it and stop touching this object.
  MutexLock lock(&shut_down_mutex_);
  shut_down_ = true;
}
// Inserts `frame` into the frame buffer, hopping onto the bookkeeping queue
// first if called from any other context. Notifies `callbacks_` whenever the
// last continuous frame id advances.
void VideoStreamDecoderImpl::OnFrame(std::unique_ptr<EncodedFrame> frame) {
  if (!bookkeeping_queue_.IsCurrent()) {
    // `mutable` is required to move the frame out of the capture. The
    // previous code ended this lambda with a stray `return true;` whose
    // value was discarded by the task queue; removed as dead code.
    bookkeeping_queue_.PostTask([this, frame = std::move(frame)]() mutable {
      OnFrame(std::move(frame));
    });
    return;
  }

  RTC_DCHECK_RUN_ON(&bookkeeping_queue_);

  int64_t continuous_frame_id = frame_buffer_.InsertFrame(std::move(frame));
  if (last_continuous_frame_id_ < continuous_frame_id) {
    last_continuous_frame_id_ = continuous_frame_id;
    callbacks_->OnContinuousUntil(last_continuous_frame_id_);
  }
}
// Forwards the minimum playout delay to the timing module.
void VideoStreamDecoderImpl::SetMinPlayoutDelay(TimeDelta min_delay) {
  timing_.set_min_playout_delay(min_delay);
}

// Forwards the maximum playout delay to the timing module.
void VideoStreamDecoderImpl::SetMaxPlayoutDelay(TimeDelta max_delay) {
  timing_.set_max_playout_delay(max_delay);
}
// Returns a configured decoder for `payload_type`, creating one on demand.
// The decoder is cached and reused while the payload type stays the same;
// returns nullptr on any failure (unregistered type, creation, configuration
// or callback registration error).
VideoDecoder* VideoStreamDecoderImpl::GetDecoder(int payload_type) {
  // Fast path: payload type unchanged, reuse the cached decoder.
  if (current_payload_type_ == payload_type) {
    RTC_DCHECK(decoder_);
    return decoder_.get();
  }

  // Payload type changed: drop the old decoder before creating a new one.
  current_payload_type_.reset();
  decoder_.reset();

  auto decoder_settings_it = decoder_settings_.find(payload_type);
  if (decoder_settings_it == decoder_settings_.end()) {
    RTC_LOG(LS_WARNING) << "Payload type " << payload_type
                        << " not registered.";
    return nullptr;
  }

  const SdpVideoFormat& video_format = decoder_settings_it->second.first;
  std::unique_ptr<VideoDecoder> decoder =
      decoder_factory_->CreateVideoDecoder(video_format);
  if (!decoder) {
    RTC_LOG(LS_WARNING) << "Failed to create decoder for payload type "
                        << payload_type << ".";
    return nullptr;
  }

  VideoDecoder::Settings settings;
  // The second element of the settings pair is the core count.
  settings.set_number_of_cores(decoder_settings_it->second.second);
  if (!decoder->Configure(settings)) {
    RTC_LOG(LS_WARNING) << "Failed to initialize decoder for payload type "
                        << payload_type << ".";
    return nullptr;
  }

  int32_t register_result =
      decoder->RegisterDecodeCompleteCallback(&decode_callbacks_);
  if (register_result != WEBRTC_VIDEO_CODEC_OK) {
    RTC_LOG(LS_WARNING) << "Failed to register decode callback.";
    return nullptr;
  }

  // Cache the new decoder for subsequent frames of this payload type.
  current_payload_type_.emplace(payload_type);
  decoder_ = std::move(decoder);
  return decoder_.get();
}
void VideoStreamDecoderImpl::SaveFrameInfo(const EncodedFrame& frame) {
FrameInfo* frame_info = &frame_info_[next_frame_info_index_];
frame_info->timestamp = frame.Timestamp();
frame_info->decode_start_time_ms = rtc::TimeMillis();
frame_info->render_time_us = frame.RenderTimeMs() * 1000;
frame_info->content_type = frame.EncodedImage().content_type_;
next_frame_info_index_ = Add<kFrameInfoMemory>(next_frame_info_index_, 1);
}
void VideoStreamDecoderImpl::StartNextDecode() {
int64_t max_wait_time = keyframe_required_ ? 200 : 3000;
frame_buffer_.NextFrame(max_wait_time, keyframe_required_,
bookkeeping_queue_.Get(),
[this](std::unique_ptr<EncodedFrame> frame) {
RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
OnNextFrameCallback(std::move(frame));
});
}
// Handles the result of frame_buffer_.NextFrame(): either a frame to decode
// or null when no decodable frame arrived within the wait time. The redundant
// RTC_DCHECK(frame) inside the `if (frame)` branch has been removed.
void VideoStreamDecoderImpl::OnNextFrameCallback(
    std::unique_ptr<EncodedFrame> frame) {
  if (frame) {
    // Record metadata before ownership moves to the decode task; the entry
    // is looked up again by timestamp when the decoded frame comes back.
    SaveFrameInfo(*frame);
    // Hold `shut_down_mutex_` across the PostTask so no decode work is
    // enqueued once the destructor has flagged shutdown.
    MutexLock lock(&shut_down_mutex_);
    if (shut_down_) {
      return;
    }
    decode_queue_.PostTask([this, frame = std::move(frame)]() mutable {
      RTC_DCHECK_RUN_ON(&decode_queue_);
      DecodeResult decode_result = DecodeFrame(std::move(frame));
      // Report the outcome back on the bookkeeping queue, which owns
      // `keyframe_required_` and schedules the next decode.
      bookkeeping_queue_.PostTask([this, decode_result]() {
        RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
        switch (decode_result) {
          case kOk: {
            keyframe_required_ = false;
            break;
          }
          case kOkRequestKeyframe: {
            // Decode succeeded but the decoder asked for a keyframe; report
            // it while still accepting delta frames.
            callbacks_->OnNonDecodableState();
            keyframe_required_ = false;
            break;
          }
          case kDecodeFailure: {
            callbacks_->OnNonDecodableState();
            keyframe_required_ = true;
            break;
          }
        }
        StartNextDecode();
      });
    });
  } else {
    callbacks_->OnNonDecodableState();
    // The `frame_buffer_` requires the frame callback function to complete
    // before NextFrame is called again. For this reason we call
    // StartNextDecode in a later task to allow this task to complete first.
    bookkeeping_queue_.PostTask([this]() {
      RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
      StartNextDecode();
    });
  }
}
// Decodes a single frame on the decode queue and maps the decoder's status
// code onto the internal DecodeResult enum. Any status other than OK or
// OK_REQUEST_KEYFRAME is treated as a decode failure.
VideoStreamDecoderImpl::DecodeResult VideoStreamDecoderImpl::DecodeFrame(
    std::unique_ptr<EncodedFrame> frame) {
  RTC_DCHECK(frame);
  VideoDecoder* decoder = GetDecoder(frame->PayloadType());
  if (decoder == nullptr) {
    return kDecodeFailure;
  }
  const int32_t status = decoder->Decode(frame->EncodedImage(),
                                         /*missing_frames=*/false,
                                         frame->RenderTimeMs());
  if (status == WEBRTC_VIDEO_CODEC_OK) {
    return kOk;
  }
  if (status == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) {
    return kOkRequestKeyframe;
  }
  return kDecodeFailure;
}
// Scans the frame-info ring buffer backwards from the most recently written
// entry and returns the record whose timestamp matches, or nullptr if the
// timestamp is no longer (or was never) tracked.
VideoStreamDecoderImpl::FrameInfo* VideoStreamDecoderImpl::GetFrameInfo(
    int64_t timestamp) {
  int index = next_frame_info_index_;
  for (int steps = 0; steps < kFrameInfoMemory; ++steps) {
    index = Subtract<kFrameInfoMemory>(index, 1);
    if (frame_info_[index].timestamp == timestamp) {
      return &frame_info_[index];
    }
  }
  return nullptr;
}
// Called by the decoder (via DecodeCallbacks) when a frame has been decoded.
// Captures the stop time immediately, then finishes the bookkeeping — timing
// update, render-time stamping, and delivery to `callbacks_` — on
// `bookkeeping_queue_`. Fixes the log message, which was missing a space
// before the printed timestamp.
void VideoStreamDecoderImpl::OnDecodedFrameCallback(
    VideoFrame& decoded_image,
    absl::optional<int32_t> decode_time_ms,
    absl::optional<uint8_t> qp) {
  int64_t decode_stop_time_ms = rtc::TimeMillis();
  bookkeeping_queue_.PostTask([this, decode_stop_time_ms, decoded_image,
                               decode_time_ms, qp]() mutable {
    RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
    FrameInfo* frame_info = GetFrameInfo(decoded_image.timestamp());
    if (!frame_info) {
      RTC_LOG(LS_ERROR) << "No frame information found for frame with "
                           "timestamp "
                        << decoded_image.timestamp();
      return;
    }
    Callbacks::FrameInfo callback_info;
    callback_info.content_type = frame_info->content_type;
    if (qp)
      callback_info.qp.emplace(*qp);
    // If the decoder did not report a decode time, fall back to wall-clock
    // time elapsed since SaveFrameInfo() recorded the decode start.
    if (!decode_time_ms) {
      decode_time_ms = decode_stop_time_ms - frame_info->decode_start_time_ms;
    }
    decoded_image.set_processing_time(
        {Timestamp::Millis(frame_info->decode_start_time_ms),
         Timestamp::Millis(frame_info->decode_start_time_ms +
                           *decode_time_ms)});
    decoded_image.set_timestamp_us(frame_info->render_time_us);
    timing_.StopDecodeTimer(TimeDelta::Millis(*decode_time_ms),
                            Timestamp::Millis(decode_stop_time_ms));
    callbacks_->OnDecodedFrame(decoded_image, callback_info);
  });
}
// Binds the callback adapter to its owning VideoStreamDecoderImpl so decoded
// frames can be routed back to it.
VideoStreamDecoderImpl::DecodeCallbacks::DecodeCallbacks(
    VideoStreamDecoderImpl* video_stream_decoder_impl)
    : video_stream_decoder_impl_(video_stream_decoder_impl) {}
// DecodedImageCallback overload for decoders that report neither decode time
// nor QP; delegates to the three-argument overload with absent values.
int32_t VideoStreamDecoderImpl::DecodeCallbacks::Decoded(
    VideoFrame& decoded_image) {
  Decoded(decoded_image, absl::nullopt, absl::nullopt);
  return WEBRTC_VIDEO_CODEC_OK;
}
// DecodedImageCallback overload for decoders that report decode time but not
// QP; delegates to the three-argument overload.
int32_t VideoStreamDecoderImpl::DecodeCallbacks::Decoded(
    VideoFrame& decoded_image,
    int64_t decode_time_ms) {
  Decoded(decoded_image, decode_time_ms, absl::nullopt);
  return WEBRTC_VIDEO_CODEC_OK;
}
// Primary DecodedImageCallback entry point: forwards the decoded frame and
// optional decode time / QP to the owning VideoStreamDecoderImpl, which
// completes the bookkeeping on its own queue.
void VideoStreamDecoderImpl::DecodeCallbacks::Decoded(
    VideoFrame& decoded_image,
    absl::optional<int32_t> decode_time_ms,
    absl::optional<uint8_t> qp) {
  video_stream_decoder_impl_->OnDecodedFrameCallback(decoded_image,
                                                     decode_time_ms, qp);
}
} // namespace webrtc

View file

@ -1,128 +0,0 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VIDEO_VIDEO_STREAM_DECODER_IMPL_H_
#define VIDEO_VIDEO_STREAM_DECODER_IMPL_H_
#include <map>
#include <memory>
#include <utility>
#include "absl/types/optional.h"
#include "api/field_trials_view.h"
#include "api/sequence_checker.h"
#include "api/transport/field_trial_based_config.h"
#include "api/video/video_stream_decoder.h"
#include "modules/video_coding/frame_buffer2.h"
#include "modules/video_coding/timing/timing.h"
#include "rtc_base/memory/always_valid_pointer.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/task_queue.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
// Implementation of VideoStreamDecoderInterface. Incoming encoded frames are
// buffered and scheduled on `bookkeeping_queue_` and decoded on
// `decode_queue_`; decoded frames and state changes are reported through the
// injected Callbacks object.
class VideoStreamDecoderImpl : public VideoStreamDecoderInterface {
 public:
  // `callbacks` and `decoder_factory` must outlive this object.
  // `decoder_settings` maps payload type to the {SdpVideoFormat, number of
  // cores} used when instantiating a decoder for that payload type.
  // `field_trials` may be null, in which case a FieldTrialBasedConfig is
  // used instead (see `field_trials_` below).
  VideoStreamDecoderImpl(
      VideoStreamDecoderInterface::Callbacks* callbacks,
      VideoDecoderFactory* decoder_factory,
      TaskQueueFactory* task_queue_factory,
      std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings,
      const FieldTrialsView* field_trials);
  ~VideoStreamDecoderImpl() override;
  // Inserts an encoded frame for decoding. Safe to call from any thread; the
  // call is marshalled to `bookkeeping_queue_` if needed.
  void OnFrame(std::unique_ptr<EncodedFrame> frame) override;
  // Forwarded to the internal VCMTiming instance.
  void SetMinPlayoutDelay(TimeDelta min_delay) override;
  void SetMaxPlayoutDelay(TimeDelta max_delay) override;
 private:
  // Adapter receiving DecodedImageCallback notifications from the decoder and
  // routing them back to the owning VideoStreamDecoderImpl.
  class DecodeCallbacks : public DecodedImageCallback {
   public:
    explicit DecodeCallbacks(VideoStreamDecoderImpl* video_stream_decoder_impl);
    int32_t Decoded(VideoFrame& decodedImage) override;
    int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms) override;
    void Decoded(VideoFrame& decodedImage,
                 absl::optional<int32_t> decode_time_ms,
                 absl::optional<uint8_t> qp) override;
   private:
    VideoStreamDecoderImpl* const video_stream_decoder_impl_;
  };
  // Outcome of a single DecodeFrame() call.
  enum DecodeResult {
    kOk,
    kOkRequestKeyframe,
    kDecodeFailure,
  };
  // Metadata saved when a frame is sent for decoding; looked up again by
  // timestamp when the decoded frame is returned.
  struct FrameInfo {
    int64_t timestamp = -1;  // -1 marks an unused slot.
    int64_t decode_start_time_ms;
    int64_t render_time_us;
    VideoContentType content_type;
  };
  void SaveFrameInfo(const EncodedFrame& frame) RTC_RUN_ON(bookkeeping_queue_);
  // Returns the saved entry matching `timestamp`, or nullptr if not found.
  FrameInfo* GetFrameInfo(int64_t timestamp) RTC_RUN_ON(bookkeeping_queue_);
  void StartNextDecode() RTC_RUN_ON(bookkeeping_queue_);
  void OnNextFrameCallback(std::unique_ptr<EncodedFrame> frame)
      RTC_RUN_ON(bookkeeping_queue_);
  void OnDecodedFrameCallback(VideoFrame& decodedImage,  // NOLINT
                              absl::optional<int32_t> decode_time_ms,
                              absl::optional<uint8_t> qp);
  // Returns (creating and configuring on demand) the decoder for
  // `payload_type`, or nullptr on failure.
  VideoDecoder* GetDecoder(int payload_type) RTC_RUN_ON(decode_queue_);
  VideoStreamDecoderImpl::DecodeResult DecodeFrame(
      std::unique_ptr<EncodedFrame> frame) RTC_RUN_ON(decode_queue_);
  // Wraps the injected FieldTrialsView, falling back to an owned
  // FieldTrialBasedConfig when the caller passed null.
  AlwaysValidPointer<const FieldTrialsView, FieldTrialBasedConfig>
      field_trials_;
  VCMTiming timing_;
  DecodeCallbacks decode_callbacks_;
  // Some decoders are pipelined so it is not sufficient to save frame info
  // for the last frame only.
  static constexpr int kFrameInfoMemory = 8;
  std::array<FrameInfo, kFrameInfoMemory> frame_info_
      RTC_GUARDED_BY(bookkeeping_queue_);
  int next_frame_info_index_ RTC_GUARDED_BY(bookkeeping_queue_);
  VideoStreamDecoderInterface::Callbacks* const callbacks_
      RTC_PT_GUARDED_BY(bookkeeping_queue_);
  int64_t last_continuous_frame_id_ RTC_GUARDED_BY(bookkeeping_queue_) = -1;
  bool keyframe_required_ RTC_GUARDED_BY(bookkeeping_queue_);
  absl::optional<int> current_payload_type_ RTC_GUARDED_BY(decode_queue_);
  VideoDecoderFactory* const decoder_factory_ RTC_PT_GUARDED_BY(decode_queue_);
  std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings_
      RTC_GUARDED_BY(decode_queue_);
  // The `bookkeeping_queue_` use the `frame_buffer_` and also posts tasks to
  // the `decode_queue_`. The `decode_queue_` in turn use the `decoder_` to
  // decode frames. When the `decoder_` is done it will post back to the
  // `bookkeeping_queue_` with the decoded frame. During shutdown we start by
  // isolating the `bookkeeping_queue_` from the `decode_queue_`, so now it's
  // safe for the `decode_queue_` to be destructed. After that the `decoder_`
  // can be destructed, and then the `bookkeeping_queue_`. Finally the
  // `frame_buffer_` can be destructed.
  Mutex shut_down_mutex_;
  bool shut_down_ RTC_GUARDED_BY(shut_down_mutex_);
  video_coding::FrameBuffer frame_buffer_ RTC_GUARDED_BY(bookkeeping_queue_);
  rtc::TaskQueue bookkeeping_queue_;
  std::unique_ptr<VideoDecoder> decoder_ RTC_GUARDED_BY(decode_queue_);
  rtc::TaskQueue decode_queue_;
};
} // namespace webrtc
#endif // VIDEO_VIDEO_STREAM_DECODER_IMPL_H_