Move RtpFrameReferenceFinder out of video_coding namespace.

The namespace was used because an old pattern was copy-pasted and should never have been used in the first place. Removing it now to make follow-up refactoring prettier.

Bug: webrtc:12579
Change-Id: I00a80958401cfa368769dc0a1d8bbdd76aaa4ef5
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/212603
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#33536}
philipel 2021-03-22 14:17:09 +01:00 committed by Commit Bot
parent 2ba32f3423
commit 6a6715042a
24 changed files with 93 additions and 108 deletions
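
One detail worth noting before the per-file diffs: downstream code that still spells `video_coding::OnCompleteFrameCallback` keeps compiling because the CL leaves a transitional `using` alias behind in the old namespace (see the TODO hunk in rtp_frame_reference_finder.h below). A minimal sketch of that pattern, with hypothetical `outer`/`old_ns`/`Widget` names rather than the real WebRTC headers:

// Sketch of the transitional-alias pattern used in this CL (hypothetical
// names, not the actual WebRTC code): the class moves to the outer
// namespace, and the old nested namespace keeps a using-alias so existing
// spellings keep compiling until callers migrate.
namespace outer {

class Widget {
 public:
  int Value() const { return 42; }
};

// TODO: remove once downstream references outer::Widget directly.
namespace old_ns {
using Widget = ::outer::Widget;
}  // namespace old_ns

}  // namespace outer

int main() {
  outer::Widget a;          // new spelling
  outer::old_ns::Widget b;  // old spelling still works through the alias
  return a.Value() - b.Value();  // 0
}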

View file

@@ -15,10 +15,9 @@
 #include "rtc_base/logging.h"
 namespace webrtc {
-namespace video_coding {
 RtpFrameReferenceFinder::ReturnVector RtpFrameIdOnlyRefFinder::ManageFrame(
-std::unique_ptr<RtpFrameObject> frame,
+std::unique_ptr<video_coding::RtpFrameObject> frame,
 int frame_id) {
 frame->SetSpatialIndex(0);
 frame->SetId(unwrapper_.Unwrap(frame_id & (kFrameIdLength - 1)));
@@ -31,5 +30,4 @@ RtpFrameReferenceFinder::ReturnVector RtpFrameIdOnlyRefFinder::ManageFrame(
 return res;
 }
-} // namespace video_coding
 } // namespace webrtc

View file

@@ -19,14 +19,13 @@
 #include "rtc_base/numerics/sequence_number_util.h"
 namespace webrtc {
-namespace video_coding {
 class RtpFrameIdOnlyRefFinder {
 public:
 RtpFrameIdOnlyRefFinder() = default;
 RtpFrameReferenceFinder::ReturnVector ManageFrame(
-std::unique_ptr<RtpFrameObject> frame,
+std::unique_ptr<video_coding::RtpFrameObject> frame,
 int frame_id);
 private:
@@ -34,7 +33,6 @@ class RtpFrameIdOnlyRefFinder {
 SeqNumUnwrapper<uint16_t, kFrameIdLength> unwrapper_;
 };
-} // namespace video_coding
 } // namespace webrtc
 #endif // MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_

View file

@@ -21,14 +21,13 @@
 #include "modules/video_coding/rtp_vp9_ref_finder.h"
 namespace webrtc {
-namespace video_coding {
 namespace internal {
 class RtpFrameReferenceFinderImpl {
 public:
 RtpFrameReferenceFinderImpl() = default;
 RtpFrameReferenceFinder::ReturnVector ManageFrame(
-std::unique_ptr<RtpFrameObject> frame);
+std::unique_ptr<video_coding::RtpFrameObject> frame);
 RtpFrameReferenceFinder::ReturnVector PaddingReceived(uint16_t seq_num);
 void ClearTo(uint16_t seq_num);
@@ -46,7 +45,7 @@ class RtpFrameReferenceFinderImpl {
 };
 RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinderImpl::ManageFrame(
-std::unique_ptr<RtpFrameObject> frame) {
+std::unique_ptr<video_coding::RtpFrameObject> frame) {
 const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
 if (video_header.generic.has_value()) {
@@ -157,7 +156,7 @@ RtpFrameReferenceFinder::RtpFrameReferenceFinder(
 RtpFrameReferenceFinder::~RtpFrameReferenceFinder() = default;
 void RtpFrameReferenceFinder::ManageFrame(
-std::unique_ptr<RtpFrameObject> frame) {
+std::unique_ptr<video_coding::RtpFrameObject> frame) {
 // If we have cleared past this frame, drop it.
 if (cleared_to_seq_num_ != -1 &&
 AheadOf<uint16_t>(cleared_to_seq_num_, frame->first_seq_num())) {
@@ -186,5 +185,4 @@ void RtpFrameReferenceFinder::HandOffFrames(ReturnVector frames) {
 }
 }
-} // namespace video_coding
 } // namespace webrtc

View file

@@ -16,7 +16,6 @@
 #include "modules/video_coding/frame_object.h"
 namespace webrtc {
-namespace video_coding {
 namespace internal {
 class RtpFrameReferenceFinderImpl;
 } // namespace internal
@@ -26,12 +25,19 @@ class RtpFrameReferenceFinderImpl;
 class OnCompleteFrameCallback {
 public:
 virtual ~OnCompleteFrameCallback() {}
-virtual void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) = 0;
+virtual void OnCompleteFrame(
+std::unique_ptr<video_coding::EncodedFrame> frame) = 0;
 };
+// TODO(bugs.webrtc.org/12579): Remove when downstream has been update.
+namespace video_coding {
+using OnCompleteFrameCallback = webrtc::OnCompleteFrameCallback;
+} // namespace video_coding
 class RtpFrameReferenceFinder {
 public:
-using ReturnVector = absl::InlinedVector<std::unique_ptr<RtpFrameObject>, 3>;
+using ReturnVector =
+absl::InlinedVector<std::unique_ptr<video_coding::RtpFrameObject>, 3>;
 explicit RtpFrameReferenceFinder(OnCompleteFrameCallback* frame_callback);
 explicit RtpFrameReferenceFinder(OnCompleteFrameCallback* frame_callback,
@@ -44,7 +50,7 @@ class RtpFrameReferenceFinder {
 // - We have too many stashed frames (determined by |kMaxStashedFrames|)
 // so we drop this frame, or
 // - It gets cleared by ClearTo, which also means we drop it.
-void ManageFrame(std::unique_ptr<RtpFrameObject> frame);
+void ManageFrame(std::unique_ptr<video_coding::RtpFrameObject> frame);
 // Notifies that padding has been received, which the reference finder
 // might need to calculate the references of a frame.
@@ -65,7 +71,6 @@ class RtpFrameReferenceFinder {
 std::unique_ptr<internal::RtpFrameReferenceFinderImpl> impl_;
 };
-} // namespace video_coding
 } // namespace webrtc
 #endif // MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_

View file

@@ -27,7 +27,7 @@ namespace webrtc {
 namespace video_coding {
 namespace {
-std::unique_ptr<RtpFrameObject> CreateFrame(
+std::unique_ptr<video_coding::RtpFrameObject> CreateFrame(
 uint16_t seq_num_start,
 uint16_t seq_num_end,
 bool keyframe,
@@ -39,7 +39,7 @@ std::unique_ptr<RtpFrameObject> CreateFrame(
 video_header.video_type_header = video_type_header;
 // clang-format off
-return std::make_unique<RtpFrameObject>(
+return std::make_unique<video_coding::RtpFrameObject>(
 seq_num_start,
 seq_num_end,
 /*markerBit=*/true,
@@ -71,7 +71,8 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
 uint16_t Rand() { return rand_.Rand<uint16_t>(); }
-void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) override {
+void OnCompleteFrame(
+std::unique_ptr<video_coding::EncodedFrame> frame) override {
 int64_t pid = frame->Id();
 uint16_t sidx = *frame->SpatialIndex();
 auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx));
@@ -88,7 +89,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
 void InsertGeneric(uint16_t seq_num_start,
 uint16_t seq_num_end,
 bool keyframe) {
-std::unique_ptr<RtpFrameObject> frame =
+std::unique_ptr<video_coding::RtpFrameObject> frame =
 CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecGeneric,
 RTPVideoTypeHeader());
@@ -96,7 +97,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
 }
 void InsertH264(uint16_t seq_num_start, uint16_t seq_num_end, bool keyframe) {
-std::unique_ptr<RtpFrameObject> frame =
+std::unique_ptr<video_coding::RtpFrameObject> frame =
 CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecH264,
 RTPVideoTypeHeader());
 reference_finder_->ManageFrame(std::move(frame));
@@ -155,8 +156,9 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
 return f1.first < f2.first;
 }
 };
-std::
-map<std::pair<int64_t, uint8_t>, std::unique_ptr<EncodedFrame>, FrameComp>
+std::map<std::pair<int64_t, uint8_t>,
+std::unique_ptr<video_coding::EncodedFrame>,
+FrameComp>
 frames_from_callback_;
 };
@@ -305,7 +307,7 @@ TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrapMulti) {
 TEST_F(TestRtpFrameReferenceFinder, Av1FrameNoDependencyDescriptor) {
 uint16_t sn = 0xFFFF;
-std::unique_ptr<RtpFrameObject> frame =
+std::unique_ptr<video_coding::RtpFrameObject> frame =
 CreateFrame(/*seq_num_start=*/sn, /*seq_num_end=*/sn, /*keyframe=*/true,
 kVideoCodecAV1, RTPVideoTypeHeader());

View file

@@ -15,10 +15,9 @@
 #include "rtc_base/logging.h"
 namespace webrtc {
-namespace video_coding {
 RtpFrameReferenceFinder::ReturnVector RtpGenericFrameRefFinder::ManageFrame(
-std::unique_ptr<RtpFrameObject> frame,
+std::unique_ptr<video_coding::RtpFrameObject> frame,
 const RTPVideoHeader::GenericDescriptorInfo& descriptor) {
 // Frame IDs are unwrapped in the RtpVideoStreamReceiver, no need to unwrap
 // them here.
@@ -26,7 +25,8 @@ RtpFrameReferenceFinder::ReturnVector RtpGenericFrameRefFinder::ManageFrame(
 frame->SetSpatialIndex(descriptor.spatial_index);
 RtpFrameReferenceFinder::ReturnVector res;
-if (EncodedFrame::kMaxFrameReferences < descriptor.dependencies.size()) {
+if (video_coding::EncodedFrame::kMaxFrameReferences <
+descriptor.dependencies.size()) {
 RTC_LOG(LS_WARNING) << "Too many dependencies in generic descriptor.";
 return res;
 }
@@ -40,5 +40,4 @@ RtpFrameReferenceFinder::ReturnVector RtpGenericFrameRefFinder::ManageFrame(
 return res;
 }
-} // namespace video_coding
 } // namespace webrtc

View file

@@ -17,18 +17,16 @@
 #include "modules/video_coding/rtp_frame_reference_finder.h"
 namespace webrtc {
-namespace video_coding {
 class RtpGenericFrameRefFinder {
 public:
 RtpGenericFrameRefFinder() = default;
 RtpFrameReferenceFinder::ReturnVector ManageFrame(
-std::unique_ptr<RtpFrameObject> frame,
+std::unique_ptr<video_coding::RtpFrameObject> frame,
 const RTPVideoHeader::GenericDescriptorInfo& descriptor);
 };
-} // namespace video_coding
 } // namespace webrtc
 #endif // MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_

View file

@@ -15,10 +15,9 @@
 #include "rtc_base/logging.h"
 namespace webrtc {
-namespace video_coding {
 RtpFrameReferenceFinder::ReturnVector RtpSeqNumOnlyRefFinder::ManageFrame(
-std::unique_ptr<RtpFrameObject> frame) {
+std::unique_ptr<video_coding::RtpFrameObject> frame) {
 FrameDecision decision = ManageFrameInternal(frame.get());
 RtpFrameReferenceFinder::ReturnVector res;
@@ -40,7 +39,8 @@ RtpFrameReferenceFinder::ReturnVector RtpSeqNumOnlyRefFinder::ManageFrame(
 }
 RtpSeqNumOnlyRefFinder::FrameDecision
-RtpSeqNumOnlyRefFinder::ManageFrameInternal(RtpFrameObject* frame) {
+RtpSeqNumOnlyRefFinder::ManageFrameInternal(
+video_coding::RtpFrameObject* frame) {
 if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
 last_seq_num_gop_.insert(std::make_pair(
 frame->last_seq_num(),
@@ -184,5 +184,4 @@ void RtpSeqNumOnlyRefFinder::ClearTo(uint16_t seq_num) {
 }
 }
-} // namespace video_coding
 } // namespace webrtc

View file

@@ -23,14 +23,13 @@
 #include "rtc_base/numerics/sequence_number_util.h"
 namespace webrtc {
-namespace video_coding {
 class RtpSeqNumOnlyRefFinder {
 public:
 RtpSeqNumOnlyRefFinder() = default;
 RtpFrameReferenceFinder::ReturnVector ManageFrame(
-std::unique_ptr<RtpFrameObject> frame);
+std::unique_ptr<video_coding::RtpFrameObject> frame);
 RtpFrameReferenceFinder::ReturnVector PaddingReceived(uint16_t seq_num);
 void ClearTo(uint16_t seq_num);
@@ -40,7 +39,7 @@ class RtpSeqNumOnlyRefFinder {
 enum FrameDecision { kStash, kHandOff, kDrop };
-FrameDecision ManageFrameInternal(RtpFrameObject* frame);
+FrameDecision ManageFrameInternal(video_coding::RtpFrameObject* frame);
 void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
 void UpdateLastPictureIdWithPadding(uint16_t seq_num);
@@ -59,14 +58,13 @@ class RtpSeqNumOnlyRefFinder {
 // Frames that have been fully received but didn't have all the information
 // needed to determine their references.
-std::deque<std::unique_ptr<RtpFrameObject>> stashed_frames_;
+std::deque<std::unique_ptr<video_coding::RtpFrameObject>> stashed_frames_;
 // Unwrapper used to unwrap generic RTP streams. In a generic stream we derive
 // a picture id from the packet sequence number.
 SeqNumUnwrapper<uint16_t> rtp_seq_num_unwrapper_;
 };
-} // namespace video_coding
 } // namespace webrtc
 #endif // MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_

View file

@@ -15,10 +15,9 @@
 #include "rtc_base/logging.h"
 namespace webrtc {
-namespace video_coding {
 RtpFrameReferenceFinder::ReturnVector RtpVp8RefFinder::ManageFrame(
-std::unique_ptr<RtpFrameObject> frame) {
+std::unique_ptr<video_coding::RtpFrameObject> frame) {
 FrameDecision decision = ManageFrameInternal(frame.get());
 RtpFrameReferenceFinder::ReturnVector res;
@@ -40,7 +39,7 @@ RtpFrameReferenceFinder::ReturnVector RtpVp8RefFinder::ManageFrame(
 }
 RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
-RtpFrameObject* frame) {
+video_coding::RtpFrameObject* frame) {
 const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
 const RTPVideoHeaderVP8& codec_header =
 absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
@@ -179,7 +178,7 @@ RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal(
 return kHandOff;
 }
-void RtpVp8RefFinder::UpdateLayerInfoVp8(RtpFrameObject* frame,
+void RtpVp8RefFinder::UpdateLayerInfoVp8(video_coding::RtpFrameObject* frame,
 int64_t unwrapped_tl0,
 uint8_t temporal_idx) {
 auto layer_info_it = layer_info_.find(unwrapped_tl0);
@@ -227,7 +226,7 @@ void RtpVp8RefFinder::RetryStashedFrames(
 } while (complete_frame);
 }
-void RtpVp8RefFinder::UnwrapPictureIds(RtpFrameObject* frame) {
+void RtpVp8RefFinder::UnwrapPictureIds(video_coding::RtpFrameObject* frame) {
 for (size_t i = 0; i < frame->num_references; ++i)
 frame->references[i] = unwrapper_.Unwrap(frame->references[i]);
 frame->SetId(unwrapper_.Unwrap(frame->Id()));
@@ -244,5 +243,4 @@ void RtpVp8RefFinder::ClearTo(uint16_t seq_num) {
 }
 }
-} // namespace video_coding
 } // namespace webrtc

View file

@@ -22,14 +22,13 @@
 #include "rtc_base/numerics/sequence_number_util.h"
 namespace webrtc {
-namespace video_coding {
 class RtpVp8RefFinder {
 public:
 RtpVp8RefFinder() = default;
 RtpFrameReferenceFinder::ReturnVector ManageFrame(
-std::unique_ptr<RtpFrameObject> frame);
+std::unique_ptr<video_coding::RtpFrameObject> frame);
 void ClearTo(uint16_t seq_num);
 private:
@@ -41,12 +40,12 @@ class RtpVp8RefFinder {
 enum FrameDecision { kStash, kHandOff, kDrop };
-FrameDecision ManageFrameInternal(RtpFrameObject* frame);
+FrameDecision ManageFrameInternal(video_coding::RtpFrameObject* frame);
 void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
-void UpdateLayerInfoVp8(RtpFrameObject* frame,
+void UpdateLayerInfoVp8(video_coding::RtpFrameObject* frame,
 int64_t unwrapped_tl0,
 uint8_t temporal_idx);
-void UnwrapPictureIds(RtpFrameObject* frame);
+void UnwrapPictureIds(video_coding::RtpFrameObject* frame);
 // Save the last picture id in order to detect when there is a gap in frames
 // that have not yet been fully received.
@@ -59,7 +58,7 @@ class RtpVp8RefFinder {
 // Frames that have been fully received but didn't have all the information
 // needed to determine their references.
-std::deque<std::unique_ptr<RtpFrameObject>> stashed_frames_;
+std::deque<std::unique_ptr<video_coding::RtpFrameObject>> stashed_frames_;
 // Holds the information about the last completed frame for a given temporal
 // layer given an unwrapped Tl0 picture index.
@@ -72,7 +71,6 @@ class RtpVp8RefFinder {
 SeqNumUnwrapper<uint8_t> tl0_unwrapper_;
 };
-} // namespace video_coding
 } // namespace webrtc
 #endif // MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_

View file

@@ -66,7 +66,7 @@ class Frame {
 return *this;
 }
-operator std::unique_ptr<RtpFrameObject>() {
+operator std::unique_ptr<video_coding::RtpFrameObject>() {
 RTPVideoHeaderVP8 vp8_header{};
 vp8_header.pictureId = *picture_id_;
 vp8_header.temporalIdx = *temporal_id_;
@@ -78,7 +78,7 @@ class Frame {
 : VideoFrameType::kVideoFrameDelta;
 video_header.video_type_header = vp8_header;
 // clang-format off
-return std::make_unique<RtpFrameObject>(
+return std::make_unique<video_coding::RtpFrameObject>(
 /*seq_num_start=*/0,
 /*seq_num_end=*/0,
 /*markerBit=*/true,
@@ -113,7 +113,7 @@ class RtpVp8RefFinderTest : public ::testing::Test {
 protected:
 RtpVp8RefFinderTest() : ref_finder_(std::make_unique<RtpVp8RefFinder>()) {}
-void Insert(std::unique_ptr<RtpFrameObject> frame) {
+void Insert(std::unique_ptr<video_coding::RtpFrameObject> frame) {
 for (auto& f : ref_finder_->ManageFrame(std::move(frame))) {
 frames_.push_back(std::move(f));
 }

View file

@@ -16,10 +16,9 @@
 #include "rtc_base/logging.h"
 namespace webrtc {
-namespace video_coding {
 RtpFrameReferenceFinder::ReturnVector RtpVp9RefFinder::ManageFrame(
-std::unique_ptr<RtpFrameObject> frame) {
+std::unique_ptr<video_coding::RtpFrameObject> frame) {
 FrameDecision decision = ManageFrameInternal(frame.get());
 RtpFrameReferenceFinder::ReturnVector res;
@@ -41,7 +40,7 @@ RtpFrameReferenceFinder::ReturnVector RtpVp9RefFinder::ManageFrame(
 }
 RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
-RtpFrameObject* frame) {
+video_coding::RtpFrameObject* frame) {
 const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
 const RTPVideoHeaderVP9& codec_header =
 absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
@@ -58,7 +57,8 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
 last_picture_id_ = frame->Id();
 if (codec_header.flexible_mode) {
-if (codec_header.num_ref_pics > EncodedFrame::kMaxFrameReferences) {
+if (codec_header.num_ref_pics >
+video_coding::EncodedFrame::kMaxFrameReferences) {
 return kDrop;
 }
 frame->num_references = codec_header.num_ref_pics;
@@ -179,7 +179,8 @@ RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
 ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start, frame->Id());
 size_t gof_idx = diff % info->gof->num_frames_in_gof;
-if (info->gof->num_ref_pics[gof_idx] > EncodedFrame::kMaxFrameReferences) {
+if (info->gof->num_ref_pics[gof_idx] >
+video_coding::EncodedFrame::kMaxFrameReferences) {
 return kDrop;
 }
 // Populate references according to the scalability structure.
@@ -323,7 +324,7 @@ void RtpVp9RefFinder::RetryStashedFrames(
 } while (complete_frame);
 }
-void RtpVp9RefFinder::FlattenFrameIdAndRefs(RtpFrameObject* frame,
+void RtpVp9RefFinder::FlattenFrameIdAndRefs(video_coding::RtpFrameObject* frame,
 bool inter_layer_predicted) {
 for (size_t i = 0; i < frame->num_references; ++i) {
 frame->references[i] =
@@ -334,7 +335,8 @@ void RtpVp9RefFinder::FlattenFrameIdAndRefs(RtpFrameObject* frame,
 *frame->SpatialIndex());
 if (inter_layer_predicted &&
-frame->num_references + 1 <= EncodedFrame::kMaxFrameReferences) {
+frame->num_references + 1 <=
+video_coding::EncodedFrame::kMaxFrameReferences) {
 frame->references[frame->num_references] = frame->Id() - 1;
 ++frame->num_references;
 }
@@ -351,5 +353,4 @@ void RtpVp9RefFinder::ClearTo(uint16_t seq_num) {
 }
 }
-} // namespace video_coding
 } // namespace webrtc

View file

@@ -22,14 +22,13 @@
 #include "rtc_base/numerics/sequence_number_util.h"
 namespace webrtc {
-namespace video_coding {
 class RtpVp9RefFinder {
 public:
 RtpVp9RefFinder() = default;
 RtpFrameReferenceFinder::ReturnVector ManageFrame(
-std::unique_ptr<RtpFrameObject> frame);
+std::unique_ptr<video_coding::RtpFrameObject> frame);
 void ClearTo(uint16_t seq_num);
 private:
@@ -49,7 +48,7 @@ class RtpVp9RefFinder {
 uint16_t last_picture_id;
 };
-FrameDecision ManageFrameInternal(RtpFrameObject* frame);
+FrameDecision ManageFrameInternal(video_coding::RtpFrameObject* frame);
 void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
 bool MissingRequiredFrameVp9(uint16_t picture_id, const GofInfo& info);
@@ -59,7 +58,8 @@ class RtpVp9RefFinder {
 uint8_t temporal_idx,
 uint16_t pid_ref);
-void FlattenFrameIdAndRefs(RtpFrameObject* frame, bool inter_layer_predicted);
+void FlattenFrameIdAndRefs(video_coding::RtpFrameObject* frame,
+bool inter_layer_predicted);
 // Save the last picture id in order to detect when there is a gap in frames
 // that have not yet been fully received.
@@ -67,7 +67,7 @@ class RtpVp9RefFinder {
 // Frames that have been fully received but didn't have all the information
 // needed to determine their references.
-std::deque<std::unique_ptr<RtpFrameObject>> stashed_frames_;
+std::deque<std::unique_ptr<video_coding::RtpFrameObject>> stashed_frames_;
 // Where the current scalability structure is in the
 // |scalability_structures_| array.
@@ -96,7 +96,6 @@ class RtpVp9RefFinder {
 SeqNumUnwrapper<uint8_t> tl0_unwrapper_;
 };
-} // namespace video_coding
 } // namespace webrtc
 #endif // MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_

View file

@@ -83,7 +83,7 @@ class Frame {
 return *this;
 }
-operator std::unique_ptr<RtpFrameObject>() {
+operator std::unique_ptr<video_coding::RtpFrameObject>() {
 RTPVideoHeaderVP9 vp9_header{};
 vp9_header.picture_id = *picture_id;
 vp9_header.temporal_idx = *temporal_id;
@@ -112,7 +112,7 @@ class Frame {
 : VideoFrameType::kVideoFrameDelta;
 video_header.video_type_header = vp9_header;
 // clang-format off
-return std::make_unique<RtpFrameObject>(
+return std::make_unique<video_coding::RtpFrameObject>(
 seq_num_start,
 seq_num_end,
 /*markerBit=*/true,
@@ -209,7 +209,7 @@ class RtpVp9RefFinderTest : public ::testing::Test {
 protected:
 RtpVp9RefFinderTest() : ref_finder_(std::make_unique<RtpVp9RefFinder>()) {}
-void Insert(std::unique_ptr<RtpFrameObject> frame) {
+void Insert(std::unique_ptr<video_coding::RtpFrameObject> frame) {
 for (auto& f : ref_finder_->ManageFrame(std::move(frame))) {
 frames_.push_back(std::move(f));
 }

View file

@@ -58,7 +58,7 @@ class DataReader {
 size_t offset_ = 0;
 };
-class NullCallback : public video_coding::OnCompleteFrameCallback {
+class NullCallback : public OnCompleteFrameCallback {
 void OnCompleteFrame(
 std::unique_ptr<video_coding::EncodedFrame> frame) override {}
 };
@@ -93,7 +93,7 @@ GenerateGenericFrameDependencies(DataReader* reader) {
 void FuzzOneInput(const uint8_t* data, size_t size) {
 DataReader reader(data, size);
 NullCallback cb;
-video_coding::RtpFrameReferenceFinder reference_finder(&cb);
+RtpFrameReferenceFinder reference_finder(&cb);
 auto codec = static_cast<VideoCodecType>(reader.GetNum<uint8_t>() % 5);

View file

@@ -210,7 +210,7 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver(
 ProcessThread* process_thread,
 NackSender* nack_sender,
 KeyFrameRequestSender* keyframe_request_sender,
-video_coding::OnCompleteFrameCallback* complete_frame_callback,
+OnCompleteFrameCallback* complete_frame_callback,
 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
 rtc::scoped_refptr<FrameTransformerInterface> frame_transformer)
 : RtpVideoStreamReceiver(clock,
@@ -240,7 +240,7 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver(
 ProcessThread* process_thread,
 NackSender* nack_sender,
 KeyFrameRequestSender* keyframe_request_sender,
-video_coding::OnCompleteFrameCallback* complete_frame_callback,
+OnCompleteFrameCallback* complete_frame_callback,
 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
 rtc::scoped_refptr<FrameTransformerInterface> frame_transformer)
 : clock_(clock),
@@ -321,8 +321,7 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver(
 process_thread_->RegisterModule(nack_module_.get(), RTC_FROM_HERE);
 }
-reference_finder_ =
-std::make_unique<video_coding::RtpFrameReferenceFinder>(this);
+reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(this);
 // Only construct the encrypted receiver if frame encryption is enabled.
 if (config_.crypto_options.sframe.require_frame_encryption) {
@@ -863,10 +862,9 @@ void RtpVideoStreamReceiver::OnAssembledFrame(
 // to overlap with old picture ids. To ensure that doesn't happen we
 // start from the |last_completed_picture_id_| and add an offset in case
 // of reordering.
-reference_finder_ =
-std::make_unique<video_coding::RtpFrameReferenceFinder>(
-this, last_completed_picture_id_ +
-std::numeric_limits<uint16_t>::max());
+reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(
+this,
+last_completed_picture_id_ + std::numeric_limits<uint16_t>::max());
 current_codec_ = frame->codec_type();
 } else {
 // Old frame from before the codec switch, discard it.

View file

@@ -67,7 +67,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
 public RecoveredPacketReceiver,
 public RtpPacketSinkInterface,
 public KeyFrameRequestSender,
-public video_coding::OnCompleteFrameCallback,
+public OnCompleteFrameCallback,
 public OnDecryptedFrameCallback,
 public OnDecryptionStatusChangeCallback,
 public RtpVideoFrameReceiver {
@@ -89,7 +89,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
 // The KeyFrameRequestSender is optional; if not provided, key frame
 // requests are sent via the internal RtpRtcp module.
 KeyFrameRequestSender* keyframe_request_sender,
-video_coding::OnCompleteFrameCallback* complete_frame_callback,
+OnCompleteFrameCallback* complete_frame_callback,
 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
 rtc::scoped_refptr<FrameTransformerInterface> frame_transformer);
@@ -110,7 +110,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
 // The KeyFrameRequestSender is optional; if not provided, key frame
 // requests are sent via the internal RtpRtcp module.
 KeyFrameRequestSender* keyframe_request_sender,
-video_coding::OnCompleteFrameCallback* complete_frame_callback,
+OnCompleteFrameCallback* complete_frame_callback,
 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
 rtc::scoped_refptr<FrameTransformerInterface> frame_transformer);
 ~RtpVideoStreamReceiver() override;
@@ -329,7 +329,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
 const std::unique_ptr<RtpRtcp> rtp_rtcp_;
-video_coding::OnCompleteFrameCallback* complete_frame_callback_;
+OnCompleteFrameCallback* complete_frame_callback_;
 KeyFrameRequestSender* const keyframe_request_sender_;
 RtcpFeedbackBuffer rtcp_feedback_buffer_;
@@ -352,7 +352,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
 RTC_GUARDED_BY(worker_task_checker_);
 Mutex reference_finder_lock_;
-std::unique_ptr<video_coding::RtpFrameReferenceFinder> reference_finder_
+std::unique_ptr<RtpFrameReferenceFinder> reference_finder_
 RTC_GUARDED_BY(reference_finder_lock_);
 absl::optional<VideoCodecType> current_codec_;
 uint32_t last_assembled_frame_rtp_timestamp_;

View file

@@ -213,7 +213,7 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2(
 ProcessThread* process_thread,
 NackSender* nack_sender,
 KeyFrameRequestSender* keyframe_request_sender,
-video_coding::OnCompleteFrameCallback* complete_frame_callback,
+OnCompleteFrameCallback* complete_frame_callback,
 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
 rtc::scoped_refptr<FrameTransformerInterface> frame_transformer)
 : clock_(clock),
@@ -294,8 +294,7 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2(
 &rtcp_feedback_buffer_);
 }
-reference_finder_ =
-std::make_unique<video_coding::RtpFrameReferenceFinder>(this);
+reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(this);
 // Only construct the encrypted receiver if frame encryption is enabled.
 if (config_.crypto_options.sframe.require_frame_encryption) {
@@ -832,10 +831,9 @@ void RtpVideoStreamReceiver2::OnAssembledFrame(
 // to overlap with old picture ids. To ensure that doesn't happen we
 // start from the |last_completed_picture_id_| and add an offset in case
 // of reordering.
-reference_finder_ =
-std::make_unique<video_coding::RtpFrameReferenceFinder>(
-this, last_completed_picture_id_ +
-std::numeric_limits<uint16_t>::max());
+reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(
+this,
+last_completed_picture_id_ + std::numeric_limits<uint16_t>::max());
 current_codec_ = frame->codec_type();
 } else {
 // Old frame from before the codec switch, discard it.

View file

@@ -63,7 +63,7 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender,
 public RecoveredPacketReceiver,
 public RtpPacketSinkInterface,
 public KeyFrameRequestSender,
-public video_coding::OnCompleteFrameCallback,
+public OnCompleteFrameCallback,
 public OnDecryptedFrameCallback,
 public OnDecryptionStatusChangeCallback,
 public RtpVideoFrameReceiver {
@@ -86,7 +86,7 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender,
 // The KeyFrameRequestSender is optional; if not provided, key frame
 // requests are sent via the internal RtpRtcp module.
 KeyFrameRequestSender* keyframe_request_sender,
-video_coding::OnCompleteFrameCallback* complete_frame_callback,
+OnCompleteFrameCallback* complete_frame_callback,
 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
 rtc::scoped_refptr<FrameTransformerInterface> frame_transformer);
 ~RtpVideoStreamReceiver2() override;
@@ -286,7 +286,7 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender,
 const std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
-video_coding::OnCompleteFrameCallback* complete_frame_callback_;
+OnCompleteFrameCallback* complete_frame_callback_;
 KeyFrameRequestSender* const keyframe_request_sender_;
 RtcpFeedbackBuffer rtcp_feedback_buffer_;
@@ -308,7 +308,7 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender,
 absl::optional<int64_t> video_structure_frame_id_
 RTC_GUARDED_BY(worker_task_checker_);
-std::unique_ptr<video_coding::RtpFrameReferenceFinder> reference_finder_
+std::unique_ptr<RtpFrameReferenceFinder> reference_finder_
 RTC_GUARDED_BY(worker_task_checker_);
 absl::optional<VideoCodecType> current_codec_
 RTC_GUARDED_BY(worker_task_checker_);

View file

@@ -95,8 +95,7 @@ class MockKeyFrameRequestSender : public KeyFrameRequestSender {
 MOCK_METHOD(void, RequestKeyFrame, (), (override));
 };
-class MockOnCompleteFrameCallback
-: public video_coding::OnCompleteFrameCallback {
+class MockOnCompleteFrameCallback : public OnCompleteFrameCallback {
 public:
 MOCK_METHOD(void, DoOnCompleteFrame, (video_coding::EncodedFrame*), ());
 MOCK_METHOD(void,

View file

@@ -94,8 +94,7 @@ class MockKeyFrameRequestSender : public KeyFrameRequestSender {
 MOCK_METHOD(void, RequestKeyFrame, (), (override));
 };
-class MockOnCompleteFrameCallback
-: public video_coding::OnCompleteFrameCallback {
+class MockOnCompleteFrameCallback : public OnCompleteFrameCallback {
 public:
 MOCK_METHOD(void, DoOnCompleteFrame, (video_coding::EncodedFrame*), ());
 MOCK_METHOD(void,

View file

@@ -48,7 +48,7 @@ namespace internal {
 class VideoReceiveStream : public webrtc::DEPRECATED_VideoReceiveStream,
 public rtc::VideoSinkInterface<VideoFrame>,
 public NackSender,
-public video_coding::OnCompleteFrameCallback,
+public OnCompleteFrameCallback,
 public Syncable,
 public CallStatsObserver {
 public:
@@ -111,7 +111,7 @@ class VideoReceiveStream : public webrtc::DEPRECATED_VideoReceiveStream,
 void SendNack(const std::vector<uint16_t>& sequence_numbers,
 bool buffering_allowed) override;
-// Implements video_coding::OnCompleteFrameCallback.
+// Implements OnCompleteFrameCallback.
 void OnCompleteFrame(
 std::unique_ptr<video_coding::EncodedFrame> frame) override;

View file

@@ -77,7 +77,7 @@ struct VideoFrameMetaData {
 class VideoReceiveStream2 : public webrtc::VideoReceiveStream,
 public rtc::VideoSinkInterface<VideoFrame>,
 public NackSender,
-public video_coding::OnCompleteFrameCallback,
+public OnCompleteFrameCallback,
 public Syncable,
 public CallStatsObserver {
 public:
@@ -130,7 +130,7 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream,
 void SendNack(const std::vector<uint16_t>& sequence_numbers,
 bool buffering_allowed) override;
-// Implements video_coding::OnCompleteFrameCallback.
+// Implements OnCompleteFrameCallback.
 void OnCompleteFrame(
 std::unique_ptr<video_coding::EncodedFrame> frame) override;