Mirror of https://github.com/mollyim/webrtc.git (synced 2025-05-12 21:30:45 +01:00)
Remove EncodedFrame::MissingFrame and start removing Decode() param
Remove EncodedFrame::MissingFrame, as it was always false in actual in-use code anyway, and remove usages of the Decode missing_frames param within WebRTC. Uses/overrides in other projects will be cleaned up shortly, allowing that variant to be removed from the interface.

Bug: webrtc:15444
Change-Id: Id299d82e441a351deff81c0f2812707a985d23d8
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/317802
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Auto-Submit: Tony Herre <herre@google.com>
Commit-Queue: Tony Herre <herre@google.com>
Cr-Commit-Position: refs/heads/main@{#40662}
parent 44943c8064
commit 55b593fb6b

42 changed files with 114 additions and 160 deletions
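For code outside this tree that still overrides the three-parameter Decode(), the migration this commit prepares for is mechanical: drop the missing_frames argument and override the new two-parameter variant instead; the base class now forwards the old three-parameter call to the new one during the transition. A minimal sketch of such an out-of-tree decoder, assuming the post-commit VideoDecoder interface shown in the diff below (the class name ExampleDecoder and its trivial bodies are illustrative, not part of this change):

#include "api/video/encoded_image.h"
#include "api/video_codecs/video_decoder.h"
#include "modules/video_coding/include/video_error_codes.h"

namespace example {

// Hypothetical out-of-tree decoder, shown only to illustrate the migration.
class ExampleDecoder : public webrtc::VideoDecoder {
 public:
  bool Configure(const Settings& settings) override { return true; }

  // Before: int32_t Decode(const webrtc::EncodedImage& input_image,
  //                        bool missing_frames,
  //                        int64_t render_time_ms) override;
  // After: override the two-parameter variant. The base class default for the
  // three-parameter overload forwards here, so old callers keep working.
  int32_t Decode(const webrtc::EncodedImage& input_image,
                 int64_t render_time_ms) override {
    // Decode input_image here. missing_frames is gone; in-tree callers always
    // passed false anyway.
    return WEBRTC_VIDEO_CODEC_OK;
  }

  int32_t RegisterDecodeCompleteCallback(
      webrtc::DecodedImageCallback* callback) override {
    callback_ = callback;
    return WEBRTC_VIDEO_CODEC_OK;
  }

  int32_t Release() override { return WEBRTC_VIDEO_CODEC_OK; }

 private:
  webrtc::DecodedImageCallback* callback_ = nullptr;
};

}  // namespace example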
@@ -18,6 +18,9 @@
namespace webrtc {
using testing::_;
using testing::Invoke;
class MockDecodedImageCallback : public DecodedImageCallback {
 public:
MOCK_METHOD(int32_t,

@@ -43,6 +46,14 @@ class MockVideoDecoder : public VideoDecoder {
// Make `Configure` succeed by default, so that individual tests that
// verify other methods wouldn't need to stub `Configure`.
ON_CALL(*this, Configure).WillByDefault(testing::Return(true));
// TODO(bugs.webrtc.org/15444): Remove once all tests have been migrated to
// expecting calls Decode without a missing_frames param.
ON_CALL(*this, Decode(_, _))
.WillByDefault(Invoke([this](const EncodedImage& input_image,
int64_t render_time_ms) {
return Decode(input_image, /*missing_frames=*/false, render_time_ms);
}));
}
~MockVideoDecoder() override { Destruct(); }

@@ -51,9 +62,13 @@ class MockVideoDecoder : public VideoDecoder {
MOCK_METHOD(int32_t,
Decode,
(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms),
(override));
MOCK_METHOD(int32_t,
Decode,
(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms));
MOCK_METHOD(int32_t,
RegisterDecodeCompleteCallback,
(DecodedImageCallback * callback),

@@ -62,8 +62,6 @@ class EncodedFrame : public EncodedImage {
uint8_t PayloadType() const { return _payloadType; }
bool MissingFrame() const { return _missingFrame; }
void SetRenderTime(const int64_t renderTimeMs) {
_renderTimeMs = renderTimeMs;
}

@@ -94,7 +92,6 @@ class EncodedFrame : public EncodedImage {
// getters/setters as needed.
int64_t _renderTimeMs = -1;
uint8_t _payloadType = 0;
bool _missingFrame = false;
CodecSpecificInfo _codecSpecificInfo;
VideoCodecType _codec = kVideoCodecGeneric;

@@ -45,7 +45,6 @@ class VideoDecoderSoftwareFallbackWrapperTest : public ::testing::Test {
}
int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override {
++decode_count_;
return decode_return_code_;

@@ -84,7 +83,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, InitializesDecoder) {
EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->configure_count_)
<< "Initialized decoder should not be reinitialized.";
EXPECT_EQ(1, fake_decoder_->decode_count_);

@@ -98,7 +97,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->configure_count_)
<< "Should not have attempted reinitializing the fallback decoder on "
"keyframe.";

@@ -113,12 +112,12 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, IsSoftwareFallbackSticky) {
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->decode_count_);
// Software fallback should be sticky, fake_decoder_ shouldn't be used.
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->decode_count_)
<< "Decoder shouldn't be used after failure.";

@@ -131,10 +130,10 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, DoesNotFallbackOnEveryError) {
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
EncodedImage encoded_image;
EXPECT_EQ(fake_decoder_->decode_return_code_,
fallback_wrapper_->Decode(encoded_image, false, -1));
fallback_wrapper_->Decode(encoded_image, -1));
EXPECT_EQ(1, fake_decoder_->decode_count_);
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(2, fake_decoder_->decode_count_)
<< "Decoder should be active even though previous decode failed.";
}

@@ -144,14 +143,14 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, UsesHwDecoderAfterReinit) {
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->decode_count_);
fallback_wrapper_->Release();
fallback_wrapper_->Configure({});
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(2, fake_decoder_->decode_count_)
<< "Should not be using fallback after reinit.";
}

@@ -164,7 +163,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, ForwardsReleaseCall) {
fallback_wrapper_->Configure({});
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(2, fake_decoder_->release_count_)
<< "Decoder should be released during fallback.";
fallback_wrapper_->Release();

@@ -200,7 +199,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
// Hard coded expected value since libvpx is the software implementation name
// for VP8. Change accordingly if the underlying implementation does.
EXPECT_STREQ("libvpx (fallback from: fake-decoder)",

@@ -215,13 +214,13 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, FallbacksOnTooManyErrors) {
EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
// Doesn't fallback from a single error.
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
// However, many frames with the same error, fallback should happen.
const int kNumFramesToEncode = 10;
for (int i = 0; i < kNumFramesToEncode; ++i) {
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
}
// Hard coded expected value since libvpx is the software implementation name
// for VP8. Change accordingly if the underlying implementation does.

@@ -241,7 +240,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
// Many decoded frames with the same error
const int kNumFramesToEncode = 10;
for (int i = 0; i < kNumFramesToEncode; ++i) {
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
}
EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());

@@ -259,9 +258,9 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
for (int i = 0; i < kNumFramesToEncode; ++i) {
// Interleaved errors and successful decodes.
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
}
EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
fallback_wrapper_->Release();

@@ -289,7 +288,7 @@ TEST_F(ForcedSoftwareDecoderFallbackTest, UsesForcedFallback) {
EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, sw_fallback_decoder_->configure_count_);
EXPECT_EQ(1, sw_fallback_decoder_->decode_count_);
@@ -98,9 +98,20 @@ class RTC_EXPORT VideoDecoder {
// times, in such case only latest `settings` are in effect.
virtual bool Configure(const Settings& settings) = 0;
// TODO(bugs.webrtc.org/15444): Make pure virtual once all subclasses have
// migrated to implementing this class.
virtual int32_t Decode(const EncodedImage& input_image,
int64_t render_time_ms) {
return Decode(input_image, /*missing_frame=*/false, render_time_ms);
}
// TODO(bugs.webrtc.org/15444): Migrate all subclasses to Decode() without
// missing_frame and delete this.
virtual int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) = 0;
int64_t render_time_ms) {
return Decode(input_image, render_time_ms);
}
virtual int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) = 0;

@@ -41,7 +41,6 @@ class VideoDecoderSoftwareFallbackWrapper final : public VideoDecoder {
bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback(

@@ -176,7 +175,6 @@ void VideoDecoderSoftwareFallbackWrapper::UpdateFallbackDecoderHistograms() {
int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) {
TRACE_EVENT0("webrtc", "VideoDecoderSoftwareFallbackWrapper::Decode");
switch (decoder_type_) {

@@ -184,7 +182,7 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
case DecoderType::kHardware: {
int32_t ret = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
ret = hw_decoder_->Decode(input_image, missing_frames, render_time_ms);
ret = hw_decoder_->Decode(input_image, render_time_ms);
if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) {
if (ret != WEBRTC_VIDEO_CODEC_ERROR) {
++hw_decoded_frames_since_last_fallback_;

@@ -212,8 +210,7 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
[[fallthrough]];
}
case DecoderType::kFallback:
return fallback_decoder_->Decode(input_image, missing_frames,
render_time_ms);
return fallback_decoder_->Decode(input_image, render_time_ms);
default:
RTC_DCHECK_NOTREACHED();
return WEBRTC_VIDEO_CODEC_ERROR;

@@ -60,7 +60,6 @@ bool FakeWebRtcVideoDecoder::Configure(const Settings& settings) {
}
int32_t FakeWebRtcVideoDecoder::Decode(const webrtc::EncodedImage&,
bool,
int64_t) {
num_frames_received_++;
return WEBRTC_VIDEO_CODEC_OK;

@@ -45,7 +45,7 @@ class FakeWebRtcVideoDecoder : public webrtc::VideoDecoder {
~FakeWebRtcVideoDecoder();
bool Configure(const Settings& settings) override;
int32_t Decode(const webrtc::EncodedImage&, bool, int64_t) override;
int32_t Decode(const webrtc::EncodedImage&, int64_t) override;
int32_t RegisterDecodeCompleteCallback(
webrtc::DecodedImageCallback*) override;
int32_t Release() override;

@@ -35,7 +35,6 @@ class Dav1dDecoder : public VideoDecoder {
bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& encoded_image,
bool missing_frames,
int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override;

@@ -119,7 +118,6 @@ const char* Dav1dDecoder::ImplementationName() const {
}
int32_t Dav1dDecoder::Decode(const EncodedImage& encoded_image,
bool /*missing_frames*/,
int64_t /*render_time_ms*/) {
if (!context_ || decode_complete_callback_ == nullptr) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;

@@ -89,8 +89,8 @@ class TestAv1Decoder {
void Decode(int64_t frame_id, const EncodedImage& image) {
ASSERT_THAT(decoder_, NotNull());
int32_t error = decoder_->Decode(image, /*missing_frames=*/false,
/*render_time_ms=*/image.capture_time_ms_);
int32_t error =
decoder_->Decode(image, /*render_time_ms=*/image.capture_time_ms_);
if (error != WEBRTC_VIDEO_CODEC_OK) {
ADD_FAILURE() << "Failed to decode frame id " << frame_id
<< " with error code " << error << " by decoder#"

@@ -62,7 +62,7 @@ TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

@@ -87,7 +87,7 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

@@ -33,7 +33,6 @@ class MultiplexDecoderAdapter : public VideoDecoder {
// Implements VideoDecoder
bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override;

@@ -125,7 +125,6 @@ bool MultiplexDecoderAdapter::Configure(const Settings& settings) {
}
int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) {
MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image);

@@ -149,8 +148,7 @@ int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image,
int32_t rv = 0;
for (size_t i = 0; i < image.image_components.size(); i++) {
rv = decoders_[image.image_components[i].component_index]->Decode(
image.image_components[i].encoded_image, missing_frames,
render_time_ms);
image.image_components[i].encoded_image, render_time_ms);
if (rv != WEBRTC_VIDEO_CODEC_OK)
return rv;
}

@@ -218,7 +218,7 @@ TEST_P(TestMultiplexAdapter, EncodeDecodeI420Frame) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, -1));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

@@ -235,7 +235,7 @@ TEST_P(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

@@ -355,8 +355,7 @@ class TestDecoder : public VideoCodecTester::Decoder,
callbacks_[frame.Timestamp()] = std::move(callback);
}
decoder_->Decode(frame, /*missing_frames=*/false,
/*render_time_ms=*/0);
decoder_->Decode(frame, /*render_time_ms=*/0);
}
void Flush() override {

@@ -638,7 +638,7 @@ void VideoProcessor::DecodeFrame(const EncodedImage& encoded_image,
frame_stat->decode_start_ns = rtc::TimeNanos();
frame_stat->decode_return_code =
decoders_->at(spatial_idx)->Decode(encoded_image, false, 0);
decoders_->at(spatial_idx)->Decode(encoded_image, 0);
}
const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
@@ -37,7 +37,6 @@
namespace webrtc {
namespace {
constexpr int kVp8ErrorPropagationTh = 30;
// vpx_decoder.h documentation indicates decode deadline is time in us, with
// "Set to zero for unlimited.", but actual implementation requires this to be
// a mode with 0 meaning allow delay and 1 not allowing it.

@@ -122,7 +121,6 @@ LibvpxVp8Decoder::LibvpxVp8Decoder()
decode_complete_callback_(NULL),
inited_(false),
decoder_(NULL),
propagation_cnt_(-1),
last_frame_width_(0),
last_frame_height_(0),
key_frame_required_(true),

@@ -156,7 +154,6 @@ bool LibvpxVp8Decoder::Configure(const Settings& settings) {
return false;
}
propagation_cnt_ = -1;
inited_ = true;
// Always start with a complete key frame.

@@ -170,7 +167,12 @@ bool LibvpxVp8Decoder::Configure(const Settings& settings) {
}
int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) {
return Decode(input_image, /*missing_frames=*/false, render_time_ms);
}
int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
bool /*missing_frames*/,
int64_t /*render_time_ms*/) {
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;

@@ -179,9 +181,6 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (input_image.data() == NULL && input_image.size() > 0) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0)
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}

@@ -234,34 +233,6 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
return WEBRTC_VIDEO_CODEC_ERROR;
key_frame_required_ = false;
}
// Restrict error propagation using key frame requests.
// Reset on a key frame refresh.
if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
propagation_cnt_ = -1;
// Start count on first loss.
} else if (missing_frames && propagation_cnt_ == -1) {
propagation_cnt_ = 0;
}
if (propagation_cnt_ >= 0) {
propagation_cnt_++;
}
vpx_codec_iter_t iter = NULL;
vpx_image_t* img;
int ret;
// Check for missing frames.
if (missing_frames) {
// Call decoder with zero data length to signal missing frames.
if (vpx_codec_decode(decoder_, NULL, 0, 0, kDecodeDeadlineRealtime)) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0)
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERROR;
}
img = vpx_codec_get_frame(decoder_, &iter);
iter = NULL;
}
const uint8_t* buffer = input_image.data();
if (input_image.size() == 0) {

@@ -269,31 +240,20 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
}
if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,
kDecodeDeadlineRealtime)) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0) {
propagation_cnt_ = 0;
}
return WEBRTC_VIDEO_CODEC_ERROR;
}
img = vpx_codec_get_frame(decoder_, &iter);
vpx_codec_iter_t iter = NULL;
vpx_image_t* img = vpx_codec_get_frame(decoder_, &iter);
int qp;
vpx_codec_err_t vpx_ret =
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
ret = ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());
int ret =
ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());
if (ret != 0) {
// Reset to avoid requesting key frames too often.
if (ret < 0 && propagation_cnt_ > 0)
propagation_cnt_ = 0;
return ret;
}
// Check Vs. threshold
if (propagation_cnt_ > kVp8ErrorPropagationTh) {
// Reset to avoid requesting key frames too often.
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
}

@@ -30,6 +30,11 @@ class LibvpxVp8Decoder : public VideoDecoder {
~LibvpxVp8Decoder() override;
bool Configure(const Settings& settings) override;
int Decode(const EncodedImage& input_image,
int64_t /*render_time_ms*/) override;
// TODO(bugs.webrtc.org/15444): Remove once all subclasses have been migrated
// to expecting calls Decode without a missing_frames param.
int Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t /*render_time_ms*/) override;

@@ -61,7 +66,6 @@ class LibvpxVp8Decoder : public VideoDecoder {
DecodedImageCallback* decode_complete_callback_;
bool inited_;
vpx_codec_ctx_t* decoder_;
int propagation_cnt_;
int last_frame_width_;
int last_frame_height_;
bool key_frame_required_;
@@ -286,7 +286,7 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
// First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, -1));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

@@ -501,7 +501,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
// First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, -1));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;

@@ -188,7 +188,6 @@ bool LibvpxVp9Decoder::Configure(const Settings& settings) {
}
int LibvpxVp9Decoder::Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t /*render_time_ms*/) {
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;

@@ -29,7 +29,6 @@ class LibvpxVp9Decoder : public VP9Decoder {
bool Configure(const Settings& settings) override;
int Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t /*render_time_ms*/) override;
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;

@@ -143,7 +143,7 @@ TEST_P(TestVp9ImplForPixelFormat, EncodeDecode) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

@@ -193,7 +193,7 @@ TEST_P(TestVp9ImplForPixelFormat, DecodedColorSpaceFromBitstream) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// Encoded frame without explicit color space information.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

@@ -211,7 +211,7 @@ TEST_P(TestVp9ImplForPixelFormat, DecodedQpEqualsEncodedQp) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

@@ -2063,7 +2063,7 @@ TEST_F(TestVp9ImplProfile2, EncodeDecode) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

@@ -282,17 +282,16 @@ bool VCMGenericDecoder::Configure(const VideoDecoder::Settings& settings) {
}
int32_t VCMGenericDecoder::Decode(const EncodedFrame& frame, Timestamp now) {
return Decode(frame, now, frame.RenderTimeMs(), frame.MissingFrame());
return Decode(frame, now, frame.RenderTimeMs());
}
int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, Timestamp now) {
return Decode(frame, now, frame.RenderTimeMs(), frame.MissingFrame());
return Decode(frame, now, frame.RenderTimeMs());
}
int32_t VCMGenericDecoder::Decode(const EncodedImage& frame,
Timestamp now,
int64_t render_time_ms,
int64_t missing_frame) {
int64_t render_time_ms) {
TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
frame.Timestamp());
FrameInfo frame_info;

@@ -319,7 +318,7 @@ int32_t VCMGenericDecoder::Decode(const EncodedImage& frame,
frame_info.frame_type = frame.FrameType();
_callback->Map(std::move(frame_info));
int32_t ret = decoder_->Decode(frame, missing_frame, render_time_ms);
int32_t ret = decoder_->Decode(frame, render_time_ms);
VideoDecoder::DecoderInfo decoder_info = decoder_->GetDecoderInfo();
if (decoder_info != decoder_info_) {
RTC_LOG(LS_INFO) << "Changed decoder implementation to: "

@@ -120,8 +120,7 @@ class VCMGenericDecoder {
private:
int32_t Decode(const EncodedImage& frame,
Timestamp now,
int64_t render_time_ms,
int64_t missing_frame);
int64_t render_time_ms);
VCMDecodedFrameCallback* _callback = nullptr;
VideoDecoder* const decoder_;
VideoContentType _last_keyframe_content_type;

@@ -894,9 +894,9 @@ void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
EncodedImage encoded_frame;
// Only encoding one frame - so will be a key frame.
encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, 0));
EXPECT_EQ(0, decoder_->Decode(encoded_frame, 0));
encoder_callback.GetLastEncodedFrame(&encoded_frame);
decoder_->Decode(encoded_frame, false, 0);
decoder_->Decode(encoded_frame, 0);
EXPECT_EQ(2, decoder_callback.DecodedFrames());
}

@@ -932,7 +932,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
EXPECT_EQ(decodedImage.width(), kDefaultWidth / 4);
EXPECT_EQ(decodedImage.height(), kDefaultHeight / 4);
}));
EXPECT_EQ(0, decoder_->Decode(encoded_frame[0], false, 0));
EXPECT_EQ(0, decoder_->Decode(encoded_frame[0], 0));
EXPECT_CALL(decoder_callback, Decoded(_, _, _))
.WillOnce(::testing::Invoke([](VideoFrame& decodedImage,

@@ -941,7 +941,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
EXPECT_EQ(decodedImage.width(), kDefaultWidth / 2);
EXPECT_EQ(decodedImage.height(), kDefaultHeight / 2);
}));
EXPECT_EQ(0, decoder_->Decode(encoded_frame[1], false, 0));
EXPECT_EQ(0, decoder_->Decode(encoded_frame[1], 0));
EXPECT_CALL(decoder_callback, Decoded(_, _, _))
.WillOnce(::testing::Invoke([](VideoFrame& decodedImage,

@@ -950,7 +950,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
EXPECT_EQ(decodedImage.width(), kDefaultWidth);
EXPECT_EQ(decodedImage.height(), kDefaultHeight);
}));
EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, 0));
EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], 0));
}
void SimulcastTestFixtureImpl::
@@ -123,7 +123,7 @@ TEST_F(VideoReceiver2Test, RegisterReceiveCodecs) {
auto decoder = std::make_unique<NiceMock<MockVideoDecoder>>();
EXPECT_CALL(*decoder, RegisterDecodeCompleteCallback)
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*decoder, Decode).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*decoder, Decode(_, _)).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*decoder, Release).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
// Register the decoder. Note that this moves ownership of the mock object

@@ -109,7 +109,7 @@ class TestVideoReceiver : public ::testing::Test {
++header->sequenceNumber;
}
receiver_.Process();
EXPECT_CALL(decoder_, Decode(_, _, _)).Times(0);
EXPECT_CALL(decoder_, Decode(_, _)).Times(0);
EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_.Decode(kMaxWaitTimeMs));
}

@@ -123,7 +123,7 @@ class TestVideoReceiver : public ::testing::Test {
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
receiver_.Process();
EXPECT_CALL(decoder_, Decode(_, _, _)).Times(1);
EXPECT_CALL(decoder_, Decode(_, _)).Times(1);
EXPECT_EQ(0, receiver_.Decode(kMaxWaitTimeMs));
}

@@ -240,7 +240,6 @@ class DecoderBitstreamFileWriter : public test::FakeDecoder {
~DecoderBitstreamFileWriter() override { fclose(file_); }
int32_t Decode(const EncodedImage& encoded_frame,
bool /* missing_frames */,
int64_t /* render_time_ms */) override {
if (fwrite(encoded_frame.data(), 1, encoded_frame.size(), file_) <
encoded_frame.size()) {

@@ -276,7 +275,6 @@ class DecoderIvfFileWriter : public test::FakeDecoder {
~DecoderIvfFileWriter() override { file_writer_->Close(); }
int32_t Decode(const EncodedImage& encoded_frame,
bool /* missing_frames */,
int64_t render_time_ms) override {
if (!file_writer_->WriteFrame(encoded_frame, video_codec_type_)) {
return WEBRTC_VIDEO_CODEC_ERROR;

@@ -46,6 +46,7 @@
return 0;
}
// TODO(bugs.webrtc.org/15444): Remove obsolete missingFrames param.
- (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)encodedImage
missingFrames:(BOOL)missingFrames
codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info

@@ -29,6 +29,7 @@ RTC_OBJC_EXPORT
- (void)setCallback : (RTCVideoDecoderCallback)callback;
- (NSInteger)startDecodeWithNumberOfCores:(int)numberOfCores;
- (NSInteger)releaseDecoder;
// TODO(bugs.webrtc.org/15444): Remove obsolete missingFrames param.
- (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)encodedImage
missingFrames:(BOOL)missingFrames
codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info

@@ -98,6 +98,7 @@ void decompressionOutputCallback(void *decoderRef,
return WEBRTC_VIDEO_CODEC_OK;
}
// TODO(bugs.webrtc.org/15444): Remove obsolete missingFrames param.
- (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)inputImage
missingFrames:(BOOL)missingFrames
codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info

@@ -43,13 +43,12 @@ class ObjCVideoDecoder : public VideoDecoder {
}
int32_t Decode(const EncodedImage &input_image,
bool missing_frames,
int64_t render_time_ms = -1) override {
RTC_OBJC_TYPE(RTCEncodedImage) *encodedImage =
[[RTC_OBJC_TYPE(RTCEncodedImage) alloc] initWithNativeEncodedImage:input_image];
return [decoder_ decode:encodedImage
missingFrames:missing_frames
missingFrames:false
codecSpecificInfo:nil
renderTimeMs:render_time_ms];
}

@@ -41,7 +41,6 @@ bool FakeDecoder::Configure(const Settings& settings) {
}
int32_t FakeDecoder::Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) {
if (input._encodedWidth > 0 && input._encodedHeight > 0) {
width_ = input._encodedWidth;

@@ -103,7 +102,6 @@ const char* FakeDecoder::ImplementationName() const {
}
int32_t FakeH264Decoder::Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) {
uint8_t value = 0;
for (size_t i = 0; i < input.size(); ++i) {

@@ -119,7 +117,7 @@ int32_t FakeH264Decoder::Decode(const EncodedImage& input,
}
++value;
}
return FakeDecoder::Decode(input, missing_frames, render_time_ms);
return FakeDecoder::Decode(input, render_time_ms);
}
} // namespace test

@@ -35,7 +35,6 @@ class FakeDecoder : public VideoDecoder {
bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback(

@@ -64,7 +63,6 @@ class FakeH264Decoder : public FakeDecoder {
virtual ~FakeH264Decoder() {}
int32_t Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) override;
};

@@ -44,7 +44,6 @@ bool FakeVp8Decoder::Configure(const Settings& settings) {
}
int32_t FakeVp8Decoder::Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) {
constexpr size_t kMinPayLoadHeaderLength = 10;
if (input.size() < kMinPayLoadHeaderLength) {

@@ -28,7 +28,6 @@ class FakeVp8Decoder : public VideoDecoder {
bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback(

@@ -51,7 +51,6 @@ bool QualityAnalyzingVideoDecoder::Configure(const Settings& settings) {
}
int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) {
// Image extractor extracts id from provided EncodedImage and also returns
// the image with the original buffer. Buffer can be modified in place, so

@@ -96,8 +95,7 @@ int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image,
// thread.
analyzer_->OnFramePreDecode(
peer_name_, out.id.value_or(VideoFrame::kNotSetId), *origin_image);
int32_t result =
delegate_->Decode(*origin_image, missing_frames, render_time_ms);
int32_t result = delegate_->Decode(*origin_image, render_time_ms);
if (result != WEBRTC_VIDEO_CODEC_OK) {
// If delegate decoder failed, then cleanup data for this image.
VideoQualityAnalyzerInterface::DecoderStats stats;

@@ -59,7 +59,6 @@ class QualityAnalyzingVideoDecoder : public VideoDecoder {
// Methods of VideoDecoder interface.
bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override;

@@ -79,8 +79,7 @@ FrameGeneratorInterface::VideoFrameData IvfVideoFrameGenerator::NextFrame() {
RTC_CHECK(image);
// Last parameter is undocumented and there is no usage of it found.
RTC_CHECK_EQ(WEBRTC_VIDEO_CODEC_OK,
video_decoder_->Decode(*image, /*missing_frames=*/false,
/*render_time_ms=*/0));
video_decoder_->Decode(*image, /*render_time_ms=*/0));
bool decoded = next_frame_decoded_.Wait(kMaxNextFrameWaitTimeout);
RTC_CHECK(decoded) << "Failed to decode next frame in "
<< kMaxNextFrameWaitTimeout << ". Can't continue";

@@ -48,9 +48,8 @@ class VideoDecoderProxyFactory final : public VideoDecoderFactory {
private:
int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override {
return decoder_->Decode(input_image, missing_frames, render_time_ms);
return decoder_->Decode(input_image, render_time_ms);
}
bool Configure(const Settings& settings) override {
return decoder_->Configure(settings);

@@ -26,7 +26,6 @@ class FrameDumpingDecoder : public VideoDecoder {
bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override;

@@ -54,9 +53,8 @@ bool FrameDumpingDecoder::Configure(const Settings& settings) {
}
int32_t FrameDumpingDecoder::Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) {
int32_t ret = decoder_->Decode(input_image, missing_frames, render_time_ms);
int32_t ret = decoder_->Decode(input_image, render_time_ms);
writer_->WriteFrame(input_image, codec_type_);
return ret;

@@ -134,7 +134,6 @@ class NullVideoDecoder : public webrtc::VideoDecoder {
}
int32_t Decode(const webrtc::EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override {
RTC_LOG(LS_ERROR) << "The NullVideoDecoder doesn't support decoding.";
return WEBRTC_VIDEO_CODEC_OK;
@@ -210,7 +210,7 @@ class VideoReceiveStream2Test : public ::testing::TestWithParam<bool> {
// By default, mock decode will wrap the fake decoder.
ON_CALL(mock_decoder_, Configure)
.WillByDefault(Invoke(&fake_decoder_, &test::FakeDecoder::Configure));
ON_CALL(mock_decoder_, Decode).WillByDefault(DefaultDecodeAction());
ON_CALL(mock_decoder_, Decode(_, _)).WillByDefault(DefaultDecodeAction());
ON_CALL(mock_decoder_, RegisterDecodeCompleteCallback)
.WillByDefault(
Invoke(&fake_decoder_,

@@ -304,7 +304,7 @@ TEST_P(VideoReceiveStream2Test, CreateFrameFromH264FmtpSpropAndIdr) {
rtppacket.SetTimestamp(0);
EXPECT_CALL(mock_decoder_, RegisterDecodeCompleteCallback(_));
video_receive_stream_->Start();
EXPECT_CALL(mock_decoder_, Decode(_, false, _));
EXPECT_CALL(mock_decoder_, Decode(_, _));
RtpPacketReceived parsed_packet;
ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size()));
rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet);

@@ -469,7 +469,7 @@ TEST_P(VideoReceiveStream2Test, LazyDecoderCreation) {
CreateVideoDecoder(Field(&SdpVideoFormat::name, testing::Eq("H264"))));
EXPECT_CALL(mock_decoder_, Configure);
EXPECT_CALL(mock_decoder_, RegisterDecodeCompleteCallback);
EXPECT_CALL(mock_decoder_, Decode);
EXPECT_CALL(mock_decoder_, Decode(_, _));
RtpPacketReceived parsed_packet;
ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size()));
rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet);

@@ -764,11 +764,10 @@ TEST_P(VideoReceiveStream2Test, DependantFramesAreScheduled) {
// Expect frames are decoded in order.
InSequence seq;
EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _));
EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp), _));
EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp +
k30FpsRtpTimestampDelta),
_, _))
_))
.Times(1);
video_receive_stream_->OnCompleteFrame(std::move(key_frame));
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());

@@ -806,14 +805,13 @@ TEST_P(VideoReceiveStream2Test, FramesScheduledInOrder) {
// Expect frames are decoded in order despite delta_frame1 arriving first.
InSequence seq;
EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _))
EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp), _))
.Times(1);
EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _, _))
Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _))
.Times(1);
EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _, _))
Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _))
.Times(1);
key_frame->SetReceivedTime(clock_->CurrentTime().ms());
video_receive_stream_->OnCompleteFrame(std::move(key_frame));

@@ -855,7 +853,7 @@ TEST_P(VideoReceiveStream2Test, WaitsforAllSpatialLayers) {
.Build();
// No decodes should be called until `sl2` is received.
EXPECT_CALL(mock_decoder_, Decode).Times(0);
EXPECT_CALL(mock_decoder_, Decode(_, _)).Times(0);
sl0->SetReceivedTime(clock_->CurrentTime().ms());
video_receive_stream_->OnCompleteFrame(std::move(sl0));
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()),

@@ -864,8 +862,7 @@ TEST_P(VideoReceiveStream2Test, WaitsforAllSpatialLayers) {
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()),
DidNotReceiveFrame());
// When `sl2` arrives decode should happen.
EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _))
EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp), _))
.Times(1);
video_receive_stream_->OnCompleteFrame(std::move(sl2));
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());

@@ -903,15 +900,14 @@ TEST_P(VideoReceiveStream2Test, FramesFastForwardOnSystemHalt) {
.AsLast()
.Build();
InSequence seq;
EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _))
EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp), _))
.WillOnce(testing::DoAll(Invoke([&] {
// System halt will be simulated in the decode.
time_controller_.AdvanceTime(k30FpsDelay * 2);
}),
DefaultDecodeAction()));
EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _, _));
Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _));
video_receive_stream_->OnCompleteFrame(std::move(key_frame));
video_receive_stream_->OnCompleteFrame(std::move(ffwd_frame));
video_receive_stream_->OnCompleteFrame(std::move(rendered_frame));

@@ -959,10 +955,10 @@ TEST_P(VideoReceiveStream2Test, BetterFrameInsertedWhileWaitingToDecodeFrame) {
InSequence seq;
EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _, _))
Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _))
.Times(1);
EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _, _))
Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _))
.Times(1);
// Simulate f1 arriving after f2 but before f2 is decoded.
video_receive_stream_->OnCompleteFrame(std::move(f2));

@@ -1021,7 +1017,7 @@ TEST_P(VideoReceiveStream2Test, RtpTimestampWrapAround) {
.ReceivedTime(clock_->CurrentTime())
.AsLast()
.Build());
EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kWrapAroundRtp), _, _))
EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kWrapAroundRtp), _))
.Times(1);
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());