Remove EncodedFrame::MissingFrame and start removing Decode() param

Remove EncodedFrame::MissingFrame, as it was always false in actual
in-use code anyway, and remove usages of the Decode missing_frames param
within WebRTC. Uses/overrides in other projects will be cleaned up
shortly, allowing that variant to be removed from the interface.

Bug: webrtc:15444
Change-Id: Id299d82e441a351deff81c0f2812707a985d23d8
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/317802
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Auto-Submit: Tony Herre <herre@google.com>
Commit-Queue: Tony Herre <herre@google.com>
Cr-Commit-Position: refs/heads/main@{#40662}
This commit is contained in:
Tony Herre 2023-08-29 16:05:49 +02:00 committed by WebRTC LUCI CQ
parent 44943c8064
commit 55b593fb6b
42 changed files with 114 additions and 160 deletions

View file

@ -18,6 +18,9 @@
namespace webrtc { namespace webrtc {
using testing::_;
using testing::Invoke;
class MockDecodedImageCallback : public DecodedImageCallback { class MockDecodedImageCallback : public DecodedImageCallback {
public: public:
MOCK_METHOD(int32_t, MOCK_METHOD(int32_t,
@ -43,6 +46,14 @@ class MockVideoDecoder : public VideoDecoder {
// Make `Configure` succeed by default, so that individual tests that // Make `Configure` succeed by default, so that individual tests that
// verify other methods wouldn't need to stub `Configure`. // verify other methods wouldn't need to stub `Configure`.
ON_CALL(*this, Configure).WillByDefault(testing::Return(true)); ON_CALL(*this, Configure).WillByDefault(testing::Return(true));
// TODO(bugs.webrtc.org/15444): Remove once all tests have been migrated to
// expecting calls to Decode without a missing_frames param.
ON_CALL(*this, Decode(_, _))
.WillByDefault(Invoke([this](const EncodedImage& input_image,
int64_t render_time_ms) {
return Decode(input_image, /*missing_frames=*/false, render_time_ms);
}));
} }
~MockVideoDecoder() override { Destruct(); } ~MockVideoDecoder() override { Destruct(); }
@ -51,9 +62,13 @@ class MockVideoDecoder : public VideoDecoder {
MOCK_METHOD(int32_t, MOCK_METHOD(int32_t,
Decode, Decode,
(const EncodedImage& input_image, (const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms), int64_t render_time_ms),
(override)); (override));
MOCK_METHOD(int32_t,
Decode,
(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms));
MOCK_METHOD(int32_t, MOCK_METHOD(int32_t,
RegisterDecodeCompleteCallback, RegisterDecodeCompleteCallback,
(DecodedImageCallback * callback), (DecodedImageCallback * callback),

View file

@ -62,8 +62,6 @@ class EncodedFrame : public EncodedImage {
uint8_t PayloadType() const { return _payloadType; } uint8_t PayloadType() const { return _payloadType; }
bool MissingFrame() const { return _missingFrame; }
void SetRenderTime(const int64_t renderTimeMs) { void SetRenderTime(const int64_t renderTimeMs) {
_renderTimeMs = renderTimeMs; _renderTimeMs = renderTimeMs;
} }
@ -94,7 +92,6 @@ class EncodedFrame : public EncodedImage {
// getters/setters as needed. // getters/setters as needed.
int64_t _renderTimeMs = -1; int64_t _renderTimeMs = -1;
uint8_t _payloadType = 0; uint8_t _payloadType = 0;
bool _missingFrame = false;
CodecSpecificInfo _codecSpecificInfo; CodecSpecificInfo _codecSpecificInfo;
VideoCodecType _codec = kVideoCodecGeneric; VideoCodecType _codec = kVideoCodecGeneric;

View file

@ -45,7 +45,6 @@ class VideoDecoderSoftwareFallbackWrapperTest : public ::testing::Test {
} }
int32_t Decode(const EncodedImage& input_image, int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override { int64_t render_time_ms) override {
++decode_count_; ++decode_count_;
return decode_return_code_; return decode_return_code_;
@ -84,7 +83,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, InitializesDecoder) {
EncodedImage encoded_image; EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey; encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->configure_count_) EXPECT_EQ(1, fake_decoder_->configure_count_)
<< "Initialized decoder should not be reinitialized."; << "Initialized decoder should not be reinitialized.";
EXPECT_EQ(1, fake_decoder_->decode_count_); EXPECT_EQ(1, fake_decoder_->decode_count_);
@ -98,7 +97,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
EncodedImage encoded_image; EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey; encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->configure_count_) EXPECT_EQ(1, fake_decoder_->configure_count_)
<< "Should not have attempted reinitializing the fallback decoder on " << "Should not have attempted reinitializing the fallback decoder on "
"keyframe."; "keyframe.";
@ -113,12 +112,12 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, IsSoftwareFallbackSticky) {
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE; fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image; EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->decode_count_); EXPECT_EQ(1, fake_decoder_->decode_count_);
// Software fallback should be sticky, fake_decoder_ shouldn't be used. // Software fallback should be sticky, fake_decoder_ shouldn't be used.
encoded_image._frameType = VideoFrameType::kVideoFrameKey; encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->decode_count_) EXPECT_EQ(1, fake_decoder_->decode_count_)
<< "Decoder shouldn't be used after failure."; << "Decoder shouldn't be used after failure.";
@ -131,10 +130,10 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, DoesNotFallbackOnEveryError) {
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR; fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
EncodedImage encoded_image; EncodedImage encoded_image;
EXPECT_EQ(fake_decoder_->decode_return_code_, EXPECT_EQ(fake_decoder_->decode_return_code_,
fallback_wrapper_->Decode(encoded_image, false, -1)); fallback_wrapper_->Decode(encoded_image, -1));
EXPECT_EQ(1, fake_decoder_->decode_count_); EXPECT_EQ(1, fake_decoder_->decode_count_);
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(2, fake_decoder_->decode_count_) EXPECT_EQ(2, fake_decoder_->decode_count_)
<< "Decoder should be active even though previous decode failed."; << "Decoder should be active even though previous decode failed.";
} }
@ -144,14 +143,14 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, UsesHwDecoderAfterReinit) {
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE; fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image; EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->decode_count_); EXPECT_EQ(1, fake_decoder_->decode_count_);
fallback_wrapper_->Release(); fallback_wrapper_->Release();
fallback_wrapper_->Configure({}); fallback_wrapper_->Configure({});
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK; fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(2, fake_decoder_->decode_count_) EXPECT_EQ(2, fake_decoder_->decode_count_)
<< "Should not be using fallback after reinit."; << "Should not be using fallback after reinit.";
} }
@ -164,7 +163,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, ForwardsReleaseCall) {
fallback_wrapper_->Configure({}); fallback_wrapper_->Configure({});
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE; fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image; EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(2, fake_decoder_->release_count_) EXPECT_EQ(2, fake_decoder_->release_count_)
<< "Decoder should be released during fallback."; << "Decoder should be released during fallback.";
fallback_wrapper_->Release(); fallback_wrapper_->Release();
@ -200,7 +199,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE; fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image; EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
// Hard coded expected value since libvpx is the software implementation name // Hard coded expected value since libvpx is the software implementation name
// for VP8. Change accordingly if the underlying implementation does. // for VP8. Change accordingly if the underlying implementation does.
EXPECT_STREQ("libvpx (fallback from: fake-decoder)", EXPECT_STREQ("libvpx (fallback from: fake-decoder)",
@ -215,13 +214,13 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, FallbacksOnTooManyErrors) {
EncodedImage encoded_image; EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey; encoded_image._frameType = VideoFrameType::kVideoFrameKey;
// Doesn't fallback from a single error. // Doesn't fallback from a single error.
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName()); EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
// However, many frames with the same error, fallback should happen. // However, many frames with the same error, fallback should happen.
const int kNumFramesToEncode = 10; const int kNumFramesToEncode = 10;
for (int i = 0; i < kNumFramesToEncode; ++i) { for (int i = 0; i < kNumFramesToEncode; ++i) {
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
} }
// Hard coded expected value since libvpx is the software implementation name // Hard coded expected value since libvpx is the software implementation name
// for VP8. Change accordingly if the underlying implementation does. // for VP8. Change accordingly if the underlying implementation does.
@ -241,7 +240,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
// Many decoded frames with the same error // Many decoded frames with the same error
const int kNumFramesToEncode = 10; const int kNumFramesToEncode = 10;
for (int i = 0; i < kNumFramesToEncode; ++i) { for (int i = 0; i < kNumFramesToEncode; ++i) {
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
} }
EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName()); EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
@ -259,9 +258,9 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
for (int i = 0; i < kNumFramesToEncode; ++i) { for (int i = 0; i < kNumFramesToEncode; ++i) {
// Interleaved errors and successful decodes. // Interleaved errors and successful decodes.
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR; fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK; fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
} }
EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName()); EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
fallback_wrapper_->Release(); fallback_wrapper_->Release();
@ -289,7 +288,7 @@ TEST_F(ForcedSoftwareDecoderFallbackTest, UsesForcedFallback) {
EncodedImage encoded_image; EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey; encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1); fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, sw_fallback_decoder_->configure_count_); EXPECT_EQ(1, sw_fallback_decoder_->configure_count_);
EXPECT_EQ(1, sw_fallback_decoder_->decode_count_); EXPECT_EQ(1, sw_fallback_decoder_->decode_count_);

View file

@ -98,9 +98,20 @@ class RTC_EXPORT VideoDecoder {
// times, in such case only latest `settings` are in effect. // times, in such case only latest `settings` are in effect.
virtual bool Configure(const Settings& settings) = 0; virtual bool Configure(const Settings& settings) = 0;
// TODO(bugs.webrtc.org/15444): Make pure virtual once all subclasses have
// migrated to implementing this variant.
virtual int32_t Decode(const EncodedImage& input_image,
int64_t render_time_ms) {
return Decode(input_image, /*missing_frames=*/false, render_time_ms);
}
// TODO(bugs.webrtc.org/15444): Migrate all subclasses to Decode() without
// missing_frames and delete this.
virtual int32_t Decode(const EncodedImage& input_image, virtual int32_t Decode(const EncodedImage& input_image,
bool missing_frames, bool missing_frames,
int64_t render_time_ms) = 0; int64_t render_time_ms) {
return Decode(input_image, render_time_ms);
}
virtual int32_t RegisterDecodeCompleteCallback( virtual int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) = 0; DecodedImageCallback* callback) = 0;

View file

@ -41,7 +41,6 @@ class VideoDecoderSoftwareFallbackWrapper final : public VideoDecoder {
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input_image, int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override; int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback( int32_t RegisterDecodeCompleteCallback(
@ -176,7 +175,6 @@ void VideoDecoderSoftwareFallbackWrapper::UpdateFallbackDecoderHistograms() {
int32_t VideoDecoderSoftwareFallbackWrapper::Decode( int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
const EncodedImage& input_image, const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) { int64_t render_time_ms) {
TRACE_EVENT0("webrtc", "VideoDecoderSoftwareFallbackWrapper::Decode"); TRACE_EVENT0("webrtc", "VideoDecoderSoftwareFallbackWrapper::Decode");
switch (decoder_type_) { switch (decoder_type_) {
@ -184,7 +182,7 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
return WEBRTC_VIDEO_CODEC_UNINITIALIZED; return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
case DecoderType::kHardware: { case DecoderType::kHardware: {
int32_t ret = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE; int32_t ret = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
ret = hw_decoder_->Decode(input_image, missing_frames, render_time_ms); ret = hw_decoder_->Decode(input_image, render_time_ms);
if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) { if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) {
if (ret != WEBRTC_VIDEO_CODEC_ERROR) { if (ret != WEBRTC_VIDEO_CODEC_ERROR) {
++hw_decoded_frames_since_last_fallback_; ++hw_decoded_frames_since_last_fallback_;
@ -212,8 +210,7 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
[[fallthrough]]; [[fallthrough]];
} }
case DecoderType::kFallback: case DecoderType::kFallback:
return fallback_decoder_->Decode(input_image, missing_frames, return fallback_decoder_->Decode(input_image, render_time_ms);
render_time_ms);
default: default:
RTC_DCHECK_NOTREACHED(); RTC_DCHECK_NOTREACHED();
return WEBRTC_VIDEO_CODEC_ERROR; return WEBRTC_VIDEO_CODEC_ERROR;

View file

@ -60,7 +60,6 @@ bool FakeWebRtcVideoDecoder::Configure(const Settings& settings) {
} }
int32_t FakeWebRtcVideoDecoder::Decode(const webrtc::EncodedImage&, int32_t FakeWebRtcVideoDecoder::Decode(const webrtc::EncodedImage&,
bool,
int64_t) { int64_t) {
num_frames_received_++; num_frames_received_++;
return WEBRTC_VIDEO_CODEC_OK; return WEBRTC_VIDEO_CODEC_OK;

View file

@ -45,7 +45,7 @@ class FakeWebRtcVideoDecoder : public webrtc::VideoDecoder {
~FakeWebRtcVideoDecoder(); ~FakeWebRtcVideoDecoder();
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int32_t Decode(const webrtc::EncodedImage&, bool, int64_t) override; int32_t Decode(const webrtc::EncodedImage&, int64_t) override;
int32_t RegisterDecodeCompleteCallback( int32_t RegisterDecodeCompleteCallback(
webrtc::DecodedImageCallback*) override; webrtc::DecodedImageCallback*) override;
int32_t Release() override; int32_t Release() override;

View file

@ -35,7 +35,6 @@ class Dav1dDecoder : public VideoDecoder {
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& encoded_image, int32_t Decode(const EncodedImage& encoded_image,
bool missing_frames,
int64_t render_time_ms) override; int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback( int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override; DecodedImageCallback* callback) override;
@ -119,7 +118,6 @@ const char* Dav1dDecoder::ImplementationName() const {
} }
int32_t Dav1dDecoder::Decode(const EncodedImage& encoded_image, int32_t Dav1dDecoder::Decode(const EncodedImage& encoded_image,
bool /*missing_frames*/,
int64_t /*render_time_ms*/) { int64_t /*render_time_ms*/) {
if (!context_ || decode_complete_callback_ == nullptr) { if (!context_ || decode_complete_callback_ == nullptr) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED; return WEBRTC_VIDEO_CODEC_UNINITIALIZED;

View file

@ -89,8 +89,8 @@ class TestAv1Decoder {
void Decode(int64_t frame_id, const EncodedImage& image) { void Decode(int64_t frame_id, const EncodedImage& image) {
ASSERT_THAT(decoder_, NotNull()); ASSERT_THAT(decoder_, NotNull());
int32_t error = decoder_->Decode(image, /*missing_frames=*/false, int32_t error =
/*render_time_ms=*/image.capture_time_ms_); decoder_->Decode(image, /*render_time_ms=*/image.capture_time_ms_);
if (error != WEBRTC_VIDEO_CODEC_OK) { if (error != WEBRTC_VIDEO_CODEC_OK) {
ADD_FAILURE() << "Failed to decode frame id " << frame_id ADD_FAILURE() << "Failed to decode frame id " << frame_id
<< " with error code " << error << " by decoder#" << " with error code " << error << " by decoder#"

View file

@ -62,7 +62,7 @@ TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey; encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@ -87,7 +87,7 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey; encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

View file

@ -33,7 +33,6 @@ class MultiplexDecoderAdapter : public VideoDecoder {
// Implements VideoDecoder // Implements VideoDecoder
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input_image, int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override; int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback( int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override; DecodedImageCallback* callback) override;

View file

@ -125,7 +125,6 @@ bool MultiplexDecoderAdapter::Configure(const Settings& settings) {
} }
int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image, int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) { int64_t render_time_ms) {
MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image); MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image);
@ -149,8 +148,7 @@ int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image,
int32_t rv = 0; int32_t rv = 0;
for (size_t i = 0; i < image.image_components.size(); i++) { for (size_t i = 0; i < image.image_components.size(); i++) {
rv = decoders_[image.image_components[i].component_index]->Decode( rv = decoders_[image.image_components[i].component_index]->Decode(
image.image_components[i].encoded_image, missing_frames, image.image_components[i].encoded_image, render_time_ms);
render_time_ms);
if (rv != WEBRTC_VIDEO_CODEC_OK) if (rv != WEBRTC_VIDEO_CODEC_OK)
return rv; return rv;
} }

View file

@ -218,7 +218,7 @@ TEST_P(TestMultiplexAdapter, EncodeDecodeI420Frame) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType); EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, -1));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@ -235,7 +235,7 @@ TEST_P(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType); EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

View file

@ -355,8 +355,7 @@ class TestDecoder : public VideoCodecTester::Decoder,
callbacks_[frame.Timestamp()] = std::move(callback); callbacks_[frame.Timestamp()] = std::move(callback);
} }
decoder_->Decode(frame, /*missing_frames=*/false, decoder_->Decode(frame, /*render_time_ms=*/0);
/*render_time_ms=*/0);
} }
void Flush() override { void Flush() override {

View file

@ -638,7 +638,7 @@ void VideoProcessor::DecodeFrame(const EncodedImage& encoded_image,
frame_stat->decode_start_ns = rtc::TimeNanos(); frame_stat->decode_start_ns = rtc::TimeNanos();
frame_stat->decode_return_code = frame_stat->decode_return_code =
decoders_->at(spatial_idx)->Decode(encoded_image, false, 0); decoders_->at(spatial_idx)->Decode(encoded_image, 0);
} }
const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe( const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(

View file

@ -37,7 +37,6 @@
namespace webrtc { namespace webrtc {
namespace { namespace {
constexpr int kVp8ErrorPropagationTh = 30;
// vpx_decoder.h documentation indicates decode deadline is time in us, with // vpx_decoder.h documentation indicates decode deadline is time in us, with
// "Set to zero for unlimited.", but actual implementation requires this to be // "Set to zero for unlimited.", but actual implementation requires this to be
// a mode with 0 meaning allow delay and 1 not allowing it. // a mode with 0 meaning allow delay and 1 not allowing it.
@ -122,7 +121,6 @@ LibvpxVp8Decoder::LibvpxVp8Decoder()
decode_complete_callback_(NULL), decode_complete_callback_(NULL),
inited_(false), inited_(false),
decoder_(NULL), decoder_(NULL),
propagation_cnt_(-1),
last_frame_width_(0), last_frame_width_(0),
last_frame_height_(0), last_frame_height_(0),
key_frame_required_(true), key_frame_required_(true),
@ -156,7 +154,6 @@ bool LibvpxVp8Decoder::Configure(const Settings& settings) {
return false; return false;
} }
propagation_cnt_ = -1;
inited_ = true; inited_ = true;
// Always start with a complete key frame. // Always start with a complete key frame.
@ -170,7 +167,12 @@ bool LibvpxVp8Decoder::Configure(const Settings& settings) {
} }
int LibvpxVp8Decoder::Decode(const EncodedImage& input_image, int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
bool missing_frames, int64_t render_time_ms) {
return Decode(input_image, /*missing_frames=*/false, render_time_ms);
}
int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
bool /*missing_frames*/,
int64_t /*render_time_ms*/) { int64_t /*render_time_ms*/) {
if (!inited_) { if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED; return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
@ -179,9 +181,6 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
return WEBRTC_VIDEO_CODEC_UNINITIALIZED; return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
} }
if (input_image.data() == NULL && input_image.size() > 0) { if (input_image.data() == NULL && input_image.size() > 0) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0)
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
} }
@ -234,34 +233,6 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
return WEBRTC_VIDEO_CODEC_ERROR; return WEBRTC_VIDEO_CODEC_ERROR;
key_frame_required_ = false; key_frame_required_ = false;
} }
// Restrict error propagation using key frame requests.
// Reset on a key frame refresh.
if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
propagation_cnt_ = -1;
// Start count on first loss.
} else if (missing_frames && propagation_cnt_ == -1) {
propagation_cnt_ = 0;
}
if (propagation_cnt_ >= 0) {
propagation_cnt_++;
}
vpx_codec_iter_t iter = NULL;
vpx_image_t* img;
int ret;
// Check for missing frames.
if (missing_frames) {
// Call decoder with zero data length to signal missing frames.
if (vpx_codec_decode(decoder_, NULL, 0, 0, kDecodeDeadlineRealtime)) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0)
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERROR;
}
img = vpx_codec_get_frame(decoder_, &iter);
iter = NULL;
}
const uint8_t* buffer = input_image.data(); const uint8_t* buffer = input_image.data();
if (input_image.size() == 0) { if (input_image.size() == 0) {
@ -269,31 +240,20 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
} }
if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0, if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,
kDecodeDeadlineRealtime)) { kDecodeDeadlineRealtime)) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0) {
propagation_cnt_ = 0;
}
return WEBRTC_VIDEO_CODEC_ERROR; return WEBRTC_VIDEO_CODEC_ERROR;
} }
img = vpx_codec_get_frame(decoder_, &iter); vpx_codec_iter_t iter = NULL;
vpx_image_t* img = vpx_codec_get_frame(decoder_, &iter);
int qp; int qp;
vpx_codec_err_t vpx_ret = vpx_codec_err_t vpx_ret =
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp); vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK); RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
ret = ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace()); int ret =
ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());
if (ret != 0) { if (ret != 0) {
// Reset to avoid requesting key frames too often.
if (ret < 0 && propagation_cnt_ > 0)
propagation_cnt_ = 0;
return ret; return ret;
} }
// Check Vs. threshold
if (propagation_cnt_ > kVp8ErrorPropagationTh) {
// Reset to avoid requesting key frames too often.
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK; return WEBRTC_VIDEO_CODEC_OK;
} }

View file

@ -30,6 +30,11 @@ class LibvpxVp8Decoder : public VideoDecoder {
~LibvpxVp8Decoder() override; ~LibvpxVp8Decoder() override;
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int Decode(const EncodedImage& input_image,
int64_t /*render_time_ms*/) override;
// TODO(bugs.webrtc.org/15444): Remove once all subclasses have been migrated
// to expecting calls to Decode without a missing_frames param.
int Decode(const EncodedImage& input_image, int Decode(const EncodedImage& input_image,
bool missing_frames, bool missing_frames,
int64_t /*render_time_ms*/) override; int64_t /*render_time_ms*/) override;
@ -61,7 +66,6 @@ class LibvpxVp8Decoder : public VideoDecoder {
DecodedImageCallback* decode_complete_callback_; DecodedImageCallback* decode_complete_callback_;
bool inited_; bool inited_;
vpx_codec_ctx_t* decoder_; vpx_codec_ctx_t* decoder_;
int propagation_cnt_;
int last_frame_width_; int last_frame_width_;
int last_frame_height_; int last_frame_height_;
bool key_frame_required_; bool key_frame_required_;

View file

@ -286,7 +286,7 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey; encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, -1));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@ -501,7 +501,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey; encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
encoded_frame.ntp_time_ms_ = kTestNtpTimeMs; encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, -1));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;

View file

@ -188,7 +188,6 @@ bool LibvpxVp9Decoder::Configure(const Settings& settings) {
} }
int LibvpxVp9Decoder::Decode(const EncodedImage& input_image, int LibvpxVp9Decoder::Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t /*render_time_ms*/) { int64_t /*render_time_ms*/) {
if (!inited_) { if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED; return WEBRTC_VIDEO_CODEC_UNINITIALIZED;

View file

@ -29,7 +29,6 @@ class LibvpxVp9Decoder : public VP9Decoder {
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int Decode(const EncodedImage& input_image, int Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t /*render_time_ms*/) override; int64_t /*render_time_ms*/) override;
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override; int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;

View file

@ -143,7 +143,7 @@ TEST_P(TestVp9ImplForPixelFormat, EncodeDecode) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey; encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@ -193,7 +193,7 @@ TEST_P(TestVp9ImplForPixelFormat, DecodedColorSpaceFromBitstream) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// Encoded frame without explicit color space information. // Encoded frame without explicit color space information.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@ -211,7 +211,7 @@ TEST_P(TestVp9ImplForPixelFormat, DecodedQpEqualsEncodedQp) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey; encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@ -2063,7 +2063,7 @@ TEST_F(TestVp9ImplProfile2, EncodeDecode) {
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// First frame should be a key frame. // First frame should be a key frame.
encoded_frame._frameType = VideoFrameType::kVideoFrameKey; encoded_frame._frameType = VideoFrameType::kVideoFrameKey;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0)); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
std::unique_ptr<VideoFrame> decoded_frame; std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp; absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

View file

@ -282,17 +282,16 @@ bool VCMGenericDecoder::Configure(const VideoDecoder::Settings& settings) {
} }
int32_t VCMGenericDecoder::Decode(const EncodedFrame& frame, Timestamp now) { int32_t VCMGenericDecoder::Decode(const EncodedFrame& frame, Timestamp now) {
return Decode(frame, now, frame.RenderTimeMs(), frame.MissingFrame()); return Decode(frame, now, frame.RenderTimeMs());
} }
int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, Timestamp now) { int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, Timestamp now) {
return Decode(frame, now, frame.RenderTimeMs(), frame.MissingFrame()); return Decode(frame, now, frame.RenderTimeMs());
} }
int32_t VCMGenericDecoder::Decode(const EncodedImage& frame, int32_t VCMGenericDecoder::Decode(const EncodedImage& frame,
Timestamp now, Timestamp now,
int64_t render_time_ms, int64_t render_time_ms) {
int64_t missing_frame) {
TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp", TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
frame.Timestamp()); frame.Timestamp());
FrameInfo frame_info; FrameInfo frame_info;
@ -319,7 +318,7 @@ int32_t VCMGenericDecoder::Decode(const EncodedImage& frame,
frame_info.frame_type = frame.FrameType(); frame_info.frame_type = frame.FrameType();
_callback->Map(std::move(frame_info)); _callback->Map(std::move(frame_info));
int32_t ret = decoder_->Decode(frame, missing_frame, render_time_ms); int32_t ret = decoder_->Decode(frame, render_time_ms);
VideoDecoder::DecoderInfo decoder_info = decoder_->GetDecoderInfo(); VideoDecoder::DecoderInfo decoder_info = decoder_->GetDecoderInfo();
if (decoder_info != decoder_info_) { if (decoder_info != decoder_info_) {
RTC_LOG(LS_INFO) << "Changed decoder implementation to: " RTC_LOG(LS_INFO) << "Changed decoder implementation to: "

View file

@ -120,8 +120,7 @@ class VCMGenericDecoder {
private: private:
int32_t Decode(const EncodedImage& frame, int32_t Decode(const EncodedImage& frame,
Timestamp now, Timestamp now,
int64_t render_time_ms, int64_t render_time_ms);
int64_t missing_frame);
VCMDecodedFrameCallback* _callback = nullptr; VCMDecodedFrameCallback* _callback = nullptr;
VideoDecoder* const decoder_; VideoDecoder* const decoder_;
VideoContentType _last_keyframe_content_type; VideoContentType _last_keyframe_content_type;

View file

@ -894,9 +894,9 @@ void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
EncodedImage encoded_frame; EncodedImage encoded_frame;
// Only encoding one frame - so will be a key frame. // Only encoding one frame - so will be a key frame.
encoder_callback.GetLastEncodedKeyFrame(&encoded_frame); encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, 0)); EXPECT_EQ(0, decoder_->Decode(encoded_frame, 0));
encoder_callback.GetLastEncodedFrame(&encoded_frame); encoder_callback.GetLastEncodedFrame(&encoded_frame);
decoder_->Decode(encoded_frame, false, 0); decoder_->Decode(encoded_frame, 0);
EXPECT_EQ(2, decoder_callback.DecodedFrames()); EXPECT_EQ(2, decoder_callback.DecodedFrames());
} }
@ -932,7 +932,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
EXPECT_EQ(decodedImage.width(), kDefaultWidth / 4); EXPECT_EQ(decodedImage.width(), kDefaultWidth / 4);
EXPECT_EQ(decodedImage.height(), kDefaultHeight / 4); EXPECT_EQ(decodedImage.height(), kDefaultHeight / 4);
})); }));
EXPECT_EQ(0, decoder_->Decode(encoded_frame[0], false, 0)); EXPECT_EQ(0, decoder_->Decode(encoded_frame[0], 0));
EXPECT_CALL(decoder_callback, Decoded(_, _, _)) EXPECT_CALL(decoder_callback, Decoded(_, _, _))
.WillOnce(::testing::Invoke([](VideoFrame& decodedImage, .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
@ -941,7 +941,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
EXPECT_EQ(decodedImage.width(), kDefaultWidth / 2); EXPECT_EQ(decodedImage.width(), kDefaultWidth / 2);
EXPECT_EQ(decodedImage.height(), kDefaultHeight / 2); EXPECT_EQ(decodedImage.height(), kDefaultHeight / 2);
})); }));
EXPECT_EQ(0, decoder_->Decode(encoded_frame[1], false, 0)); EXPECT_EQ(0, decoder_->Decode(encoded_frame[1], 0));
EXPECT_CALL(decoder_callback, Decoded(_, _, _)) EXPECT_CALL(decoder_callback, Decoded(_, _, _))
.WillOnce(::testing::Invoke([](VideoFrame& decodedImage, .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
@ -950,7 +950,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
EXPECT_EQ(decodedImage.width(), kDefaultWidth); EXPECT_EQ(decodedImage.width(), kDefaultWidth);
EXPECT_EQ(decodedImage.height(), kDefaultHeight); EXPECT_EQ(decodedImage.height(), kDefaultHeight);
})); }));
EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, 0)); EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], 0));
} }
void SimulcastTestFixtureImpl:: void SimulcastTestFixtureImpl::

View file

@ -123,7 +123,7 @@ TEST_F(VideoReceiver2Test, RegisterReceiveCodecs) {
auto decoder = std::make_unique<NiceMock<MockVideoDecoder>>(); auto decoder = std::make_unique<NiceMock<MockVideoDecoder>>();
EXPECT_CALL(*decoder, RegisterDecodeCompleteCallback) EXPECT_CALL(*decoder, RegisterDecodeCompleteCallback)
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK)); .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*decoder, Decode).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK)); EXPECT_CALL(*decoder, Decode(_, _)).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*decoder, Release).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK)); EXPECT_CALL(*decoder, Release).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
// Register the decoder. Note that this moves ownership of the mock object // Register the decoder. Note that this moves ownership of the mock object

View file

@ -109,7 +109,7 @@ class TestVideoReceiver : public ::testing::Test {
++header->sequenceNumber; ++header->sequenceNumber;
} }
receiver_.Process(); receiver_.Process();
EXPECT_CALL(decoder_, Decode(_, _, _)).Times(0); EXPECT_CALL(decoder_, Decode(_, _)).Times(0);
EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_.Decode(kMaxWaitTimeMs)); EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_.Decode(kMaxWaitTimeMs));
} }
@ -123,7 +123,7 @@ class TestVideoReceiver : public ::testing::Test {
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0); EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
receiver_.Process(); receiver_.Process();
EXPECT_CALL(decoder_, Decode(_, _, _)).Times(1); EXPECT_CALL(decoder_, Decode(_, _)).Times(1);
EXPECT_EQ(0, receiver_.Decode(kMaxWaitTimeMs)); EXPECT_EQ(0, receiver_.Decode(kMaxWaitTimeMs));
} }

View file

@ -240,7 +240,6 @@ class DecoderBitstreamFileWriter : public test::FakeDecoder {
~DecoderBitstreamFileWriter() override { fclose(file_); } ~DecoderBitstreamFileWriter() override { fclose(file_); }
int32_t Decode(const EncodedImage& encoded_frame, int32_t Decode(const EncodedImage& encoded_frame,
bool /* missing_frames */,
int64_t /* render_time_ms */) override { int64_t /* render_time_ms */) override {
if (fwrite(encoded_frame.data(), 1, encoded_frame.size(), file_) < if (fwrite(encoded_frame.data(), 1, encoded_frame.size(), file_) <
encoded_frame.size()) { encoded_frame.size()) {
@ -276,7 +275,6 @@ class DecoderIvfFileWriter : public test::FakeDecoder {
~DecoderIvfFileWriter() override { file_writer_->Close(); } ~DecoderIvfFileWriter() override { file_writer_->Close(); }
int32_t Decode(const EncodedImage& encoded_frame, int32_t Decode(const EncodedImage& encoded_frame,
bool /* missing_frames */,
int64_t render_time_ms) override { int64_t render_time_ms) override {
if (!file_writer_->WriteFrame(encoded_frame, video_codec_type_)) { if (!file_writer_->WriteFrame(encoded_frame, video_codec_type_)) {
return WEBRTC_VIDEO_CODEC_ERROR; return WEBRTC_VIDEO_CODEC_ERROR;

View file

@ -46,6 +46,7 @@
return 0; return 0;
} }
// TODO(bugs.webrtc.org/15444): Remove obsolete missingFrames param.
- (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)encodedImage - (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)encodedImage
missingFrames:(BOOL)missingFrames missingFrames:(BOOL)missingFrames
codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info

View file

@ -29,6 +29,7 @@ RTC_OBJC_EXPORT
- (void)setCallback : (RTCVideoDecoderCallback)callback; - (void)setCallback : (RTCVideoDecoderCallback)callback;
- (NSInteger)startDecodeWithNumberOfCores:(int)numberOfCores; - (NSInteger)startDecodeWithNumberOfCores:(int)numberOfCores;
- (NSInteger)releaseDecoder; - (NSInteger)releaseDecoder;
// TODO(bugs.webrtc.org/15444): Remove obsolete missingFrames param.
- (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)encodedImage - (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)encodedImage
missingFrames:(BOOL)missingFrames missingFrames:(BOOL)missingFrames
codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info

View file

@ -98,6 +98,7 @@ void decompressionOutputCallback(void *decoderRef,
return WEBRTC_VIDEO_CODEC_OK; return WEBRTC_VIDEO_CODEC_OK;
} }
// TODO(bugs.webrtc.org/15444): Remove obsolete missingFrames param.
- (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)inputImage - (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)inputImage
missingFrames:(BOOL)missingFrames missingFrames:(BOOL)missingFrames
codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info

View file

@ -43,13 +43,12 @@ class ObjCVideoDecoder : public VideoDecoder {
} }
int32_t Decode(const EncodedImage &input_image, int32_t Decode(const EncodedImage &input_image,
bool missing_frames,
int64_t render_time_ms = -1) override { int64_t render_time_ms = -1) override {
RTC_OBJC_TYPE(RTCEncodedImage) *encodedImage = RTC_OBJC_TYPE(RTCEncodedImage) *encodedImage =
[[RTC_OBJC_TYPE(RTCEncodedImage) alloc] initWithNativeEncodedImage:input_image]; [[RTC_OBJC_TYPE(RTCEncodedImage) alloc] initWithNativeEncodedImage:input_image];
return [decoder_ decode:encodedImage return [decoder_ decode:encodedImage
missingFrames:missing_frames missingFrames:false
codecSpecificInfo:nil codecSpecificInfo:nil
renderTimeMs:render_time_ms]; renderTimeMs:render_time_ms];
} }

View file

@ -41,7 +41,6 @@ bool FakeDecoder::Configure(const Settings& settings) {
} }
int32_t FakeDecoder::Decode(const EncodedImage& input, int32_t FakeDecoder::Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) { int64_t render_time_ms) {
if (input._encodedWidth > 0 && input._encodedHeight > 0) { if (input._encodedWidth > 0 && input._encodedHeight > 0) {
width_ = input._encodedWidth; width_ = input._encodedWidth;
@ -103,7 +102,6 @@ const char* FakeDecoder::ImplementationName() const {
} }
int32_t FakeH264Decoder::Decode(const EncodedImage& input, int32_t FakeH264Decoder::Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) { int64_t render_time_ms) {
uint8_t value = 0; uint8_t value = 0;
for (size_t i = 0; i < input.size(); ++i) { for (size_t i = 0; i < input.size(); ++i) {
@ -119,7 +117,7 @@ int32_t FakeH264Decoder::Decode(const EncodedImage& input,
} }
++value; ++value;
} }
return FakeDecoder::Decode(input, missing_frames, render_time_ms); return FakeDecoder::Decode(input, render_time_ms);
} }
} // namespace test } // namespace test

View file

@ -35,7 +35,6 @@ class FakeDecoder : public VideoDecoder {
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input, int32_t Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) override; int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback( int32_t RegisterDecodeCompleteCallback(
@ -64,7 +63,6 @@ class FakeH264Decoder : public FakeDecoder {
virtual ~FakeH264Decoder() {} virtual ~FakeH264Decoder() {}
int32_t Decode(const EncodedImage& input, int32_t Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) override; int64_t render_time_ms) override;
}; };

View file

@ -44,7 +44,6 @@ bool FakeVp8Decoder::Configure(const Settings& settings) {
} }
int32_t FakeVp8Decoder::Decode(const EncodedImage& input, int32_t FakeVp8Decoder::Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) { int64_t render_time_ms) {
constexpr size_t kMinPayLoadHeaderLength = 10; constexpr size_t kMinPayLoadHeaderLength = 10;
if (input.size() < kMinPayLoadHeaderLength) { if (input.size() < kMinPayLoadHeaderLength) {

View file

@ -28,7 +28,6 @@ class FakeVp8Decoder : public VideoDecoder {
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input, int32_t Decode(const EncodedImage& input,
bool missing_frames,
int64_t render_time_ms) override; int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback( int32_t RegisterDecodeCompleteCallback(

View file

@ -51,7 +51,6 @@ bool QualityAnalyzingVideoDecoder::Configure(const Settings& settings) {
} }
int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image, int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) { int64_t render_time_ms) {
// Image extractor extracts id from provided EncodedImage and also returns // Image extractor extracts id from provided EncodedImage and also returns
// the image with the original buffer. Buffer can be modified in place, so // the image with the original buffer. Buffer can be modified in place, so
@ -96,8 +95,7 @@ int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image,
// thread. // thread.
analyzer_->OnFramePreDecode( analyzer_->OnFramePreDecode(
peer_name_, out.id.value_or(VideoFrame::kNotSetId), *origin_image); peer_name_, out.id.value_or(VideoFrame::kNotSetId), *origin_image);
int32_t result = int32_t result = delegate_->Decode(*origin_image, render_time_ms);
delegate_->Decode(*origin_image, missing_frames, render_time_ms);
if (result != WEBRTC_VIDEO_CODEC_OK) { if (result != WEBRTC_VIDEO_CODEC_OK) {
// If delegate decoder failed, then cleanup data for this image. // If delegate decoder failed, then cleanup data for this image.
VideoQualityAnalyzerInterface::DecoderStats stats; VideoQualityAnalyzerInterface::DecoderStats stats;

View file

@ -59,7 +59,6 @@ class QualityAnalyzingVideoDecoder : public VideoDecoder {
// Methods of VideoDecoder interface. // Methods of VideoDecoder interface.
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input_image, int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override; int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback( int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override; DecodedImageCallback* callback) override;

View file

@ -79,8 +79,7 @@ FrameGeneratorInterface::VideoFrameData IvfVideoFrameGenerator::NextFrame() {
RTC_CHECK(image); RTC_CHECK(image);
// Last parameter is undocumented and there is no usage of it found. // Last parameter is undocumented and there is no usage of it found.
RTC_CHECK_EQ(WEBRTC_VIDEO_CODEC_OK, RTC_CHECK_EQ(WEBRTC_VIDEO_CODEC_OK,
video_decoder_->Decode(*image, /*missing_frames=*/false, video_decoder_->Decode(*image, /*render_time_ms=*/0));
/*render_time_ms=*/0));
bool decoded = next_frame_decoded_.Wait(kMaxNextFrameWaitTimeout); bool decoded = next_frame_decoded_.Wait(kMaxNextFrameWaitTimeout);
RTC_CHECK(decoded) << "Failed to decode next frame in " RTC_CHECK(decoded) << "Failed to decode next frame in "
<< kMaxNextFrameWaitTimeout << ". Can't continue"; << kMaxNextFrameWaitTimeout << ". Can't continue";

View file

@ -48,9 +48,8 @@ class VideoDecoderProxyFactory final : public VideoDecoderFactory {
private: private:
int32_t Decode(const EncodedImage& input_image, int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override { int64_t render_time_ms) override {
return decoder_->Decode(input_image, missing_frames, render_time_ms); return decoder_->Decode(input_image, render_time_ms);
} }
bool Configure(const Settings& settings) override { bool Configure(const Settings& settings) override {
return decoder_->Configure(settings); return decoder_->Configure(settings);

View file

@ -26,7 +26,6 @@ class FrameDumpingDecoder : public VideoDecoder {
bool Configure(const Settings& settings) override; bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input_image, int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override; int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback( int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override; DecodedImageCallback* callback) override;
@ -54,9 +53,8 @@ bool FrameDumpingDecoder::Configure(const Settings& settings) {
} }
int32_t FrameDumpingDecoder::Decode(const EncodedImage& input_image, int32_t FrameDumpingDecoder::Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) { int64_t render_time_ms) {
int32_t ret = decoder_->Decode(input_image, missing_frames, render_time_ms); int32_t ret = decoder_->Decode(input_image, render_time_ms);
writer_->WriteFrame(input_image, codec_type_); writer_->WriteFrame(input_image, codec_type_);
return ret; return ret;

View file

@ -134,7 +134,6 @@ class NullVideoDecoder : public webrtc::VideoDecoder {
} }
int32_t Decode(const webrtc::EncodedImage& input_image, int32_t Decode(const webrtc::EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override { int64_t render_time_ms) override {
RTC_LOG(LS_ERROR) << "The NullVideoDecoder doesn't support decoding."; RTC_LOG(LS_ERROR) << "The NullVideoDecoder doesn't support decoding.";
return WEBRTC_VIDEO_CODEC_OK; return WEBRTC_VIDEO_CODEC_OK;

View file

@ -210,7 +210,7 @@ class VideoReceiveStream2Test : public ::testing::TestWithParam<bool> {
// By default, mock decode will wrap the fake decoder. // By default, mock decode will wrap the fake decoder.
ON_CALL(mock_decoder_, Configure) ON_CALL(mock_decoder_, Configure)
.WillByDefault(Invoke(&fake_decoder_, &test::FakeDecoder::Configure)); .WillByDefault(Invoke(&fake_decoder_, &test::FakeDecoder::Configure));
ON_CALL(mock_decoder_, Decode).WillByDefault(DefaultDecodeAction()); ON_CALL(mock_decoder_, Decode(_, _)).WillByDefault(DefaultDecodeAction());
ON_CALL(mock_decoder_, RegisterDecodeCompleteCallback) ON_CALL(mock_decoder_, RegisterDecodeCompleteCallback)
.WillByDefault( .WillByDefault(
Invoke(&fake_decoder_, Invoke(&fake_decoder_,
@ -304,7 +304,7 @@ TEST_P(VideoReceiveStream2Test, CreateFrameFromH264FmtpSpropAndIdr) {
rtppacket.SetTimestamp(0); rtppacket.SetTimestamp(0);
EXPECT_CALL(mock_decoder_, RegisterDecodeCompleteCallback(_)); EXPECT_CALL(mock_decoder_, RegisterDecodeCompleteCallback(_));
video_receive_stream_->Start(); video_receive_stream_->Start();
EXPECT_CALL(mock_decoder_, Decode(_, false, _)); EXPECT_CALL(mock_decoder_, Decode(_, _));
RtpPacketReceived parsed_packet; RtpPacketReceived parsed_packet;
ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size())); ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size()));
rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet); rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet);
@ -469,7 +469,7 @@ TEST_P(VideoReceiveStream2Test, LazyDecoderCreation) {
CreateVideoDecoder(Field(&SdpVideoFormat::name, testing::Eq("H264")))); CreateVideoDecoder(Field(&SdpVideoFormat::name, testing::Eq("H264"))));
EXPECT_CALL(mock_decoder_, Configure); EXPECT_CALL(mock_decoder_, Configure);
EXPECT_CALL(mock_decoder_, RegisterDecodeCompleteCallback); EXPECT_CALL(mock_decoder_, RegisterDecodeCompleteCallback);
EXPECT_CALL(mock_decoder_, Decode); EXPECT_CALL(mock_decoder_, Decode(_, _));
RtpPacketReceived parsed_packet; RtpPacketReceived parsed_packet;
ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size())); ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size()));
rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet); rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet);
@ -764,11 +764,10 @@ TEST_P(VideoReceiveStream2Test, DependantFramesAreScheduled) {
// Expect frames are decoded in order. // Expect frames are decoded in order.
InSequence seq; InSequence seq;
EXPECT_CALL(mock_decoder_, EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp), _));
Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _));
EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp + EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp +
k30FpsRtpTimestampDelta), k30FpsRtpTimestampDelta),
_, _)) _))
.Times(1); .Times(1);
video_receive_stream_->OnCompleteFrame(std::move(key_frame)); video_receive_stream_->OnCompleteFrame(std::move(key_frame));
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame()); EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());
@ -806,14 +805,13 @@ TEST_P(VideoReceiveStream2Test, FramesScheduledInOrder) {
// Expect frames are decoded in order despite delta_frame1 arriving first. // Expect frames are decoded in order despite delta_frame1 arriving first.
InSequence seq; InSequence seq;
EXPECT_CALL(mock_decoder_, EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp), _))
Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _))
.Times(1); .Times(1);
EXPECT_CALL(mock_decoder_, EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _, _)) Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _))
.Times(1); .Times(1);
EXPECT_CALL(mock_decoder_, EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _, _)) Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _))
.Times(1); .Times(1);
key_frame->SetReceivedTime(clock_->CurrentTime().ms()); key_frame->SetReceivedTime(clock_->CurrentTime().ms());
video_receive_stream_->OnCompleteFrame(std::move(key_frame)); video_receive_stream_->OnCompleteFrame(std::move(key_frame));
@ -855,7 +853,7 @@ TEST_P(VideoReceiveStream2Test, WaitsforAllSpatialLayers) {
.Build(); .Build();
// No decodes should be called until `sl2` is received. // No decodes should be called until `sl2` is received.
EXPECT_CALL(mock_decoder_, Decode).Times(0); EXPECT_CALL(mock_decoder_, Decode(_, _)).Times(0);
sl0->SetReceivedTime(clock_->CurrentTime().ms()); sl0->SetReceivedTime(clock_->CurrentTime().ms());
video_receive_stream_->OnCompleteFrame(std::move(sl0)); video_receive_stream_->OnCompleteFrame(std::move(sl0));
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()),
@ -864,8 +862,7 @@ TEST_P(VideoReceiveStream2Test, WaitsforAllSpatialLayers) {
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()),
DidNotReceiveFrame()); DidNotReceiveFrame());
// When `sl2` arrives decode should happen. // When `sl2` arrives decode should happen.
EXPECT_CALL(mock_decoder_, EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp), _))
Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _))
.Times(1); .Times(1);
video_receive_stream_->OnCompleteFrame(std::move(sl2)); video_receive_stream_->OnCompleteFrame(std::move(sl2));
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame()); EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());
@ -903,15 +900,14 @@ TEST_P(VideoReceiveStream2Test, FramesFastForwardOnSystemHalt) {
.AsLast() .AsLast()
.Build(); .Build();
InSequence seq; InSequence seq;
EXPECT_CALL(mock_decoder_, EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kFirstRtpTimestamp), _))
Decode(test::RtpTimestamp(kFirstRtpTimestamp), _, _))
.WillOnce(testing::DoAll(Invoke([&] { .WillOnce(testing::DoAll(Invoke([&] {
// System halt will be simulated in the decode. // System halt will be simulated in the decode.
time_controller_.AdvanceTime(k30FpsDelay * 2); time_controller_.AdvanceTime(k30FpsDelay * 2);
}), }),
DefaultDecodeAction())); DefaultDecodeAction()));
EXPECT_CALL(mock_decoder_, EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _, _)); Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _));
video_receive_stream_->OnCompleteFrame(std::move(key_frame)); video_receive_stream_->OnCompleteFrame(std::move(key_frame));
video_receive_stream_->OnCompleteFrame(std::move(ffwd_frame)); video_receive_stream_->OnCompleteFrame(std::move(ffwd_frame));
video_receive_stream_->OnCompleteFrame(std::move(rendered_frame)); video_receive_stream_->OnCompleteFrame(std::move(rendered_frame));
@ -959,10 +955,10 @@ TEST_P(VideoReceiveStream2Test, BetterFrameInsertedWhileWaitingToDecodeFrame) {
InSequence seq; InSequence seq;
EXPECT_CALL(mock_decoder_, EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _, _)) Decode(test::RtpTimestamp(RtpTimestampForFrame(1)), _))
.Times(1); .Times(1);
EXPECT_CALL(mock_decoder_, EXPECT_CALL(mock_decoder_,
Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _, _)) Decode(test::RtpTimestamp(RtpTimestampForFrame(2)), _))
.Times(1); .Times(1);
// Simulate f1 arriving after f2 but before f2 is decoded. // Simulate f1 arriving after f2 but before f2 is decoded.
video_receive_stream_->OnCompleteFrame(std::move(f2)); video_receive_stream_->OnCompleteFrame(std::move(f2));
@ -1021,7 +1017,7 @@ TEST_P(VideoReceiveStream2Test, RtpTimestampWrapAround) {
.ReceivedTime(clock_->CurrentTime()) .ReceivedTime(clock_->CurrentTime())
.AsLast() .AsLast()
.Build()); .Build());
EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kWrapAroundRtp), _, _)) EXPECT_CALL(mock_decoder_, Decode(test::RtpTimestamp(kWrapAroundRtp), _))
.Times(1); .Times(1);
EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame()); EXPECT_THAT(fake_renderer_.WaitForFrame(TimeDelta::Zero()), RenderedFrame());