Remove RTPFragmentationHeader creation and propagation through webrtc

Bug: webrtc:6471
Change-Id: I5cb1e10088aaecb5981888082b87ae9957bbaaef
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/181541
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31955}
This commit is contained in:
Danil Chapovalov 2020-08-12 17:30:36 +02:00 committed by Commit Bot
parent c8ac35879c
commit 2549f174b5
41 changed files with 121 additions and 221 deletions

View file

@@ -15,7 +15,6 @@
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"

View file

@@ -120,10 +120,9 @@ class AdapterEncodedImageCallback : public webrtc::EncodedImageCallback {
EncodedImageCallback::Result OnEncodedImage(
const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) override {
const webrtc::CodecSpecificInfo* codec_specific_info) override {
return adapter_->OnEncodedImage(stream_idx_, encoded_image,
codec_specific_info, fragmentation);
codec_specific_info);
}
private:
@@ -559,15 +558,14 @@ void SimulcastEncoderAdapter::OnLossNotification(
EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
size_t stream_idx,
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentation) {
const CodecSpecificInfo* codecSpecificInfo) {
EncodedImage stream_image(encodedImage);
CodecSpecificInfo stream_codec_specific = *codecSpecificInfo;
stream_image.SetSpatialIndex(stream_idx);
return encoded_complete_callback_->OnEncodedImage(
stream_image, &stream_codec_specific, fragmentation);
return encoded_complete_callback_->OnEncodedImage(stream_image,
&stream_codec_specific);
}
void SimulcastEncoderAdapter::PopulateStreamCodec(

View file

@@ -70,8 +70,7 @@ class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder {
EncodedImageCallback::Result OnEncodedImage(
size_t stream_idx,
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation);
const CodecSpecificInfo* codec_specific_info);
EncoderInfo GetEncoderInfo() const override;

View file

@@ -251,7 +251,7 @@ class MockVideoEncoder : public VideoEncoder {
image._encodedHeight = height;
CodecSpecificInfo codec_specific_info;
codec_specific_info.codecType = webrtc::kVideoCodecVP8;
callback_->OnEncodedImage(image, &codec_specific_info, nullptr);
callback_->OnEncodedImage(image, &codec_specific_info);
}
void set_supports_native_handle(bool enabled) {
@@ -422,8 +422,7 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
}
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
const CodecSpecificInfo* codec_specific_info) override {
last_encoded_image_width_ = encoded_image._encodedWidth;
last_encoded_image_height_ = encoded_image._encodedHeight;
last_encoded_image_simulcast_index_ =

View file

@@ -98,7 +98,6 @@ rtc_library("rtp_rtcp_format") {
]
deps = [
"..:module_api",
"..:module_api_public",
"../../api:array_view",
"../../api:function_view",
@@ -380,7 +379,6 @@ rtc_library("fec_test_helper") {
deps = [
":rtp_rtcp",
":rtp_rtcp_format",
"..:module_api",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
]
@@ -522,7 +520,6 @@ if (rtc_include_tests) {
":rtcp_transceiver",
":rtp_rtcp",
":rtp_rtcp_format",
"..:module_api",
"../..:webrtc_common",
"../../api:array_view",
"../../api:libjingle_peerconnection_api",

View file

@@ -16,7 +16,6 @@
#include <vector>
#include "api/array_view.h"
#include "modules/include/module_common_types.h"
#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
#include "modules/rtp_rtcp/source/byte_io.h"
#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"

View file

@@ -16,7 +16,6 @@
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "common_video/h264/h264_common.h"
#include "modules/include/module_common_types.h"
#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
#include "modules/rtp_rtcp/source/byte_io.h"
#include "rtc_base/copy_on_write_buffer.h"

View file

@@ -22,7 +22,6 @@ rtc_library("encoded_frame") {
"../../api/video:video_frame",
"../../api/video:video_frame_i420",
"../../api/video:video_rtp_headers",
"../../modules:module_api",
"../../modules:module_api_public",
"../../modules/rtp_rtcp:rtp_video_header",
"../../rtc_base:checks",
@@ -333,7 +332,6 @@ rtc_library("video_coding_utility") {
deps = [
":video_codec_interface",
"..:module_api",
"../../api:scoped_refptr",
"../../api/video:encoded_frame",
"../../api/video:encoded_image",
@@ -427,7 +425,6 @@ rtc_library("webrtc_multiplex") {
deps = [
":video_codec_interface",
":video_coding_utility",
"..:module_api",
"../../api:fec_controller_api",
"../../api:scoped_refptr",
"../../api/video:encoded_image",
@@ -463,7 +460,6 @@ rtc_library("webrtc_vp8") {
":video_codec_interface",
":video_coding_utility",
":webrtc_vp8_temporal_layers",
"..:module_api",
"../..:webrtc_common",
"../../api:fec_controller_api",
"../../api:scoped_refptr",
@@ -507,7 +503,6 @@ rtc_library("webrtc_vp8_temporal_layers") {
":codec_globals_headers",
":video_codec_interface",
":video_coding_utility",
"..:module_api",
"../..:webrtc_common",
"../../api:fec_controller_api",
"../../api/video_codecs:video_codecs_api",
@@ -561,7 +556,6 @@ rtc_library("webrtc_vp9") {
":video_codec_interface",
":video_coding_utility",
":webrtc_vp9_helpers",
"..:module_api",
"../..:webrtc_common",
"../../api:fec_controller_api",
"../../api:scoped_refptr",
@@ -619,7 +613,6 @@ if (rtc_include_tests) {
"../../api/video_codecs:video_codecs_api",
"../../media:rtc_audio_video",
"../../media:rtc_media_base",
"../../modules:module_api",
"../../rtc_base:rtc_base_approved",
"../../sdk:native_api",
"../../sdk:peerconnectionfactory_base_objc",
@@ -970,7 +963,6 @@ if (rtc_include_tests) {
":webrtc_vp8_temporal_layers",
":webrtc_vp9",
":webrtc_vp9_helpers",
"..:module_api",
"..:module_fec_api",
"../../api:array_view",
"../../api:create_simulcast_test_fixture_api",

View file

@@ -527,7 +527,7 @@ int32_t LibaomAv1Encoder::Encode(
}
}
encoded_image_callback_->OnEncodedImage(encoded_image,
&codec_specific_info, nullptr);
&codec_specific_info);
}
}

View file

@@ -87,19 +87,15 @@ VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
} // namespace
// Helper method used by H264EncoderImpl::Encode.
// Copies the encoded bytes from |info| to |encoded_image| and updates the
// fragmentation information of |frag_header|. The |encoded_image->_buffer| may
// be deleted and reallocated if a bigger buffer is required.
// Copies the encoded bytes from |info| to |encoded_image|. The
// |encoded_image->_buffer| may be deleted and reallocated if a bigger buffer is
// required.
//
// After OpenH264 encoding, the encoded bytes are stored in |info| spread out
// over a number of layers and "NAL units". Each NAL unit is a fragment starting
// with the four-byte start code {0,0,0,1}. All of this data (including the
// start codes) is copied to the |encoded_image->_buffer| and the |frag_header|
// is updated to point to each fragment, with offsets and lengths set as to
// exclude the start codes.
static void RtpFragmentize(EncodedImage* encoded_image,
SFrameBSInfo* info,
RTPFragmentationHeader* frag_header) {
// start codes) is copied to the |encoded_image->_buffer|.
static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
// Calculate minimum buffer size required to hold encoded data.
size_t required_capacity = 0;
size_t fragments_count = 0;
@@ -119,7 +115,6 @@ static void RtpFragmentize(EncodedImage* encoded_image,
// Iterate layers and NAL units, note each NAL unit as a fragment and copy
// the data to |encoded_image->_buffer|.
const uint8_t start_code[4] = {0, 0, 0, 1};
frag_header->VerifyAndAllocateFragmentationHeader(fragments_count);
size_t frag = 0;
encoded_image->set_size(0);
for (int layer = 0; layer < info->iLayerNum; ++layer) {
@@ -134,10 +129,6 @@ static void RtpFragmentize(EncodedImage* encoded_image,
RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
frag_header->fragmentationOffset[frag] =
encoded_image->size() + layer_len + sizeof(start_code);
frag_header->fragmentationLength[frag] =
layerInfo.pNalLengthInByte[nal] - sizeof(start_code);
layer_len += layerInfo.pNalLengthInByte[nal];
}
// Copy the entire layer's data (including start codes).
@@ -485,8 +476,7 @@ int32_t H264EncoderImpl::Encode(
// Split encoded image up into fragments. This also updates
// |encoded_image_|.
RTPFragmentationHeader frag_header;
RtpFragmentize(&encoded_images_[i], &info, &frag_header);
RtpFragmentize(&encoded_images_[i], &info);
// Encoder can skip frames to save bandwidth in which case
// |encoded_images_[i]._length| == 0.
@@ -518,7 +508,7 @@ int32_t H264EncoderImpl::Encode(
}
}
encoded_image_callback_->OnEncodedImage(encoded_images_[i],
&codec_specific, &frag_header);
&codec_specific);
}
}
return WEBRTC_VIDEO_CODEC_OK;

View file

@@ -72,7 +72,7 @@ class H264EncoderImpl : public H264Encoder {
EncodedImageCallback* callback) override;
void SetRates(const RateControlParameters& parameters) override;
// The result of encoding - an EncodedImage and RTPFragmentationHeader - are
// The result of encoding - an EncodedImage and CodecSpecificInfo - are
// passed to the encode complete callback.
int32_t Encode(const VideoFrame& frame,
const std::vector<VideoFrameType>* frame_types) override;

View file

@@ -57,8 +57,7 @@ class MultiplexEncoderAdapter : public VideoEncoder {
EncodedImageCallback::Result OnEncodedImage(
AlphaCodecStream stream_idx,
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentation);
const CodecSpecificInfo* codecSpecificInfo);
private:
// Wrapper class that redirects OnEncodedImage() calls.

View file

@@ -17,7 +17,6 @@
#include "common_video/include/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "media/base/video_common.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
#include "rtc_base/keep_ref_until_done.h"
#include "rtc_base/logging.h"
@@ -35,12 +34,11 @@ class MultiplexEncoderAdapter::AdapterEncodedImageCallback
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
const CodecSpecificInfo* codec_specific_info) override {
if (!adapter_)
return Result(Result::OK);
return adapter_->OnEncodedImage(stream_idx_, encoded_image,
codec_specific_info, fragmentation);
codec_specific_info);
}
private:
@@ -286,8 +284,7 @@ VideoEncoder::EncoderInfo MultiplexEncoderAdapter::GetEncoderInfo() const {
EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
AlphaCodecStream stream_idx,
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentation) {
const CodecSpecificInfo* codecSpecificInfo) {
// Save the image
MultiplexImageComponent image_component;
image_component.component_index = stream_idx;
@@ -324,8 +321,7 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
CodecSpecificInfo codec_info = *codecSpecificInfo;
codec_info.codecType = kVideoCodecMultiplex;
encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info,
fragmentation);
encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info);
}
stashed_images_.erase(stashed_images_.begin(), stashed_image_next_itr);

View file

@@ -33,10 +33,8 @@ class EncoderCallback : public EncodedImageCallback {
: output_frames_(output_frames) {}
private:
Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* /*fragmentation*/) override {
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info) override {
output_frames_.push_back({encoded_image, *codec_specific_info});
return Result(Result::Error::OK);
}

View file

@@ -35,8 +35,7 @@ const VideoEncoder::Capabilities kCapabilities(false);
EncodedImageCallback::Result
VideoCodecUnitTest::FakeEncodeCompleteCallback::OnEncodedImage(
const EncodedImage& frame,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
const CodecSpecificInfo* codec_specific_info) {
MutexLock lock(&test_->encoded_frame_section_);
test_->encoded_frames_.push_back(frame);
RTC_DCHECK(codec_specific_info);

View file

@@ -42,8 +42,7 @@ class VideoCodecUnitTest : public ::testing::Test {
: test_(test) {}
Result OnEncodedImage(const EncodedImage& frame,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation);
const CodecSpecificInfo* codec_specific_info);
private:
VideoCodecUnitTest* const test_;

View file

@@ -91,8 +91,7 @@ class VideoProcessor {
Result OnEncodedImage(
const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) override {
const webrtc::CodecSpecificInfo* codec_specific_info) override {
RTC_CHECK(codec_specific_info);
// Post the callback to the right task queue, if needed.

View file

@@ -1214,7 +1214,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image,
&qp_128);
encoded_images_[encoder_idx].qp_ = qp_128;
encoded_complete_callback_->OnEncodedImage(encoded_images_[encoder_idx],
&codec_specific, nullptr);
&codec_specific);
const size_t steady_state_size = SteadyStateSize(
stream_idx, codec_specific.codecSpecific.VP8.temporalIdx);
if (qp_128 > variable_framerate_experiment_.steady_state_qp ||

View file

@@ -1525,15 +1525,8 @@ void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
codec_specific_.codecSpecific.VP9.end_of_picture = end_of_picture;
// No data partitioning in VP9, so 1 partition only.
int part_idx = 0;
RTPFragmentationHeader frag_info;
frag_info.VerifyAndAllocateFragmentationHeader(1);
frag_info.fragmentationOffset[part_idx] = 0;
frag_info.fragmentationLength[part_idx] = encoded_image_.size();
encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific_,
&frag_info);
encoded_complete_callback_->OnEncodedImage(encoded_image_,
&codec_specific_);
if (codec_.mode == VideoCodecMode::kScreensharing) {
const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0);

View file

@@ -16,7 +16,6 @@
#include "api/rtp_headers.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_decoder.h"
#include "modules/include/module_common_types.h"
#include "modules/utility/include/process_thread.h"
#include "modules/video_coding/decoder_database.h"
#include "modules/video_coding/encoded_frame.h"

View file

@@ -764,7 +764,6 @@ rtc_library("fake_video_codecs") {
"../api/video:video_rtp_headers",
"../api/video_codecs:video_codecs_api",
"../api/video_codecs:vp8_temporal_layers_factory",
"../modules:module_api",
"../modules/video_coding:codec_globals_headers",
"../modules/video_coding:video_codec_interface",
"../modules/video_coding:video_coding_utility",

View file

@@ -17,7 +17,6 @@
#include <utility>
#include "api/video/encoded_image.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"
@@ -59,10 +58,9 @@ int32_t ConfigurableFrameSizeEncoder::Encode(
encodedImage._frameType = VideoFrameType::kVideoFrameKey;
encodedImage.SetTimestamp(inputImage.timestamp());
encodedImage.capture_time_ms_ = inputImage.render_time_ms();
RTPFragmentationHeader* fragmentation = NULL;
CodecSpecificInfo specific{};
specific.codecType = codec_type_;
callback_->OnEncodedImage(encodedImage, &specific, fragmentation);
callback_->OnEncodedImage(encodedImage, &specific);
if (post_encode_callback_) {
(*post_encode_callback_)();
}

View file

@@ -144,23 +144,20 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image,
if (qp)
encoded.qp_ = *qp;
encoded.SetSpatialIndex(i);
CodecSpecificInfo codec_specific;
std::unique_ptr<RTPFragmentationHeader> fragmentation =
EncodeHook(&encoded, &codec_specific);
CodecSpecificInfo codec_specific = EncodeHook(encoded);
if (callback->OnEncodedImage(encoded, &codec_specific, fragmentation.get())
.error != EncodedImageCallback::Result::OK) {
if (callback->OnEncodedImage(encoded, &codec_specific).error !=
EncodedImageCallback::Result::OK) {
return -1;
}
}
return 0;
}
std::unique_ptr<RTPFragmentationHeader> FakeEncoder::EncodeHook(
EncodedImage* encoded_image,
CodecSpecificInfo* codec_specific) {
codec_specific->codecType = kVideoCodecGeneric;
return nullptr;
CodecSpecificInfo FakeEncoder::EncodeHook(EncodedImage& encoded_image) {
CodecSpecificInfo codec_specific;
codec_specific.codecType = kVideoCodecGeneric;
return codec_specific;
}
FakeEncoder::FrameInfo FakeEncoder::NextFrame(
@@ -287,9 +284,7 @@ int FakeEncoder::GetConfiguredInputFramerate() const {
FakeH264Encoder::FakeH264Encoder(Clock* clock)
: FakeEncoder(clock), idr_counter_(0) {}
std::unique_ptr<RTPFragmentationHeader> FakeH264Encoder::EncodeHook(
EncodedImage* encoded_image,
CodecSpecificInfo* codec_specific) {
CodecSpecificInfo FakeH264Encoder::EncodeHook(EncodedImage& encoded_image) {
static constexpr std::array<uint8_t, 3> kStartCode = {0, 0, 1};
const size_t kSpsSize = 8;
const size_t kPpsSize = 11;
@@ -300,51 +295,40 @@ std::unique_ptr<RTPFragmentationHeader> FakeH264Encoder::EncodeHook(
current_idr_counter = idr_counter_;
++idr_counter_;
}
for (size_t i = 0; i < encoded_image->size(); ++i) {
encoded_image->data()[i] = static_cast<uint8_t>(i);
for (size_t i = 0; i < encoded_image.size(); ++i) {
encoded_image.data()[i] = static_cast<uint8_t>(i);
}
auto fragmentation = std::make_unique<RTPFragmentationHeader>();
if (current_idr_counter % kIdrFrequency == 0 &&
encoded_image->size() > kSpsSize + kPpsSize + 1 + 3 * kStartCode.size()) {
const size_t kNumSlices = 3;
fragmentation->VerifyAndAllocateFragmentationHeader(kNumSlices);
fragmentation->fragmentationOffset[0] = kStartCode.size();
fragmentation->fragmentationLength[0] = kSpsSize;
fragmentation->fragmentationOffset[1] = 2 * kStartCode.size() + kSpsSize;
fragmentation->fragmentationLength[1] = kPpsSize;
fragmentation->fragmentationOffset[2] =
3 * kStartCode.size() + kSpsSize + kPpsSize;
fragmentation->fragmentationLength[2] =
encoded_image->size() - (3 * kStartCode.size() + kSpsSize + kPpsSize);
encoded_image.size() > kSpsSize + kPpsSize + 1 + 3 * kStartCode.size()) {
const size_t kSpsNalHeader = 0x67;
const size_t kPpsNalHeader = 0x68;
const size_t kIdrNalHeader = 0x65;
memcpy(encoded_image->data(), kStartCode.data(), kStartCode.size());
encoded_image->data()[fragmentation->Offset(0)] = kSpsNalHeader;
memcpy(encoded_image->data() + fragmentation->Offset(1) - kStartCode.size(),
kStartCode.data(), kStartCode.size());
encoded_image->data()[fragmentation->Offset(1)] = kPpsNalHeader;
memcpy(encoded_image->data() + fragmentation->Offset(2) - kStartCode.size(),
kStartCode.data(), kStartCode.size());
encoded_image->data()[fragmentation->Offset(2)] = kIdrNalHeader;
uint8_t* data = encoded_image.data();
memcpy(data, kStartCode.data(), kStartCode.size());
data += kStartCode.size();
data[0] = kSpsNalHeader;
data += kSpsSize;
memcpy(data, kStartCode.data(), kStartCode.size());
data += kStartCode.size();
data[0] = kPpsNalHeader;
data += kPpsSize;
memcpy(data, kStartCode.data(), kStartCode.size());
data += kStartCode.size();
data[0] = kIdrNalHeader;
} else {
const size_t kNumSlices = 1;
fragmentation->VerifyAndAllocateFragmentationHeader(kNumSlices);
fragmentation->fragmentationOffset[0] = kStartCode.size();
fragmentation->fragmentationLength[0] =
encoded_image->size() - kStartCode.size();
memcpy(encoded_image->data(), kStartCode.data(), kStartCode.size());
memcpy(encoded_image.data(), kStartCode.data(), kStartCode.size());
const size_t kNalHeader = 0x41;
encoded_image->data()[fragmentation->fragmentationOffset[0]] = kNalHeader;
encoded_image.data()[kStartCode.size()] = kNalHeader;
}
codec_specific->codecType = kVideoCodecH264;
codec_specific->codecSpecific.H264.packetization_mode =
CodecSpecificInfo codec_specific;
codec_specific.codecType = kVideoCodecH264;
codec_specific.codecSpecific.H264.packetization_mode =
H264PacketizationMode::NonInterleaved;
return fragmentation;
return codec_specific;
}
DelayedEncoder::DelayedEncoder(Clock* clock, int delay_ms)

View file

@@ -24,7 +24,6 @@
#include "api/video/video_frame.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/synchronization/sequence_checker.h"
@@ -84,11 +83,8 @@ class FakeEncoder : public VideoEncoder {
int framerate) RTC_LOCKS_EXCLUDED(mutex_);
// Called before the frame is passed to callback_->OnEncodedImage, to let
// subclasses fill out codec_specific, possibly modify encodedImage.
// Returns an RTPFragmentationHeader, if needed by the codec.
virtual std::unique_ptr<RTPFragmentationHeader> EncodeHook(
EncodedImage* encoded_image,
CodecSpecificInfo* codec_specific);
// subclasses fill out CodecSpecificInfo, possibly modify |encoded_image|.
virtual CodecSpecificInfo EncodeHook(EncodedImage& encoded_image);
void SetRatesLocked(const RateControlParameters& parameters)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
@@ -117,9 +113,7 @@ class FakeH264Encoder : public FakeEncoder {
virtual ~FakeH264Encoder() = default;
private:
std::unique_ptr<RTPFragmentationHeader> EncodeHook(
EncodedImage* encoded_image,
CodecSpecificInfo* codec_specific) override;
CodecSpecificInfo EncodeHook(EncodedImage& encoded_image) override;
int idr_counter_ RTC_GUARDED_BY(local_mutex_);
Mutex local_mutex_;

View file

@@ -70,41 +70,41 @@ int32_t FakeVp8Encoder::Release() {
return result;
}
void FakeVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
size_t size_bytes,
VideoFrameType frame_type,
int stream_idx,
uint32_t timestamp) {
CodecSpecificInfo FakeVp8Encoder::PopulateCodecSpecific(
size_t size_bytes,
VideoFrameType frame_type,
int stream_idx,
uint32_t timestamp) {
RTC_DCHECK_RUN_ON(&sequence_checker_);
codec_specific->codecType = kVideoCodecVP8;
codec_specific->codecSpecific.VP8.keyIdx = kNoKeyIdx;
codec_specific->codecSpecific.VP8.nonReference = false;
CodecSpecificInfo codec_specific;
codec_specific.codecType = kVideoCodecVP8;
codec_specific.codecSpecific.VP8.keyIdx = kNoKeyIdx;
codec_specific.codecSpecific.VP8.nonReference = false;
if (size_bytes > 0) {
frame_buffer_controller_->OnEncodeDone(
stream_idx, timestamp, size_bytes,
frame_type == VideoFrameType::kVideoFrameKey, -1, codec_specific);
frame_type == VideoFrameType::kVideoFrameKey, -1, &codec_specific);
} else {
frame_buffer_controller_->OnFrameDropped(stream_idx, timestamp);
}
return codec_specific;
}
std::unique_ptr<RTPFragmentationHeader> FakeVp8Encoder::EncodeHook(
EncodedImage* encoded_image,
CodecSpecificInfo* codec_specific) {
CodecSpecificInfo FakeVp8Encoder::EncodeHook(EncodedImage& encoded_image) {
RTC_DCHECK_RUN_ON(&sequence_checker_);
uint8_t stream_idx = encoded_image->SpatialIndex().value_or(0);
uint8_t stream_idx = encoded_image.SpatialIndex().value_or(0);
frame_buffer_controller_->NextFrameConfig(stream_idx,
encoded_image->Timestamp());
PopulateCodecSpecific(codec_specific, encoded_image->size(),
encoded_image->_frameType, stream_idx,
encoded_image->Timestamp());
encoded_image.Timestamp());
CodecSpecificInfo codec_specific =
PopulateCodecSpecific(encoded_image.size(), encoded_image._frameType,
stream_idx, encoded_image.Timestamp());
// Write width and height to the payload the same way as the real encoder
// does.
WriteFakeVp8(encoded_image->data(), encoded_image->_encodedWidth,
encoded_image->_encodedHeight,
encoded_image->_frameType == VideoFrameType::kVideoFrameKey);
return nullptr;
WriteFakeVp8(encoded_image.data(), encoded_image._encodedWidth,
encoded_image._encodedHeight,
encoded_image._frameType == VideoFrameType::kVideoFrameKey);
return codec_specific;
}
VideoEncoder::EncoderInfo FakeVp8Encoder::GetEncoderInfo() const {

View file

@@ -22,7 +22,6 @@
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/vp8_frame_buffer_controller.h"
#include "api/video_codecs/vp8_temporal_layers.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/synchronization/sequence_checker.h"
#include "rtc_base/thread_annotations.h"
@@ -45,15 +44,12 @@ class FakeVp8Encoder : public FakeEncoder {
EncoderInfo GetEncoderInfo() const override;
private:
void PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
size_t size_bytes,
VideoFrameType frame_type,
int stream_idx,
uint32_t timestamp);
CodecSpecificInfo PopulateCodecSpecific(size_t size_bytes,
VideoFrameType frame_type,
int stream_idx,
uint32_t timestamp);
std::unique_ptr<RTPFragmentationHeader> EncodeHook(
EncodedImage* encoded_image,
CodecSpecificInfo* codec_specific) override;
CodecSpecificInfo EncodeHook(EncodedImage& encoded_image) override;
SequenceChecker sequence_checker_;

View file

@@ -232,8 +232,7 @@ VideoEncoder::EncoderInfo QualityAnalyzingVideoEncoder::GetEncoderInfo() const {
// pair - remove the front pair and got to the step 1.
EncodedImageCallback::Result QualityAnalyzingVideoEncoder::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
const CodecSpecificInfo* codec_specific_info) {
uint16_t frame_id;
bool discard = false;
uint32_t target_encode_bitrate = 0;
@@ -292,8 +291,7 @@ EncodedImageCallback::Result QualityAnalyzingVideoEncoder::OnEncodedImage(
{
MutexLock lock(&lock_);
RTC_DCHECK(delegate_callback_);
return delegate_callback_->OnEncodedImage(image, codec_specific_info,
fragmentation);
return delegate_callback_->OnEncodedImage(image, codec_specific_info);
}
}

View file

@@ -84,8 +84,7 @@ class QualityAnalyzingVideoEncoder : public VideoEncoder,
// Methods of EncodedImageCallback interface.
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
const CodecSpecificInfo* codec_specific_info) override;
void OnDroppedFrame(DropReason reason) override;
private:

View file

@@ -68,8 +68,7 @@ class IvfFileWriterEncodedCallback : public EncodedImageCallback {
~IvfFileWriterEncodedCallback() { EXPECT_TRUE(file_writer_->Close()); }
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
const CodecSpecificInfo* codec_specific_info) override {
EXPECT_TRUE(file_writer_->WriteFrame(encoded_image, video_codec_type_));
MutexLock lock(&lock_);

View file

@@ -619,7 +619,6 @@ if (rtc_include_tests) {
"../media:rtc_media_base",
"../media:rtc_media_tests_utils",
"../media:rtc_simulcast_encoder_adapter",
"../modules:module_api",
"../modules:module_api_public",
"../modules/pacing",
"../modules/rtp_rtcp",

View file

@@ -40,8 +40,7 @@ class FakeEncodedImageCallback : public EncodedImageCallback {
public:
FakeEncodedImageCallback() : num_frames_dropped_(0) {}
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
const CodecSpecificInfo* codec_specific_info) override {
return Result(Result::OK);
}
void OnDroppedFrame(DropReason reason) override { ++num_frames_dropped_; }

View file

@@ -238,8 +238,7 @@ class QualityTestVideoEncoder : public VideoEncoder,
private:
// Implement EncodedImageCallback
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
const CodecSpecificInfo* codec_specific_info) override {
if (codec_specific_info) {
int simulcast_index;
if (codec_specific_info->codecType == kVideoCodecVP9) {
@@ -258,8 +257,7 @@ }
}
}
return callback_->OnEncodedImage(encoded_image, codec_specific_info,
fragmentation);
return callback_->OnEncodedImage(encoded_image, codec_specific_info);
}
void OnDroppedFrame(DropReason reason) override {

View file

@@ -37,7 +37,6 @@ namespace webrtc {
class CallStats;
class ProcessThread;
class RTPFragmentationHeader;
class RtpStreamReceiverInterface;
class RtpStreamReceiverControllerInterface;
class RtxReceiveStream;

View file

@@ -37,7 +37,6 @@
namespace webrtc {
class ProcessThread;
class RTPFragmentationHeader;
class RtpStreamReceiverInterface;
class RtpStreamReceiverControllerInterface;
class RtxReceiveStream;

View file

@@ -558,8 +558,7 @@ void VideoSendStreamImpl::OnEncoderConfigurationChanged(
EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
const CodecSpecificInfo* codec_specific_info) {
// Encoded is called on whatever thread the real encoder implementation run
// on. In the case of hardware encoders, there might be several encoders
// running in parallel on different threads.
@@ -582,8 +581,8 @@ EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage(
}
EncodedImageCallback::Result result(EncodedImageCallback::Result::OK);
result = rtp_video_sender_->OnEncodedImage(encoded_image, codec_specific_info,
fragmentation);
result =
rtp_video_sender_->OnEncodedImage(encoded_image, codec_specific_info);
// Check if there's a throttled VideoBitrateAllocation that we should try
// sending.
rtc::WeakPtr<VideoSendStreamImpl> send_stream = weak_ptr_;

View file

@@ -124,8 +124,7 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver,
// Called on an arbitrary encoder callback thread.
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
const CodecSpecificInfo* codec_specific_info) override;
// Implements EncodedImageCallback.
void OnDroppedFrame(EncodedImageCallback::DropReason reason) override;

View file

@@ -82,9 +82,7 @@ class MockRtpVideoSender : public RtpVideoSenderInterface {
(override));
MOCK_METHOD(EncodedImageCallback::Result,
OnEncodedImage,
(const EncodedImage&,
const CodecSpecificInfo*,
const RTPFragmentationHeader*),
(const EncodedImage&, const CodecSpecificInfo*),
(override));
MOCK_METHOD(void, OnTransportOverheadChanged, (size_t), (override));
MOCK_METHOD(void,
@@ -609,7 +607,7 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) {
EncodedImage encoded_image;
CodecSpecificInfo codec_specific;
EXPECT_CALL(rtp_video_sender_, OnEncodedImage(_, _, _))
EXPECT_CALL(rtp_video_sender_, OnEncodedImage)
.WillRepeatedly(Return(EncodedImageCallback::Result(
EncodedImageCallback::Result::OK)));
@@ -651,7 +649,7 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) {
EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc))
.Times(0);
static_cast<EncodedImageCallback*>(vss_impl.get())
->OnEncodedImage(encoded_image, &codec_specific, nullptr);
->OnEncodedImage(encoded_image, &codec_specific);
}
{
@ -661,7 +659,7 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) {
EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc))
.Times(1);
static_cast<EncodedImageCallback*>(vss_impl.get())
->OnEncodedImage(encoded_image, &codec_specific, nullptr);
->OnEncodedImage(encoded_image, &codec_specific);
}
{
@ -671,7 +669,7 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) {
EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc))
.Times(0);
static_cast<EncodedImageCallback*>(vss_impl.get())
->OnEncodedImage(encoded_image, &codec_specific, nullptr);
->OnEncodedImage(encoded_image, &codec_specific);
}
vss_impl->Stop();
@ -804,7 +802,7 @@ TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) {
.WillRepeatedly(Invoke(
[&](BitrateAllocatorObserver*) { padding_bitrate = 0; }));
EXPECT_CALL(rtp_video_sender_, OnEncodedImage(_, _, _))
EXPECT_CALL(rtp_video_sender_, OnEncodedImage)
.WillRepeatedly(Return(EncodedImageCallback::Result(
EncodedImageCallback::Result::OK)));
const bool kSuspend = false;
@ -852,7 +850,7 @@ TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) {
EncodedImage encoded_image;
CodecSpecificInfo codec_specific;
static_cast<EncodedImageCallback*>(vss_impl.get())
->OnEncodedImage(encoded_image, &codec_specific, nullptr);
->OnEncodedImage(encoded_image, &codec_specific);
// Only after actual frame is encoded are we enabling the padding.
EXPECT_GT(padding_bitrate, 0);
},
@ -1011,7 +1009,7 @@ TEST_F(VideoSendStreamImplTest, ConfiguresBitratesForSvc) {
Field(&MediaStreamAllocationConfig::enforce_min_bitrate,
!kSuspend))));
static_cast<EncodedImageCallback*>(vss_impl.get())
->OnEncodedImage(encoded_image, &codec_specific, nullptr);
->OnEncodedImage(encoded_image, &codec_specific);
::testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_);
vss_impl->Stop();

View file

@ -3166,7 +3166,7 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) {
callback = callback_;
}
RTC_DCHECK(callback);
if (callback->OnEncodedImage(encoded, &specifics, nullptr).error !=
if (callback->OnEncodedImage(encoded, &specifics).error !=
EncodedImageCallback::Result::OK) {
return -1;
}

View file

@ -1531,8 +1531,7 @@ void VideoStreamEncoder::OnLossNotification(
EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* /*fragmentation*/) {
const CodecSpecificInfo* codec_specific_info) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image.Timestamp());
const size_t spatial_idx = encoded_image.SpatialIndex().value_or(0);
@ -1608,7 +1607,7 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
}
EncodedImageCallback::Result result =
sink_->OnEncodedImage(image_copy, codec_specific_info, nullptr);
sink_->OnEncodedImage(image_copy, codec_specific_info);
// We are only interested in propagating the meta-data about the image, not
// encoded data itself, to the post encode function. Since we cannot be sure

View file

@ -189,8 +189,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
// Implements EncodedImageCallback.
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
const CodecSpecificInfo* codec_specific_info) override;
void OnDroppedFrame(EncodedImageCallback::DropReason reason) override;

View file

@ -896,7 +896,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
void InjectEncodedImage(const EncodedImage& image) {
MutexLock lock(&local_mutex_);
encoded_image_callback_->OnEncodedImage(image, nullptr, nullptr);
encoded_image_callback_->OnEncodedImage(image, nullptr);
}
void SetEncodedImageData(
@ -959,25 +959,17 @@ class VideoStreamEncoderTest : public ::testing::Test {
return result;
}
std::unique_ptr<RTPFragmentationHeader> EncodeHook(
EncodedImage* encoded_image,
CodecSpecificInfo* codec_specific) override {
CodecSpecificInfo EncodeHook(EncodedImage& encoded_image) override {
CodecSpecificInfo codec_specific;
{
MutexLock lock(&mutex_);
codec_specific->codecType = config_.codecType;
codec_specific.codecType = config_.codecType;
}
MutexLock lock(&local_mutex_);
if (encoded_image_data_) {
encoded_image->SetEncodedData(encoded_image_data_);
if (codec_specific->codecType == kVideoCodecH264) {
auto fragmentation = std::make_unique<RTPFragmentationHeader>();
fragmentation->VerifyAndAllocateFragmentationHeader(1);
fragmentation->fragmentationOffset[0] = 4;
fragmentation->fragmentationLength[0] = encoded_image->size() - 4;
return fragmentation;
}
encoded_image.SetEncodedData(encoded_image_data_);
}
return nullptr;
return codec_specific;
}
int32_t InitEncode(const VideoCodec* config,
@ -1175,8 +1167,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
private:
Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* /*fragmentation*/) override {
const CodecSpecificInfo* codec_specific_info) override {
MutexLock lock(&mutex_);
EXPECT_TRUE(expect_frames_);
last_encoded_image_data_ = std::vector<uint8_t>(