Mirror of https://github.com/mollyim/webrtc.git, synced 2025-05-20 09:07:52 +01:00

This reverts commit 20f2133d5d.
Reason for revert: Breaks downstream project.
Original change's description:
> Add stereo codec header and pass it through RTP
>
> - Defines CodecSpecificInfoStereo that carries stereo-specific header info from
>   the encoded image.
> - Defines RTPVideoHeaderStereo that carries the above info to packetizer,
> see module_common_types.h.
> - Adds an RTPPacketizer and an RTPDepacketizer that support passing the
>   stereo-specific header.
> - Uses new data containers in StereoAdapter classes.
>
> This CL is step 3 of adding alpha channel support over the wire in webrtc.
> See https://webrtc-review.googlesource.com/c/src/+/7800 for the experimental
> CL that gives an idea about how it will come together.
> Design Doc: https://goo.gl/sFeSUT
>
> Bug: webrtc:7671
> Change-Id: Ia932568fdd7065ba104afd2bc0ecf25a765748ab
> Reviewed-on: https://webrtc-review.googlesource.com/22900
> Reviewed-by: Emircan Uysaler <emircan@webrtc.org>
> Reviewed-by: Erik Språng <sprang@webrtc.org>
> Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
> Reviewed-by: Niklas Enbom <niklas.enbom@webrtc.org>
> Commit-Queue: Emircan Uysaler <emircan@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#20920}
TBR=danilchap@webrtc.org,sprang@webrtc.org,stefan@webrtc.org,niklas.enbom@webrtc.org,emircan@webrtc.org
Change-Id: I57f3172ca3c60a84537d577a574dc8018e12d634
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:7671
Reviewed-on: https://webrtc-review.googlesource.com/26940
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20931}
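
For context, the reverted change described above introduced data containers (CodecSpecificInfoStereo, RTPVideoHeaderStereo) that carry stereo/alpha header info from the encoded image down to the RTP packetizer. The sketch below only illustrates that idea; the struct and field names are assumptions for this page, not the definitions from the reverted CL.

// Hypothetical sketch, for illustration only. The actual CodecSpecificInfoStereo
// and RTPVideoHeaderStereo definitions live in the reverted CL and are not shown here.
struct RTPVideoHeaderStereoSketch {
  uint8_t associated_stream_count;  // e.g. 2 streams: YUV plus alpha (AXX).
  uint8_t stream_index;             // Which stream this encoded image belongs to.
  uint16_t picture_index;           // Lets the receiver pair YUV and alpha frames.
};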
172 lines · 6.4 KiB · C++
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/video_coding/codecs/stereo/include/stereo_encoder_adapter.h"

#include "api/video_codecs/sdp_video_format.h"
#include "common_video/include/video_frame.h"
#include "common_video/include/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/include/module_common_types.h"
#include "rtc_base/keep_ref_until_done.h"
#include "rtc_base/logging.h"

namespace webrtc {

// Callback wrapper that helps distinguish returned results from |encoders_|
// instances.
class StereoEncoderAdapter::AdapterEncodedImageCallback
    : public webrtc::EncodedImageCallback {
 public:
  AdapterEncodedImageCallback(webrtc::StereoEncoderAdapter* adapter,
                              AlphaCodecStream stream_idx)
      : adapter_(adapter), stream_idx_(stream_idx) {}

  EncodedImageCallback::Result OnEncodedImage(
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info,
      const RTPFragmentationHeader* fragmentation) override {
    if (!adapter_)
      return Result(Result::OK);
    return adapter_->OnEncodedImage(stream_idx_, encoded_image,
                                    codec_specific_info, fragmentation);
  }

 private:
  StereoEncoderAdapter* adapter_;
  const AlphaCodecStream stream_idx_;
};

StereoEncoderAdapter::StereoEncoderAdapter(VideoEncoderFactory* factory)
    : factory_(factory), encoded_complete_callback_(nullptr) {}

StereoEncoderAdapter::~StereoEncoderAdapter() {
  Release();
}

int StereoEncoderAdapter::InitEncode(const VideoCodec* inst,
                                     int number_of_cores,
                                     size_t max_payload_size) {
  const size_t buffer_size =
      CalcBufferSize(VideoType::kI420, inst->width, inst->height);
  stereo_dummy_planes_.resize(buffer_size);
  // It is more expensive to encode 0x00, so use 0x80 instead.
  std::fill(stereo_dummy_planes_.begin(), stereo_dummy_planes_.end(), 0x80);

  // Create one underlying VP9 encoder per stream (YUV and AXX).
  for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
    const SdpVideoFormat format("VP9");
    std::unique_ptr<VideoEncoder> encoder =
        factory_->CreateVideoEncoder(format);
    const int rv = encoder->InitEncode(inst, number_of_cores, max_payload_size);
    if (rv) {
      RTC_LOG(LS_ERROR) << "Failed to create stereo codec index " << i;
      return rv;
    }
    adapter_callbacks_.emplace_back(new AdapterEncodedImageCallback(
        this, static_cast<AlphaCodecStream>(i)));
    encoder->RegisterEncodeCompleteCallback(adapter_callbacks_.back().get());
    encoders_.emplace_back(std::move(encoder));
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

int StereoEncoderAdapter::Encode(const VideoFrame& input_image,
                                 const CodecSpecificInfo* codec_specific_info,
                                 const std::vector<FrameType>* frame_types) {
  if (!encoded_complete_callback_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  // Encode YUV
  int rv = encoders_[kYUVStream]->Encode(input_image, codec_specific_info,
                                         frame_types);
  if (rv)
    return rv;

  const bool has_alpha = input_image.video_frame_buffer()->type() ==
                         VideoFrameBuffer::Type::kI420A;
  if (!has_alpha)
    return rv;

  // Encode AXX: pack the alpha plane as the luma plane of a synthetic I420
  // frame, using the constant dummy data from InitEncode() as chroma planes.
  const I420ABufferInterface* yuva_buffer =
      input_image.video_frame_buffer()->GetI420A();
  rtc::scoped_refptr<I420BufferInterface> alpha_buffer =
      WrapI420Buffer(input_image.width(), input_image.height(),
                     yuva_buffer->DataA(), yuva_buffer->StrideA(),
                     stereo_dummy_planes_.data(), yuva_buffer->StrideU(),
                     stereo_dummy_planes_.data(), yuva_buffer->StrideV(),
                     rtc::KeepRefUntilDone(input_image.video_frame_buffer()));
  VideoFrame alpha_image(alpha_buffer, input_image.timestamp(),
                         input_image.render_time_ms(), input_image.rotation());
  rv = encoders_[kAXXStream]->Encode(alpha_image, codec_specific_info,
                                     frame_types);
  return rv;
}

int StereoEncoderAdapter::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  encoded_complete_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}

int StereoEncoderAdapter::SetChannelParameters(uint32_t packet_loss,
                                               int64_t rtt) {
  for (auto& encoder : encoders_) {
    const int rv = encoder->SetChannelParameters(packet_loss, rtt);
    if (rv)
      return rv;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

int StereoEncoderAdapter::SetRateAllocation(const BitrateAllocation& bitrate,
                                            uint32_t framerate) {
  for (auto& encoder : encoders_) {
    // TODO(emircan): |new_framerate| is used to calculate duration for encoder
    // instances. We report the total frame rate to keep real time for now.
    // Remove this after refactoring duration logic.
    const int rv = encoder->SetRateAllocation(
        bitrate, static_cast<uint32_t>(encoders_.size()) * framerate);
    if (rv)
      return rv;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

int StereoEncoderAdapter::Release() {
  for (auto& encoder : encoders_) {
    const int rv = encoder->Release();
    if (rv)
      return rv;
  }
  encoders_.clear();
  adapter_callbacks_.clear();
  return WEBRTC_VIDEO_CODEC_OK;
}

const char* StereoEncoderAdapter::ImplementationName() const {
  return "StereoEncoderAdapter";
}

EncodedImageCallback::Result StereoEncoderAdapter::OnEncodedImage(
    AlphaCodecStream stream_idx,
    const EncodedImage& encodedImage,
    const CodecSpecificInfo* codecSpecificInfo,
    const RTPFragmentationHeader* fragmentation) {
  // Encoded images from the alpha (AXX) stream are not forwarded yet.
  if (stream_idx == kAXXStream)
    return EncodedImageCallback::Result(EncodedImageCallback::Result::OK);

  // TODO(emircan): Fill |codec_specific_info| with stereo parameters.
  encoded_complete_callback_->OnEncodedImage(encodedImage, codecSpecificInfo,
                                             fragmentation);
  return EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
}

}  // namespace webrtc
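
A minimal usage sketch of the adapter above, assuming a VP9-capable VideoEncoderFactory, a configured VideoCodec, and an EncodedImageCallback sink are already available; the function name and the parameter values are invented for the example, and error handling is elided.

#include "modules/video_coding/codecs/stereo/include/stereo_encoder_adapter.h"

// Illustrative only; not part of the file above.
void EncodeWithAlphaSketch(webrtc::VideoEncoderFactory* factory,
                           const webrtc::VideoCodec& codec,
                           webrtc::EncodedImageCallback* sink,
                           const webrtc::VideoFrame& frame) {
  webrtc::StereoEncoderAdapter adapter(factory);
  adapter.RegisterEncodeCompleteCallback(sink);
  adapter.InitEncode(&codec, /*number_of_cores=*/1, /*max_payload_size=*/1200);
  // If |frame| wraps an I420A buffer, both the YUV and the alpha (AXX) streams
  // are encoded; otherwise only the YUV stream is.
  adapter.Encode(frame, /*codec_specific_info=*/nullptr,
                 /*frame_types=*/nullptr);
  adapter.Release();
}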