Add owned data buffer to EncodedImage

Bug: webrtc:9378
Change-Id: I6a66b9301cbadf1d6517bf7a96028099970a20a3
Reviewed-on: https://webrtc-review.googlesource.com/c/117964
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26585}
Author: Niels Möller, 2019-02-07 00:02:17 +01:00 (committed by Commit Bot)
Parent: e6f6a0cb8d
Commit: 938dd9f1e8
17 changed files with 105 additions and 108 deletions
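The patch gives EncodedImage an owned std::vector<uint8_t> (encoded_data_) next to the legacy un-owned buffer_ pointer, and makes the accessors dispatch on which one is in use. A minimal sketch of the two resulting modes, assuming a WebRTC checkout with the patched api/video/encoded_image.h; the function name is illustrative:

    #include <cstdint>

    #include "api/video/encoded_image.h"

    void OwnershipModes() {
      // Legacy mode: the image wraps a caller-owned buffer and the caller
      // remains responsible for delete[].
      uint8_t* raw = new uint8_t[100];
      webrtc::EncodedImage unowned;
      unowned.set_buffer(raw, /*capacity=*/100);
      unowned.set_size(50);

      // New mode: the image owns its storage in an internal vector, so
      // there is no manual delete[]; ~EncodedImage() releases it.
      webrtc::EncodedImage owned;
      owned.Allocate(/*capacity=*/100);
      owned.set_size(50);

      delete[] raw;  // still our job in legacy mode
    }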

---- file 1/17 ----

@@ -74,33 +74,10 @@ MediaTransportEncodedVideoFrame::MediaTransportEncodedVideoFrame(
       referenced_frame_ids_(std::move(referenced_frame_ids)) {}
 
 MediaTransportEncodedVideoFrame& MediaTransportEncodedVideoFrame::operator=(
-    const MediaTransportEncodedVideoFrame& o) {
-  payload_type_ = o.payload_type_;
-  encoded_image_ = o.encoded_image_;
-  encoded_data_ = o.encoded_data_;
-  frame_id_ = o.frame_id_;
-  referenced_frame_ids_ = o.referenced_frame_ids_;
-  if (!encoded_data_.empty()) {
-    // We own the underlying data.
-    encoded_image_.set_buffer(encoded_data_.data(), encoded_data_.size());
-  }
-  return *this;
-}
+    const MediaTransportEncodedVideoFrame&) = default;
 
 MediaTransportEncodedVideoFrame& MediaTransportEncodedVideoFrame::operator=(
-    MediaTransportEncodedVideoFrame&& o) {
-  payload_type_ = o.payload_type_;
-  encoded_image_ = o.encoded_image_;
-  encoded_data_ = std::move(o.encoded_data_);
-  frame_id_ = o.frame_id_;
-  referenced_frame_ids_ = std::move(o.referenced_frame_ids_);
-  if (!encoded_data_.empty()) {
-    // We take over ownership of the underlying data.
-    encoded_image_.set_buffer(encoded_data_.data(), encoded_data_.size());
-    o.encoded_image_.set_buffer(nullptr, 0);
-  }
-  return *this;
-}
+    MediaTransportEncodedVideoFrame&&) = default;
 
 MediaTransportEncodedVideoFrame::MediaTransportEncodedVideoFrame(
     const MediaTransportEncodedVideoFrame& o)
@@ -114,14 +91,6 @@ MediaTransportEncodedVideoFrame::MediaTransportEncodedVideoFrame(
   *this = std::move(o);
 }
 
-void MediaTransportEncodedVideoFrame::Retain() {
-  if (encoded_image_.data() && encoded_data_.empty()) {
-    encoded_data_ = std::vector<uint8_t>(
-        encoded_image_.data(), encoded_image_.data() + encoded_image_.size());
-    encoded_image_.set_buffer(encoded_data_.data(), encoded_image_.size());
-  }
-}
-
 SendDataParams::SendDataParams() = default;
 SendDataParams::SendDataParams(const SendDataParams&) = default;
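With ownership folded into EncodedImage (and the frame's own encoded_data_ vector gone), the hand-written operators above reduce to =default: memberwise copy duplicates the owned vector and memberwise move steals it, so no pointer fixup remains. A standalone illustration of why the defaults are now safe; OwningImage is a stand-in type, not part of the patch:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Stand-in for the owning part of the frame: once the bytes live in a
    // std::vector, the compiler-generated copy duplicates them and the
    // generated move transfers them, with no dangling-pointer repair.
    struct OwningImage {
      std::vector<uint8_t> encoded_data;
    };

    int main() {
      OwningImage a;
      a.encoded_data = {1, 2, 3};
      OwningImage b = a;             // deep copy of the payload
      OwningImage c = std::move(a);  // buffer moves over, a is emptied
      return (b.encoded_data.size() == 3 && c.encoded_data.size() == 3) ? 0 : 1;
    }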

---- file 2/17 ----

@@ -204,23 +204,20 @@ class MediaTransportEncodedVideoFrame final {
     return referenced_frame_ids_;
   }
 
-  // Hack to workaround lack of ownership of the encoded_image_._buffer. If we
+  // Hack to workaround lack of ownership of the EncodedImage buffer. If we
   // don't already own the underlying data, make a copy.
-  void Retain();
+  void Retain() { encoded_image_.Retain(); }
 
  private:
  MediaTransportEncodedVideoFrame();
 
  int payload_type_;
 
-  // The buffer is not owned by the encoded image. On the sender it means that
-  // it will need to make a copy using the Retain() method, if it wants to
+  // The buffer is not always owned by the encoded image. On the sender it means
+  // that it will need to make a copy using the Retain() method, if it wants to
   // deliver it asynchronously.
  webrtc::EncodedImage encoded_image_;
 
-  // If non-empty, this is the data for the encoded image.
-  std::vector<uint8_t> encoded_data_;
-
  // Frame id uniquely identifies a frame in a stream. It needs to be unique in
  // a given time window (i.e. technically unique identifier for the lifetime of
  // the connection is not needed, but you need to guarantee that remote side

---- file 3/17 ----

@@ -29,10 +29,24 @@ size_t EncodedImage::GetBufferPaddingBytes(VideoCodecType codec_type) {
 
 EncodedImage::EncodedImage() : EncodedImage(nullptr, 0, 0) {}
 
+EncodedImage::EncodedImage(EncodedImage&&) = default;
 EncodedImage::EncodedImage(const EncodedImage&) = default;
 
 EncodedImage::EncodedImage(uint8_t* buffer, size_t size, size_t capacity)
-    : buffer_(buffer), size_(size), capacity_(capacity) {}
+    : size_(size), buffer_(buffer), capacity_(capacity) {}
+
+EncodedImage::~EncodedImage() = default;
+
+EncodedImage& EncodedImage::operator=(EncodedImage&&) = default;
+EncodedImage& EncodedImage::operator=(const EncodedImage&) = default;
+
+void EncodedImage::Retain() {
+  if (buffer_) {
+    encoded_data_ = std::vector<uint8_t>(size_);
+    memcpy(encoded_data_.data(), buffer_, size_);
+    buffer_ = nullptr;
+  }
+}
 
 void EncodedImage::SetEncodeTime(int64_t encode_start_ms,
                                  int64_t encode_finish_ms) {
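Retain() is the bridge between the two modes: when buffer_ is set, it copies size_ bytes into the internal vector and drops the raw pointer, and it is a no-op for data that is already owned. A hedged usage sketch, again assuming the patched header; the function name is illustrative:

    #include "api/video/encoded_image.h"

    void QueueForAsyncDelivery(webrtc::EncodedImage image) {
      // |image| may alias an encoder-owned buffer that can be reused as soon
      // as the encode callback returns, so make the bytes ours first.
      image.Retain();
      // From here on image.data() points into storage the image owns, and
      // the frame can safely outlive the encoder's buffer.
    }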

---- file 4/17 ----

@@ -12,6 +12,7 @@
 #define API_VIDEO_ENCODED_IMAGE_H_
 
 #include <stdint.h>
+#include <vector>
 
 #include "absl/types/optional.h"
 #include "api/video/color_space.h"
@@ -37,9 +38,17 @@ class RTC_EXPORT EncodedImage {
   static size_t GetBufferPaddingBytes(VideoCodecType codec_type);
 
   EncodedImage();
+  EncodedImage(EncodedImage&&);
+  // Discouraged: potentially expensive.
   EncodedImage(const EncodedImage&);
   EncodedImage(uint8_t* buffer, size_t length, size_t capacity);
 
+  ~EncodedImage();
+
+  EncodedImage& operator=(EncodedImage&&);
+  // Discouraged: potentially expensive.
+  EncodedImage& operator=(const EncodedImage&);
+
   // TODO(nisse): Change style to timestamp(), set_timestamp(), for consistency
   // with the VideoFrame class.
   // Set frame timestamp (90kHz).
@@ -68,19 +77,38 @@ class RTC_EXPORT EncodedImage {
   size_t size() const { return size_; }
   void set_size(size_t new_size) {
-    RTC_DCHECK_LE(new_size, capacity_);
+    RTC_DCHECK_LE(new_size, capacity());
     size_ = new_size;
   }
-  size_t capacity() const { return capacity_; }
+  size_t capacity() const {
+    return buffer_ ? capacity_ : encoded_data_.size();
+  }
 
   void set_buffer(uint8_t* buffer, size_t capacity) {
     buffer_ = buffer;
     capacity_ = capacity;
   }
 
-  // TODO(bugs.webrtc.org/9378): When changed to owning the buffer, data() on a
-  // const object should return a const uint8_t*.
-  uint8_t* data() const { return buffer_; }
+  void Allocate(size_t capacity) {
+    encoded_data_.resize(capacity);
+    buffer_ = nullptr;
+  }
+
+  uint8_t* data() { return buffer_ ? buffer_ : encoded_data_.data(); }
+  const uint8_t* data() const {
+    return buffer_ ? buffer_ : encoded_data_.data();
+  }
+  // TODO(nisse): At some places, code accepts a const ref EncodedImage, but
+  // still writes to it, to clear padding at the end of the encoded data.
+  // Padding is required by ffmpeg; the best way to deal with that is likely to
+  // make this class ensure that buffers always have a few zero padding bytes.
+  uint8_t* mutable_data() const { return const_cast<uint8_t*>(data()); }
+
+  // TODO(bugs.webrtc.org/9378): Delete. Used by code that wants to modify a
+  // buffer corresponding to a const EncodedImage. Requires an un-owned buffer.
+  uint8_t* buffer() const { return buffer_; }
+
+  // Hack to workaround lack of ownership of the encoded data. If we don't
+  // already own the underlying data, make an owned copy.
+  void Retain();
 
   uint32_t _encodedWidth = 0;
   uint32_t _encodedHeight = 0;
@@ -111,11 +139,14 @@ class RTC_EXPORT EncodedImage {
   } timing_;
 
  private:
-  // TODO(bugs.webrtc.org/9378): Fix ownership. Currently not owning the data
-  // buffer.
-  uint8_t* buffer_;
+  // TODO(bugs.webrtc.org/9378): We're transitioning to always owning the
+  // encoded data.
+  std::vector<uint8_t> encoded_data_;
   size_t size_;  // Size of encoded frame data.
-  size_t capacity_;  // Allocated size of _buffer.
+  // Non-null when used with an un-owned buffer.
+  uint8_t* buffer_;
+  // Allocated size of _buffer; relevant only if it's non-null.
+  size_t capacity_;
   uint32_t timestamp_rtp_ = 0;
   absl::optional<int> spatial_index_;
   absl::optional<webrtc::ColorSpace> color_space_;
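All of the accessors branch on buffer_: data() and capacity() report the raw pointer and its capacity when one is installed, and fall back to encoded_data_ otherwise, while buffer() deliberately exposes only the un-owned pointer so the legacy delete[] call sites below keep working. A small sketch of the observable difference, under the same checkout assumption:

    #include <cassert>
    #include <cstdint>

    #include "api/video/encoded_image.h"

    void AccessorSketch() {
      webrtc::EncodedImage image;
      image.Allocate(64);                  // owned: vector-backed storage
      assert(image.buffer() == nullptr);   // nothing for delete[] to free
      assert(image.capacity() == 64);      // reported from the vector
      assert(image.data() != nullptr);     // points into the vector

      uint8_t raw[16];
      image.set_buffer(raw, sizeof(raw));  // back to legacy, un-owned mode
      assert(image.buffer() == raw);
      assert(image.capacity() == 16);
    }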

---- file 5/17 ----

@@ -259,12 +259,12 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
   // bitstreams could cause overread and segfault." See
   // AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
-  memset(input_image.data() + input_image.size(), 0,
+  memset(input_image.mutable_data() + input_image.size(), 0,
          EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
 
   AVPacket packet;
   av_init_packet(&packet);
-  packet.data = input_image.data();
+  packet.data = input_image.mutable_data();
   if (input_image.size() >
       static_cast<size_t>(std::numeric_limits<int>::max())) {
     ReportError();
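mutable_data() exists for call sites like this one: the image arrives as a const EncodedImage&, yet FFmpeg requires the padding bytes after the payload to be zeroed. A sketch of the idiom; it assumes the buffer was allocated with GetBufferPaddingBytes() of headroom, as this decoder path guarantees:

    #include <cstring>

    #include "api/video/encoded_image.h"

    void ZeroTrailingPadding(const webrtc::EncodedImage& input) {
      // Logically const, but the bytes past size() are scratch space that
      // FFmpeg insists on reading; mutable_data() is the escape hatch.
      std::memset(input.mutable_data() + input.size(), 0,
                  webrtc::EncodedImage::GetBufferPaddingBytes(
                      webrtc::kVideoCodecH264));
    }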

---- file 6/17 ----

@@ -221,7 +221,7 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
   for (size_t i = 0; i < images.size(); i++) {
     PackBitstream(combined_image.data() + frame_headers[i].bitstream_offset,
                   images[i]);
-    delete[] images[i].encoded_image.data();
+    delete[] images[i].encoded_image.buffer();
   }
 
   return combined_image;
@@ -263,7 +263,7 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
     encoded_image.SetTimestamp(combined_image.Timestamp());
     encoded_image._frameType = frame_headers[i].frame_type;
     encoded_image.set_buffer(
-        combined_image.data() + frame_headers[i].bitstream_offset,
+        combined_image.mutable_data() + frame_headers[i].bitstream_offset,
         static_cast<size_t>(frame_headers[i].bitstream_length));
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(image_component.codec_type);
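The change from data() to buffer() at the delete[] sites is the load-bearing part of this file: data() can now return a pointer into the internal vector, which must never reach delete[], while buffer() is non-null only for the raw heap buffers these call sites allocated themselves. The recurring pattern, sketched:

    #include "api/video/encoded_image.h"

    void ReleaseLegacyBuffer(webrtc::EncodedImage& image) {
      // Free only an un-owned heap buffer; never delete[] image.data(),
      // which may point into the vector the image owns.
      if (image.buffer()) {
        delete[] image.buffer();
        image.set_buffer(nullptr, 0);
      }
    }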

---- file 7/17 ----

@@ -253,8 +253,8 @@ int MultiplexEncoderAdapter::Release() {
     }
   }
   stashed_images_.clear();
-  if (combined_image_.data()) {
-    delete[] combined_image_.data();
+  if (combined_image_.buffer()) {
+    delete[] combined_image_.buffer();
     combined_image_.set_buffer(nullptr, 0);
   }
   return WEBRTC_VIDEO_CODEC_OK;
@@ -302,8 +302,8 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
       // We have to send out those stashed frames, otherwise the delta frame
       // dependency chain is broken.
-      if (combined_image_.data())
-        delete[] combined_image_.data();
+      if (combined_image_.buffer())
+        delete[] combined_image_.buffer();
 
       combined_image_ =
           MultiplexEncodedImagePacker::PackAndRelease(iter->second);

---- file 8/17 ----

@@ -587,9 +587,9 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
   copied_image.set_size(payload_size_bytes);
 
   // Replace previous EncodedImage for this spatial layer.
-  uint8_t* old_data = merged_encoded_frames_.at(spatial_idx).data();
-  if (old_data) {
-    delete[] old_data;
+  uint8_t* old_buffer = merged_encoded_frames_.at(spatial_idx).buffer();
+  if (old_buffer) {
+    delete[] old_buffer;
   }
   merged_encoded_frames_.at(spatial_idx) = copied_image;

---- file 9/17 ----

@@ -888,7 +888,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
             encoded_images_[encoder_idx].capacity()) {
           uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
           memcpy(buffer, encoded_images_[encoder_idx].data(), length);
-          delete[] encoded_images_[encoder_idx].data();
+          delete[] encoded_images_[encoder_idx].buffer();
           encoded_images_[encoder_idx].set_buffer(
               buffer, pkt->data.frame.sz + length);
         }

---- file 10/17 ----

@@ -185,8 +185,8 @@ VP9EncoderImpl::~VP9EncoderImpl() {
 int VP9EncoderImpl::Release() {
   int ret_val = WEBRTC_VIDEO_CODEC_OK;
 
-  if (encoded_image_.data() != nullptr) {
-    delete[] encoded_image_.data();
+  if (encoded_image_.buffer() != nullptr) {
+    delete[] encoded_image_.buffer();
     encoded_image_.set_buffer(nullptr, 0);
   }
   if (encoder_ != nullptr) {
@@ -1266,7 +1266,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   }
   if (pkt->data.frame.sz > encoded_image_.capacity()) {
-    delete[] encoded_image_.data();
+    delete[] encoded_image_.buffer();
     encoded_image_.set_buffer(new uint8_t[pkt->data.frame.sz],
                               pkt->data.frame.sz);
   }

---- file 11/17 ----

@@ -31,15 +31,7 @@ VCMEncodedFrame::VCMEncodedFrame()
 }
 
 VCMEncodedFrame::~VCMEncodedFrame() {
-  Free();
-}
-
-void VCMEncodedFrame::Free() {
   Reset();
-  if (data() != nullptr) {
-    delete[] data();
-    set_buffer(nullptr, 0);
-  }
 }
 
 void VCMEncodedFrame::Reset() {
@@ -156,15 +148,10 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
 void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize) {
   size_t old_capacity = capacity();
   if (minimumSize > old_capacity) {
-    // create buffer of sufficient size
-    uint8_t* old_data = data();
-
-    set_buffer(new uint8_t[minimumSize], minimumSize);
-    if (old_data) {
-      // copy old data
-      memcpy(data(), old_data, old_capacity);
-      delete[] old_data;
-    }
+    // TODO(nisse): EncodedImage::Allocate is implemented as
+    // std::vector::resize, which means that old contents is kept. Find out if
+    // any code depends on that behavior.
+    Allocate(minimumSize);
   }
 }
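The TODO above flags a behavioral subtlety: the old code explicitly memcpy'd the previous bytes into the enlarged buffer, and Allocate() happens to preserve them too, because std::vector::resize keeps the existing prefix and value-initializes only the new tail. A standalone demonstration of that vector property:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<uint8_t> data = {1, 2, 3};
      data.resize(16);  // grow: the first three bytes survive,
                        // the new tail is zero-initialized
      assert(data[0] == 1 && data[1] == 2 && data[2] == 3);
      assert(data[15] == 0);
      return 0;
    }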

---- file 12/17 ----

@@ -27,10 +27,6 @@ class VCMEncodedFrame : protected EncodedImage {
   VCMEncodedFrame(const VCMEncodedFrame&) = delete;
 
   ~VCMEncodedFrame();
-  /**
-   * Delete VideoFrame and resets members to zero
-   */
-  void Free();
 
   /**
   *   Set render time in milliseconds
   */

---- file 13/17 ----

@@ -170,15 +170,11 @@ void RtpFrameObject::AllocateBitstreamBuffer(size_t frame_size) {
   // Since FFmpeg use an optimized bitstream reader that reads in chunks of
   // 32/64 bits we have to add at least that much padding to the buffer
   // to make sure the decoder doesn't read out of bounds.
-  // NOTE! EncodedImage::_size is the size of the buffer (think capacity of
-  // an std::vector) and EncodedImage::_length is the actual size of
-  // the bitstream (think size of an std::vector).
   size_t new_size = frame_size + (codec_type_ == kVideoCodecH264
                                       ? EncodedImage::kBufferPaddingBytesH264
                                       : 0);
   if (capacity() < new_size) {
-    delete[] data();
-    set_buffer(new uint8_t[new_size], new_size);
+    Allocate(new_size);
   }
 
   set_size(frame_size);
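The allocation keeps FFmpeg's read-ahead requirement in view: capacity covers the bitstream plus kBufferPaddingBytesH264 of headroom, while set_size() records only the bitstream bytes. The same arithmetic as above, pulled out into a sketch; the helper name is illustrative:

    #include <cstddef>

    #include "api/video/encoded_image.h"

    void AllocateWithPadding(webrtc::EncodedImage& image, size_t frame_size,
                             bool is_h264) {
      const size_t padding =
          is_h264 ? webrtc::EncodedImage::kBufferPaddingBytesH264 : 0;
      // Capacity includes decoder read-ahead padding; size() does not.
      if (image.capacity() < frame_size + padding) {
        image.Allocate(frame_size + padding);
      }
      image.set_size(frame_size);
    }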

---- file 14/17 ----

@@ -82,7 +82,7 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
     // Only store the base layer.
     if (encoded_image.SpatialIndex().value_or(0) == 0) {
       if (encoded_image._frameType == kVideoFrameKey) {
-        delete[] encoded_key_frame_.data();
+        delete[] encoded_key_frame_.buffer();
         encoded_key_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
                                       encoded_image.capacity());
         encoded_key_frame_.set_size(encoded_image.size());
@@ -91,7 +91,7 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
         memcpy(encoded_key_frame_.data(), encoded_image.data(),
                encoded_image.size());
       } else {
-        delete[] encoded_frame_.data();
+        delete[] encoded_frame_.buffer();
         encoded_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
                                   encoded_image.capacity());
         encoded_frame_.set_size(encoded_image.size());
@@ -905,7 +905,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
   EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, NULL, 0));
 
   for (int i = 0; i < 3; ++i) {
-    delete[] encoded_frame[i].data();
+    delete[] encoded_frame[i].buffer();
   }
 }

---- file 15/17 ----

@@ -313,23 +313,26 @@ EncodedImageCallback::Result FakeH264Encoder::OnEncodedImage(
     const size_t kSpsNalHeader = 0x67;
     const size_t kPpsNalHeader = 0x68;
     const size_t kIdrNalHeader = 0x65;
-    encoded_image.data()[fragmentation.fragmentationOffset[0]] = kSpsNalHeader;
-    encoded_image.data()[fragmentation.fragmentationOffset[1]] = kPpsNalHeader;
-    encoded_image.data()[fragmentation.fragmentationOffset[2]] = kIdrNalHeader;
+    encoded_image.buffer()[fragmentation.fragmentationOffset[0]] =
+        kSpsNalHeader;
+    encoded_image.buffer()[fragmentation.fragmentationOffset[1]] =
+        kPpsNalHeader;
+    encoded_image.buffer()[fragmentation.fragmentationOffset[2]] =
+        kIdrNalHeader;
   } else {
     const size_t kNumSlices = 1;
     fragmentation.VerifyAndAllocateFragmentationHeader(kNumSlices);
     fragmentation.fragmentationOffset[0] = 0;
     fragmentation.fragmentationLength[0] = encoded_image.size();
     const size_t kNalHeader = 0x41;
-    encoded_image.data()[fragmentation.fragmentationOffset[0]] = kNalHeader;
+    encoded_image.buffer()[fragmentation.fragmentationOffset[0]] = kNalHeader;
   }
   uint8_t value = 0;
   int fragment_counter = 0;
   for (size_t i = 0; i < encoded_image.size(); ++i) {
     if (fragment_counter == fragmentation.fragmentationVectorSize ||
         i != fragmentation.fragmentationOffset[fragment_counter]) {
-      encoded_image.data()[i] = value++;
+      encoded_image.buffer()[i] = value++;
     } else {
       ++fragment_counter;
     }

---- file 16/17 ----

@@ -127,7 +127,7 @@ EncodedImageCallback::Result FakeVP8Encoder::OnEncodedImage(
   // Write width and height to the payload the same way as the real encoder
   // does.
-  WriteFakeVp8(encoded_image.data(), encoded_image._encodedWidth,
+  WriteFakeVp8(encoded_image.buffer(), encoded_image._encodedWidth,
                encoded_image._encodedHeight,
                encoded_image._frameType == kVideoFrameKey);
   return callback_->OnEncodedImage(encoded_image, &overrided_specific_info,

---- file 17/17 ----

@@ -124,16 +124,20 @@ class EncodedFrameForMediaTransport : public video_coding::EncodedFrame {
  public:
   explicit EncodedFrameForMediaTransport(
       MediaTransportEncodedVideoFrame frame) {
-    // TODO(nisse): This is too ugly. We copy the EncodedImage (a base class of
-    // ours, in several steps), to get all the meta data. But we then need to
-    // reset the buffer and allocate a new copy, since EncodedFrame must own it.
+    // TODO(nisse): This is ugly. We copy the EncodedImage (a base class of
+    // ours, in several steps), to get all the meta data. We should be using
+    // std::move in some way. Then we also need to handle the case of an unowned
+    // buffer, in which case we need to make an owned copy.
     *static_cast<class EncodedImage*>(this) = frame.encoded_image();
-    // Don't use the copied _buffer pointer.
-    set_buffer(nullptr, 0);
 
-    VerifyAndAllocate(frame.encoded_image().size());
-    set_size(frame.encoded_image().size());
-    memcpy(data(), frame.encoded_image().data(), size());
+    if (buffer()) {
+      // Unowned data. Make a copy we own.
+      set_buffer(nullptr, 0);
+      VerifyAndAllocate(frame.encoded_image().size());
+      set_size(frame.encoded_image().size());
+      memcpy(data(), frame.encoded_image().data(), size());
+    }
 
     _payloadType = static_cast<uint8_t>(frame.payload_type());