Rename EncodedImage::_buffer --> buffer_, and make private
Bug: webrtc:9378
Change-Id: I0a0636077b270a7c73bafafb958132fa648aca70
Reviewed-on: https://webrtc-review.googlesource.com/c/117722
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26294}
Parent: 83ed89a45f
Commit: 24871e4cbe

24 changed files with 91 additions and 88 deletions
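For call sites, the rename means raw field access is replaced by the set_buffer() and data() accessors, which keep the pointer and its capacity together. A minimal before/after sketch (illustrative only; AttachPayload, image, payload, and payload_size are hypothetical names, not code from this commit):

    #include "api/video/encoded_image.h"

    // Hypothetical call site showing the migration.
    void AttachPayload(webrtc::EncodedImage* image,
                       uint8_t* payload,
                       size_t payload_size) {
      // Before this commit: image->_buffer = payload;
      // (capacity was tracked separately).
      // After: pointer and capacity are updated together...
      image->set_buffer(payload, payload_size);
      // ...and all reads go through data(), even on const objects.
      uint8_t first_byte = image->data()[0];
      (void)first_byte;
    }
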
@@ -95,7 +95,7 @@ MediaTransportEncodedVideoFrame& MediaTransportEncodedVideoFrame::operator=(
   referenced_frame_ids_ = o.referenced_frame_ids_;
   if (!encoded_data_.empty()) {
     // We own the underlying data.
-    encoded_image_._buffer = encoded_data_.data();
+    encoded_image_.set_buffer(encoded_data_.data(), encoded_data_.size());
   }
   return *this;
 }
@@ -110,8 +110,8 @@ MediaTransportEncodedVideoFrame& MediaTransportEncodedVideoFrame::operator=(
   referenced_frame_ids_ = std::move(o.referenced_frame_ids_);
   if (!encoded_data_.empty()) {
     // We take over ownership of the underlying data.
-    encoded_image_._buffer = encoded_data_.data();
-    o.encoded_image_._buffer = nullptr;
+    encoded_image_.set_buffer(encoded_data_.data(), encoded_data_.size());
+    o.encoded_image_.set_buffer(nullptr, 0);
   }
   return *this;
 }

@@ -32,7 +32,7 @@ EncodedImage::EncodedImage() : EncodedImage(nullptr, 0, 0) {}
 EncodedImage::EncodedImage(const EncodedImage&) = default;
 
 EncodedImage::EncodedImage(uint8_t* buffer, size_t size, size_t capacity)
-    : _buffer(buffer), size_(size), capacity_(capacity) {}
+    : buffer_(buffer), size_(size), capacity_(capacity) {}
 
 void EncodedImage::SetEncodeTime(int64_t encode_start_ms,
                                  int64_t encode_finish_ms) {

@@ -75,12 +75,13 @@ class RTC_EXPORT EncodedImage {
   size_t capacity() const { return capacity_; }
 
   void set_buffer(uint8_t* buffer, size_t capacity) {
-    _buffer = buffer;
+    buffer_ = buffer;
     capacity_ = capacity;
   }
 
-  uint8_t* data() { return _buffer; }
-  const uint8_t* data() const { return _buffer; }
+  // TODO(bugs.webrtc.org/9378): When changed to owning the buffer, data() on a
+  // const object should return a const uint8_t*.
+  uint8_t* data() const { return buffer_; }
 
   uint32_t _encodedWidth = 0;
   uint32_t _encodedHeight = 0;
@@ -88,7 +89,6 @@ class RTC_EXPORT EncodedImage {
   int64_t ntp_time_ms_ = 0;
   int64_t capture_time_ms_ = 0;
   FrameType _frameType = kVideoFrameDelta;
-  uint8_t* _buffer;
   VideoRotation rotation_ = kVideoRotation_0;
   VideoContentType content_type_ = VideoContentType::UNSPECIFIED;
   bool _completeFrame = false;
@@ -112,6 +112,9 @@ class RTC_EXPORT EncodedImage {
   } timing_;
 
  private:
+  // TODO(bugs.webrtc.org/9378): Fix ownership. Currently not owning the data
+  // buffer.
+  uint8_t* buffer_;
   size_t size_;      // Size of encoded frame data.
   size_t capacity_;  // Allocated size of _buffer.
   uint32_t timestamp_rtp_ = 0;

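Taken together, the header hunks above leave the buffer-related API of EncodedImage looking roughly like this (a condensed sketch assembled from this diff, not the complete class):

    // Condensed sketch of the buffer API after this commit.
    class EncodedImage {
     public:
      void set_buffer(uint8_t* buffer, size_t capacity) {
        buffer_ = buffer;
        capacity_ = capacity;
      }
      // TODO(bugs.webrtc.org/9378): When changed to owning the buffer,
      // data() on a const object should return a const uint8_t*.
      uint8_t* data() const { return buffer_; }
      size_t size() const { return size_; }
      size_t capacity() const { return capacity_; }

     private:
      uint8_t* buffer_;  // Not owned; see bugs.webrtc.org/9378.
      size_t size_;      // Size of encoded frame data.
      size_t capacity_;  // Allocated size of buffer_.
    };

Note that the two data() overloads collapse into a single const member returning a mutable pointer, so existing writers keep compiling while the rename lands; const-correctness is deferred until the class owns its buffer.
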
@@ -259,12 +259,12 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
   // bitstreams could cause overread and segfault." See
   // AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
-  memset(input_image._buffer + input_image.size(), 0,
+  memset(input_image.data() + input_image.size(), 0,
          EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
 
   AVPacket packet;
   av_init_packet(&packet);
-  packet.data = input_image._buffer;
+  packet.data = input_image.data();
   if (input_image.size() >
       static_cast<size_t>(std::numeric_limits<int>::max())) {
     ReportError();

@@ -127,7 +127,7 @@ static void RtpFragmentize(EncodedImage* encoded_image,
     new_capacity = required_capacity;
   }
   encoded_image->set_buffer(new uint8_t[new_capacity], new_capacity);
-  encoded_image_buffer->reset(encoded_image->_buffer);
+  encoded_image_buffer->reset(encoded_image->data());
 }
 
 // Iterate layers and NAL units, note each NAL unit as a fragment and copy
@@ -304,7 +304,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
         CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width,
                        codec_.simulcastStream[idx].height);
     encoded_images_[i].set_buffer(new uint8_t[new_capacity], new_capacity);
-    encoded_image_buffers_[i].reset(encoded_images_[i]._buffer);
+    encoded_image_buffers_[i].reset(encoded_images_[i].data());
     encoded_images_[i]._completeFrame = true;
     encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
     encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;

@@ -41,7 +41,7 @@ int PackHeader(uint8_t* buffer, MultiplexImageHeader header) {
   return offset;
 }
 
-MultiplexImageHeader UnpackHeader(uint8_t* buffer) {
+MultiplexImageHeader UnpackHeader(const uint8_t* buffer) {
   MultiplexImageHeader header;
   int offset = 0;
   header.component_count = ByteReader<uint8_t>::ReadBigEndian(buffer + offset);
@@ -95,7 +95,7 @@ int PackFrameHeader(uint8_t* buffer,
   return offset;
 }
 
-MultiplexImageComponentHeader UnpackFrameHeader(uint8_t* buffer) {
+MultiplexImageComponentHeader UnpackFrameHeader(const uint8_t* buffer) {
   MultiplexImageComponentHeader frame_header;
   int offset = 0;
 
@@ -192,14 +192,14 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
   combined_image.set_size(bitstream_offset);
 
   // header
-  header_offset = PackHeader(combined_image._buffer, header);
+  header_offset = PackHeader(combined_image.data(), header);
   RTC_DCHECK_EQ(header.first_component_header_offset,
                 kMultiplexImageHeaderSize);
 
   // Frame Header
   for (size_t i = 0; i < images.size(); i++) {
-    int relative_offset = PackFrameHeader(
-        combined_image._buffer + header_offset, frame_headers[i]);
+    int relative_offset = PackFrameHeader(combined_image.data() + header_offset,
+                                          frame_headers[i]);
     RTC_DCHECK_EQ(relative_offset, kMultiplexImageComponentHeaderSize);
 
     header_offset = frame_headers[i].next_component_header_offset;
@@ -212,16 +212,16 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
 
   // Augmenting Data
   if (multiplex_image.augmenting_data_size != 0) {
-    memcpy(combined_image._buffer + header.augmenting_data_offset,
+    memcpy(combined_image.data() + header.augmenting_data_offset,
           multiplex_image.augmenting_data.get(),
           multiplex_image.augmenting_data_size);
   }
 
   // Bitstreams
   for (size_t i = 0; i < images.size(); i++) {
-    PackBitstream(combined_image._buffer + frame_headers[i].bitstream_offset,
+    PackBitstream(combined_image.data() + frame_headers[i].bitstream_offset,
                   images[i]);
-    delete[] images[i].encoded_image._buffer;
+    delete[] images[i].encoded_image.data();
   }
 
   return combined_image;
@@ -229,14 +229,14 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
 
 MultiplexImage MultiplexEncodedImagePacker::Unpack(
     const EncodedImage& combined_image) {
-  const MultiplexImageHeader& header = UnpackHeader(combined_image._buffer);
+  const MultiplexImageHeader& header = UnpackHeader(combined_image.data());
 
   std::vector<MultiplexImageComponentHeader> frame_headers;
   int header_offset = header.first_component_header_offset;
 
   while (header_offset > 0) {
     frame_headers.push_back(
-        UnpackFrameHeader(combined_image._buffer + header_offset));
+        UnpackFrameHeader(combined_image.data() + header_offset));
     header_offset = frame_headers.back().next_component_header_offset;
   }
 
@@ -246,7 +246,7 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
     augmenting_data =
         std::unique_ptr<uint8_t[]>(new uint8_t[header.augmenting_data_size]);
     memcpy(augmenting_data.get(),
-           combined_image._buffer + header.augmenting_data_offset,
+           combined_image.data() + header.augmenting_data_offset,
            header.augmenting_data_size);
   }
 
@@ -263,7 +263,7 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
     encoded_image.SetTimestamp(combined_image.Timestamp());
     encoded_image._frameType = frame_headers[i].frame_type;
     encoded_image.set_buffer(
-        combined_image._buffer + frame_headers[i].bitstream_offset,
+        combined_image.data() + frame_headers[i].bitstream_offset,
         static_cast<size_t>(frame_headers[i].bitstream_length));
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(image_component.codec_type);

@@ -249,13 +249,13 @@ int MultiplexEncoderAdapter::Release() {
   rtc::CritScope cs(&crit_);
   for (auto& stashed_image : stashed_images_) {
     for (auto& image_component : stashed_image.second.image_components) {
-      delete[] image_component.encoded_image._buffer;
+      delete[] image_component.encoded_image.data();
     }
   }
   stashed_images_.clear();
-  if (combined_image_._buffer) {
-    delete[] combined_image_._buffer;
-    combined_image_._buffer = nullptr;
+  if (combined_image_.data()) {
+    delete[] combined_image_.data();
+    combined_image_.set_buffer(nullptr, 0);
   }
   return WEBRTC_VIDEO_CODEC_OK;
 }

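The Release() pattern above (delete the array, then clear the dangling pointer and capacity via set_buffer(nullptr, 0)) recurs in almost every encoder touched by this commit. A hypothetical helper showing the idiom in isolation (FreeBuffer is not part of this commit; it assumes the buffer was allocated with new[] and is owned by the caller):

    #include "api/video/encoded_image.h"

    // Hypothetical sketch of the manual-ownership release idiom.
    void FreeBuffer(webrtc::EncodedImage* image) {
      delete[] image->data();         // data() still exposes the raw pointer.
      image->set_buffer(nullptr, 0);  // Clear pointer and capacity together.
    }
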
@@ -302,8 +302,8 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
 
     // We have to send out those stashed frames, otherwise the delta frame
     // dependency chain is broken.
-    if (combined_image_._buffer)
-      delete[] combined_image_._buffer;
+    if (combined_image_.data())
+      delete[] combined_image_.data();
     combined_image_ =
         MultiplexEncodedImagePacker::PackAndRelease(iter->second);
 

@@ -278,7 +278,7 @@ TEST_P(TestMultiplexAdapter, CheckSingleFrameEncodedBitstream) {
   EXPECT_EQ(1, unpacked_frame.component_count);
   const MultiplexImageComponent& component = unpacked_frame.image_components[0];
   EXPECT_EQ(0, component.component_index);
-  EXPECT_NE(nullptr, component.encoded_image._buffer);
+  EXPECT_NE(nullptr, component.encoded_image.data());
   EXPECT_EQ(kVideoFrameKey, component.encoded_image._frameType);
 }
 
@@ -302,7 +302,7 @@ TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
     const MultiplexImageComponent& component =
        unpacked_frame.image_components[i];
     EXPECT_EQ(i, component.component_index);
-    EXPECT_NE(nullptr, component.encoded_image._buffer);
+    EXPECT_NE(nullptr, component.encoded_image.data());
     EXPECT_EQ(kVideoFrameKey, component.encoded_image._frameType);
   }
 }

@@ -323,7 +323,7 @@ void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
       webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
   for (const webrtc::H264::NaluIndex& index : nalu_indices) {
     webrtc::H264::NaluType nalu_type = webrtc::H264::ParseNaluType(
-        encoded_frame._buffer[index.payload_start_offset]);
+        encoded_frame.data()[index.payload_start_offset]);
     if (nalu_type == webrtc::H264::NaluType::kSps) {
       contains_sps = true;
     } else if (nalu_type == webrtc::H264::NaluType::kPps) {

@@ -249,9 +249,9 @@ VideoProcessor::~VideoProcessor() {
 
   // Deal with manual memory management of EncodedImage's.
   for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
-    uint8_t* buffer = merged_encoded_frames_.at(i)._buffer;
-    if (buffer) {
-      delete[] buffer;
+    uint8_t* data = merged_encoded_frames_.at(i).data();
+    if (data) {
+      delete[] data;
     }
   }
 }
@@ -575,7 +575,7 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
   RTC_CHECK(copied_buffer);
 
   if (base_image.size()) {
-    RTC_CHECK(base_image._buffer);
+    RTC_CHECK(base_image.data());
     memcpy(copied_buffer, base_image.data(), base_image.size());
   }
   memcpy(copied_buffer + base_image.size(), encoded_image.data(),

@@ -109,10 +109,10 @@ class VideoProcessor {
                  const webrtc::EncodedImage& encoded_image,
                  const webrtc::CodecSpecificInfo* codec_specific_info)
         : video_processor_(video_processor),
-          buffer_(encoded_image._buffer, encoded_image.size()),
+          buffer_(encoded_image.data(), encoded_image.size()),
           encoded_image_(encoded_image),
           codec_specific_info_(*codec_specific_info) {
-      encoded_image_._buffer = buffer_.data();
+      encoded_image_.set_buffer(buffer_.data(), buffer_.size());
     }
 
     bool Run() override {

@@ -248,7 +248,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
     iter = NULL;
   }
 
-  uint8_t* buffer = input_image._buffer;
+  const uint8_t* buffer = input_image.data();
   if (input_image.size() == 0) {
     buffer = NULL;  // Triggers full frame concealment.
   }

@@ -191,7 +191,7 @@ int LibvpxVp8Encoder::Release() {
 
   while (!encoded_images_.empty()) {
     EncodedImage& image = encoded_images_.back();
-    delete[] image._buffer;
+    delete[] image.data();
     encoded_images_.pop_back();
   }
   while (!encoders_.empty()) {
@@ -382,8 +382,8 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
   }
   for (int i = 0; i < number_of_streams; ++i) {
     // allocate memory for encoded image
-    if (encoded_images_[i]._buffer != NULL) {
-      delete[] encoded_images_[i]._buffer;
+    if (encoded_images_[i].data() != nullptr) {
+      delete[] encoded_images_[i].data();
     }
     size_t frame_capacity =
         CalcBufferSize(VideoType::kI420, codec_.width, codec_.height);
@@ -883,12 +883,12 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
          if (pkt->data.frame.sz + length >
              encoded_images_[encoder_idx].capacity()) {
            uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
-           memcpy(buffer, encoded_images_[encoder_idx]._buffer, length);
-           delete[] encoded_images_[encoder_idx]._buffer;
+           memcpy(buffer, encoded_images_[encoder_idx].data(), length);
+           delete[] encoded_images_[encoder_idx].data();
            encoded_images_[encoder_idx].set_buffer(
                buffer, pkt->data.frame.sz + length);
          }
-         memcpy(&encoded_images_[encoder_idx]._buffer[length],
+         memcpy(&encoded_images_[encoder_idx].data()[length],
                 pkt->data.frame.buf, pkt->data.frame.sz);
          encoded_images_[encoder_idx].set_size(
              encoded_images_[encoder_idx].size() + pkt->data.frame.sz);

@@ -187,9 +187,9 @@ VP9EncoderImpl::~VP9EncoderImpl() {
 int VP9EncoderImpl::Release() {
   int ret_val = WEBRTC_VIDEO_CODEC_OK;
 
-  if (encoded_image_._buffer != nullptr) {
-    delete[] encoded_image_._buffer;
-    encoded_image_._buffer = nullptr;
+  if (encoded_image_.data() != nullptr) {
+    delete[] encoded_image_.data();
+    encoded_image_.set_buffer(nullptr, 0);
   }
   if (encoder_ != nullptr) {
     if (inited_) {
@@ -392,8 +392,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
   is_svc_ = (num_spatial_layers_ > 1 || num_temporal_layers_ > 1);
 
   // Allocate memory for encoded image
-  if (encoded_image_._buffer != nullptr) {
-    delete[] encoded_image_._buffer;
+  if (encoded_image_.data() != nullptr) {
+    delete[] encoded_image_.data();
   }
   size_t frame_capacity =
       CalcBufferSize(VideoType::kI420, codec_.width, codec_.height);
@@ -1258,11 +1258,11 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   }
 
   if (pkt->data.frame.sz > encoded_image_.capacity()) {
-    delete[] encoded_image_._buffer;
+    delete[] encoded_image_.data();
     encoded_image_.set_buffer(new uint8_t[pkt->data.frame.sz],
                               pkt->data.frame.sz);
   }
-  memcpy(encoded_image_._buffer, pkt->data.frame.buf, pkt->data.frame.sz);
+  memcpy(encoded_image_.data(), pkt->data.frame.buf, pkt->data.frame.sz);
   encoded_image_.set_size(pkt->data.frame.sz);
 
   const bool is_key_frame =
@@ -1431,7 +1431,7 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   }
   vpx_codec_iter_t iter = nullptr;
   vpx_image_t* img;
-  uint8_t* buffer = input_image._buffer;
+  const uint8_t* buffer = input_image.data();
   if (input_image.size() == 0) {
     buffer = nullptr;  // Triggers full frame concealment.
   }

@@ -36,9 +36,9 @@ VCMEncodedFrame::~VCMEncodedFrame() {
 
 void VCMEncodedFrame::Free() {
   Reset();
-  if (_buffer != NULL) {
-    delete[] _buffer;
-    _buffer = NULL;
+  if (data() != nullptr) {
+    delete[] data();
+    set_buffer(nullptr, 0);
   }
 }
 
@@ -157,13 +157,13 @@ void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize) {
   size_t old_capacity = capacity();
   if (minimumSize > old_capacity) {
     // create buffer of sufficient size
-    uint8_t* old_buffer = _buffer;
+    uint8_t* old_data = data();
 
     set_buffer(new uint8_t[minimumSize], minimumSize);
-    if (old_buffer) {
+    if (old_data) {
       // copy old data
-      memcpy(_buffer, old_buffer, old_capacity);
-      delete[] old_buffer;
+      memcpy(data(), old_data, old_capacity);
+      delete[] old_data;
     }
   }
 }

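VerifyAndAllocate() above also shows the capture-then-swap idiom this commit relies on wherever a buffer grows: the old pointer must be saved through data() before set_buffer() installs the new allocation, because set_buffer() overwrites the pointer that data() returns. A generic sketch of the same idiom (GrowBuffer is hypothetical, not part of this commit; it assumes a new[]-owned buffer, as in the hunks above):

    #include <string.h>

    #include "api/video/encoded_image.h"

    // Hypothetical grow-and-copy helper mirroring VerifyAndAllocate().
    void GrowBuffer(webrtc::EncodedImage* image, size_t new_capacity) {
      if (new_capacity <= image->capacity())
        return;
      uint8_t* old_data = image->data();  // Capture before swapping.
      image->set_buffer(new uint8_t[new_capacity], new_capacity);
      if (old_data) {
        memcpy(image->data(), old_data, image->size());  // Keep the payload.
        delete[] old_data;
      }
    }
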
@@ -105,7 +105,7 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
         (packet.insertStartCode ? kH264StartCodeLengthBytes : 0) +
         EncodedImage::GetBufferPaddingBytes(packet.codec);
     if (requiredSizeBytes >= capacity()) {
-      const uint8_t* prevBuffer = _buffer;
+      const uint8_t* prevBuffer = data();
       const uint32_t increments =
           requiredSizeBytes / kBufferIncStepSizeBytes +
           (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
@@ -116,7 +116,7 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
        return kSizeError;
      }
      VerifyAndAllocate(newSize);
-     _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
+     _sessionInfo.UpdateDataPointers(prevBuffer, data());
    }
 
    if (packet.width > 0 && packet.height > 0) {
@@ -128,7 +128,7 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
    if (packet.sizeBytes > 0)
      CopyCodecSpecific(&packet.video_header);
 
-   int retVal = _sessionInfo.InsertPacket(packet, _buffer, frame_data);
+   int retVal = _sessionInfo.InsertPacket(packet, data(), frame_data);
    if (retVal == -1) {
      return kSizeError;
    } else if (retVal == -2) {

@@ -55,7 +55,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
   SetPlayoutDelay(first_packet->video_header.playout_delay);
 
   AllocateBitstreamBuffer(frame_size);
-  bool bitstream_copied = packet_buffer_->GetBitstream(*this, _buffer);
+  bool bitstream_copied = packet_buffer_->GetBitstream(*this, data());
   RTC_DCHECK(bitstream_copied);
   _encodedWidth = first_packet->width;
   _encodedHeight = first_packet->height;
@@ -179,7 +179,7 @@ void RtpFrameObject::AllocateBitstreamBuffer(size_t frame_size) {
           ? EncodedImage::kBufferPaddingBytesH264
           : 0);
   if (capacity() < new_size) {
-    delete[] _buffer;
+    delete[] data();
     set_buffer(new uint8_t[new_size], new_size);
   }
 

@@ -71,8 +71,8 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
   }
 
   ~TestEncodedImageCallback() {
-    delete[] encoded_key_frame_._buffer;
-    delete[] encoded_frame_._buffer;
+    delete[] encoded_key_frame_.data();
+    delete[] encoded_frame_.data();
   }
 
   virtual Result OnEncodedImage(const EncodedImage& encoded_image,
@@ -82,20 +82,20 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
     // Only store the base layer.
     if (encoded_image.SpatialIndex().value_or(0) == 0) {
       if (encoded_image._frameType == kVideoFrameKey) {
-        delete[] encoded_key_frame_._buffer;
+        delete[] encoded_key_frame_.data();
         encoded_key_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
                                       encoded_image.capacity());
         encoded_key_frame_.set_size(encoded_image.size());
         encoded_key_frame_._frameType = kVideoFrameKey;
         encoded_key_frame_._completeFrame = encoded_image._completeFrame;
-        memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
+        memcpy(encoded_key_frame_.data(), encoded_image.data(),
                encoded_image.size());
       } else {
-        delete[] encoded_frame_._buffer;
+        delete[] encoded_frame_.data();
         encoded_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
                                   encoded_image.capacity());
         encoded_frame_.set_size(encoded_image.size());
-        memcpy(encoded_frame_._buffer, encoded_image._buffer,
+        memcpy(encoded_frame_.data(), encoded_image.data(),
                encoded_image.size());
       }
     }
@@ -861,7 +861,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
        encoded_frame[index].set_size(encoded_image.size());
        encoded_frame[index]._frameType = encoded_image._frameType;
        encoded_frame[index]._completeFrame = encoded_image._completeFrame;
-       memcpy(encoded_frame[index]._buffer, encoded_image._buffer,
+       memcpy(encoded_frame[index].data(), encoded_image.data(),
               encoded_image.size());
        return EncodedImageCallback::Result(
            EncodedImageCallback::Result::OK, 0);
@@ -896,7 +896,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
   EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, NULL, 0));
 
   for (int i = 0; i < 3; ++i) {
-    delete [] encoded_frame[i]._buffer;
+    delete[] encoded_frame[i].data();
   }
 }
 

@@ -1075,9 +1075,9 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
         H264::FindNaluIndices(payload, payload_size);
     if (nalu_idxs.empty()) {
       ALOGE << "Start code is not found!";
-      ALOGE << "Data:" << image->_buffer[0] << " " << image->_buffer[1]
-            << " " << image->_buffer[2] << " " << image->_buffer[3] << " "
-            << image->_buffer[4] << " " << image->_buffer[5];
+      ALOGE << "Data:" << image->data()[0] << " " << image->data()[1] << " "
+            << image->data()[2] << " " << image->data()[3] << " "
+            << image->data()[4] << " " << image->data()[5];
       ProcessHWError(true /* reset_if_fallback_unavailable */);
       return false;
     }

@@ -83,11 +83,11 @@ int32_t FakeH264Decoder::Decode(const EncodedImage& input,
   for (size_t i = 0; i < input.size(); ++i) {
     uint8_t kStartCode[] = {0, 0, 0, 1};
     if (i < input.size() - sizeof(kStartCode) &&
-        !memcmp(&input._buffer[i], kStartCode, sizeof(kStartCode))) {
+        !memcmp(&input.data()[i], kStartCode, sizeof(kStartCode))) {
       i += sizeof(kStartCode) + 1;  // Skip start code and NAL header.
     }
-    if (input._buffer[i] != value) {
-      RTC_CHECK_EQ(value, input._buffer[i])
+    if (input.data()[i] != value) {
+      RTC_CHECK_EQ(value, input.data()[i])
          << "Bitstream mismatch between sender and receiver.";
      return -1;
    }

@@ -320,23 +320,23 @@ EncodedImageCallback::Result FakeH264Encoder::OnEncodedImage(
     const size_t kSpsNalHeader = 0x67;
     const size_t kPpsNalHeader = 0x68;
     const size_t kIdrNalHeader = 0x65;
-    encoded_image._buffer[fragmentation.fragmentationOffset[0]] = kSpsNalHeader;
-    encoded_image._buffer[fragmentation.fragmentationOffset[1]] = kPpsNalHeader;
-    encoded_image._buffer[fragmentation.fragmentationOffset[2]] = kIdrNalHeader;
+    encoded_image.data()[fragmentation.fragmentationOffset[0]] = kSpsNalHeader;
+    encoded_image.data()[fragmentation.fragmentationOffset[1]] = kPpsNalHeader;
+    encoded_image.data()[fragmentation.fragmentationOffset[2]] = kIdrNalHeader;
   } else {
     const size_t kNumSlices = 1;
     fragmentation.VerifyAndAllocateFragmentationHeader(kNumSlices);
     fragmentation.fragmentationOffset[0] = 0;
     fragmentation.fragmentationLength[0] = encoded_image.size();
     const size_t kNalHeader = 0x41;
-    encoded_image._buffer[fragmentation.fragmentationOffset[0]] = kNalHeader;
+    encoded_image.data()[fragmentation.fragmentationOffset[0]] = kNalHeader;
   }
   uint8_t value = 0;
   int fragment_counter = 0;
   for (size_t i = 0; i < encoded_image.size(); ++i) {
     if (fragment_counter == fragmentation.fragmentationVectorSize ||
         i != fragmentation.fragmentationOffset[fragment_counter]) {
-      encoded_image._buffer[i] = value++;
+      encoded_image.data()[i] = value++;
     } else {
       ++fragment_counter;
     }

@@ -52,7 +52,7 @@ int32_t FakeVp8Decoder::Decode(const EncodedImage& input,
   if (input.size() < kMinPayLoadHeaderLength) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
-  ParseFakeVp8(input._buffer, &width_, &height_);
+  ParseFakeVp8(input.data(), &width_, &height_);
 
   VideoFrame frame =
       VideoFrame::Builder()

@@ -127,7 +127,7 @@ EncodedImageCallback::Result FakeVP8Encoder::OnEncodedImage(
 
   // Write width and height to the payload the same way as the real encoder
   // does.
-  WriteFakeVp8(encoded_image._buffer, encoded_image._encodedWidth,
+  WriteFakeVp8(encoded_image.data(), encoded_image._encodedWidth,
                encoded_image._encodedHeight,
                encoded_image._frameType == kVideoFrameKey);
   return callback_->OnEncodedImage(encoded_image, &overrided_specific_info,

@@ -133,7 +133,7 @@ class EncodedFrameForMediaTransport : public video_coding::EncodedFrame {
 
     VerifyAndAllocate(frame.encoded_image().size());
     set_size(frame.encoded_image().size());
-    memcpy(_buffer, frame.encoded_image()._buffer, size());
+    memcpy(data(), frame.encoded_image().data(), size());
 
     _payloadType = static_cast<uint8_t>(frame.payload_type());
 