Update ACM to use RTPHeader instead of WebRtcRTPHeader

Bug: webrtc:5876
Change-Id: Id3311dcf508cca34495349197eeac2edf8783772
Reviewed-on: https://webrtc-review.googlesource.com/c/123188
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26729}
Niels Möller 2019-02-15 15:21:47 +01:00 committed by Commit Bot
parent 389b1672a3
commit afb5dbbf4e
15 changed files with 100 additions and 120 deletions

View file

@ -57,36 +57,19 @@ constexpr double kAudioSampleDurationSeconds = 0.01;
constexpr int kVoiceEngineMinMinPlayoutDelayMs = 0;
constexpr int kVoiceEngineMaxMinPlayoutDelayMs = 10000;
webrtc::FrameType WebrtcFrameTypeForMediaTransportFrameType(
MediaTransportEncodedAudioFrame::FrameType frame_type) {
switch (frame_type) {
case MediaTransportEncodedAudioFrame::FrameType::kSpeech:
return kAudioFrameSpeech;
break;
case MediaTransportEncodedAudioFrame::FrameType::
kDiscountinuousTransmission:
return kAudioFrameCN;
break;
}
}
WebRtcRTPHeader CreateWebrtcRTPHeaderForMediaTransportFrame(
RTPHeader CreateRTPHeaderForMediaTransportFrame(
const MediaTransportEncodedAudioFrame& frame,
uint64_t channel_id) {
webrtc::WebRtcRTPHeader webrtc_header = {};
webrtc_header.header.payloadType = frame.payload_type();
webrtc_header.header.payload_type_frequency = frame.sampling_rate_hz();
webrtc_header.header.timestamp = frame.starting_sample_index();
webrtc_header.header.sequenceNumber = frame.sequence_number();
webrtc::RTPHeader rtp_header;
rtp_header.payloadType = frame.payload_type();
rtp_header.payload_type_frequency = frame.sampling_rate_hz();
rtp_header.timestamp = frame.starting_sample_index();
rtp_header.sequenceNumber = frame.sequence_number();
webrtc_header.frameType =
WebrtcFrameTypeForMediaTransportFrameType(frame.frame_type());
webrtc_header.header.ssrc = static_cast<uint32_t>(channel_id);
rtp_header.ssrc = static_cast<uint32_t>(channel_id);
// The rest are initialized by the RTPHeader constructor.
return webrtc_header;
return rtp_header;
}
class ChannelReceive : public ChannelReceiveInterface,
@ -189,7 +172,7 @@ class ChannelReceive : public ChannelReceiveInterface,
int32_t OnReceivedPayloadData(const uint8_t* payloadData,
size_t payloadSize,
const WebRtcRTPHeader* rtpHeader);
const RTPHeader& rtpHeader);
bool Playing() const {
rtc::CritScope lock(&playing_lock_);
@ -277,10 +260,9 @@ class ChannelReceive : public ChannelReceiveInterface,
webrtc::CryptoOptions crypto_options_;
};
int32_t ChannelReceive::OnReceivedPayloadData(
const uint8_t* payloadData,
size_t payloadSize,
const WebRtcRTPHeader* rtpHeader) {
int32_t ChannelReceive::OnReceivedPayloadData(const uint8_t* payloadData,
size_t payloadSize,
const RTPHeader& rtp_header) {
// We should not be receiving any RTP packets if media_transport is set.
RTC_CHECK(!media_transport_);
@ -291,7 +273,7 @@ int32_t ChannelReceive::OnReceivedPayloadData(
}
// Push the incoming payload (parsed and ready for decoding) into the ACM
if (audio_coding_->IncomingPacket(payloadData, payloadSize, *rtpHeader) !=
if (audio_coding_->IncomingPacket(payloadData, payloadSize, rtp_header) !=
0) {
RTC_DLOG(LS_ERROR) << "ChannelReceive::OnReceivedPayloadData() unable to "
"push data to the ACM";
@ -324,8 +306,7 @@ void ChannelReceive::OnData(uint64_t channel_id,
// Send encoded audio frame to Decoder / NetEq.
if (audio_coding_->IncomingPacket(
frame.encoded_data().data(), frame.encoded_data().size(),
CreateWebrtcRTPHeaderForMediaTransportFrame(frame, channel_id)) !=
0) {
CreateRTPHeaderForMediaTransportFrame(frame, channel_id)) != 0) {
RTC_DLOG(LS_ERROR) << "ChannelReceive::OnData: unable to "
"push data to the ACM";
}
@ -637,8 +618,6 @@ bool ChannelReceive::ReceivePacket(const uint8_t* packet,
const uint8_t* payload = packet + header.headerLength;
assert(packet_length >= header.headerLength);
size_t payload_length = packet_length - header.headerLength;
WebRtcRTPHeader webrtc_rtp_header = {};
webrtc_rtp_header.header = header;
size_t payload_data_length = payload_length - header.paddingLength;
@ -677,11 +656,9 @@ bool ChannelReceive::ReceivePacket(const uint8_t* packet,
}
if (payload_data_length == 0) {
webrtc_rtp_header.frameType = kEmptyFrame;
return OnReceivedPayloadData(nullptr, 0, &webrtc_rtp_header);
return OnReceivedPayloadData(nullptr, 0, header);
}
return OnReceivedPayloadData(payload, payload_data_length,
&webrtc_rtp_header);
return OnReceivedPayloadData(payload, payload_data_length, header);
}
// May be called on either worker thread or network thread.
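
For orientation, here is a minimal standalone sketch (not part of the commit) of the pattern the hunks above converge on: the flat RTPHeader is filled in field by field and handed to the receive path by const reference, with no nested .header member and no separate frameType flag. The struct and functions below are simplified, hypothetical stand-ins for the WebRTC types, limited to the fields that appear in this file.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for webrtc::RTPHeader, limited to the fields used above.
struct RTPHeader {
  uint8_t payloadType = 0;
  int payload_type_frequency = 0;
  uint32_t timestamp = 0;
  uint16_t sequenceNumber = 0;
  uint32_t ssrc = 0;
};

// Mirrors the shape of CreateRTPHeaderForMediaTransportFrame(): each field is
// set directly on the flat header instead of on a nested .header member.
RTPHeader MakeHeader(uint8_t payload_type, int sample_rate_hz,
                     uint32_t starting_sample, uint16_t sequence_number,
                     uint64_t channel_id) {
  RTPHeader rtp_header;
  rtp_header.payloadType = payload_type;
  rtp_header.payload_type_frequency = sample_rate_hz;
  rtp_header.timestamp = starting_sample;
  rtp_header.sequenceNumber = sequence_number;
  rtp_header.ssrc = static_cast<uint32_t>(channel_id);
  return rtp_header;  // Remaining fields keep their default values.
}

// The receive path now takes the header by const reference, not by pointer.
int32_t OnReceivedPayloadData(const uint8_t* payload, size_t payload_size,
                              const RTPHeader& rtp_header) {
  std::printf("PT=%d TS=%u SN=%u (%zu bytes)\n", rtp_header.payloadType,
              static_cast<unsigned>(rtp_header.timestamp),
              static_cast<unsigned>(rtp_header.sequenceNumber), payload_size);
  return 0;  // Payload bytes would be pushed to the ACM here.
}

int main() {
  const uint8_t payload[] = {0x01, 0x02};
  return OnReceivedPayloadData(payload, sizeof(payload),
                               MakeHeader(111, 48000, 960, 1, 0x1234));
}

Passing the header by const reference rather than by pointer also makes explicit that the callee neither takes ownership of it nor mutates it.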

View file

@ -120,18 +120,15 @@ void AcmReceiveTestOldApi::Run() {
AfterGetAudio();
}
// Insert packet after converting from RTPHeader to WebRtcRTPHeader.
WebRtcRTPHeader header;
header.header = packet->header();
header.frameType = kAudioFrameSpeech;
EXPECT_EQ(0,
acm_->IncomingPacket(
packet->payload(),
static_cast<int32_t>(packet->payload_length_bytes()), header))
EXPECT_EQ(0, acm_->IncomingPacket(
packet->payload(),
static_cast<int32_t>(packet->payload_length_bytes()),
packet->header()))
<< "Failure when inserting packet:" << std::endl
<< " PT = " << static_cast<int>(header.header.payloadType) << std::endl
<< " TS = " << header.header.timestamp << std::endl
<< " SN = " << header.header.sequenceNumber;
<< " PT = " << static_cast<int>(packet->header().payloadType)
<< std::endl
<< " TS = " << packet->header().timestamp << std::endl
<< " SN = " << packet->header().sequenceNumber;
}
}

View file

@ -78,15 +78,14 @@ int AcmReceiver::last_output_sample_rate_hz() const {
return neteq_->last_output_sample_rate_hz();
}
int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
int AcmReceiver::InsertPacket(const RTPHeader& rtp_header,
rtc::ArrayView<const uint8_t> incoming_payload) {
if (incoming_payload.empty()) {
neteq_->InsertEmptyPacket(rtp_header.header);
neteq_->InsertEmptyPacket(rtp_header);
return 0;
}
const RTPHeader& header = rtp_header.header; // Just a shorthand.
int payload_type = header.payloadType;
int payload_type = rtp_header.payloadType;
auto format = neteq_->GetDecoderFormat(payload_type);
if (format && absl::EqualsIgnoreCase(format->name, "red")) {
// This is a RED packet. Get the format of the audio codec.
@ -115,9 +114,10 @@ int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
} // |crit_sect_| is released.
uint32_t receive_timestamp = NowInTimestamp(format->clockrate_hz);
if (neteq_->InsertPacket(header, incoming_payload, receive_timestamp) < 0) {
if (neteq_->InsertPacket(rtp_header, incoming_payload, receive_timestamp) <
0) {
RTC_LOG(LERROR) << "AcmReceiver::InsertPacket "
<< static_cast<int>(header.payloadType)
<< static_cast<int>(rtp_header.payloadType)
<< " Failed to insert packet";
return -1;
}
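
A behavioral detail that follows from the hunk above and from the ReceivePacket change in the first file: with the wrapper gone there is no frameType field to mark an empty frame, so the empty/DTX case is signaled purely by a zero-length payload, which InsertPacket routes to the empty-packet path. Below is a minimal, self-contained sketch of that dispatch; the two Insert* helpers are hypothetical stand-ins for the NetEq calls, not the real API.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct RTPHeader {
  uint8_t payloadType = 0;
  uint16_t sequenceNumber = 0;
};

// Hypothetical stand-ins for the two NetEq insertion paths.
void InsertEmptyPacket(const RTPHeader& header) {
  std::printf("empty packet, SN=%u\n",
              static_cast<unsigned>(header.sequenceNumber));
}
void InsertRealPacket(const RTPHeader& header, const uint8_t* payload,
                      size_t size) {
  // The payload bytes would be handed to the decoder here.
  std::printf("payload packet, %zu bytes, PT=%d\n", size, header.payloadType);
}

// With no frameType on the flat header, "empty" is decided solely by the
// payload size, mirroring the empty()-check in AcmReceiver::InsertPacket.
int InsertPacket(const RTPHeader& rtp_header, const uint8_t* payload,
                 size_t size) {
  if (size == 0) {
    InsertEmptyPacket(rtp_header);
    return 0;
  }
  InsertRealPacket(rtp_header, payload, size);
  return 0;
}

int main() {
  RTPHeader header;
  header.payloadType = 103;
  header.sequenceNumber = 42;
  const uint8_t payload[] = {0x11, 0x22, 0x33};
  InsertPacket(header, payload, sizeof(payload));  // Normal packet.
  InsertPacket(header, nullptr, 0);                // DTX / empty frame.
  return 0;
}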

View file

@ -33,7 +33,6 @@ namespace webrtc {
class Clock;
class NetEq;
struct RTPHeader;
struct WebRtcRTPHeader;
namespace acm2 {
@ -58,7 +57,7 @@ class AcmReceiver {
// Return value : 0 if OK.
// <0 if NetEq returned an error.
//
int InsertPacket(const WebRtcRTPHeader& rtp_header,
int InsertPacket(const RTPHeader& rtp_header,
rtc::ArrayView<const uint8_t> incoming_payload);
//

View file

@ -50,13 +50,12 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
acm_->InitializeReceiver();
acm_->RegisterTransportCallback(this);
rtp_header_.header.sequenceNumber = 0;
rtp_header_.header.timestamp = 0;
rtp_header_.header.markerBit = false;
rtp_header_.header.ssrc = 0x12345678; // Arbitrary.
rtp_header_.header.numCSRCs = 0;
rtp_header_.header.payloadType = 0;
rtp_header_.frameType = kAudioFrameSpeech;
rtp_header_.sequenceNumber = 0;
rtp_header_.timestamp = 0;
rtp_header_.markerBit = false;
rtp_header_.ssrc = 0x12345678; // Arbitrary.
rtp_header_.numCSRCs = 0;
rtp_header_.payloadType = 0;
}
void TearDown() override {}
@ -113,9 +112,8 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
if (frame_type == kEmptyFrame)
return 0;
rtp_header_.header.payloadType = payload_type;
rtp_header_.frameType = frame_type;
rtp_header_.header.timestamp = timestamp;
rtp_header_.payloadType = payload_type;
rtp_header_.timestamp = timestamp;
int ret_val = receiver_->InsertPacket(
rtp_header_,
@ -124,7 +122,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
assert(false);
return -1;
}
rtp_header_.header.sequenceNumber++;
rtp_header_.sequenceNumber++;
packet_sent_ = true;
last_frame_type_ = frame_type;
return 0;
@ -137,7 +135,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
AudioCodingModule::Config config_;
std::unique_ptr<AcmReceiver> receiver_;
std::unique_ptr<AudioCodingModule> acm_;
WebRtcRTPHeader rtp_header_;
RTPHeader rtp_header_;
uint32_t timestamp_;
bool packet_sent_; // Set when SendData is called reset when inserting audio.
uint32_t last_packet_send_timestamp_;

View file

@ -92,7 +92,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
// Incoming packet from network parsed and ready for decode.
int IncomingPacket(const uint8_t* incoming_payload,
const size_t payload_length,
const WebRtcRTPHeader& rtp_info) override;
const RTPHeader& rtp_info) override;
// Minimum playout delay.
int SetMinimumPlayoutDelay(int time_ms) override;
@ -688,7 +688,7 @@ absl::optional<std::pair<int, SdpAudioFormat>>
// Incoming packet from network parsed and ready for decode.
int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
const size_t payload_length,
const WebRtcRTPHeader& rtp_header) {
const RTPHeader& rtp_header) {
RTC_DCHECK_EQ(payload_length == 0, incoming_payload == nullptr);
return receiver_.InsertPacket(
rtp_header,

View file

@ -71,21 +71,20 @@ class RtpUtility {
virtual ~RtpUtility() {}
void Populate(WebRtcRTPHeader* rtp_header) {
rtp_header->header.sequenceNumber = 0xABCD;
rtp_header->header.timestamp = 0xABCDEF01;
rtp_header->header.payloadType = payload_type_;
rtp_header->header.markerBit = false;
rtp_header->header.ssrc = 0x1234;
rtp_header->header.numCSRCs = 0;
rtp_header->frameType = kAudioFrameSpeech;
void Populate(RTPHeader* rtp_header) {
rtp_header->sequenceNumber = 0xABCD;
rtp_header->timestamp = 0xABCDEF01;
rtp_header->payloadType = payload_type_;
rtp_header->markerBit = false;
rtp_header->ssrc = 0x1234;
rtp_header->numCSRCs = 0;
rtp_header->header.payload_type_frequency = kSampleRateHz;
rtp_header->payload_type_frequency = kSampleRateHz;
}
void Forward(WebRtcRTPHeader* rtp_header) {
++rtp_header->header.sequenceNumber;
rtp_header->header.timestamp += samples_per_packet_;
void Forward(RTPHeader* rtp_header) {
++rtp_header->sequenceNumber;
rtp_header->timestamp += samples_per_packet_;
}
private:
@ -237,7 +236,7 @@ class AudioCodingModuleTestOldApi : public ::testing::Test {
std::unique_ptr<RtpUtility> rtp_utility_;
std::unique_ptr<AudioCodingModule> acm_;
PacketizationCallbackStubOldApi packet_cb_;
WebRtcRTPHeader rtp_header_;
RTPHeader rtp_header_;
AudioFrame input_frame_;
absl::optional<SdpAudioFormat> audio_format_;
@ -792,16 +791,15 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
++receive_packet_count_;
// Encode new frame.
uint32_t input_timestamp = rtp_header_.header.timestamp;
uint32_t input_timestamp = rtp_header_.timestamp;
while (info.encoded_bytes == 0) {
info = isac_encoder_->Encode(input_timestamp,
audio_loop_.GetNextBlock(), &encoded);
input_timestamp += 160; // 10 ms at 16 kHz.
}
EXPECT_EQ(rtp_header_.header.timestamp + kPacketSizeSamples,
input_timestamp);
EXPECT_EQ(rtp_header_.header.timestamp, info.encoded_timestamp);
EXPECT_EQ(rtp_header_.header.payloadType, info.payload_type);
EXPECT_EQ(rtp_header_.timestamp + kPacketSizeSamples, input_timestamp);
EXPECT_EQ(rtp_header_.timestamp, info.encoded_timestamp);
EXPECT_EQ(rtp_header_.payloadType, info.payload_type);
}
// Now we're not holding the crit sect when calling ACM.
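
The Populate()/Forward() helpers above carry the per-packet header bookkeeping for these tests. A compact, self-contained sketch of how such a pair is typically driven (simplified types, not the real test fixture):

#include <cstdint>
#include <cstdio>

struct RTPHeader {
  uint16_t sequenceNumber = 0;
  uint32_t timestamp = 0;
  uint8_t payloadType = 0;
};

constexpr uint32_t kSamplesPerPacket = 160;  // 10 ms at 16 kHz.

void Populate(RTPHeader* rtp_header, uint8_t payload_type) {
  rtp_header->sequenceNumber = 0xABCD;
  rtp_header->timestamp = 0xABCDEF01;
  rtp_header->payloadType = payload_type;
}

void Forward(RTPHeader* rtp_header) {
  ++rtp_header->sequenceNumber;
  rtp_header->timestamp += kSamplesPerPacket;
}

int main() {
  RTPHeader header;
  Populate(&header, /*payload_type=*/107);
  for (int i = 0; i < 3; ++i) {
    std::printf("packet %d: SN=%u TS=%u\n", i,
                static_cast<unsigned>(header.sequenceNumber),
                static_cast<unsigned>(header.timestamp));
    Forward(&header);  // Advance sequence number and timestamp per packet.
  }
}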

View file

@ -21,17 +21,19 @@
#include "api/audio_codecs/audio_encoder.h"
#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
#include "modules/audio_coding/neteq/include/neteq.h"
#include "modules/include/module_common_types.h"
#include "rtc_base/deprecation.h"
#include "rtc_base/function_view.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
// forward declarations
struct WebRtcRTPHeader;
class AudioDecoder;
class AudioEncoder;
class AudioFrame;
class RTPFragmentationHeader;
struct RTPHeader;
#define WEBRTC_10MS_PCM_AUDIO 960 // 16 bits super wideband 48 kHz
@ -246,7 +248,13 @@ class AudioCodingModule {
//
virtual int32_t IncomingPacket(const uint8_t* incoming_payload,
const size_t payload_len_bytes,
const WebRtcRTPHeader& rtp_info) = 0;
const RTPHeader& rtp_header) = 0;
RTC_DEPRECATED
int32_t IncomingPacket(const uint8_t* incoming_payload,
const size_t payload_len_bytes,
const WebRtcRTPHeader& rtp_info) {
return IncomingPacket(incoming_payload, payload_len_bytes, rtp_info.header);
}
///////////////////////////////////////////////////////////////////////////
// int SetMinimumPlayoutDelay()
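
The RTC_DEPRECATED overload above is the migration shim: legacy call sites that still hold a WebRtcRTPHeader keep compiling (with a deprecation warning) while new code passes the flat header directly. A standalone sketch of that pattern follows; the class and the standard [[deprecated]] attribute are simplified stand-ins for the real AudioCodingModule and the RTC_DEPRECATED macro.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct RTPHeader {
  uint8_t payloadType = 0;
};

// Legacy wrapper type that only exists to keep old callers compiling.
struct WebRtcRTPHeader {
  RTPHeader header;
};

class AudioCodingModuleLike {
 public:
  // New primary overload: takes the flat header.
  int32_t IncomingPacket(const uint8_t* payload, size_t length,
                         const RTPHeader& rtp_header) {
    std::printf("decode %zu bytes, PT=%d\n", length, rtp_header.payloadType);
    return 0;
  }

  // Deprecated shim: unwraps the legacy struct and forwards. Old call sites
  // keep working, but new code should pass RTPHeader directly.
  [[deprecated("pass RTPHeader directly")]] int32_t IncomingPacket(
      const uint8_t* payload, size_t length, const WebRtcRTPHeader& rtp_info) {
    return IncomingPacket(payload, length, rtp_info.header);
  }
};

int main() {
  AudioCodingModuleLike acm;
  const uint8_t payload[] = {0xDE, 0xAD};
  RTPHeader header;
  header.payloadType = 111;
  return acm.IncomingPacket(payload, sizeof(payload), header);
}

Because the shim is defined inline in the header, deleting it later, once the last WebRtcRTPHeader caller is gone, is a header-only change.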

View file

@ -116,7 +116,8 @@ int32_t Channel::SendData(FrameType frameType,
return 0;
}
status = _receiverACM->IncomingPacket(_payloadData, payloadDataSize, rtpInfo);
status = _receiverACM->IncomingPacket(_payloadData, payloadDataSize,
rtpInfo.header);
return status;
}

View file

@ -168,7 +168,7 @@ bool Receiver::IncomingPacket() {
}
EXPECT_EQ(0, _acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes,
_rtpInfo));
_rtpInfo.header));
_realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,
_payloadSizeBytes, &_nextTime);
if (_realPayloadSizeBytes == 0 && _rtpStream->EndOfFile()) {

View file

@ -57,7 +57,8 @@ bool ReceiverWithPacketLoss::IncomingPacket() {
}
if (!PacketLost()) {
_acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes, _rtpInfo);
_acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes,
_rtpInfo.header);
}
packet_counter_++;
_realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,

View file

@ -15,6 +15,7 @@
#include <queue>
#include "modules/audio_coding/include/audio_coding_module.h"
#include "modules/include/module_common_types.h"
#include "rtc_base/synchronization/rw_lock_wrapper.h"
namespace webrtc {

View file

@ -66,14 +66,14 @@ int32_t TestPack::SendData(FrameType frame_type,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation) {
WebRtcRTPHeader rtp_info;
RTPHeader rtp_header;
int32_t status;
rtp_info.header.markerBit = false;
rtp_info.header.ssrc = 0;
rtp_info.header.sequenceNumber = sequence_number_++;
rtp_info.header.payloadType = payload_type;
rtp_info.header.timestamp = timestamp;
rtp_header.markerBit = false;
rtp_header.ssrc = 0;
rtp_header.sequenceNumber = sequence_number_++;
rtp_header.payloadType = payload_type;
rtp_header.timestamp = timestamp;
if (frame_type == kEmptyFrame) {
// Skip this frame.
@ -83,7 +83,8 @@ int32_t TestPack::SendData(FrameType frame_type,
// Only run mono for all test cases.
memcpy(payload_data_, payload_data, payload_size);
status = receiver_acm_->IncomingPacket(payload_data_, payload_size, rtp_info);
status =
receiver_acm_->IncomingPacket(payload_data_, payload_size, rtp_header);
payload_size_ = payload_size;
timestamp_diff_ = timestamp - last_in_timestamp_;
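
The test transports in this file and the next follow the same loopback idiom: the send-side callback synthesizes one RTPHeader per encoded frame and feeds the payload straight into the receiving module. A simplified, self-contained sketch of that idiom (the receiver and sender classes below are hypothetical stand-ins, not the real AudioPacketizationCallback):

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct RTPHeader {
  bool markerBit = false;
  uint32_t ssrc = 0;
  uint16_t sequenceNumber = 0;
  uint8_t payloadType = 0;
  uint32_t timestamp = 0;
};

// Simplified stand-in for the receiving AudioCodingModule.
class FakeReceiver {
 public:
  int32_t IncomingPacket(const uint8_t* payload, size_t size,
                         const RTPHeader& rtp_header) {
    std::printf("received SN=%u TS=%u (%zu bytes)\n",
                static_cast<unsigned>(rtp_header.sequenceNumber),
                static_cast<unsigned>(rtp_header.timestamp), size);
    return 0;
  }
};

// Loopback send callback: builds a flat RTPHeader per frame and hands the
// encoded payload directly to the receiver, as TestPack::SendData does.
class LoopbackSender {
 public:
  explicit LoopbackSender(FakeReceiver* receiver) : receiver_(receiver) {}

  int32_t SendData(uint8_t payload_type, uint32_t timestamp,
                   const uint8_t* payload, size_t size) {
    RTPHeader rtp_header;
    rtp_header.markerBit = false;
    rtp_header.ssrc = 0;
    rtp_header.sequenceNumber = sequence_number_++;
    rtp_header.payloadType = payload_type;
    rtp_header.timestamp = timestamp;
    return receiver_->IncomingPacket(payload, size, rtp_header);
  }

 private:
  FakeReceiver* receiver_;
  uint16_t sequence_number_ = 0;
};

int main() {
  FakeReceiver receiver;
  LoopbackSender sender(&receiver);
  const uint8_t payload[] = {0xAB, 0xCD};
  sender.SendData(/*payload_type=*/107, /*timestamp=*/0, payload, sizeof(payload));
  sender.SendData(107, 960, payload, sizeof(payload));
  return 0;
}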

View file

@ -46,14 +46,14 @@ int32_t TestPackStereo::SendData(const FrameType frame_type,
const uint8_t* payload_data,
const size_t payload_size,
const RTPFragmentationHeader* fragmentation) {
WebRtcRTPHeader rtp_info;
RTPHeader rtp_header;
int32_t status = 0;
rtp_info.header.markerBit = false;
rtp_info.header.ssrc = 0;
rtp_info.header.sequenceNumber = seq_no_++;
rtp_info.header.payloadType = payload_type;
rtp_info.header.timestamp = timestamp;
rtp_header.markerBit = false;
rtp_header.ssrc = 0;
rtp_header.sequenceNumber = seq_no_++;
rtp_header.payloadType = payload_type;
rtp_header.timestamp = timestamp;
if (frame_type == kEmptyFrame) {
// Skip this frame
return 0;
@ -61,7 +61,7 @@ int32_t TestPackStereo::SendData(const FrameType frame_type,
if (lost_packet_ == false) {
status =
receiver_acm_->IncomingPacket(payload_data, payload_size, rtp_info);
receiver_acm_->IncomingPacket(payload_data, payload_size, rtp_header);
if (frame_type != kAudioFrameCN) {
payload_size_ = static_cast<int>(payload_size);

View file

@ -37,12 +37,11 @@ class TargetDelayTest : public ::testing::Test {
{{pltype, {"L16", kSampleRateHz, 1}}};
acm_->SetReceiveCodecs(receive_codecs);
rtp_info_.header.payloadType = pltype;
rtp_info_.header.timestamp = 0;
rtp_info_.header.ssrc = 0x12345678;
rtp_info_.header.markerBit = false;
rtp_info_.header.sequenceNumber = 0;
rtp_info_.frameType = kAudioFrameSpeech;
rtp_header_.payloadType = pltype;
rtp_header_.timestamp = 0;
rtp_header_.ssrc = 0x12345678;
rtp_header_.markerBit = false;
rtp_header_.sequenceNumber = 0;
int16_t audio[kFrameSizeSamples];
const int kRange = 0x7FF; // 2047, easy for masking.
@ -98,10 +97,10 @@ class TargetDelayTest : public ::testing::Test {
static const int kInterarrivalJitterPacket = 2;
void Push() {
rtp_info_.header.timestamp += kFrameSizeSamples;
rtp_info_.header.sequenceNumber++;
ASSERT_EQ(0,
acm_->IncomingPacket(payload_, kFrameSizeSamples * 2, rtp_info_));
rtp_header_.timestamp += kFrameSizeSamples;
rtp_header_.sequenceNumber++;
ASSERT_EQ(
0, acm_->IncomingPacket(payload_, kFrameSizeSamples * 2, rtp_header_));
}
// Pull audio equivalent to the amount of audio in one RTP packet.
@ -150,7 +149,7 @@ class TargetDelayTest : public ::testing::Test {
}
std::unique_ptr<AudioCodingModule> acm_;
WebRtcRTPHeader rtp_info_;
RTPHeader rtp_header_;
uint8_t payload_[kPayloadLenBytes];
};