webrtc/modules/audio_coding/neteq/tools/neteq_test.cc
Alessio Bazzica fab3460a82 Revert "Reland "Add plumbing of RtpPacketInfos to each AudioFrame as input for SourceTracker.""
This reverts commit 9973933d2e.

Reason for revert: breaking downstream projects and not reviewed by direct owners

Original change's description:
> Reland "Add plumbing of RtpPacketInfos to each AudioFrame as input for SourceTracker."
> 
> This reverts commit 24192c267a.
> 
> Reason for revert: Analyzed the performance regression in more detail.
> 
> Most of the regression comes from the extra RtpPacketInfos-related memory allocations in every `NetEq::GetAudio()` call. Commit 1796a820f6 has removed roughly 2/3rds of the extra allocations from the impacted perf tests. Remaining perf impact is expected to be about "8 microseconds of CPU time per second" on the Linux benchmarking machines and "15 us per second" on Windows/Mac.
> 
> There are options to optimize further, but they are unlikely to be worth doing. Note, for example, that `NetEqPerformanceTest` uses the PCM codec while real-world use cases would likely use the much heavier Opus codec. The numbers from `OpusSpeedTest` and `NetEqPerformanceTest` suggest that Opus decoding is about 10x as expensive as NetEq overall.
> 
> Original change's description:
> > Revert "Add plumbing of RtpPacketInfos to each AudioFrame as input for SourceTracker."
> >
> > This reverts commit 3e8ef940fe.
> >
> > Reason for revert: This CL causes a performance regression in NetEq, see https://bugs.chromium.org/p/chromium/issues/detail?id=982260.
> >
> > Original change's description:
> > > Add plumbing of RtpPacketInfos to each AudioFrame as input for SourceTracker.
> > >
> > > This change adds the plumbing of RtpPacketInfo from ChannelReceive::OnRtpPacket() to ChannelReceive::GetAudioFrameWithInfo() for audio. It is a step towards replacing the non-spec compliant ContributingSources that updates itself at packet-receive time, with the spec-compliant SourceTracker that will update itself at frame-delivery-to-track time.
> > >
> > > Bug: webrtc:10668
> > > Change-Id: I03385d6865bbc7bfbef7634f88de820a934f787a
> > > Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/139890
> > > Reviewed-by: Stefan Holmer <stefan@webrtc.org>
> > > Reviewed-by: Minyue Li <minyue@webrtc.org>
> > > Commit-Queue: Chen Xing <chxg@google.com>
> > > Cr-Commit-Position: refs/heads/master@{#28434}
> >
> > TBR=kwiberg@webrtc.org,stefan@webrtc.org,minyue@webrtc.org,chxg@google.com
> >
> > Bug: webrtc:10668, chromium:982260
> > Change-Id: I5e2cfde78c59d1123e21869564d76ed3f6193a5c
> > Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/145339
> > Reviewed-by: Ivo Creusen <ivoc@webrtc.org>
> > Commit-Queue: Ivo Creusen <ivoc@webrtc.org>
> > Cr-Commit-Position: refs/heads/master@{#28561}
> 
> TBR=kwiberg@webrtc.org,stefan@webrtc.org,ivoc@webrtc.org,minyue@webrtc.org,chxg@google.com
> 
> # Not skipping CQ checks because original CL landed > 1 day ago.
> 
> Bug: webrtc:10668, chromium:982260
> Change-Id: Ie375a0b327ee368317bf3a04b2f1415c3a974470
> Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/146707
> Reviewed-by: Stefan Holmer <stefan@webrtc.org>
> Commit-Queue: Chen Xing <chxg@google.com>
> Cr-Commit-Position: refs/heads/master@{#28664}

TBR=kwiberg@webrtc.org,stefan@webrtc.org,ivoc@webrtc.org,minyue@webrtc.org,chxg@google.com

Change-Id: I652cb0814d83b514d3bee34e65ca3bb693099b22
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:10668, chromium:982260
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/146712
Reviewed-by: Alessio Bazzica <alessiob@webrtc.org>
Commit-Queue: Alessio Bazzica <alessiob@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28671}
2019-07-24 16:41:13 +00:00


/*
 *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include <iomanip>
#include <iostream>
#include "modules/rtp_rtcp/source/byte_io.h"
namespace webrtc {
namespace test {
namespace {
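
// Translates an optional NetEqSimulator::Action into the corresponding NetEq
// operation, so that it can be passed on to NetEq::GetAudio().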
absl::optional<Operations> ActionToOperations(
    absl::optional<NetEqSimulator::Action> a) {
  if (!a) {
    return absl::nullopt;
  }
  switch (*a) {
    case NetEqSimulator::Action::kAccelerate:
      return absl::make_optional(kAccelerate);
    case NetEqSimulator::Action::kExpand:
      return absl::make_optional(kExpand);
    case NetEqSimulator::Action::kNormal:
      return absl::make_optional(kNormal);
    case NetEqSimulator::Action::kPreemptiveExpand:
      return absl::make_optional(kPreemptiveExpand);
  }
}

}  // namespace

void DefaultNetEqTestErrorCallback::OnInsertPacketError(
    const NetEqInput::PacketData& packet) {
  std::cerr << "InsertPacket returned an error." << std::endl;
  std::cerr << "Packet data: " << packet.ToString() << std::endl;
  FATAL();
}

void DefaultNetEqTestErrorCallback::OnGetAudioError() {
  std::cerr << "GetAudio returned an error." << std::endl;
  FATAL();
}

NetEqTest::NetEqTest(const NetEq::Config& config,
                     rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
                     const DecoderMap& codecs,
                     std::unique_ptr<std::ofstream> text_log,
                     std::unique_ptr<NetEqInput> input,
                     std::unique_ptr<AudioSink> output,
                     Callbacks callbacks)
    : neteq_(NetEq::Create(config, decoder_factory)),
      input_(std::move(input)),
      output_(std::move(output)),
      callbacks_(callbacks),
      sample_rate_hz_(config.sample_rate_hz),
      text_log_(std::move(text_log)) {
  RTC_CHECK(!config.enable_muted_state)
      << "The code does not handle enable_muted_state";
  RegisterDecoders(codecs);
}

NetEqTest::~NetEqTest() = default;
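
// Runs the full simulation, one GetAudio step at a time, and returns the
// total simulated time in milliseconds.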
int64_t NetEqTest::Run() {
  int64_t simulation_time = 0;
  SimulationStepResult step_result;
  do {
    step_result = RunToNextGetAudio();
    simulation_time += step_result.simulation_step_ms;
  } while (!step_result.is_simulation_finished);
  if (callbacks_.simulation_ended_callback) {
    callbacks_.simulation_ended_callback->SimulationEnded(simulation_time);
  }
  return simulation_time;
}
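
// Advances the simulation to the next GetAudio event, inserting any packets
// that become due along the way, and returns the result of that step.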
NetEqTest::SimulationStepResult NetEqTest::RunToNextGetAudio() {
  SimulationStepResult result;
  const int64_t start_time_ms = *input_->NextEventTime();
  int64_t time_now_ms = start_time_ms;
  current_state_.packet_iat_ms.clear();

  while (!input_->ended()) {
    // Advance time to next event.
    RTC_DCHECK(input_->NextEventTime());
    time_now_ms = *input_->NextEventTime();
    // Check if it is time to insert packet.
    if (input_->NextPacketTime() && time_now_ms >= *input_->NextPacketTime()) {
      std::unique_ptr<NetEqInput::PacketData> packet_data = input_->PopPacket();
      RTC_CHECK(packet_data);
      const size_t payload_data_length =
          packet_data->payload.size() - packet_data->header.paddingLength;
      if (payload_data_length != 0) {
        int error = neteq_->InsertPacket(
            packet_data->header,
            rtc::ArrayView<const uint8_t>(packet_data->payload),
            static_cast<uint32_t>(packet_data->time_ms * sample_rate_hz_ /
                                  1000));
        if (error != NetEq::kOK && callbacks_.error_callback) {
          callbacks_.error_callback->OnInsertPacketError(*packet_data);
        }
        if (callbacks_.post_insert_packet) {
          callbacks_.post_insert_packet->AfterInsertPacket(*packet_data,
                                                           neteq_.get());
        }
      } else {
        neteq_->InsertEmptyPacket(packet_data->header);
      }
      if (last_packet_time_ms_) {
        current_state_.packet_iat_ms.push_back(time_now_ms -
                                               *last_packet_time_ms_);
      }
      if (text_log_) {
        const auto ops_state = neteq_->GetOperationsAndState();
        const auto delta_wallclock =
            last_packet_time_ms_ ? (time_now_ms - *last_packet_time_ms_) : -1;
        const auto delta_timestamp =
            last_packet_timestamp_
                ? (static_cast<int64_t>(packet_data->header.timestamp) -
                   *last_packet_timestamp_) *
                      1000 / sample_rate_hz_
                : -1;
        const auto packet_size_bytes =
            packet_data->payload.size() == 12
                ? ByteReader<uint32_t>::ReadLittleEndian(
                      &packet_data->payload[8])
                : -1;
        *text_log_ << "Packet - wallclock: " << std::setw(5) << time_now_ms
                   << ", delta wc: " << std::setw(4) << delta_wallclock
                   << ", seq_no: " << packet_data->header.sequenceNumber
                   << ", timestamp: " << std::setw(10)
                   << packet_data->header.timestamp
                   << ", delta ts: " << std::setw(4) << delta_timestamp
                   << ", size: " << std::setw(5) << packet_size_bytes
                   << ", frame size: " << std::setw(3)
                   << ops_state.current_frame_size_ms
                   << ", buffer size: " << std::setw(4)
                   << ops_state.current_buffer_size_ms << std::endl;
      }
      last_packet_time_ms_ = absl::make_optional<int>(time_now_ms);
      last_packet_timestamp_ =
          absl::make_optional<uint32_t>(packet_data->header.timestamp);
    }

    // Check if it is time to get output audio.
    if (input_->NextOutputEventTime() &&
        time_now_ms >= *input_->NextOutputEventTime()) {
      if (callbacks_.get_audio_callback) {
        callbacks_.get_audio_callback->BeforeGetAudio(neteq_.get());
      }
      AudioFrame out_frame;
      bool muted;
      int error = neteq_->GetAudio(&out_frame, &muted,
                                   ActionToOperations(next_action_));
      next_action_ = absl::nullopt;
      RTC_CHECK(!muted) << "The code does not handle enable_muted_state";
      if (error != NetEq::kOK) {
        if (callbacks_.error_callback) {
          callbacks_.error_callback->OnGetAudioError();
        }
      } else {
        sample_rate_hz_ = out_frame.sample_rate_hz_;
      }
      if (callbacks_.get_audio_callback) {
        callbacks_.get_audio_callback->AfterGetAudio(time_now_ms, out_frame,
                                                     muted, neteq_.get());
      }

      if (output_) {
        RTC_CHECK(output_->WriteArray(
            out_frame.data(),
            out_frame.samples_per_channel_ * out_frame.num_channels_));
      }

      input_->AdvanceOutputEvent();
      result.simulation_step_ms =
          input_->NextEventTime().value_or(time_now_ms) - start_time_ms;
      const auto operations_state = neteq_->GetOperationsAndState();
      current_state_.current_delay_ms = operations_state.current_buffer_size_ms;
      current_state_.packet_size_ms = operations_state.current_frame_size_ms;
      current_state_.next_packet_available =
          operations_state.next_packet_available;
      current_state_.packet_buffer_flushed =
          operations_state.packet_buffer_flushes >
          prev_ops_state_.packet_buffer_flushes;
      // TODO(ivoc): Add more accurate reporting by tracking the origin of
      // samples in the sync buffer.
      result.action_times_ms[Action::kExpand] = 0;
      result.action_times_ms[Action::kAccelerate] = 0;
      result.action_times_ms[Action::kPreemptiveExpand] = 0;
      result.action_times_ms[Action::kNormal] = 0;

      if (out_frame.speech_type_ == AudioFrame::SpeechType::kPLC ||
          out_frame.speech_type_ == AudioFrame::SpeechType::kPLCCNG) {
        // Consider the whole frame to be the result of expansion.
        result.action_times_ms[Action::kExpand] = 10;
      } else if (operations_state.accelerate_samples -
                     prev_ops_state_.accelerate_samples >
                 0) {
        // Consider the whole frame to be the result of acceleration.
        result.action_times_ms[Action::kAccelerate] = 10;
      } else if (operations_state.preemptive_samples -
                     prev_ops_state_.preemptive_samples >
                 0) {
        // Consider the whole frame to be the result of preemptive expansion.
        result.action_times_ms[Action::kPreemptiveExpand] = 10;
      } else {
        // Consider the whole frame to be the result of normal playout.
        result.action_times_ms[Action::kNormal] = 10;
      }

      auto lifetime_stats = LifetimeStats();
      if (text_log_) {
        const bool plc =
            (out_frame.speech_type_ == AudioFrame::SpeechType::kPLC) ||
            (out_frame.speech_type_ == AudioFrame::SpeechType::kPLCCNG);
        const bool cng = out_frame.speech_type_ == AudioFrame::SpeechType::kCNG;
        const bool voice_concealed =
            (lifetime_stats.concealed_samples -
             lifetime_stats.silent_concealed_samples) >
            (prev_lifetime_stats_.concealed_samples -
             prev_lifetime_stats_.silent_concealed_samples);
        *text_log_ << "GetAudio - wallclock: " << std::setw(5) << time_now_ms
                   << ", delta wc: " << std::setw(4)
                   << (input_->NextEventTime().value_or(time_now_ms) -
                       start_time_ms)
                   << ", CNG: " << cng << ", PLC: " << plc
                   << ", voice concealed: " << voice_concealed
                   << ", buffer size: " << std::setw(4)
                   << current_state_.current_delay_ms << std::endl;
        if (operations_state.discarded_primary_packets >
            prev_ops_state_.discarded_primary_packets) {
          *text_log_ << "Discarded "
                     << (operations_state.discarded_primary_packets -
                         prev_ops_state_.discarded_primary_packets)
                     << " primary packets." << std::endl;
        }
        if (operations_state.packet_buffer_flushes >
            prev_ops_state_.packet_buffer_flushes) {
          *text_log_ << "Flushed packet buffer "
                     << (operations_state.packet_buffer_flushes -
                         prev_ops_state_.packet_buffer_flushes)
                     << " times." << std::endl;
        }
      }
      prev_lifetime_stats_ = lifetime_stats;
      const bool no_more_packets_to_decode =
          !input_->NextPacketTime() && !operations_state.next_packet_available;
      result.is_simulation_finished =
          no_more_packets_to_decode || input_->ended();
      prev_ops_state_ = operations_state;
      return result;
    }
  }
  result.simulation_step_ms =
      input_->NextEventTime().value_or(time_now_ms) - start_time_ms;
  result.is_simulation_finished = true;
  return result;
}
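
// Stores the action that will be passed to NetEq on the next GetAudio call.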
void NetEqTest::SetNextAction(NetEqTest::Action next_operation) {
  next_action_ = absl::optional<Action>(next_operation);
}

NetEqTest::NetEqState NetEqTest::GetNetEqState() {
  return current_state_;
}

NetEqNetworkStatistics NetEqTest::SimulationStats() {
  NetEqNetworkStatistics stats;
  RTC_CHECK_EQ(neteq_->NetworkStatistics(&stats), 0);
  return stats;
}

NetEqLifetimeStatistics NetEqTest::LifetimeStats() const {
  return neteq_->GetLifetimeStatistics();
}
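
// Returns a default mapping from RTP payload types to codec specifications,
// suitable for passing to RegisterDecoders().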
NetEqTest::DecoderMap NetEqTest::StandardDecoderMap() {
  DecoderMap codecs = {
      {0, SdpAudioFormat("pcmu", 8000, 1)},
      {8, SdpAudioFormat("pcma", 8000, 1)},
#ifdef WEBRTC_CODEC_ILBC
      {102, SdpAudioFormat("ilbc", 8000, 1)},
#endif
      {103, SdpAudioFormat("isac", 16000, 1)},
#if !defined(WEBRTC_ANDROID)
      {104, SdpAudioFormat("isac", 32000, 1)},
#endif
#ifdef WEBRTC_CODEC_OPUS
      {111, SdpAudioFormat("opus", 48000, 2)},
#endif
      {93, SdpAudioFormat("l16", 8000, 1)},
      {94, SdpAudioFormat("l16", 16000, 1)},
      {95, SdpAudioFormat("l16", 32000, 1)},
      {96, SdpAudioFormat("l16", 48000, 1)},
      {9, SdpAudioFormat("g722", 8000, 1)},
      {106, SdpAudioFormat("telephone-event", 8000, 1)},
      {114, SdpAudioFormat("telephone-event", 16000, 1)},
      {115, SdpAudioFormat("telephone-event", 32000, 1)},
      {116, SdpAudioFormat("telephone-event", 48000, 1)},
      {117, SdpAudioFormat("red", 8000, 1)},
      {13, SdpAudioFormat("cn", 8000, 1)},
      {98, SdpAudioFormat("cn", 16000, 1)},
      {99, SdpAudioFormat("cn", 32000, 1)},
      {100, SdpAudioFormat("cn", 48000, 1)}
  };
  return codecs;
}

void NetEqTest::RegisterDecoders(const DecoderMap& codecs) {
  for (const auto& c : codecs) {
    RTC_CHECK(neteq_->RegisterPayloadType(c.first, c.second))
        << "Cannot register " << c.second.name << " to payload type "
        << c.first;
  }
}

}  // namespace test
}  // namespace webrtc