Use MediaTransportInterface, for audio streams.
Bug: webrtc:9719
Change-Id: I6d3db66b781173b207de51d84193fbd34a7f3239
Reviewed-on: https://webrtc-review.googlesource.com/c/104642
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Anton Sukhanov <sukhanov@webrtc.org>
Reviewed-by: Peter Slatala <psla@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#25385}
This commit is contained in:
parent
2769cd540c
commit
7d76a31f3d
22 changed files with 498 additions and 80 deletions
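The change threads an optional MediaTransportInterface pointer through the audio send and receive configuration, next to the existing RTP transports. As a minimal sketch of the resulting API surface, based only on the constructors and fields added in this diff (the transport pointers are placeholders the caller would obtain elsewhere):

    // Hypothetical caller code; rtp_transport and media_transport are assumed
    // to be valid pointers owned by the application.
    webrtc::AudioSendStream::Config send_config(
        /*send_transport=*/rtp_transport,
        /*media_transport=*/media_transport);

    webrtc::AudioReceiveStream::Config receive_config;
    receive_config.rtcp_send_transport = rtp_transport;  // still required for now
    receive_config.media_transport = media_transport;    // new optional field

When media_transport is left null the streams keep using the RTP path; when it is set, the RTP-specific congestion-control and receiver-registration steps are skipped, as the hunks below show.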
@@ -42,7 +42,10 @@ class MediaTransportEncodedAudioFrame final {
kSpeech,

// DTX frame (equivalent to webrtc::kAudioFrameCN).
kDiscountinuousTransmission,

// DTX frame (equivalent to webrtc::kAudioFrameCN).
kDiscontinuousTransmission,

// TODO(nisse): Mis-spelled version, update users, then delete.
kDiscountinuousTransmission = kDiscontinuousTransmission,
};

MediaTransportEncodedAudioFrame(

@@ -128,24 +128,34 @@ if (rtc_include_tests) {
"mock_voe_channel_proxy.h",
"remix_resample_unittest.cc",
"test/audio_stats_test.cc",
"test/media_transport_test.cc",
"time_interval_unittest.cc",
"transport_feedback_packet_loss_tracker_unittest.cc",
]
deps = [
":audio",
":audio_end_to_end_test",
"../api:loopback_media_transport",
"../api:mock_audio_mixer",
"../api:mock_frame_decryptor",
"../api:mock_frame_encryptor",
"../api/audio:audio_frame_api",
"../api/audio_codecs:audio_codecs_api",
"../api/audio_codecs/opus:audio_decoder_opus",
"../api/audio_codecs/opus:audio_encoder_opus",
"../api/units:time_delta",
"../call:mock_bitrate_allocator",
"../call:mock_call_interfaces",
"../call:mock_rtp_interfaces",
"../call:rtp_interfaces",
"../call:rtp_receiver",
"../common_audio",
"../logging:mocks",
"../logging:rtc_event_log_api",
"../modules/audio_device:mock_audio_device",

# For TestAudioDeviceModule
"../modules/audio_device:audio_device_impl",
"../modules/audio_mixer:audio_mixer_impl",
"../modules/audio_processing:audio_processing_statistics",
"../modules/audio_processing:mocks",

@@ -153,6 +163,7 @@ if (rtc_include_tests) {
"../modules/pacing:pacing",
"../modules/rtp_rtcp:mock_rtp_rtcp",
"../modules/rtp_rtcp:rtp_rtcp_format",
"../modules/utility",
"../rtc_base:checks",
"../rtc_base:rtc_base_approved",
"../rtc_base:rtc_base_tests_utils",

@@ -58,6 +58,7 @@ std::string AudioReceiveStream::Config::ToString() const {
ss << "{rtp: " << rtp.ToString();
ss << ", rtcp_send_transport: "
<< (rtcp_send_transport ? "(Transport)" : "null");
ss << ", media_transport: " << (media_transport ? "(Transport)" : "null");
if (!sync_group.empty()) {
ss << ", sync_group: " << sync_group;
}

@@ -78,8 +79,8 @@ std::unique_ptr<voe::ChannelReceiveProxy> CreateChannelAndProxy(
return absl::make_unique<voe::ChannelReceiveProxy>(
absl::make_unique<voe::ChannelReceive>(
module_process_thread, internal_audio_state->audio_device_module(),
config.rtcp_send_transport, event_log, config.rtp.remote_ssrc,
config.jitter_buffer_max_packets,
config.media_transport, config.rtcp_send_transport, event_log,
config.rtp.remote_ssrc, config.jitter_buffer_max_packets,
config.jitter_buffer_fast_accelerate, config.decoder_factory,
config.codec_pair_id, config.frame_decryptor, config.crypto_options));
}

@@ -111,8 +112,6 @@ AudioReceiveStream::AudioReceiveStream(
std::unique_ptr<voe::ChannelReceiveProxy> channel_proxy)
: audio_state_(audio_state), channel_proxy_(std::move(channel_proxy)) {
RTC_LOG(LS_INFO) << "AudioReceiveStream: " << config.rtp.remote_ssrc;
RTC_DCHECK(receiver_controller);
RTC_DCHECK(packet_router);
RTC_DCHECK(config.decoder_factory);
RTC_DCHECK(config.rtcp_send_transport);
RTC_DCHECK(audio_state_);

@@ -120,13 +119,16 @@ AudioReceiveStream::AudioReceiveStream(
module_process_thread_checker_.DetachFromThread();

// Configure bandwidth estimation.
channel_proxy_->RegisterReceiverCongestionControlObjects(packet_router);

// Register with transport.
rtp_stream_receiver_ = receiver_controller->CreateReceiver(
config.rtp.remote_ssrc, channel_proxy_.get());
if (!config.media_transport) {
RTC_DCHECK(receiver_controller);
RTC_DCHECK(packet_router);
// Configure bandwidth estimation.
channel_proxy_->RegisterReceiverCongestionControlObjects(packet_router);

// Register with transport.
rtp_stream_receiver_ = receiver_controller->CreateReceiver(
config.rtp.remote_ssrc, channel_proxy_.get());
}
ConfigureStream(this, config, true);
}

@@ -135,7 +137,9 @@ AudioReceiveStream::~AudioReceiveStream() {
RTC_LOG(LS_INFO) << "~AudioReceiveStream: " << config_.rtp.remote_ssrc;
Stop();
channel_proxy_->DisassociateSendChannel();
channel_proxy_->ResetReceiverCongestionControlObjects();
if (!config_.media_transport) {
channel_proxy_->ResetReceiverCongestionControlObjects();
}
}

void AudioReceiveStream::Reconfigure(

@@ -217,7 +217,7 @@ TEST(AudioReceiveStreamTest, ConfigToString) {
"{rtp: {remote_ssrc: 1234, local_ssrc: 5678, transport_cc: off, nack: "
"{rtp_history_ms: 0}, extensions: [{uri: "
"urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 3}]}, "
"rtcp_send_transport: null}",
"rtcp_send_transport: null, media_transport: null}",
config.ToString());
}

@@ -58,14 +58,15 @@ void CallEncoder(const std::unique_ptr<voe::ChannelSendProxy>& channel_proxy,
std::unique_ptr<voe::ChannelSendProxy> CreateChannelAndProxy(
rtc::TaskQueue* worker_queue,
ProcessThread* module_process_thread,
MediaTransportInterface* media_transport,
RtcpRttStats* rtcp_rtt_stats,
RtcEventLog* event_log,
FrameEncryptorInterface* frame_encryptor,
const webrtc::CryptoOptions& crypto_options) {
return absl::make_unique<voe::ChannelSendProxy>(
absl::make_unique<voe::ChannelSend>(worker_queue, module_process_thread,
rtcp_rtt_stats, event_log,
frame_encryptor, crypto_options));
absl::make_unique<voe::ChannelSend>(
worker_queue, module_process_thread, media_transport, rtcp_rtt_stats,
event_log, frame_encryptor, crypto_options));
}
} // namespace

@@ -97,7 +98,7 @@ AudioSendStream::AudioSendStream(
const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
rtc::TaskQueue* worker_queue,
ProcessThread* module_process_thread,
RtpTransportControllerSendInterface* transport,
RtpTransportControllerSendInterface* rtp_transport,
BitrateAllocatorInterface* bitrate_allocator,
RtcEventLog* event_log,
RtcpRttStats* rtcp_rtt_stats,

@@ -106,7 +107,7 @@ AudioSendStream::AudioSendStream(
: AudioSendStream(config,
audio_state,
worker_queue,
transport,
rtp_transport,
bitrate_allocator,
event_log,
rtcp_rtt_stats,

@@ -114,6 +115,7 @@ AudioSendStream::AudioSendStream(
overall_call_lifetime,
CreateChannelAndProxy(worker_queue,
module_process_thread,
config.media_transport,
rtcp_rtt_stats,
event_log,
config.frame_encryptor,

@@ -123,7 +125,7 @@ AudioSendStream::AudioSendStream(
const webrtc::AudioSendStream::Config& config,
const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
rtc::TaskQueue* worker_queue,
RtpTransportControllerSendInterface* transport,
RtpTransportControllerSendInterface* rtp_transport,
BitrateAllocatorInterface* bitrate_allocator,
RtcEventLog* event_log,
RtcpRttStats* rtcp_rtt_stats,

@@ -131,12 +133,13 @@ AudioSendStream::AudioSendStream(
TimeInterval* overall_call_lifetime,
std::unique_ptr<voe::ChannelSendProxy> channel_proxy)
: worker_queue_(worker_queue),
config_(Config(nullptr)),
config_(Config(/*send_transport=*/nullptr,
/*media_transport=*/nullptr)),
audio_state_(audio_state),
channel_proxy_(std::move(channel_proxy)),
event_log_(event_log),
bitrate_allocator_(bitrate_allocator),
transport_(transport),
rtp_transport_(rtp_transport),
packet_loss_tracker_(kPacketLossTrackerMaxWindowSizeMs,
kPacketLossRateMinNumAckedPackets,
kRecoverablePacketLossRateMinNumAckedPairs),

@@ -148,7 +151,11 @@ AudioSendStream::AudioSendStream(
RTC_DCHECK(audio_state_);
RTC_DCHECK(channel_proxy_);
RTC_DCHECK(bitrate_allocator_);
RTC_DCHECK(transport);
// TODO(nisse): Eventually, we should have only media_transport. But for the
// time being, we can have either. When media transport is injected, there
// should be no rtp_transport, and below check should be strengthened to XOR
// (either rtp_transport or media_transport but not both).
RTC_DCHECK(rtp_transport || config.media_transport);
RTC_DCHECK(overall_call_lifetime_);

channel_proxy_->SetRTCPStatus(true);

@@ -158,17 +165,22 @@ AudioSendStream::AudioSendStream(
ConfigureStream(this, config, true);

pacer_thread_checker_.DetachFromThread();
// Signal congestion controller this object is ready for OnPacket* callbacks.
transport_->RegisterPacketFeedbackObserver(this);
if (rtp_transport_) {
// Signal congestion controller this object is ready for OnPacket*
// callbacks.
rtp_transport_->RegisterPacketFeedbackObserver(this);
}
}

AudioSendStream::~AudioSendStream() {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_LOG(LS_INFO) << "~AudioSendStream: " << config_.rtp.ssrc;
RTC_DCHECK(!sending_);
transport_->DeRegisterPacketFeedbackObserver(this);
channel_proxy_->RegisterTransport(nullptr);
channel_proxy_->ResetSenderCongestionControlObjects();
if (rtp_transport_) {
rtp_transport_->DeRegisterPacketFeedbackObserver(this);
channel_proxy_->RegisterTransport(nullptr);
channel_proxy_->ResetSenderCongestionControlObjects();
}
// Lifetime can only be updated after deregistering
// |timed_send_transport_adapter_| in the underlying channel object to avoid
// data races in |active_lifetime_|.

@@ -272,14 +284,16 @@ void AudioSendStream::ConfigureStream(
// Probing in application limited region is only used in combination with
// send side congestion control, wich depends on feedback packets which
// requires transport sequence numbers to be enabled.
stream->transport_->EnablePeriodicAlrProbing(true);
bandwidth_observer = stream->transport_->GetBandwidthObserver();
if (stream->rtp_transport_) {
stream->rtp_transport_->EnablePeriodicAlrProbing(true);
bandwidth_observer = stream->rtp_transport_->GetBandwidthObserver();
}
}
if (stream->rtp_transport_) {
channel_proxy->RegisterSenderCongestionControlObjects(
stream->rtp_transport_, bandwidth_observer);
}

channel_proxy->RegisterSenderCongestionControlObjects(stream->transport_,
bandwidth_observer);
}

// MID RTP header extension.
if ((first_time || new_ids.mid != old_ids.mid ||
new_config.rtp.mid != old_config.rtp.mid) &&

@@ -312,7 +326,7 @@ void AudioSendStream::Start() {
!webrtc::field_trial::IsEnabled("WebRTC-Audio-SendSideBwe") ||
webrtc::field_trial::IsEnabled("WebRTC-Audio-ABWENoTWCC"))) {
// Audio BWE is enabled.
transport_->packet_sender()->SetAccountForAudioPackets(true);
rtp_transport_->packet_sender()->SetAccountForAudioPackets(true);
rtp_rtcp_module_->SetAsPartOfAllocation(true);
ConfigureBitrateObserver(config_.min_bitrate_bps, config_.max_bitrate_bps,
config_.bitrate_priority,

@@ -725,13 +739,13 @@ void AudioSendStream::ReconfigureBitrateObserver(
!new_config.has_dscp &&
(has_transport_sequence_number ||
!webrtc::field_trial::IsEnabled("WebRTC-Audio-SendSideBwe"))) {
stream->transport_->packet_sender()->SetAccountForAudioPackets(true);
stream->rtp_transport_->packet_sender()->SetAccountForAudioPackets(true);
stream->ConfigureBitrateObserver(
new_config.min_bitrate_bps, new_config.max_bitrate_bps,
new_config.bitrate_priority, has_transport_sequence_number);
stream->rtp_rtcp_module_->SetAsPartOfAllocation(true);
} else {
stream->transport_->packet_sender()->SetAccountForAudioPackets(false);
stream->rtp_transport_->packet_sender()->SetAccountForAudioPackets(false);
stream->RemoveBitrateObserver();
stream->rtp_rtcp_module_->SetAsPartOfAllocation(false);
}

@@ -45,7 +45,7 @@ class AudioSendStream final : public webrtc::AudioSendStream,
const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
rtc::TaskQueue* worker_queue,
ProcessThread* module_process_thread,
RtpTransportControllerSendInterface* transport,
RtpTransportControllerSendInterface* rtp_transport,
BitrateAllocatorInterface* bitrate_allocator,
RtcEventLog* event_log,
RtcpRttStats* rtcp_rtt_stats,

@@ -55,7 +55,7 @@ class AudioSendStream final : public webrtc::AudioSendStream,
AudioSendStream(const webrtc::AudioSendStream::Config& config,
const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
rtc::TaskQueue* worker_queue,
RtpTransportControllerSendInterface* transport,
RtpTransportControllerSendInterface* rtp_transport,
BitrateAllocatorInterface* bitrate_allocator,
RtcEventLog* event_log,
RtcpRttStats* rtcp_rtt_stats,

@@ -138,7 +138,7 @@ class AudioSendStream final : public webrtc::AudioSendStream,
bool sending_ = false;

BitrateAllocatorInterface* const bitrate_allocator_;
RtpTransportControllerSendInterface* const transport_;
RtpTransportControllerSendInterface* const rtp_transport_;

rtc::CriticalSection packet_loss_tracker_cs_;
TransportFeedbackPacketLossTracker packet_loss_tracker_

@@ -129,7 +129,7 @@ rtc::scoped_refptr<MockAudioEncoderFactory> SetupEncoderFactoryMock() {
struct ConfigHelper {
ConfigHelper(bool audio_bwe_enabled, bool expect_set_encoder_call)
: stream_config_(nullptr),
: stream_config_(/*send_transport=*/nullptr, /*media_transport=*/nullptr),
audio_processing_(new rtc::RefCountedObject<MockAudioProcessing>()),
bitrate_allocator_(&limit_observer_),
worker_queue_("ConfigHelper_worker_queue"),

@@ -318,7 +318,8 @@ struct ConfigHelper {
} // namespace

TEST(AudioSendStreamTest, ConfigToString) {
AudioSendStream::Config config(nullptr);
AudioSendStream::Config config(/*send_transport=*/nullptr,
/*media_transport=*/nullptr);
config.rtp.ssrc = kSsrc;
config.rtp.c_name = kCName;
config.min_bitrate_bps = 12000;

@@ -335,6 +336,7 @@ TEST(AudioSendStreamTest, ConfigToString) {
"{rtp: {ssrc: 1234, extensions: [{uri: "
"urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 2}], nack: "
"{rtp_history_ms: 0}, c_name: foo_name}, send_transport: null, "
"media_transport: null, "
"min_bitrate_bps: 12000, max_bitrate_bps: 34000, "
"send_codec_spec: {nack_enabled: true, transport_cc_enabled: false, "
"cng_payload_type: 42, payload_type: 103, "

@@ -51,12 +51,47 @@ constexpr int64_t kMinRetransmissionWindowMs = 30;
constexpr int kVoiceEngineMinMinPlayoutDelayMs = 0;
constexpr int kVoiceEngineMaxMinPlayoutDelayMs = 10000;

webrtc::FrameType WebrtcFrameTypeForMediaTransportFrameType(
MediaTransportEncodedAudioFrame::FrameType frame_type) {
switch (frame_type) {
case MediaTransportEncodedAudioFrame::FrameType::kSpeech:
return kAudioFrameSpeech;
break;

case MediaTransportEncodedAudioFrame::FrameType::
kDiscountinuousTransmission:
return kAudioFrameCN;
break;
}
}

WebRtcRTPHeader CreateWebrtcRTPHeaderForMediaTransportFrame(
const MediaTransportEncodedAudioFrame& frame,
uint64_t channel_id) {
webrtc::WebRtcRTPHeader webrtc_header = {};
webrtc_header.header.payloadType = frame.payload_type();
webrtc_header.header.payload_type_frequency = frame.sampling_rate_hz();
webrtc_header.header.timestamp = frame.starting_sample_index();
webrtc_header.header.sequenceNumber = frame.sequence_number();

webrtc_header.frameType =
WebrtcFrameTypeForMediaTransportFrameType(frame.frame_type());

webrtc_header.header.ssrc = static_cast<uint32_t>(channel_id);

// The rest are initialized by the RTPHeader constructor.
return webrtc_header;
}

} // namespace

int32_t ChannelReceive::OnReceivedPayloadData(
const uint8_t* payloadData,
size_t payloadSize,
const WebRtcRTPHeader* rtpHeader) {
// We should not be receiving any RTP packets if media_transport is set.
RTC_CHECK(!media_transport_);

if (!channel_state_.Get().playing) {
// Avoid inserting into NetEQ when we are not playing. Count the
// packet as discarded.

@@ -83,6 +118,27 @@ int32_t ChannelReceive::OnReceivedPayloadData(
return 0;
}

// MediaTransportAudioSinkInterface override.
void ChannelReceive::OnData(uint64_t channel_id,
MediaTransportEncodedAudioFrame frame) {
RTC_CHECK(media_transport_);

if (!channel_state_.Get().playing) {
// Avoid inserting into NetEQ when we are not playing. Count the
// packet as discarded.
return;
}

// Send encoded audio frame to Decoder / NetEq.
if (audio_coding_->IncomingPacket(
frame.encoded_data().data(), frame.encoded_data().size(),
CreateWebrtcRTPHeaderForMediaTransportFrame(frame, channel_id)) !=
0) {
RTC_DLOG(LS_ERROR) << "ChannelReceive::OnData: unable to "
"push data to the ACM";
}
}

AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
int sample_rate_hz,
AudioFrame* audio_frame) {

@@ -200,6 +256,7 @@ int ChannelReceive::PreferredSampleRate() const {
ChannelReceive::ChannelReceive(
ProcessThread* module_process_thread,
AudioDeviceModule* audio_device_module,
MediaTransportInterface* media_transport,
Transport* rtcp_send_transport,
RtcEventLog* rtc_event_log,
uint32_t remote_ssrc,

@@ -224,6 +281,7 @@ ChannelReceive::ChannelReceive(
_audioDeviceModulePtr(audio_device_module),
_outputGain(1.0f),
associated_send_channel_(nullptr),
media_transport_(media_transport),
frame_decryptor_(frame_decryptor),
crypto_options_(crypto_options) {
RTC_DCHECK(module_process_thread);

@@ -279,10 +337,19 @@ void ChannelReceive::Init() {
// be transmitted since the Transport object will then be invalid.
// RTCP is enabled by default.
_rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound);

if (media_transport_) {
media_transport_->SetReceiveAudioSink(this);
}
}

void ChannelReceive::Terminate() {
RTC_DCHECK(construction_thread_.CalledOnValidThread());

if (media_transport_) {
media_transport_->SetReceiveAudioSink(nullptr);
}

// Must be called on the same thread as Init().
rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL);

@@ -20,6 +20,7 @@
#include "api/call/audio_sink.h"
#include "api/call/transport.h"
#include "api/crypto/cryptooptions.h"
#include "api/media_transport_interface.h"
#include "api/rtpreceiverinterface.h"
#include "audio/audio_level.h"
#include "call/syncable.h"

@@ -103,11 +104,12 @@ class ChannelReceiveState {
State state_;
};

class ChannelReceive : public RtpData {
class ChannelReceive : public RtpData, public MediaTransportAudioSinkInterface {
public:
// Used for receive streams.
ChannelReceive(ProcessThread* module_process_thread,
AudioDeviceModule* audio_device_module,
MediaTransportInterface* media_transport,
Transport* rtcp_send_transport,
RtcEventLog* rtc_event_log,
uint32_t remote_ssrc,

@@ -165,6 +167,10 @@ class ChannelReceive : public RtpData {
int GetRTPStatistics(CallReceiveStatistics& stats); // NOLINT
void SetNACKStatus(bool enable, int maxNumberOfPackets);

// MediaTransportAudioSinkInterface override;
void OnData(uint64_t channel_id,
MediaTransportEncodedAudioFrame frame) override;

// From RtpData in the RTP/RTCP module
int32_t OnReceivedPayloadData(const uint8_t* payloadData,
size_t payloadSize,

@@ -259,6 +265,8 @@ class ChannelReceive : public RtpData {
rtc::ThreadChecker construction_thread_;

MediaTransportInterface* const media_transport_;

// E2EE Audio Frame Decryption
rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor_;
webrtc::CryptoOptions crypto_options_;

@@ -48,6 +48,24 @@ namespace {
constexpr int64_t kMaxRetransmissionWindowMs = 1000;
constexpr int64_t kMinRetransmissionWindowMs = 30;

MediaTransportEncodedAudioFrame::FrameType
MediaTransportFrameTypeForWebrtcFrameType(webrtc::FrameType frame_type) {
switch (frame_type) {
case kAudioFrameSpeech:
return MediaTransportEncodedAudioFrame::FrameType::kSpeech;
break;

case kAudioFrameCN:
return MediaTransportEncodedAudioFrame::FrameType::
kDiscontinuousTransmission;
break;

default:
RTC_CHECK(false) << "Unexpected frame type=" << frame_type;
break;
}
}

} // namespace

const int kTelephoneEventAttenuationdB = 10;

@@ -255,6 +273,23 @@ int32_t ChannelSend::SendData(FrameType frameType,
size_t payloadSize,
const RTPFragmentationHeader* fragmentation) {
RTC_DCHECK_RUN_ON(encoder_queue_);
rtc::ArrayView<const uint8_t> payload(payloadData, payloadSize);

if (media_transport() != nullptr) {
return SendMediaTransportAudio(frameType, payloadType, timeStamp, payload,
fragmentation);
} else {
return SendRtpAudio(frameType, payloadType, timeStamp, payload,
fragmentation);
}
}

int32_t ChannelSend::SendRtpAudio(FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
rtc::ArrayView<const uint8_t> payload,
const RTPFragmentationHeader* fragmentation) {
RTC_DCHECK_RUN_ON(encoder_queue_);
if (_includeAudioLevelIndication) {
// Store current audio level in the RTP/RTCP module.
// The level will be used in combination with voice-activity state

@@ -269,16 +304,15 @@ int32_t ChannelSend::SendData(FrameType frameType,
// TODO(benwright@webrtc.org) - Allocate enough to always encrypt inline.
// Allocate a buffer to hold the maximum possible encrypted payload.
size_t max_ciphertext_size = frame_encryptor_->GetMaxCiphertextByteSize(
cricket::MEDIA_TYPE_AUDIO, payloadSize);
cricket::MEDIA_TYPE_AUDIO, payload.size());
encrypted_audio_payload.SetSize(max_ciphertext_size);

// Encrypt the audio payload into the buffer.
size_t bytes_written = 0;
int encrypt_status = frame_encryptor_->Encrypt(
cricket::MEDIA_TYPE_AUDIO, _rtpRtcpModule->SSRC(),
/*additional_data=*/nullptr,
rtc::ArrayView<const uint8_t>(payloadData, payloadSize),
encrypted_audio_payload, &bytes_written);
/*additional_data=*/nullptr, payload, encrypted_audio_payload,
&bytes_written);
if (encrypt_status != 0) {
RTC_DLOG(LS_ERROR) << "Channel::SendData() failed encrypt audio payload: "
<< encrypt_status;

@@ -287,8 +321,7 @@ int32_t ChannelSend::SendData(FrameType frameType,
// Resize the buffer to the exact number of bytes actually used.
encrypted_audio_payload.SetSize(bytes_written);
// Rewrite the payloadData and size to the new encrypted payload.
payloadData = encrypted_audio_payload.data();
payloadSize = encrypted_audio_payload.size();
payload = encrypted_audio_payload;
} else if (crypto_options_.sframe.require_frame_encryption) {
RTC_DLOG(LS_ERROR) << "Channel::SendData() failed sending audio payload: "
<< "A frame encryptor is required but one is not set.";

@@ -298,12 +331,13 @@ int32_t ChannelSend::SendData(FrameType frameType,
// Push data from ACM to RTP/RTCP-module to deliver audio frame for
// packetization.
// This call will trigger Transport::SendPacket() from the RTP/RTCP module.
if (!_rtpRtcpModule->SendOutgoingData(
(FrameType&)frameType, payloadType, timeStamp,
// Leaving the time when this frame was
// received from the capture device as
// undefined for voice for now.
-1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
if (!_rtpRtcpModule->SendOutgoingData((FrameType&)frameType, payloadType,
timeStamp,
// Leaving the time when this frame was
// received from the capture device as
// undefined for voice for now.
-1, payload.data(), payload.size(),
fragmentation, nullptr, nullptr)) {
RTC_DLOG(LS_ERROR)
<< "ChannelSend::SendData() failed to send data to RTP/RTCP module";
return -1;

@@ -312,9 +346,68 @@ int32_t ChannelSend::SendData(FrameType frameType,
return 0;
}

int32_t ChannelSend::SendMediaTransportAudio(
FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
rtc::ArrayView<const uint8_t> payload,
const RTPFragmentationHeader* fragmentation) {
RTC_DCHECK_RUN_ON(encoder_queue_);
// TODO(nisse): Use null _transportPtr for MediaTransport.
// RTC_DCHECK(_transportPtr == nullptr);
uint64_t channel_id;
int sampling_rate_hz;
{
rtc::CritScope cs(&media_transport_lock_);
if (media_transport_payload_type_ != payloadType) {
// Payload type is being changed, media_transport_sampling_frequency_,
// no longer current.
return -1;
}
sampling_rate_hz = media_transport_sampling_frequency_;
channel_id = media_transport_channel_id_;
}
const MediaTransportEncodedAudioFrame frame(
/*sampling_rate_hz=*/sampling_rate_hz,

// TODO(nisse): Timestamp and sample index are the same for all supported
// audio codecs except G722. Refactor audio coding module to only use
// sample index, and leave translation to RTP time, when needed, for
// RTP-specific code.
/*starting_sample_index=*/timeStamp,

// Sample count isn't conveniently available from the AudioCodingModule,
// and needs some refactoring to wire up in a good way. For now, left as
// zero.
/*sample_count=*/0,

/*sequence_number=*/media_transport_sequence_number_,
MediaTransportFrameTypeForWebrtcFrameType(frameType), payloadType,
std::vector<uint8_t>(payload.begin(), payload.end()));

// TODO(nisse): Introduce a MediaTransportSender object bound to a specific
// channel id.
RTCError rtc_error =
media_transport()->SendAudioFrame(channel_id, std::move(frame));

if (!rtc_error.ok()) {
RTC_LOG(LS_ERROR) << "Failed to send frame, rtc_error="
<< ToString(rtc_error.type()) << ", "
<< rtc_error.message();
return -1;
}

++media_transport_sequence_number_;

return 0;
}

bool ChannelSend::SendRtp(const uint8_t* data,
size_t len,
const PacketOptions& options) {
// We should not be sending RTP packets if media transport is available.
RTC_CHECK(!media_transport());

rtc::CritScope cs(&_callbackCritSect);

if (_transportPtr == NULL) {

@@ -356,6 +449,7 @@ int ChannelSend::PreferredSampleRate() const {
ChannelSend::ChannelSend(rtc::TaskQueue* encoder_queue,
ProcessThread* module_process_thread,
MediaTransportInterface* media_transport,
RtcpRttStats* rtcp_rtt_stats,
RtcEventLog* rtc_event_log,
FrameEncryptorInterface* frame_encryptor,

@@ -380,6 +474,7 @@ ChannelSend::ChannelSend(rtc::TaskQueue* encoder_queue,
use_twcc_plr_for_ana_(
webrtc::field_trial::FindFullName("UseTwccPlrForAna") == "Enabled"),
encoder_queue_(encoder_queue),
media_transport_(media_transport),
frame_encryptor_(frame_encryptor),
crypto_options_(crypto_options) {
RTC_DCHECK(module_process_thread);

@@ -556,6 +651,13 @@ bool ChannelSend::SetEncoder(int payload_type,
}
}

if (media_transport_) {
rtc::CritScope cs(&media_transport_lock_);
media_transport_payload_type_ = payload_type;
// TODO(nisse): Currently broken for G722, since timestamps passed through
// encoder use RTP clock rather than sample count, and they differ for G722.
media_transport_sampling_frequency_ = encoder->RtpTimestampRateHz();
}
audio_coding_->SetEncoder(std::move(encoder));
return true;
}

@@ -720,6 +822,10 @@ int ChannelSend::SetLocalSSRC(unsigned int ssrc) {
RTC_DLOG(LS_ERROR) << "SetLocalSSRC() already sending";
return -1;
}
if (media_transport_) {
rtc::CritScope cs(&media_transport_lock_);
media_transport_channel_id_ = ssrc;
}
_rtpRtcpModule->SetSSRC(ssrc);
return 0;
}

@@ -20,6 +20,7 @@
#include "api/audio_codecs/audio_encoder.h"
#include "api/call/transport.h"
#include "api/crypto/cryptooptions.h"
#include "api/media_transport_interface.h"
#include "common_types.h" // NOLINT(build/include)
#include "modules/audio_coding/include/audio_coding_module.h"
#include "modules/audio_processing/rms_level.h"

@@ -119,6 +120,7 @@ class ChannelSend
ChannelSend(rtc::TaskQueue* encoder_queue,
ProcessThread* module_process_thread,
MediaTransportInterface* media_transport,
RtcpRttStats* rtcp_rtt_stats,
RtcEventLog* rtc_event_log,
FrameEncryptorInterface* frame_encryptor,

@@ -251,6 +253,21 @@ class ChannelSend
int GetRtpTimestampRateHz() const;

int32_t SendRtpAudio(FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
rtc::ArrayView<const uint8_t> payload,
const RTPFragmentationHeader* fragmentation);

int32_t SendMediaTransportAudio(FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
rtc::ArrayView<const uint8_t> payload,
const RTPFragmentationHeader* fragmentation);

// Return media transport or nullptr if using RTP.
MediaTransportInterface* media_transport() { return media_transport_; }

// Called on the encoder task queue when a new input audio frame is ready
// for encoding.
void ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input);

@@ -300,6 +317,20 @@ class ChannelSend
bool encoder_queue_is_active_ RTC_GUARDED_BY(encoder_queue_lock_) = false;
rtc::TaskQueue* encoder_queue_ = nullptr;

MediaTransportInterface* const media_transport_;
int media_transport_sequence_number_ RTC_GUARDED_BY(encoder_queue_) = 0;

rtc::CriticalSection media_transport_lock_;
// Currently set by SetLocalSSRC.
uint64_t media_transport_channel_id_ RTC_GUARDED_BY(&media_transport_lock_) =
0;
// Cache payload type and sampling frequency from most recent call to
// SetEncoder. Needed to set MediaTransportEncodedAudioFrame metadata, and
// invalidate on encoder change.
int media_transport_payload_type_ RTC_GUARDED_BY(&media_transport_lock_);
int media_transport_sampling_frequency_
RTC_GUARDED_BY(&media_transport_lock_);

// E2EE Audio Frame Encryption
rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor_;
// E2EE Frame Encryption Options

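Taken together, the ChannelSend changes route each encoded frame to exactly one of the two paths. A simplified paraphrase of the SendData() dispatch introduced in this diff (a sketch only; surrounding class and member declarations are omitted):

    int32_t ChannelSend::SendData(FrameType frameType, uint8_t payloadType,
                                  uint32_t timeStamp, const uint8_t* payloadData,
                                  size_t payloadSize,
                                  const RTPFragmentationHeader* fragmentation) {
      RTC_DCHECK_RUN_ON(encoder_queue_);
      rtc::ArrayView<const uint8_t> payload(payloadData, payloadSize);
      // If a media transport was injected, bypass the RTP/RTCP module entirely.
      if (media_transport() != nullptr) {
        return SendMediaTransportAudio(frameType, payloadType, timeStamp, payload,
                                       fragmentation);
      }
      // Otherwise keep the existing RTP path (optional frame encryption followed
      // by RtpRtcp::SendOutgoingData).
      return SendRtpAudio(frameType, payloadType, timeStamp, payload,
                          fragmentation);
    }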
audio/test/media_transport_test.cc (new file, 142 lines)

@@ -0,0 +1,142 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "api/test/loopback_media_transport.h"
#include "api/test/mock_audio_mixer.h"
#include "audio/audio_receive_stream.h"
#include "audio/audio_send_stream.h"
#include "call/test/mock_bitrate_allocator.h"
#include "logging/rtc_event_log/rtc_event_log.h"
#include "modules/audio_device/include/test_audio_device.h"
#include "modules/audio_mixer/audio_mixer_impl.h"
#include "modules/audio_processing/include/mock_audio_processing.h"
#include "modules/utility/include/process_thread.h"
#include "rtc_base/task_queue.h"
#include "rtc_base/timeutils.h"
#include "test/gtest.h"
#include "test/mock_transport.h"

namespace webrtc {
namespace test {

namespace {
constexpr int kPayloadTypeOpus = 17;
constexpr int kSamplingFrequency = 48000;
constexpr int kNumChannels = 2;
constexpr int kWantedSamples = 3000;
constexpr int kTestTimeoutMs = 2 * rtc::kNumMillisecsPerSec;

class TestRenderer : public TestAudioDeviceModule::Renderer {
 public:
  TestRenderer(int sampling_frequency, int num_channels, size_t wanted_samples)
      : sampling_frequency_(sampling_frequency),
        num_channels_(num_channels),
        wanted_samples_(wanted_samples) {}
  ~TestRenderer() override = default;

  int SamplingFrequency() const override { return sampling_frequency_; }
  int NumChannels() const override { return num_channels_; }

  bool Render(rtc::ArrayView<const int16_t> data) override {
    if (data.size() >= wanted_samples_) {
      return false;
    }
    wanted_samples_ -= data.size();
    return true;
  }

 private:
  const int sampling_frequency_;
  const int num_channels_;
  size_t wanted_samples_;
};

}  // namespace

TEST(AudioWithMediaTransport, DeliversAudio) {
  MediaTransportPair transport_pair;
  MockTransport rtcp_send_transport;
  MockTransport send_transport;
  std::unique_ptr<RtcEventLog> null_event_log = RtcEventLog::CreateNull();
  MockBitrateAllocator bitrate_allocator;

  rtc::scoped_refptr<TestAudioDeviceModule> audio_device =
      TestAudioDeviceModule::CreateTestAudioDeviceModule(
          TestAudioDeviceModule::CreatePulsedNoiseCapturer(
              /* max_amplitude= */ 10000, kSamplingFrequency, kNumChannels),
          absl::make_unique<TestRenderer>(kSamplingFrequency, kNumChannels,
                                          kWantedSamples));

  AudioState::Config audio_config;
  audio_config.audio_mixer = AudioMixerImpl::Create();
  // TODO(nisse): Is a mock AudioProcessing enough?
  audio_config.audio_processing =
      new rtc::RefCountedObject<MockAudioProcessing>();
  audio_config.audio_device_module = audio_device;
  rtc::scoped_refptr<AudioState> audio_state = AudioState::Create(audio_config);

  // TODO(nisse): Use some lossless codec?
  const SdpAudioFormat audio_format("opus", kSamplingFrequency, kNumChannels);

  // Setup receive stream;
  webrtc::AudioReceiveStream::Config receive_config;
  // TODO(nisse): Update AudioReceiveStream to not require rtcp_send_transport
  // when a MediaTransport is provided.
  receive_config.rtcp_send_transport = &rtcp_send_transport;
  receive_config.media_transport = transport_pair.first();
  receive_config.decoder_map.emplace(kPayloadTypeOpus, audio_format);
  receive_config.decoder_factory =
      CreateAudioDecoderFactory<AudioDecoderOpus>();

  std::unique_ptr<ProcessThread> receive_process_thread =
      ProcessThread::Create("audio recv thread");

  webrtc::internal::AudioReceiveStream receive_stream(
      /*rtp_stream_receiver_controller=*/nullptr,
      /*packet_router=*/nullptr, receive_process_thread.get(), receive_config,
      audio_state, null_event_log.get());

  // TODO(nisse): Update AudioSendStream to not require send_transport when a
  // MediaTransport is provided.
  AudioSendStream::Config send_config(&send_transport, transport_pair.second());
  send_config.send_codec_spec =
      AudioSendStream::Config::SendCodecSpec(kPayloadTypeOpus, audio_format);
  send_config.encoder_factory = CreateAudioEncoderFactory<AudioEncoderOpus>();
  rtc::TaskQueue send_tq("audio send queue");
  std::unique_ptr<ProcessThread> send_process_thread =
      ProcessThread::Create("audio send thread");
  TimeInterval life_time;
  webrtc::internal::AudioSendStream send_stream(
      send_config, audio_state, &send_tq, send_process_thread.get(),
      /*transport=*/nullptr, &bitrate_allocator, null_event_log.get(),
      /*rtcp_rtt_stats=*/nullptr, absl::optional<RtpState>(), &life_time);

  audio_device->Init();  // Starts thread.
  audio_device->RegisterAudioCallback(audio_state->audio_transport());

  receive_stream.Start();
  send_stream.Start();
  audio_device->StartPlayout();
  audio_device->StartRecording();

  EXPECT_TRUE(audio_device->WaitForPlayoutEnd(kTestTimeoutMs));

  audio_device->StopRecording();
  audio_device->StopPlayout();
  receive_stream.Stop();
  send_stream.Stop();
}

}  // namespace test
}  // namespace webrtc

@@ -20,6 +20,7 @@
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/call/transport.h"
#include "api/crypto/cryptooptions.h"
#include "api/media_transport_interface.h"
#include "api/rtpparameters.h"
#include "api/rtpreceiverinterface.h"
#include "call/rtp_config.h"

@@ -107,6 +108,8 @@ class AudioReceiveStream {
Transport* rtcp_send_transport = nullptr;

MediaTransportInterface* media_transport = nullptr;

// NetEq settings.
size_t jitter_buffer_max_packets = 50;
bool jitter_buffer_fast_accelerate = false;

@@ -21,8 +21,12 @@ namespace webrtc {
AudioSendStream::Stats::Stats() = default;
AudioSendStream::Stats::~Stats() = default;

AudioSendStream::Config::Config(Transport* send_transport,
MediaTransportInterface* media_transport)
: send_transport(send_transport), media_transport(media_transport) {}

AudioSendStream::Config::Config(Transport* send_transport)
: send_transport(send_transport) {}
: Config(send_transport, nullptr) {}

AudioSendStream::Config::~Config() = default;

@@ -31,6 +35,7 @@ std::string AudioSendStream::Config::ToString() const {
rtc::SimpleStringBuilder ss(buf);
ss << "{rtp: " << rtp.ToString();
ss << ", send_transport: " << (send_transport ? "(Transport)" : "null");
ss << ", media_transport: " << (media_transport ? "(Transport)" : "null");
ss << ", min_bitrate_bps: " << min_bitrate_bps;
ss << ", max_bitrate_bps: " << max_bitrate_bps;
ss << ", send_codec_spec: "

@@ -23,6 +23,7 @@
#include "api/call/transport.h"
#include "api/crypto/cryptooptions.h"
#include "api/crypto/frameencryptorinterface.h"
#include "api/media_transport_interface.h"
#include "api/rtpparameters.h"
#include "call/rtp_config.h"
#include "modules/audio_processing/include/audio_processing_statistics.h"

@@ -64,6 +65,7 @@ class AudioSendStream {
struct Config {
Config() = delete;
Config(Transport* send_transport, MediaTransportInterface* media_transport);
explicit Config(Transport* send_transport);
~Config();
std::string ToString() const;

@@ -95,6 +97,8 @@ class AudioSendStream {
// the entire life of the AudioSendStream and is owned by the API client.
Transport* send_transport = nullptr;

MediaTransportInterface* media_transport = nullptr;

// Bitrate limits used for variable audio bitrate streams. Set both to -1 to
// disable audio bitrate adaptation.
// Note: This is still an experimental feature and not ready for real usage.

@@ -231,7 +231,8 @@ void CallPerfTest::TestAudioVideoSync(FecMode fec,
CreateSendConfig(1, 0, 0, video_send_transport.get());
CreateMatchingReceiveConfigs(receive_transport.get());

AudioSendStream::Config audio_send_config(audio_send_transport.get());
AudioSendStream::Config audio_send_config(audio_send_transport.get(),
/*media_transport=*/nullptr);
audio_send_config.rtp.ssrc = kAudioSendSsrc;
audio_send_config.send_codec_spec = AudioSendStream::Config::SendCodecSpec(
kAudioSendPayloadType, {"ISAC", 16000, 1});

@@ -62,7 +62,8 @@ TEST(CallTest, ConstructDestruct) {
TEST(CallTest, CreateDestroy_AudioSendStream) {
CallHelper call;
AudioSendStream::Config config(nullptr);
AudioSendStream::Config config(/*send_transport=*/nullptr,
/*media_transport=*/nullptr);
config.rtp.ssrc = 42;
AudioSendStream* stream = call->CreateAudioSendStream(config);
EXPECT_NE(stream, nullptr);

@@ -84,7 +85,8 @@ TEST(CallTest, CreateDestroy_AudioReceiveStream) {
TEST(CallTest, CreateDestroy_AudioSendStreams) {
CallHelper call;
AudioSendStream::Config config(nullptr);
AudioSendStream::Config config(/*send_transport=*/nullptr,
/*media_transport=*/nullptr);
std::list<AudioSendStream*> streams;
for (int i = 0; i < 2; ++i) {
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {

@@ -142,7 +144,8 @@ TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_RecvFirst) {
AudioReceiveStream* recv_stream = call->CreateAudioReceiveStream(recv_config);
EXPECT_NE(recv_stream, nullptr);

AudioSendStream::Config send_config(nullptr);
AudioSendStream::Config send_config(/*send_transport=*/nullptr,
/*media_transport=*/nullptr);
send_config.rtp.ssrc = 777;
AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
EXPECT_NE(send_stream, nullptr);

@@ -160,7 +163,8 @@ TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_RecvFirst) {
TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_SendFirst) {
CallHelper call;
AudioSendStream::Config send_config(nullptr);
AudioSendStream::Config send_config(/*send_transport=*/nullptr,
/*media_transport=*/nullptr);
send_config.rtp.ssrc = 777;
AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
EXPECT_NE(send_stream, nullptr);

@@ -263,7 +267,8 @@ TEST(CallTest, RecreatingAudioStreamWithSameSsrcReusesRtpState) {
CallHelper call;

auto create_stream_and_get_rtp_state = [&](uint32_t ssrc) {
AudioSendStream::Config config(nullptr);
AudioSendStream::Config config(/*send_transport=*/nullptr,
/*media_transport=*/nullptr);
config.rtp.ssrc = ssrc;
AudioSendStream* stream = call->CreateAudioSendStream(config);
const RtpState rtp_state =

@@ -22,6 +22,7 @@
#include "absl/strings/match.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/call/audio_sink.h"
#include "api/media_transport_interface.h"
#include "media/base/audiosource.h"
#include "media/base/mediaconstants.h"
#include "media/base/streamparams.h"

@@ -709,12 +710,13 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
const absl::optional<std::string>& audio_network_adaptor_config,
webrtc::Call* call,
webrtc::Transport* send_transport,
webrtc::MediaTransportInterface* media_transport,
const rtc::scoped_refptr<webrtc::AudioEncoderFactory>& encoder_factory,
const absl::optional<webrtc::AudioCodecPairId> codec_pair_id,
rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor,
const webrtc::CryptoOptions& crypto_options)
: call_(call),
config_(send_transport),
config_(send_transport, media_transport),
send_side_bwe_with_overhead_(
webrtc::field_trial::IsEnabled("WebRTC-SendSideBwe-WithOverhead")),
max_send_bitrate_bps_(max_send_bitrate_bps),

@@ -1076,6 +1078,7 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
const std::vector<webrtc::RtpExtension>& extensions,
webrtc::Call* call,
webrtc::Transport* rtcp_send_transport,
webrtc::MediaTransportInterface* media_transport,
const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
const std::map<int, webrtc::SdpAudioFormat>& decoder_map,
absl::optional<webrtc::AudioCodecPairId> codec_pair_id,

@@ -1091,6 +1094,7 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
config_.rtp.nack.rtp_history_ms = use_nack ? kNackRtpHistoryMs : 0;
config_.rtp.extensions = extensions;
config_.rtcp_send_transport = rtcp_send_transport;
config_.media_transport = media_transport;
config_.jitter_buffer_max_packets = jitter_buffer_max_packets;
config_.jitter_buffer_fast_accelerate = jitter_buffer_fast_accelerate;
if (!stream_ids.empty()) {

@@ -1792,7 +1796,8 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
WebRtcAudioSendStream* stream = new WebRtcAudioSendStream(
ssrc, mid_, sp.cname, sp.id, send_codec_spec_, send_rtp_extensions_,
max_send_bitrate_bps_, audio_network_adaptor_config, call_, this,
engine()->encoder_factory_, codec_pair_id_, nullptr, crypto_options_);
media_transport(), engine()->encoder_factory_, codec_pair_id_, nullptr,
crypto_options_);
send_streams_.insert(std::make_pair(ssrc, stream));

// At this point the stream's local SSRC has been updated. If it is the first

@@ -1873,13 +1878,14 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
// Create a new channel for receiving audio data.
recv_streams_.insert(std::make_pair(
ssrc, new WebRtcAudioReceiveStream(
ssrc, receiver_reports_ssrc_, recv_transport_cc_enabled_,
recv_nack_enabled_, sp.stream_ids(), recv_rtp_extensions_,
call_, this, engine()->decoder_factory_, decoder_map_,
codec_pair_id_, engine()->audio_jitter_buffer_max_packets_,
engine()->audio_jitter_buffer_fast_accelerate_,
unsignaled_frame_decryptor_, crypto_options_)));
ssrc,
new WebRtcAudioReceiveStream(
ssrc, receiver_reports_ssrc_, recv_transport_cc_enabled_,
recv_nack_enabled_, sp.stream_ids(), recv_rtp_extensions_, call_,
this, media_transport(), engine()->decoder_factory_, decoder_map_,
codec_pair_id_, engine()->audio_jitter_buffer_max_packets_,
engine()->audio_jitter_buffer_fast_accelerate_,
unsignaled_frame_decryptor_, crypto_options_)));
recv_streams_[ssrc]->SetPlayout(playout_);

return true;

@@ -176,9 +176,10 @@ class TestAudioDeviceModuleImpl
uint32_t new_mic_level = 0;
if (recording_buffer_.size() > 0) {
audio_callback_->RecordedDataIsAvailable(
recording_buffer_.data(), recording_buffer_.size(), 2,
capturer_->NumChannels(), capturer_->SamplingFrequency(), 0, 0,
0, false, new_mic_level);
recording_buffer_.data(),
recording_buffer_.size() / capturer_->NumChannels(),
2 * capturer_->NumChannels(), capturer_->NumChannels(),
capturer_->SamplingFrequency(), 0, 0, 0, false, new_mic_level);
}
if (!keep_capturing) {
capturing_ = false;

@@ -191,9 +192,10 @@ class TestAudioDeviceModuleImpl
int64_t ntp_time_ms = -1;
const int sampling_frequency = renderer_->SamplingFrequency();
audio_callback_->NeedMorePlayData(
SamplesPerFrame(sampling_frequency), 2, renderer_->NumChannels(),
sampling_frequency, playout_buffer_.data(), samples_out,
&elapsed_time_ms, &ntp_time_ms);
SamplesPerFrame(sampling_frequency), 2 * renderer_->NumChannels(),
renderer_->NumChannels(), sampling_frequency,
playout_buffer_.data(), samples_out, &elapsed_time_ms,
&ntp_time_ms);
const bool keep_rendering =
renderer_->Render(rtc::ArrayView<const int16_t>(
playout_buffer_.data(), samples_out));

@@ -39,7 +39,8 @@ CallTest::CallTest()
send_event_log_(RtcEventLog::CreateNull()),
recv_event_log_(RtcEventLog::CreateNull()),
sender_call_transport_controller_(nullptr),
audio_send_config_(nullptr),
audio_send_config_(/*send_transport=*/nullptr,
/*media_transport=*/nullptr),
audio_send_stream_(nullptr),
bbr_network_controller_factory_(new BbrNetworkControllerFactory()),
fake_encoder_factory_([this]() {

@@ -259,7 +260,8 @@ void CallTest::CreateAudioAndFecSendConfigs(size_t num_audio_streams,
RTC_DCHECK_LE(num_audio_streams, 1);
RTC_DCHECK_LE(num_flexfec_streams, 1);
if (num_audio_streams > 0) {
AudioSendStream::Config audio_send_config(send_transport);
AudioSendStream::Config audio_send_config(send_transport,
/*media_transport=*/nullptr);
audio_send_config.rtp.ssrc = kAudioSendSsrc;
audio_send_config.send_codec_spec = AudioSendStream::Config::SendCodecSpec(
kAudioSendPayloadType, {"opus", 48000, 2, {{"stereo", "1"}}});

@@ -67,7 +67,8 @@ SendAudioStream::SendAudioStream(
rtc::scoped_refptr<AudioEncoderFactory> encoder_factory,
Transport* send_transport)
: sender_(sender), config_(config) {
AudioSendStream::Config send_config(send_transport);
AudioSendStream::Config send_config(send_transport,
/*media_transport=*/nullptr);
ssrc_ = sender->GetNextAudioSsrc();
send_config.rtp.ssrc = ssrc_;
SdpAudioFormat::Parameters sdp_params;

@@ -1255,7 +1255,8 @@ void VideoQualityTest::InitializeAudioDevice(Call::Config* send_call_config,
}

void VideoQualityTest::SetupAudio(Transport* transport) {
AudioSendStream::Config audio_send_config(transport);
AudioSendStream::Config audio_send_config(transport,
/*media_transport=*/nullptr);
audio_send_config.rtp.ssrc = kAudioSendSsrc;

// Add extension to enable audio send side BWE, and allow audio bit rate