diff --git a/api/peerconnectioninterface.h b/api/peerconnectioninterface.h
index e82fa7559d..5b9df680a0 100644
--- a/api/peerconnectioninterface.h
+++ b/api/peerconnectioninterface.h
@@ -788,21 +788,6 @@ class PeerConnectionInterface : public rtc::RefCountInterface {
       std::unique_ptr<rtc::BitrateAllocationStrategy>
           bitrate_allocation_strategy) {}
 
-  // Enable/disable playout of received audio streams. Enabled by default. Note
-  // that even if playout is enabled, streams will only be played out if the
-  // appropriate SDP is also applied. Setting |playout| to false will stop
-  // playout of the underlying audio device but starts a task which will poll
-  // for audio data every 10ms to ensure that audio processing happens and the
-  // audio statistics are updated.
-  // TODO(henrika): deprecate and remove this.
-  virtual void SetAudioPlayout(bool playout) {}
-
-  // Enable/disable recording of transmitted audio streams. Enabled by default.
-  // Note that even if recording is enabled, streams will only be recorded if
-  // the appropriate SDP is also applied.
-  // TODO(henrika): deprecate and remove this.
-  virtual void SetAudioRecording(bool recording) {}
-
   // Returns the current SignalingState.
   virtual SignalingState signaling_state() = 0;
diff --git a/api/peerconnectionproxy.h b/api/peerconnectionproxy.h
index 78fe4027db..a8ea3fa360 100644
--- a/api/peerconnectionproxy.h
+++ b/api/peerconnectionproxy.h
@@ -100,8 +100,6 @@ BEGIN_SIGNALING_PROXY_MAP(PeerConnection)
   PROXY_METHOD1(bool,
                 RemoveIceCandidates,
                 const std::vector<cricket::Candidate>&);
-  PROXY_METHOD1(void, SetAudioPlayout, bool)
-  PROXY_METHOD1(void, SetAudioRecording, bool)
   PROXY_METHOD1(void, RegisterUMAObserver, UMAObserver*)
   PROXY_METHOD1(RTCError, SetBitrate, const BitrateParameters&);
   PROXY_METHOD1(void,
diff --git a/audio/BUILD.gn b/audio/BUILD.gn
index 80545ca7fa..a9ca0d55ab 100644
--- a/audio/BUILD.gn
+++ b/audio/BUILD.gn
@@ -23,8 +23,6 @@ rtc_static_library("audio") {
     "audio_transport_proxy.cc",
     "audio_transport_proxy.h",
     "conversion.h",
-    "null_audio_poller.cc",
-    "null_audio_poller.h",
     "scoped_voe_interface.h",
     "time_interval.cc",
     "time_interval.h",
@@ -54,7 +52,6 @@ rtc_static_library("audio") {
     "../modules/pacing:pacing",
    "../modules/remote_bitrate_estimator:remote_bitrate_estimator",
    "../modules/rtp_rtcp:rtp_rtcp",
-    "../rtc_base:rtc_base",
    "../rtc_base:rtc_base_approved",
    "../rtc_base:rtc_task_queue",
    "../system_wrappers",
diff --git a/audio/audio_state.cc b/audio/audio_state.cc
index 9b5f74f598..2a84f5c92a 100644
--- a/audio/audio_state.cc
+++ b/audio/audio_state.cc
@@ -12,11 +12,8 @@
 
 #include "modules/audio_device/include/audio_device.h"
 #include "rtc_base/atomicops.h"
-#include "rtc_base/bind.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
-#include "rtc_base/ptr_util.h"
-#include "rtc_base/thread.h"
 #include "voice_engine/transmit_mixer.h"
 
 namespace webrtc {
@@ -62,40 +59,6 @@ bool AudioState::typing_noise_detected() const {
   return transmit_mixer->typing_noise_detected();
 }
 
-void AudioState::SetPlayout(bool enabled) {
-  LOG(INFO) << "SetPlayout(" << enabled << ")";
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  const bool currently_enabled = (null_audio_poller_ == nullptr);
-  if (enabled == currently_enabled) {
-    return;
-  }
-  VoEBase* const voe = VoEBase::GetInterface(voice_engine());
-  RTC_DCHECK(voe);
-  if (enabled) {
-    null_audio_poller_.reset();
-  }
-  // Will stop/start playout of the underlying device, if necessary, and
-  // remember the setting for when it receives subsequent calls of
-  // StartPlayout.
-  voe->SetPlayout(enabled);
-  if (!enabled) {
-    null_audio_poller_ =
-        rtc::MakeUnique<NullAudioPoller>(&audio_transport_proxy_);
-  }
-}
-
-void AudioState::SetRecording(bool enabled) {
-  LOG(INFO) << "SetRecording(" << enabled << ")";
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  // TODO(henrika): keep track of state as in SetPlayout().
-  VoEBase* const voe = VoEBase::GetInterface(voice_engine());
-  RTC_DCHECK(voe);
-  // Will stop/start recording of the underlying device, if necessary, and
-  // remember the setting for when it receives subsequent calls of
-  // StartPlayout.
-  voe->SetRecording(enabled);
-}
-
 // Reference count; implementation copied from rtc::RefCountedObject.
 void AudioState::AddRef() const {
   rtc::AtomicOps::Increment(&ref_count_);
diff --git a/audio/audio_state.h b/audio/audio_state.h
index 023c7b1efd..86d60b6813 100644
--- a/audio/audio_state.h
+++ b/audio/audio_state.h
@@ -11,10 +11,7 @@
 #ifndef AUDIO_AUDIO_STATE_H_
 #define AUDIO_AUDIO_STATE_H_
 
-#include <memory>
-
 #include "audio/audio_transport_proxy.h"
-#include "audio/null_audio_poller.h"
 #include "audio/scoped_voe_interface.h"
 #include "call/audio_state.h"
 #include "rtc_base/constructormagic.h"
@@ -36,9 +33,6 @@ class AudioState final : public webrtc::AudioState {
     return config_.audio_processing.get();
   }
 
-  void SetPlayout(bool enabled) override;
-  void SetRecording(bool enabled) override;
-
   VoiceEngine* voice_engine();
   rtc::scoped_refptr<AudioMixer> mixer();
   bool typing_noise_detected() const;
@@ -63,11 +57,6 @@ class AudioState final : public webrtc::AudioState {
   // recorded audio to the VoE AudioTransport.
   AudioTransportProxy audio_transport_proxy_;
 
-  // Null audio poller is used to continue polling the audio streams if audio
-  // playout is disabled so that audio processing still happens and the audio
-  // stats are still updated.
-  std::unique_ptr<NullAudioPoller> null_audio_poller_;
-
   RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioState);
 };
 }  // namespace internal
diff --git a/audio/null_audio_poller.cc b/audio/null_audio_poller.cc
deleted file mode 100644
index c22b3d8791..0000000000
--- a/audio/null_audio_poller.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/null_audio_poller.h"
-#include "rtc_base/logging.h"
-#include "rtc_base/thread.h"
-
-namespace webrtc {
-namespace internal {
-
-namespace {
-
-constexpr int64_t kPollDelayMs = 10;  // WebRTC uses 10ms by default
-
-constexpr size_t kNumChannels = 1;
-constexpr uint32_t kSamplesPerSecond = 48000;            // 48kHz
-constexpr size_t kNumSamples = kSamplesPerSecond / 100;  // 10ms of samples
-
-}  // namespace
-
-NullAudioPoller::NullAudioPoller(AudioTransport* audio_transport)
-    : audio_transport_(audio_transport),
-      reschedule_at_(rtc::TimeMillis() + kPollDelayMs) {
-  RTC_DCHECK(audio_transport);
-  OnMessage(nullptr);  // Start the poll loop.
-}
-
-NullAudioPoller::~NullAudioPoller() {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  rtc::Thread::Current()->Clear(this);
-}
-
-void NullAudioPoller::OnMessage(rtc::Message* msg) {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-
-  // Buffer to hold the audio samples.
-  int16_t buffer[kNumSamples * kNumChannels];
-  // Output variables from |NeedMorePlayData|.
-  size_t n_samples;
-  int64_t elapsed_time_ms;
-  int64_t ntp_time_ms;
-  audio_transport_->NeedMorePlayData(kNumSamples, sizeof(int16_t), kNumChannels,
-                                     kSamplesPerSecond, buffer, n_samples,
-                                     &elapsed_time_ms, &ntp_time_ms);
-
-  // Reschedule the next poll iteration. If, for some reason, the given
-  // reschedule time has already passed, reschedule as soon as possible.
-  int64_t now = rtc::TimeMillis();
-  if (reschedule_at_ < now) {
-    reschedule_at_ = now;
-  }
-  rtc::Thread::Current()->PostAt(RTC_FROM_HERE, reschedule_at_, this, 0);
-
-  // Loop after next will be kPollDelayMs later.
-  reschedule_at_ += kPollDelayMs;
-}
-
-}  // namespace internal
-}  // namespace webrtc
diff --git a/audio/null_audio_poller.h b/audio/null_audio_poller.h
deleted file mode 100644
index 27c7e991a8..0000000000
--- a/audio/null_audio_poller.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef AUDIO_NULL_AUDIO_POLLER_H_
-#define AUDIO_NULL_AUDIO_POLLER_H_
-
-#include "modules/audio_device/include/audio_device_defines.h"
-#include "rtc_base/messagehandler.h"
-#include "rtc_base/thread_checker.h"
-
-namespace webrtc {
-namespace internal {
-
-class NullAudioPoller final : public rtc::MessageHandler {
- public:
-  explicit NullAudioPoller(AudioTransport* audio_transport);
-  ~NullAudioPoller();
-
- protected:
-  void OnMessage(rtc::Message* msg) override;
-
- private:
-  const rtc::ThreadChecker thread_checker_;
-  AudioTransport* const audio_transport_;
-  int64_t reschedule_at_;
-};
-
-}  // namespace internal
-}  // namespace webrtc
-
-#endif  // AUDIO_NULL_AUDIO_POLLER_H_
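For reference, the constants in the deleted null_audio_poller.cc work out to a 480-sample, mono, int16_t poll buffer (960 bytes) handed to NeedMorePlayData() every 10 ms. The standalone sketch below is not part of the patch; it only restates that arithmetic.

// Standalone sketch (not part of the patch): buffer math from the deleted
// null_audio_poller.cc. 10 ms of mono audio at 48 kHz is 480 samples,
// i.e. 960 bytes of int16_t per poll.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  constexpr std::size_t kNumChannels = 1;
  constexpr std::uint32_t kSamplesPerSecond = 48000;
  constexpr std::size_t kNumSamples = kSamplesPerSecond / 100;  // 10 ms.
  static_assert(kNumSamples == 480, "10 ms at 48 kHz is 480 samples");
  std::printf("bytes per 10 ms poll: %zu\n",
              kNumSamples * kNumChannels * sizeof(std::int16_t));  // Prints 960.
  return 0;
}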
diff --git a/call/audio_state.h b/call/audio_state.h
index ad411d1faa..7719388c3a 100644
--- a/call/audio_state.h
+++ b/call/audio_state.h
@@ -44,17 +44,6 @@ class AudioState : public rtc::RefCountInterface {
 
   virtual AudioProcessing* audio_processing() = 0;
 
-  // Enable/disable playout of the audio channels. Enabled by default.
-  // This will stop playout of the underlying audio device but start a task
-  // which will poll for audio data every 10ms to ensure that audio processing
-  // happens and the audio stats are updated.
-  virtual void SetPlayout(bool enabled) = 0;
-
-  // Enable/disable recording of the audio channels. Enabled by default.
-  // This will stop recording of the underlying audio device and no audio
-  // packets will be encoded or transmitted.
-  virtual void SetRecording(bool enabled) = 0;
-
   // TODO(solenberg): Replace scoped_refptr with shared_ptr once we can use it.
   static rtc::scoped_refptr<AudioState> Create(
       const AudioState::Config& config);
diff --git a/media/engine/fakewebrtcvoiceengine.h b/media/engine/fakewebrtcvoiceengine.h
index 55d31004ca..7e8e5c2b28 100644
--- a/media/engine/fakewebrtcvoiceengine.h
+++ b/media/engine/fakewebrtcvoiceengine.h
@@ -99,8 +99,6 @@ class FakeWebRtcVoiceEngine : public webrtc::VoEBase {
   WEBRTC_STUB(StartSend, (int channel));
   WEBRTC_STUB(StopPlayout, (int channel));
   WEBRTC_STUB(StopSend, (int channel));
-  WEBRTC_STUB(SetPlayout, (bool enable));
-  WEBRTC_STUB(SetRecording, (bool enable));
 
   size_t GetNetEqCapacity() const {
     auto ch = channels_.find(last_channel_);
diff --git a/pc/BUILD.gn b/pc/BUILD.gn
index ac78dfed84..116c1acec1 100644
--- a/pc/BUILD.gn
+++ b/pc/BUILD.gn
@@ -183,7 +183,6 @@ rtc_static_library("peerconnection") {
     "../rtc_base:rtc_base_approved",
     "../stats",
     "../system_wrappers:system_wrappers",
-    "../voice_engine:voice_engine",
   ]
 
   public_deps = [
diff --git a/pc/peerconnection.cc b/pc/peerconnection.cc
index 89452c0068..03d34d0246 100644
--- a/pc/peerconnection.cc
+++ b/pc/peerconnection.cc
@@ -1323,30 +1323,6 @@ void PeerConnection::SetBitrateAllocationStrategy(
   call_->SetBitrateAllocationStrategy(std::move(bitrate_allocation_strategy));
 }
 
-void PeerConnection::SetAudioPlayout(bool playout) {
-  if (!worker_thread()->IsCurrent()) {
-    worker_thread()->Invoke<void>(
-        RTC_FROM_HERE,
-        rtc::Bind(&PeerConnection::SetAudioPlayout, this, playout));
-    return;
-  }
-  auto audio_state =
-      factory_->channel_manager()->media_engine()->GetAudioState();
-  audio_state->SetPlayout(playout);
-}
-
-void PeerConnection::SetAudioRecording(bool recording) {
-  if (!worker_thread()->IsCurrent()) {
-    worker_thread()->Invoke<void>(
-        RTC_FROM_HERE,
-        rtc::Bind(&PeerConnection::SetAudioRecording, this, recording));
-    return;
-  }
-  auto audio_state =
-      factory_->channel_manager()->media_engine()->GetAudioState();
-  audio_state->SetRecording(recording);
-}
-
 std::unique_ptr<rtc::SSLCertificate>
 PeerConnection::GetRemoteAudioSSLCertificate() {
   if (!session_) {
diff --git a/pc/peerconnection.h b/pc/peerconnection.h
index 97068b9ee1..9163c36631 100644
--- a/pc/peerconnection.h
+++ b/pc/peerconnection.h
@@ -143,9 +143,6 @@ class PeerConnection : public PeerConnectionInterface,
       std::unique_ptr<rtc::BitrateAllocationStrategy>
          bitrate_allocation_strategy) override;
 
-  void SetAudioPlayout(bool playout) override;
-  void SetAudioRecording(bool recording) override;
-
   RTC_DEPRECATED bool StartRtcEventLog(rtc::PlatformFile file,
                                        int64_t max_size_bytes) override;
   bool StartRtcEventLog(std::unique_ptr<RtcEventLogOutput> output) override;
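The removed PeerConnection methods above only marshaled the call onto the worker thread and forwarded it to AudioState. For context, the intended call pattern (per the integration test removed below and the Java documentation removed later in this patch) looked roughly like the hedged sketch that follows; RunCallWithDeferredPlayout, PlayRingtone, and WaitForAccept are hypothetical application code, not WebRTC APIs.

#include "api/peerconnectioninterface.h"

// Hypothetical application hooks; not WebRTC APIs.
void PlayRingtone();
void WaitForAccept();

// Sketch of how the now-removed SetAudioPlayout() was meant to be used:
// received audio keeps being decoded and processed, but the device stays
// silent until the application decides playout should start.
void RunCallWithDeferredPlayout(
    rtc::scoped_refptr<webrtc::PeerConnectionInterface> pc) {
  pc->SetAudioPlayout(false);  // Defer playout of received audio.
  PlayRingtone();              // Application-defined.
  WaitForAccept();             // Application-defined.
  pc->SetAudioPlayout(true);   // Start playout at the exact desired moment.
}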
diff --git a/pc/peerconnection_integrationtest.cc b/pc/peerconnection_integrationtest.cc
index 305ed94f03..3289ccbf59 100644
--- a/pc/peerconnection_integrationtest.cc
+++ b/pc/peerconnection_integrationtest.cc
@@ -3564,76 +3564,6 @@ TEST_F(PeerConnectionIntegrationTest, MediaFlowsWhenCandidatesSetOnlyInSdp) {
                                   kMaxWaitForFramesMs);
 }
 
-// Test that SetAudioPlayout can be used to disable audio playout from the
-// start, then later enable it. This may be useful, for example, if the caller
-// needs to play a local ringtone until some event occurs, after which it
-// switches to playing the received audio.
-TEST_F(PeerConnectionIntegrationTest, DisableAndEnableAudioPlayout) {
-  ASSERT_TRUE(CreatePeerConnectionWrappers());
-  ConnectFakeSignaling();
-
-  // Set up audio-only call where audio playout is disabled on caller's side.
-  caller()->pc()->SetAudioPlayout(false);
-  caller()->AddAudioOnlyMediaStream();
-  callee()->AddAudioOnlyMediaStream();
-  caller()->CreateAndSetAndSignalOffer();
-  ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-
-  // Pump messages for a second.
-  WAIT(false, 1000);
-  // Since audio playout is disabled, the caller shouldn't have received
-  // anything (at the playout level, at least).
-  EXPECT_EQ(0, caller()->audio_frames_received());
-  // As a sanity check, make sure the callee (for which playout isn't disabled)
-  // did still see frames on its audio level.
-  ASSERT_GT(callee()->audio_frames_received(), 0);
-
-  // Enable playout again, and ensure audio starts flowing.
-  caller()->pc()->SetAudioPlayout(true);
-  ExpectNewFramesReceivedWithWait(kDefaultExpectedAudioFrameCount, 0,
-                                  kDefaultExpectedAudioFrameCount, 0,
-                                  kMaxWaitForFramesMs);
-}
-
-double GetAudioEnergyStat(PeerConnectionWrapper* pc) {
-  auto report = pc->NewGetStats();
-  auto track_stats_list =
-      report->GetStatsOfType<webrtc::RTCMediaStreamTrackStats>();
-  const webrtc::RTCMediaStreamTrackStats* remote_track_stats = nullptr;
-  for (const auto* track_stats : track_stats_list) {
-    if (track_stats->remote_source.is_defined() &&
-        *track_stats->remote_source) {
-      remote_track_stats = track_stats;
-      break;
-    }
-  }
-
-  if (!remote_track_stats->total_audio_energy.is_defined()) {
-    return 0.0;
-  }
-  return *remote_track_stats->total_audio_energy;
-}
-
-// Test that if audio playout is disabled via the SetAudioPlayout() method, then
-// incoming audio is still processed and statistics are generated.
-TEST_F(PeerConnectionIntegrationTest,
-       DisableAudioPlayoutStillGeneratesAudioStats) {
-  ASSERT_TRUE(CreatePeerConnectionWrappers());
-  ConnectFakeSignaling();
-
-  // Set up audio-only call where playout is disabled but audio-processing is
-  // still active.
-  caller()->AddAudioOnlyMediaStream();
-  callee()->AddAudioOnlyMediaStream();
-  caller()->pc()->SetAudioPlayout(false);
-
-  caller()->CreateAndSetAndSignalOffer();
-  ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-
-  // Wait for the callee to receive audio stats.
-  EXPECT_TRUE_WAIT(GetAudioEnergyStat(caller()) > 0, kMaxWaitForFramesMs);
-}
-
 }  // namespace
 
 #endif  // if !defined(THREAD_SANITIZER)
diff --git a/sdk/android/api/org/webrtc/PeerConnection.java b/sdk/android/api/org/webrtc/PeerConnection.java
index 5dd8832f46..66e8075c97 100644
--- a/sdk/android/api/org/webrtc/PeerConnection.java
+++ b/sdk/android/api/org/webrtc/PeerConnection.java
@@ -363,18 +363,6 @@ public class PeerConnection {
 
   public native void setRemoteDescription(SdpObserver observer, SessionDescription sdp);
 
-  // True if remote audio should be played out. Defaults to true.
-  // Note that even if playout is enabled, streams will only be played out if
-  // the appropriate SDP is also applied. The main purpose of this API is to
-  // be able to control the exact time when audio playout starts.
-  public native void setAudioPlayout(boolean playout);
-
-  // True if local audio shall be recorded. Defaults to true.
-  // Note that even if recording is enabled, streams will only be recorded if
-  // the appropriate SDP is also applied. The main purpose of this API is to
-  // be able to control the exact time when audio recording starts.
-  public native void setAudioRecording(boolean recording);
-
   public boolean setConfiguration(RTCConfiguration config) {
     return nativeSetConfiguration(config, nativeObserver);
   }
diff --git a/sdk/android/src/jni/pc/peerconnection_jni.cc b/sdk/android/src/jni/pc/peerconnection_jni.cc
index 3c6bf7697c..a542c28e48 100644
--- a/sdk/android/src/jni/pc/peerconnection_jni.cc
+++ b/sdk/android/src/jni/pc/peerconnection_jni.cc
@@ -166,22 +166,6 @@ JNI_FUNCTION_DECLARATION(void,
       observer, JavaToNativeSessionDescription(jni, j_sdp));
 }
 
-JNI_FUNCTION_DECLARATION(void,
-                         PeerConnection_setAudioPlayout,
-                         JNIEnv* jni,
-                         jobject j_pc,
-                         jboolean playout) {
-  ExtractNativePC(jni, j_pc)->SetAudioPlayout(playout);
-}
-
-JNI_FUNCTION_DECLARATION(void,
-                         PeerConnection_setAudioRecording,
-                         JNIEnv* jni,
-                         jobject j_pc,
-                         jboolean recording) {
-  ExtractNativePC(jni, j_pc)->SetAudioRecording(recording);
-}
-
 JNI_FUNCTION_DECLARATION(jboolean,
                          PeerConnection_nativeSetConfiguration,
                          JNIEnv* jni,
diff --git a/voice_engine/include/voe_base.h b/voice_engine/include/voe_base.h
index a62a2b4520..94ac6ac461 100644
--- a/voice_engine/include/voe_base.h
+++ b/voice_engine/include/voe_base.h
@@ -139,21 +139,6 @@ class WEBRTC_DLLEXPORT VoEBase {
   // Stops sending packets from a specified |channel|.
   virtual int StopSend(int channel) = 0;
 
-  // Enable or disable playout to the underlying device. Takes precedence over
-  // StartPlayout. Though calls to StartPlayout are remembered; if
-  // SetPlayout(true) is called after StartPlayout, playout will be started.
-  //
-  // By default, playout is enabled.
-  virtual int SetPlayout(bool enabled) = 0;
-
-  // Enable or disable recording (which drives sending of encoded audio packets)
-  // from the underlying device. Takes precedence over StartSend. Though calls
-  // to StartSend are remembered; if SetRecording(true) is called after
-  // StartSend, recording will be started.
-  //
-  // By default, recording is enabled.
-  virtual int SetRecording(bool enabled) = 0;
-
   // TODO(xians): Make the interface pure virtual after libjingle
   // implements the interface in its FakeWebRtcVoiceEngine.
   virtual AudioTransport* audio_transport() { return NULL; }
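The comment deleted from VoEBase above describes a precedence rule: SetPlayout() overrides the device state driven by StartPlayout(), but StartPlayout() calls are remembered. A minimal sketch of that pre-patch behavior, assuming voe is a VoEBase* obtained via VoEBase::GetInterface() and channel is a valid channel id:

#include "voice_engine/include/voe_base.h"

// Sketch of the precedence described in the removed VoEBase comment
// (pre-patch behavior; this API no longer exists after the change).
void DeferredPlayoutSketch(webrtc::VoEBase* voe, int channel) {
  voe->SetPlayout(false);      // Device playout disabled; takes precedence.
  voe->StartPlayout(channel);  // Remembered, but the device stays stopped.
  voe->SetPlayout(true);       // Device playout starts now, because a
                               // StartPlayout() call was remembered.
}

After this change, StartPlayout()/StopPlayout() drive the audio device directly, as shown in the voe_base_impl.cc hunks that follow.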
diff --git a/voice_engine/voe_base_impl.cc b/voice_engine/voe_base_impl.cc
index 9e7a5f46f9..b14bf954bb 100644
--- a/voice_engine/voe_base_impl.cc
+++ b/voice_engine/voe_base_impl.cc
@@ -407,7 +407,7 @@ int32_t VoEBaseImpl::StartPlayout() {
       LOG_F(LS_ERROR) << "Failed to initialize playout";
       return -1;
     }
-    if (playout_enabled_ && shared_->audio_device()->StartPlayout() != 0) {
+    if (shared_->audio_device()->StartPlayout() != 0) {
       LOG_F(LS_ERROR) << "Failed to start playout";
       return -1;
     }
@@ -416,10 +416,7 @@ int32_t VoEBaseImpl::StartPlayout() {
 }
 
 int32_t VoEBaseImpl::StopPlayout() {
-  if (!playout_enabled_) {
-    return 0;
-  }
-  // Stop audio-device playing if no channel is playing out.
+  // Stop audio-device playing if no channel is playing out
   if (shared_->NumOfPlayingChannels() == 0) {
     if (shared_->audio_device()->StopPlayout() != 0) {
       LOG(LS_ERROR) << "StopPlayout() failed to stop playout";
@@ -430,12 +427,15 @@ int32_t VoEBaseImpl::StopPlayout() {
 }
 
 int32_t VoEBaseImpl::StartSend() {
-  if (!shared_->audio_device()->Recording()) {
+  if (!shared_->audio_device()->RecordingIsInitialized() &&
+      !shared_->audio_device()->Recording()) {
     if (shared_->audio_device()->InitRecording() != 0) {
       LOG_F(LS_ERROR) << "Failed to initialize recording";
       return -1;
     }
-    if (recording_enabled_ && shared_->audio_device()->StartRecording() != 0) {
+  }
+  if (!shared_->audio_device()->Recording()) {
+    if (shared_->audio_device()->StartRecording() != 0) {
       LOG_F(LS_ERROR) << "Failed to start recording";
       return -1;
     }
@@ -444,11 +444,8 @@ int32_t VoEBaseImpl::StartSend() {
 }
 
 int32_t VoEBaseImpl::StopSend() {
-  if (!recording_enabled_) {
-    return 0;
-  }
-  // Stop audio-device recording if no channel is recording.
   if (shared_->NumOfSendingChannels() == 0) {
+    // Stop audio-device recording if no channel is recording
     if (shared_->audio_device()->StopRecording() != 0) {
       LOG(LS_ERROR) << "StopSend() failed to stop recording";
       return -1;
@@ -459,58 +456,6 @@ int32_t VoEBaseImpl::StopSend() {
   return 0;
 }
 
-int32_t VoEBaseImpl::SetPlayout(bool enabled) {
-  LOG(INFO) << "SetPlayout(" << enabled << ")";
-  if (playout_enabled_ == enabled) {
-    return 0;
-  }
-  playout_enabled_ = enabled;
-  if (shared_->NumOfPlayingChannels() == 0) {
-    // If there are no channels attempting to play out yet, there's nothing to
-    // be done; we should be in a "not playing out" state either way.
-    return 0;
-  }
-  int32_t ret;
-  if (enabled) {
-    ret = shared_->audio_device()->StartPlayout();
-    if (ret != 0) {
-      LOG(LS_ERROR) << "SetPlayout(true) failed to start playout";
-    }
-  } else {
-    ret = shared_->audio_device()->StopPlayout();
-    if (ret != 0) {
-      LOG(LS_ERROR) << "SetPlayout(false) failed to stop playout";
-    }
-  }
-  return ret;
-}
-
-int32_t VoEBaseImpl::SetRecording(bool enabled) {
-  LOG(INFO) << "SetRecording(" << enabled << ")";
-  if (recording_enabled_ == enabled) {
-    return 0;
-  }
-  recording_enabled_ = enabled;
-  if (shared_->NumOfSendingChannels() == 0) {
-    // If there are no channels attempting to record out yet, there's nothing to
-    // be done; we should be in a "not recording" state either way.
-    return 0;
-  }
-  int32_t ret;
-  if (enabled) {
-    ret = shared_->audio_device()->StartRecording();
-    if (ret != 0) {
-      LOG(LS_ERROR) << "SetRecording(true) failed to start recording";
-    }
-  } else {
-    ret = shared_->audio_device()->StopRecording();
-    if (ret != 0) {
-      LOG(LS_ERROR) << "SetRecording(false) failed to stop recording";
-    }
-  }
-  return ret;
-}
-
 int32_t VoEBaseImpl::TerminateInternal() {
   // Delete any remaining channel objects
   shared_->channel_manager().DestroyAllChannels();
diff --git a/voice_engine/voe_base_impl.h b/voice_engine/voe_base_impl.h
index e6471243a1..a3c4c1f40f 100644
--- a/voice_engine/voe_base_impl.h
+++ b/voice_engine/voe_base_impl.h
@@ -45,9 +45,6 @@ class VoEBaseImpl : public VoEBase,
   int StopPlayout(int channel) override;
   int StopSend(int channel) override;
 
-  int SetPlayout(bool enabled) override;
-  int SetRecording(bool enabled) override;
-
   AudioTransport* audio_transport() override { return this; }
 
   // AudioTransport
@@ -106,8 +103,6 @@ class VoEBaseImpl : public VoEBase,
 
   AudioFrame audioFrame_;
   voe::SharedData* shared_;
-  bool playout_enabled_ = true;
-  bool recording_enabled_ = true;
 };
 
 }  // namespace webrtc
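Pulled together from the voe_base_impl.cc hunks above, the post-patch recording start-up reduces to the flow below; a condensed sketch, assuming adm is the AudioDeviceModule returned by shared_->audio_device().

#include <cstdint>

#include "modules/audio_device/include/audio_device.h"

// Condensed sketch of the recording start-up flow after this change:
// initialize the device only if it is neither initialized nor running,
// then start it only if it is not already running.
int32_t StartRecordingIfNeeded(webrtc::AudioDeviceModule* adm) {
  if (!adm->RecordingIsInitialized() && !adm->Recording()) {
    if (adm->InitRecording() != 0) {
      return -1;  // Failed to initialize recording.
    }
  }
  if (!adm->Recording()) {
    if (adm->StartRecording() != 0) {
      return -1;  // Failed to start recording.
    }
  }
  return 0;
}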