diff --git a/audio/BUILD.gn b/audio/BUILD.gn
index cb8fcee46d..7ece107407 100644
--- a/audio/BUILD.gn
+++ b/audio/BUILD.gn
@@ -223,7 +223,10 @@ if (rtc_include_tests) {
       "utility:utility_tests",
       "//testing/gtest",
     ]
-    absl_deps = [ "//third_party/abseil-cpp/absl/memory" ]
+    absl_deps = [
+      "//third_party/abseil-cpp/absl/memory",
+      "//third_party/abseil-cpp/absl/types:optional",
+    ]
   }
 
   rtc_library("channel_receive_unittest") {
@@ -247,6 +250,9 @@ if (rtc_include_tests) {
       "../test:test_support",
       "../test/time_controller",
     ]
-    absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+    absl_deps = [
+      "//third_party/abseil-cpp/absl/strings",
+      "//third_party/abseil-cpp/absl/types:optional",
+    ]
   }
 }
diff --git a/modules/audio_device/BUILD.gn b/modules/audio_device/BUILD.gn
index 6797da1855..2088e74dcd 100644
--- a/modules/audio_device/BUILD.gn
+++ b/modules/audio_device/BUILD.gn
@@ -88,6 +88,7 @@ rtc_library("audio_device_buffer") {
     "../../system_wrappers",
     "../../system_wrappers:metrics",
   ]
+  absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
 }
 
 rtc_library("audio_device_generic") {
@@ -453,6 +454,7 @@ rtc_source_set("mock_audio_device") {
     "../../api:make_ref_counted",
     "../../test:test_support",
   ]
+  absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
 }
 
 if (rtc_include_tests && !build_with_chromium) {
diff --git a/modules/audio_device/fine_audio_buffer.cc b/modules/audio_device/fine_audio_buffer.cc
index 86240da196..f483b8dc79 100644
--- a/modules/audio_device/fine_audio_buffer.cc
+++ b/modules/audio_device/fine_audio_buffer.cc
@@ -13,6 +13,7 @@
 #include <cstdint>
 #include <cstring>
 
+#include "api/array_view.h"
 #include "modules/audio_device/audio_device_buffer.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
@@ -107,7 +108,8 @@ void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
 
 void FineAudioBuffer::DeliverRecordedData(
     rtc::ArrayView<const int16_t> audio_buffer,
-    int record_delay_ms) {
+    int record_delay_ms,
+    absl::optional<int64_t> capture_time_ns) {
   RTC_DCHECK(IsReadyForRecord());
   // Always append new data and grow the buffer when needed.
   record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
@@ -118,7 +120,8 @@ void FineAudioBuffer::DeliverRecordedData(
       record_channels_ * record_samples_per_channel_10ms_;
   while (record_buffer_.size() >= num_elements_10ms) {
     audio_device_buffer_->SetRecordedBuffer(record_buffer_.data(),
-                                            record_samples_per_channel_10ms_);
+                                            record_samples_per_channel_10ms_,
+                                            capture_time_ns);
     audio_device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms);
     audio_device_buffer_->DeliverRecordedData();
     memmove(record_buffer_.data(), record_buffer_.data() + num_elements_10ms,
diff --git a/modules/audio_device/fine_audio_buffer.h b/modules/audio_device/fine_audio_buffer.h
index a6c3042bb2..7af41d3b21 100644
--- a/modules/audio_device/fine_audio_buffer.h
+++ b/modules/audio_device/fine_audio_buffer.h
@@ -11,6 +11,10 @@
 #ifndef MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
 #define MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
 
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/types/optional.h"
 #include "api/array_view.h"
 #include "rtc_base/buffer.h"
 
@@ -61,7 +65,12 @@ class FineAudioBuffer {
   // 5ms of data and sends a total of 10ms to WebRTC and clears the internal
   // cache. Call #3 restarts the scheme above.
   void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
-                           int record_delay_ms);
+                           int record_delay_ms) {
+    DeliverRecordedData(audio_buffer, record_delay_ms, absl::nullopt);
+  }
+  void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
+                           int record_delay_ms,
+                           absl::optional<int64_t> capture_time_ns);
 
  private:
   // Device buffer that works with 10ms chunks of data both for playout and
diff --git a/modules/audio_device/fine_audio_buffer_unittest.cc b/modules/audio_device/fine_audio_buffer_unittest.cc
index 36ea85f7dd..bb9fe63922 100644
--- a/modules/audio_device/fine_audio_buffer_unittest.cc
+++ b/modules/audio_device/fine_audio_buffer_unittest.cc
@@ -113,7 +113,7 @@ void RunFineBufferTest(int frame_size_in_samples) {
   {
     InSequence s;
     for (int j = 0; j < kNumberOfUpdateBufferCalls - 1; ++j) {
-      EXPECT_CALL(audio_device_buffer, SetRecordedBuffer(_, kSamplesPer10Ms))
+      EXPECT_CALL(audio_device_buffer, SetRecordedBuffer(_, kSamplesPer10Ms, _))
           .WillOnce(VerifyInputBuffer(j, kChannels * kSamplesPer10Ms))
           .RetiresOnSaturation();
     }
diff --git a/modules/audio_device/mock_audio_device_buffer.h b/modules/audio_device/mock_audio_device_buffer.h
index b0f54c20ff..0b276185da 100644
--- a/modules/audio_device/mock_audio_device_buffer.h
+++ b/modules/audio_device/mock_audio_device_buffer.h
@@ -11,6 +11,7 @@
 #ifndef MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
 #define MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
 
+#include "absl/types/optional.h"
 #include "modules/audio_device/audio_device_buffer.h"
 #include "test/gmock.h"
 
@@ -24,7 +25,9 @@ class MockAudioDeviceBuffer : public AudioDeviceBuffer {
   MOCK_METHOD(int32_t, GetPlayoutData, (void* audioBuffer), (override));
   MOCK_METHOD(int32_t,
               SetRecordedBuffer,
-              (const void* audioBuffer, size_t nSamples),
+              (const void* audioBuffer,
+               size_t nSamples,
+               absl::optional<int64_t> capture_time_ns),
               (override));
   MOCK_METHOD(void, SetVQEData, (int playDelayMS, int recDelayMS), (override));
   MOCK_METHOD(int32_t, DeliverRecordedData, (), (override));
diff --git a/sdk/objc/native/src/audio/audio_device_ios.h b/sdk/objc/native/src/audio/audio_device_ios.h
index a86acb56fe..4ef4d0b5df 100644
--- a/sdk/objc/native/src/audio/audio_device_ios.h
+++ b/sdk/objc/native/src/audio/audio_device_ios.h
@@ -299,6 +299,10 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
 
   // Avoids running pending task after `this` is Terminated.
   rtc::scoped_refptr<PendingTaskSafetyFlag> safety_ = PendingTaskSafetyFlag::Create();
+
+  // Ratio between mach tick units and nanoseconds. Used to convert mach tick
+  // units to nanoseconds.
+  double machTickUnitsToNanoseconds_;
 };
 }  // namespace ios_adm
 }  // namespace webrtc
diff --git a/sdk/objc/native/src/audio/audio_device_ios.mm b/sdk/objc/native/src/audio/audio_device_ios.mm
index dd2c11bdd2..78420ec232 100644
--- a/sdk/objc/native/src/audio/audio_device_ios.mm
+++ b/sdk/objc/native/src/audio/audio_device_ios.mm
@@ -13,6 +13,7 @@
 #include "audio_device_ios.h"
 
+#include <mach/mach_time.h>
 #include <cmath>
 
 #include "api/array_view.h"
@@ -110,6 +111,9 @@ AudioDeviceIOS::AudioDeviceIOS(bool bypass_voice_processing)
   thread_ = rtc::Thread::Current();
   audio_session_observer_ = [[RTCNativeAudioSessionDelegateAdapter alloc] initWithObserver:this];
+  mach_timebase_info_data_t tinfo;
+  mach_timebase_info(&tinfo);
+  machTickUnitsToNanoseconds_ = (double)tinfo.numer / tinfo.denom;
 }
 
 AudioDeviceIOS::~AudioDeviceIOS() {
@@ -376,6 +380,11 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
   record_audio_buffer_.Clear();
   record_audio_buffer_.SetSize(num_frames);
 
+  // Get the capture timestamp for the recorded audio.
+  // The timestamp is not in the NTP time epoch, but that is addressed by the
+  // TimeStampAligner in AudioDeviceBuffer::SetRecordedBuffer().
+  SInt64 capture_timestamp_ns = time_stamp->mHostTime * machTickUnitsToNanoseconds_;
+
   // Allocate AudioBuffers to be used as storage for the received audio.
   // The AudioBufferList structure works as a placeholder for the
   // AudioBuffer structure, which holds a pointer to the actual data buffer
@@ -404,7 +413,8 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
   // Get a pointer to the recorded audio and send it to the WebRTC ADB.
   // Use the FineAudioBuffer instance to convert between native buffer size
   // and the 10ms buffer size used by WebRTC.
-  fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_, kFixedRecordDelayEstimate);
+  fine_audio_buffer_->DeliverRecordedData(
+      record_audio_buffer_, kFixedRecordDelayEstimate, capture_timestamp_ns);
 
   return noErr;
 }
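Note for reviewers: the mach timebase conversion introduced above can be exercised in isolation. The following standalone C++ sketch is not part of the patch (the helper name HostTimeToNanoseconds is illustrative only); it mirrors how the cached ratio is computed in the AudioDeviceIOS constructor and then applied to AudioTimeStamp::mHostTime in OnDeliverRecordedData():

// Standalone sketch, Apple platforms only. Not part of the patch.
#include <mach/mach_time.h>

#include <cstdint>
#include <cstdio>

// Illustrative helper: converts a mach host-time tick count to nanoseconds.
int64_t HostTimeToNanoseconds(uint64_t host_time_ticks) {
  // mach_timebase_info() yields the numer/denom ratio between mach ticks and
  // nanoseconds. Computing it once (the patch caches it in the constructor)
  // keeps the real-time capture callback free of extra kernel calls.
  static const double kTicksToNs = [] {
    mach_timebase_info_data_t tinfo;
    mach_timebase_info(&tinfo);
    return static_cast<double>(tinfo.numer) / tinfo.denom;
  }();
  return static_cast<int64_t>(host_time_ticks * kTicksToNs);
}

int main() {
  // mach_absolute_time() reports the current host time in mach ticks, the same
  // clock that AudioTimeStamp::mHostTime uses for a captured buffer.
  const uint64_t now_ticks = mach_absolute_time();
  std::printf("now ~ %lld ns since boot\n",
              static_cast<long long>(HostTimeToNanoseconds(now_ticks)));
  return 0;
}

On Intel Macs the timebase ratio is typically 1, but on iOS devices and Apple Silicon it is not, which is why the conversion cannot simply be skipped.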