Record audio capture timestamps on iOS.

This is a step towards sending audio capture timestamps from Meet on iOS.
The next step is to enable sending the audio timestamps (in harmony).

After enabling the absolute-capture-time header extension in harmony, the receiving participants will be able to record E2E audio latency and A/V sync metrics.
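
For context, a minimal sketch of the metric this unlocks (illustrative names, not WebRTC API): the absolute-capture-time extension carries the capture time as a 64-bit NTP Q32.32 timestamp, so a receiver can estimate E2E latency as the difference against its own NTP clock.

#include <cstdint>

// Illustrative only: estimate end-to-end audio latency from the
// absolute-capture-time header extension. Both inputs are NTP timestamps
// in Q32.32 fixed point (upper 32 bits are seconds).
int64_t EstimateE2eAudioLatencyMs(uint64_t capture_ntp_q32x32,
                                  uint64_t receive_ntp_q32x32) {
  // Scale the Q32.32 difference to milliseconds.
  uint64_t diff = receive_ntp_q32x32 - capture_ntp_q32x32;
  return static_cast<int64_t>((diff * 1000) >> 32);
}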

Bug: webrtc:13609
Change-Id: I797c1ed0035625ed065307314ac34c932c5abe7e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/334720
Commit-Queue: Olov Brändström <brandstrom@google.com>
Reviewed-by: Henrik Andreasson <henrika@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#41574}
Author: Olov Brändström <brandstrom@google.com>
Date: 2024-01-18 15:05:08 +01:00
Committed by: WebRTC LUCI CQ
Parent: ed1d084d0a
Commit: 4c335b70e8
8 changed files with 45 additions and 8 deletions

audio/BUILD.gn

@@ -223,7 +223,10 @@ if (rtc_include_tests) {
"utility:utility_tests",
"//testing/gtest",
]
absl_deps = [ "//third_party/abseil-cpp/absl/memory" ]
absl_deps = [
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("channel_receive_unittest") {
@@ -247,6 +250,9 @@ if (rtc_include_tests) {
"../test:test_support",
"../test/time_controller",
]
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
}

modules/audio_device/BUILD.gn

@@ -88,6 +88,7 @@ rtc_library("audio_device_buffer") {
"../../system_wrappers",
"../../system_wrappers:metrics",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("audio_device_generic") {
@@ -453,6 +454,7 @@ rtc_source_set("mock_audio_device") {
"../../api:make_ref_counted",
"../../test:test_support",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
if (rtc_include_tests && !build_with_chromium) {

modules/audio_device/fine_audio_buffer.cc

@@ -13,6 +13,7 @@
#include <cstdint>
#include <cstring>
#include "api/array_view.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
@@ -107,7 +108,8 @@ void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
void FineAudioBuffer::DeliverRecordedData(
rtc::ArrayView<const int16_t> audio_buffer,
int record_delay_ms) {
int record_delay_ms,
absl::optional<int64_t> capture_time_ns) {
RTC_DCHECK(IsReadyForRecord());
// Always append new data and grow the buffer when needed.
record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
@@ -118,7 +120,8 @@ void FineAudioBuffer::DeliverRecordedData(
record_channels_ * record_samples_per_channel_10ms_;
while (record_buffer_.size() >= num_elements_10ms) {
audio_device_buffer_->SetRecordedBuffer(record_buffer_.data(),
record_samples_per_channel_10ms_);
record_samples_per_channel_10ms_,
capture_time_ns);
audio_device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms);
audio_device_buffer_->DeliverRecordedData();
memmove(record_buffer_.data(), record_buffer_.data() + num_elements_10ms,

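As an aside, a minimal self-contained sketch of the accumulate-and-drain pattern above (a simplification, not the real FineAudioBuffer code):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

#include "absl/types/optional.h"

// Simplified model of the loop in FineAudioBuffer::DeliverRecordedData():
// native callbacks append arbitrarily sized buffers, and fixed 10 ms chunks
// are drained, each tagged with the capture time of the enclosing callback.
void DrainIn10msChunks(
    std::vector<int16_t>& buffer,
    size_t samples_per_10ms,
    absl::optional<int64_t> capture_time_ns,
    const std::function<void(const int16_t*, size_t,
                             absl::optional<int64_t>)>& deliver) {
  while (buffer.size() >= samples_per_10ms) {
    deliver(buffer.data(), samples_per_10ms, capture_time_ns);
    buffer.erase(buffer.begin(), buffer.begin() + samples_per_10ms);
  }
}

One consequence of this design, mirrored in the diff above: when a single native buffer spans several 10 ms chunks, all of them share the same capture_time_ns.
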
modules/audio_device/fine_audio_buffer.h

@@ -11,6 +11,10 @@
#ifndef MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
#define MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
#include <cstddef>
#include <cstdint>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "rtc_base/buffer.h"
@@ -61,7 +65,12 @@ class FineAudioBuffer {
// 5ms of data and sends a total of 10ms to WebRTC and clears the internal
// cache. Call #3 restarts the scheme above.
void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
int record_delay_ms);
int record_delay_ms) {
DeliverRecordedData(audio_buffer, record_delay_ms, absl::nullopt);
}
void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
int record_delay_ms,
absl::optional<int64_t> capture_time_ns);
private:
// Device buffer that works with 10ms chunks of data both for playout and

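For illustration, a hedged usage sketch of the two overloads (the callback and call site are assumptions, not code from this change):

#include <cstdint>

#include "absl/types/optional.h"
#include "api/array_view.h"
#include "modules/audio_device/fine_audio_buffer.h"

// Hypothetical platform capture callback. Platforms that know the capture
// time pass it through; legacy call sites can keep using the two-argument
// overload, which forwards absl::nullopt.
void OnNativeAudioCaptured(webrtc::FineAudioBuffer& fine_buffer,
                           rtc::ArrayView<const int16_t> pcm,
                           int record_delay_ms,
                           absl::optional<int64_t> capture_time_ns) {
  fine_buffer.DeliverRecordedData(pcm, record_delay_ms, capture_time_ns);
}
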
modules/audio_device/fine_audio_buffer_unittest.cc

@@ -113,7 +113,7 @@ void RunFineBufferTest(int frame_size_in_samples) {
{
InSequence s;
for (int j = 0; j < kNumberOfUpdateBufferCalls - 1; ++j) {
EXPECT_CALL(audio_device_buffer, SetRecordedBuffer(_, kSamplesPer10Ms))
EXPECT_CALL(audio_device_buffer, SetRecordedBuffer(_, kSamplesPer10Ms, _))
.WillOnce(VerifyInputBuffer(j, kChannels * kSamplesPer10Ms))
.RetiresOnSaturation();
}

modules/audio_device/mock_audio_device_buffer.h

@@ -11,6 +11,7 @@
#ifndef MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
#define MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
#include "absl/types/optional.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "test/gmock.h"
@@ -24,7 +25,9 @@ class MockAudioDeviceBuffer : public AudioDeviceBuffer {
MOCK_METHOD(int32_t, GetPlayoutData, (void* audioBuffer), (override));
MOCK_METHOD(int32_t,
SetRecordedBuffer,
(const void* audioBuffer, size_t nSamples),
(const void* audioBuffer,
size_t nSamples,
absl::optional<int64_t> capture_time_ns),
(override));
MOCK_METHOD(void, SetVQEData, (int playDelayMS, int recDelayMS), (override));
MOCK_METHOD(int32_t, DeliverRecordedData, (), (override));

sdk/objc/native/src/audio/audio_device_ios.h

@@ -299,6 +299,10 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
// Avoids running pending task after `this` is Terminated.
rtc::scoped_refptr<PendingTaskSafetyFlag> safety_ =
PendingTaskSafetyFlag::Create();
// Ratio between mach tick units and nanoseconds, used to convert capture
// timestamps from mach ticks to nanoseconds.
double machTickUnitsToNanoseconds_;
};
} // namespace ios_adm
} // namespace webrtc

sdk/objc/native/src/audio/audio_device_ios.mm

@@ -13,6 +13,7 @@
#include "audio_device_ios.h"
#include <mach/mach_time.h>
#include <cmath>
#include "api/array_view.h"
@@ -110,6 +111,9 @@ AudioDeviceIOS::AudioDeviceIOS(bool bypass_voice_processing)
thread_ = rtc::Thread::Current();
audio_session_observer_ = [[RTCNativeAudioSessionDelegateAdapter alloc] initWithObserver:this];
mach_timebase_info_data_t tinfo;
mach_timebase_info(&tinfo);
machTickUnitsToNanoseconds_ = (double)tinfo.numer / tinfo.denom;
}
AudioDeviceIOS::~AudioDeviceIOS() {
@@ -376,6 +380,11 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
record_audio_buffer_.Clear();
record_audio_buffer_.SetSize(num_frames);
// Get the capture timestamp for the audio.
// The timestamp is not in the NTP time epoch, but that will be addressed by
// the TimeStampAligner in AudioDeviceBuffer::SetRecordedBuffer().
SInt64 capture_timestamp_ns = time_stamp->mHostTime * machTickUnitsToNanoseconds_;
// Allocate AudioBuffers to be used as storage for the received audio.
// The AudioBufferList structure works as a placeholder for the
// AudioBuffer structure, which holds a pointer to the actual data buffer
@@ -404,7 +413,8 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
// Get a pointer to the recorded audio and send it to the WebRTC ADB.
// Use the FineAudioBuffer instance to convert between native buffer size
// and the 10ms buffer size used by WebRTC.
fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_, kFixedRecordDelayEstimate);
fine_audio_buffer_->DeliverRecordedData(
record_audio_buffer_, kFixedRecordDelayEstimate, capture_timestamp_ns);
return noErr;
}
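
For reference, a standalone sketch of the host-time conversion used above (an assumed equivalent of the inline code, not part of this change): mach_timebase_info() reports the tick length as numer/denom nanoseconds, so multiplying mHostTime by that ratio yields nanoseconds.

#include <mach/mach_time.h>

#include <cstdint>

// Standalone equivalent of the conversion above: mach host time advances in
// ticks of (numer / denom) nanoseconds each.
int64_t HostTimeToNanoseconds(uint64_t host_time_ticks) {
  static const double ticks_to_ns = [] {
    mach_timebase_info_data_t tinfo;
    mach_timebase_info(&tinfo);
    return static_cast<double>(tinfo.numer) / tinfo.denom;
  }();
  return static_cast<int64_t>(host_time_ticks * ticks_to_ns);
}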