Make muted param in GetAudio optional.

It is not necessary for the caller to use it and the mute info can be
found on AudioFrame.muted().

Bug: None
Change-Id: I458f1f2e8489c1d8f8a9078b21f889b2540bdab9
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/349940
Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
Commit-Queue: Jakob Ivarsson <jakobi@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#42236}
Author: Jakob Ivarsson, 2024-05-06 16:46:48 +02:00, committed by WebRTC LUCI CQ
parent 86cd7a35dc
commit 1e5f88c5be
6 changed files with 14 additions and 25 deletions
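
For context, a minimal sketch of the calling pattern this change enables. It is not part of the commit; the helper function name and include paths are assumed here for illustration only.

#include "api/audio/audio_frame.h"
#include "api/neteq/neteq.h"

// Pulls one 10 ms frame. With `muted` now defaulted, the caller can skip the
// out-parameter and read the mute state off the frame instead.
int PullOneFrame(webrtc::NetEq& neteq, webrtc::AudioFrame& frame) {
  if (neteq.GetAudio(&frame) != webrtc::NetEq::kOK) {
    return -1;  // NetEq reported an error.
  }
  // Mute info is available on the frame itself, as the commit message notes.
  return frame.muted() ? 1 : 0;
}

The old form, `bool muted; neteq.GetAudio(&frame, &muted);`, still compiles unchanged because the parameter only gained a default value.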


@@ -210,7 +210,7 @@ class NetEq {
   // Returns kOK on success, or kFail in case of an error.
   virtual int GetAudio(
       AudioFrame* audio_frame,
-      bool* muted,
+      bool* muted = nullptr,
       int* current_sample_rate_hz = nullptr,
       absl::optional<Operation> action_override = absl::nullopt) = 0;


@@ -391,9 +391,7 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
   event_log_->Log(std::make_unique<RtcEventAudioPlayout>(remote_ssrc_));
 
   // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
-  bool muted;
-  if (acm_receiver_.GetAudio(audio_frame->sample_rate_hz_, audio_frame,
-                             &muted) == -1) {
+  if (acm_receiver_.GetAudio(audio_frame->sample_rate_hz_, audio_frame) == -1) {
     RTC_DLOG(LS_ERROR)
         << "ChannelReceive::GetAudioFrame() PlayoutData10Ms() failed!";
     // In all likelihood, the audio in this frame is garbage. We return an
@@ -406,13 +404,6 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
     return AudioMixer::Source::AudioFrameInfo::kError;
   }
 
-  if (muted) {
-    // TODO(henrik.lundin): We should be able to do better than this. But we
-    // will have to go through all the cases below where the audio samples may
-    // be used, and handle the muted case in some way.
-    AudioFrameOperations::Mute(audio_frame);
-  }
-
   {
     // Pass the audio buffers to an optional sink callback, before applying
     // scaling/panning, as that applies to the mix operation.
@@ -509,9 +500,9 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
   }
 
   TRACE_EVENT_END2("webrtc", "ChannelReceive::GetAudioFrameWithInfo", "gain",
-                   output_gain, "muted", muted);
-  return muted ? AudioMixer::Source::AudioFrameInfo::kMuted
-               : AudioMixer::Source::AudioFrameInfo::kNormal;
+                   output_gain, "muted", audio_frame->muted());
+  return audio_frame->muted() ? AudioMixer::Source::AudioFrameInfo::kMuted
+                              : AudioMixer::Source::AudioFrameInfo::kNormal;
 }
 
 int ChannelReceive::PreferredSampleRate() const {


@@ -151,8 +151,6 @@ int AcmReceiver::InsertPacket(const RTPHeader& rtp_header,
 int AcmReceiver::GetAudio(int desired_freq_hz,
                           AudioFrame* audio_frame,
                           bool* muted) {
-  RTC_DCHECK(muted);
-
   int current_sample_rate_hz = 0;
   if (neteq_->GetAudio(audio_frame, muted, &current_sample_rate_hz) !=
       NetEq::kOK) {
@@ -212,7 +210,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz,
          sizeof(int16_t) * audio_frame->samples_per_channel_ *
              audio_frame->num_channels_);
 
-  call_stats_.DecodedByNetEq(audio_frame->speech_type_, *muted);
+  call_stats_.DecodedByNetEq(audio_frame->speech_type_, audio_frame->muted());
   return 0;
 }


@@ -94,7 +94,9 @@ class AcmReceiver {
   // Return value : 0 if OK.
   //                -1 if NetEq returned an error.
   //
-  int GetAudio(int desired_freq_hz, AudioFrame* audio_frame, bool* muted);
+  int GetAudio(int desired_freq_hz,
+               AudioFrame* audio_frame,
+               bool* muted = nullptr);
 
   // Replace the current set of decoders with the specified set.
   void SetCodecs(const std::map<int, SdpAudioFormat>& codecs);


@@ -218,13 +218,15 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame,
                         absl::optional<Operation> action_override) {
   TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio");
   MutexLock lock(&mutex_);
-  if (GetAudioInternal(audio_frame, muted, action_override) != 0) {
+  if (GetAudioInternal(audio_frame, action_override) != 0) {
     return kFail;
   }
   RTC_DCHECK_EQ(
       audio_frame->sample_rate_hz_,
       rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
-  RTC_DCHECK_EQ(*muted, audio_frame->muted());
+  if (muted != nullptr) {
+    *muted = audio_frame->muted();
+  }
   audio_frame->speech_type_ = ToSpeechType(LastOutputType());
   last_output_sample_rate_hz_ = audio_frame->sample_rate_hz_;
   RTC_DCHECK(last_output_sample_rate_hz_ == 8000 ||
@@ -747,13 +749,11 @@ bool NetEqImpl::MaybeChangePayloadType(uint8_t payload_type) {
 }
 
 int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
-                                bool* muted,
                                 absl::optional<Operation> action_override) {
   PacketList packet_list;
   DtmfEvent dtmf_event;
   Operation operation;
   bool play_dtmf;
-  *muted = false;
   last_decoded_packet_infos_.clear();
   tick_timer_->Increment();
   stats_->IncreaseCounter(output_size_samples_, fs_hz_);
@@ -786,7 +786,6 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
     audio_frame->num_channels_ = sync_buffer_->Channels();
     stats_->ExpandedNoiseSamples(output_size_samples_, false);
     controller_->NotifyMutedState();
-    *muted = true;
     return 0;
   }
   int return_value = GetDecision(&operation, &packet_list, &dtmf_event,
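
The NetEqImpl hunks above use the standard optional out-parameter idiom: the pointer defaults to nullptr and is dereferenced only when a caller actually supplied it, while the internal code stops writing the flag and relies on AudioFrame::muted(). Below is a self-contained sketch of that idiom with illustrative names unrelated to WebRTC, not code from the commit.

#include <cstdio>

// `out_flag` is optional: written through only when the caller passes it.
int DoWork(bool* out_flag = nullptr) {
  const bool flag = true;  // Stand-in for audio_frame->muted().
  if (out_flag != nullptr) {
    *out_flag = flag;  // Old-style callers still receive the value.
  }
  return 0;
}

int main() {
  DoWork();  // New-style call: no out-parameter.
  bool flag = false;
  DoWork(&flag);  // Old-style call keeps working unchanged.
  std::printf("flag=%d\n", flag);
  return 0;
}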


@@ -133,7 +133,7 @@ class NetEqImpl : public webrtc::NetEq {
   int GetAudio(
       AudioFrame* audio_frame,
-      bool* muted,
+      bool* muted = nullptr,
       int* current_sample_rate_hz = nullptr,
       absl::optional<Operation> action_override = absl::nullopt) override;
@@ -216,7 +216,6 @@ class NetEqImpl : public webrtc::NetEq {
   // Delivers 10 ms of audio data. The data is written to `audio_frame`.
   // Returns 0 on success, otherwise an error code.
   int GetAudioInternal(AudioFrame* audio_frame,
-                       bool* muted,
                        absl::optional<Operation> action_override)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);