Make muted param in GetAudio optional.

The caller does not need to use it, since the mute state is also
available from AudioFrame::muted().

Bug: None
Change-Id: I458f1f2e8489c1d8f8a9078b21f889b2540bdab9
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/349940
Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
Commit-Queue: Jakob Ivarsson <jakobi@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#42236}
Author: Jakob Ivarsson <jakobi@webrtc.org>
Date: 2024-05-06 16:46:48 +02:00 (committed by WebRTC LUCI CQ)
Parent: 86cd7a35dc
Commit: 1e5f88c5be
6 changed files with 14 additions and 25 deletions
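
For callers, the practical effect is that the out-parameter can simply be dropped and the mute state read from the returned frame instead. A minimal caller sketch under the new signature; the PullTenMsOfAudio helper and the `neteq` pointer are illustrative assumptions, while NetEq::GetAudio, NetEq::kOK, and AudioFrame::muted() are the existing WebRTC API:

#include "api/audio/audio_frame.h"
#include "api/neteq/neteq.h"

// Hypothetical caller: `neteq` is assumed to be an already-configured
// webrtc::NetEq instance owned elsewhere.
void PullTenMsOfAudio(webrtc::NetEq* neteq) {
  webrtc::AudioFrame audio_frame;
  // The bool* out-parameter is omitted; it now defaults to nullptr.
  if (neteq->GetAudio(&audio_frame) != webrtc::NetEq::kOK) {
    return;  // Decode error; the frame contents are not usable.
  }
  if (audio_frame.muted()) {
    // Same information that was previously reported through the
    // bool* out-parameter.
  }
}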

@@ -210,7 +210,7 @@ class NetEq {
   // Returns kOK on success, or kFail in case of an error.
   virtual int GetAudio(
       AudioFrame* audio_frame,
-      bool* muted,
+      bool* muted = nullptr,
       int* current_sample_rate_hz = nullptr,
       absl::optional<Operation> action_override = absl::nullopt) = 0;

@@ -391,9 +391,7 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
   event_log_->Log(std::make_unique<RtcEventAudioPlayout>(remote_ssrc_));
   // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
-  bool muted;
-  if (acm_receiver_.GetAudio(audio_frame->sample_rate_hz_, audio_frame,
-                             &muted) == -1) {
+  if (acm_receiver_.GetAudio(audio_frame->sample_rate_hz_, audio_frame) == -1) {
     RTC_DLOG(LS_ERROR)
         << "ChannelReceive::GetAudioFrame() PlayoutData10Ms() failed!";
     // In all likelihood, the audio in this frame is garbage. We return an
@@ -406,13 +404,6 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
     return AudioMixer::Source::AudioFrameInfo::kError;
   }
-  if (muted) {
-    // TODO(henrik.lundin): We should be able to do better than this. But we
-    // will have to go through all the cases below where the audio samples may
-    // be used, and handle the muted case in some way.
-    AudioFrameOperations::Mute(audio_frame);
-  }
   {
     // Pass the audio buffers to an optional sink callback, before applying
     // scaling/panning, as that applies to the mix operation.
@@ -509,8 +500,8 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
   }
   TRACE_EVENT_END2("webrtc", "ChannelReceive::GetAudioFrameWithInfo", "gain",
-                   output_gain, "muted", muted);
-  return muted ? AudioMixer::Source::AudioFrameInfo::kMuted
-               : AudioMixer::Source::AudioFrameInfo::kNormal;
+                   output_gain, "muted", audio_frame->muted());
+  return audio_frame->muted() ? AudioMixer::Source::AudioFrameInfo::kMuted
+                              : AudioMixer::Source::AudioFrameInfo::kNormal;
 }

@@ -151,8 +151,6 @@ int AcmReceiver::InsertPacket(const RTPHeader& rtp_header,
 int AcmReceiver::GetAudio(int desired_freq_hz,
                           AudioFrame* audio_frame,
                           bool* muted) {
-  RTC_DCHECK(muted);
   int current_sample_rate_hz = 0;
   if (neteq_->GetAudio(audio_frame, muted, &current_sample_rate_hz) !=
       NetEq::kOK) {
@@ -212,7 +210,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz,
          sizeof(int16_t) * audio_frame->samples_per_channel_ *
              audio_frame->num_channels_);
-  call_stats_.DecodedByNetEq(audio_frame->speech_type_, *muted);
+  call_stats_.DecodedByNetEq(audio_frame->speech_type_, audio_frame->muted());
   return 0;
 }

@@ -94,7 +94,9 @@ class AcmReceiver {
   // Return value : 0 if OK.
   //                -1 if NetEq returned an error.
   //
-  int GetAudio(int desired_freq_hz, AudioFrame* audio_frame, bool* muted);
+  int GetAudio(int desired_freq_hz,
+               AudioFrame* audio_frame,
+               bool* muted = nullptr);
 
   // Replace the current set of decoders with the specified set.
   void SetCodecs(const std::map<int, SdpAudioFormat>& codecs);

@@ -218,13 +218,15 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame,
                         absl::optional<Operation> action_override) {
   TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio");
   MutexLock lock(&mutex_);
-  if (GetAudioInternal(audio_frame, muted, action_override) != 0) {
+  if (GetAudioInternal(audio_frame, action_override) != 0) {
     return kFail;
   }
   RTC_DCHECK_EQ(
       audio_frame->sample_rate_hz_,
       rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
-  RTC_DCHECK_EQ(*muted, audio_frame->muted());
+  if (muted != nullptr) {
+    *muted = audio_frame->muted();
+  }
   audio_frame->speech_type_ = ToSpeechType(LastOutputType());
   last_output_sample_rate_hz_ = audio_frame->sample_rate_hz_;
   RTC_DCHECK(last_output_sample_rate_hz_ == 8000 ||
@@ -747,13 +749,11 @@ bool NetEqImpl::MaybeChangePayloadType(uint8_t payload_type) {
 }
 
 int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
-                                bool* muted,
                                 absl::optional<Operation> action_override) {
   PacketList packet_list;
   DtmfEvent dtmf_event;
   Operation operation;
   bool play_dtmf;
-  *muted = false;
   last_decoded_packet_infos_.clear();
   tick_timer_->Increment();
   stats_->IncreaseCounter(output_size_samples_, fs_hz_);
@@ -786,7 +786,6 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
     audio_frame->num_channels_ = sync_buffer_->Channels();
     stats_->ExpandedNoiseSamples(output_size_samples_, false);
     controller_->NotifyMutedState();
-    *muted = true;
     return 0;
   }
   int return_value = GetDecision(&operation, &packet_list, &dtmf_event,

@@ -133,7 +133,7 @@ class NetEqImpl : public webrtc::NetEq {
   int GetAudio(
       AudioFrame* audio_frame,
-      bool* muted,
+      bool* muted = nullptr,
       int* current_sample_rate_hz = nullptr,
       absl::optional<Operation> action_override = absl::nullopt) override;
@@ -216,7 +216,6 @@ class NetEqImpl : public webrtc::NetEq {
   // Delivers 10 ms of audio data. The data is written to `audio_frame`.
   // Returns 0 on success, otherwise an error code.
   int GetAudioInternal(AudioFrame* audio_frame,
-                       bool* muted,
                        absl::optional<Operation> action_override)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);