Reland of Enable cpplint and fix cpplint errors in webrtc/*audio (patchset #1 id:1 of https://codereview.webrtc.org/2739143002/ )

Reason for revert:
Can reland it if a backwards-compatible API is kept.

Original issue's description:
> Revert of Enable cpplint and fix cpplint errors in webrtc/*audio (patchset #4 id:180001 of https://codereview.webrtc.org/2683033004/ )
>
> Reason for revert:
> The API change in audio/utility/audio_frame_operations.h caused breakage. Need to keep a backward-compatible API.
>
> Original issue's description:
> > Enable cpplint and fix cpplint errors in webrtc/*audio
> >
> > Change usage accordingly throughout the codebase
> >
> > BUG=webrtc:5268
> >
> > TESTED=Fixed issues reported by:
> > find webrtc/*audio -type f -name *.cc -o -name *.h | xargs cpplint.py
> >
> > Review-Url: https://codereview.webrtc.org/2683033004
> > Cr-Commit-Position: refs/heads/master@{#17133}
> > Committed: aebe55ca6c
>
> TBR=henrika@webrtc.org,henrik.lundin@webrtc.org,kwiberg@webrtc.org
> # Skipping CQ checks because original CL landed less than 1 day ago.
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=webrtc:5268
>
> Review-Url: https://codereview.webrtc.org/2739143002
> Cr-Commit-Position: refs/heads/master@{#17138}
> Committed: e47c1d3ca1

TBR=henrika@webrtc.org,henrik.lundin@webrtc.org,kwiberg@webrtc.org
# Skipping CQ checks because original CL landed less than 1 day ago.
BUG=webrtc:5268

Review-Url: https://codereview.webrtc.org/2739073003
Cr-Commit-Position: refs/heads/master@{#17144}
Author: oprypin (committed by Commit bot)
Date: 2017-03-09 06:25:06 -08:00
commit 67fdb80837, parent c69385de8b
33 changed files with 1022 additions and 1068 deletions
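
The reland is made safe by the pattern visible in audio_frame_operations.h
below: each method whose parameter changed from AudioFrame& to AudioFrame*
keeps a deprecated reference overload that simply forwards to the pointer
version, with cpplint's runtime/references warning silenced via NOLINT. A
minimal sketch of that pattern, using a hypothetical Gain helper rather than
WebRTC's actual classes:

    #include <cstdint>
    #include <vector>

    // Hypothetical helper, for illustration only (not part of this CL).
    class Gain {
     public:
      // Preferred signature: cpplint wants mutable output parameters
      // passed by pointer, so the intent is visible at the call site.
      static int Apply(float scale, std::vector<int16_t>* samples) {
        if (samples == nullptr) return -1;
        for (int16_t& s : *samples)
          s = static_cast<int16_t>(scale * s);  // Sketch; no saturation.
        return 0;
      }

      // Deprecated reference overload, kept so existing callers build.
      static int Apply(float scale, std::vector<int16_t>& samples) {  // NOLINT
        return Apply(scale, &samples);  // TODO: drop once callers migrate.
      }
    };

Old call sites (Gain::Apply(0.5f, samples)) and migrated ones
(Gain::Apply(0.5f, &samples)) both resolve unambiguously, so downstream code
can move over incrementally before the reference overload is deleted.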

@@ -18,6 +18,7 @@ CPPLINT_DIRS = [
   'webrtc/api',
   'webrtc/audio',
   'webrtc/call',
+  'webrtc/common_audio',
   'webrtc/common_video',
   'webrtc/examples',
   'webrtc/modules/audio_mixer',

@@ -52,7 +52,7 @@ int32_t AudioTransportProxy::RecordedDataIsAvailable(
     const int32_t clockDrift,
     const uint32_t currentMicLevel,
     const bool keyPressed,
-    uint32_t& newMicLevel) {
+    uint32_t& newMicLevel) {  // NOLINT: to avoid changing APIs
   // Pass call through to original audio transport instance.
   return voe_audio_transport_->RecordedDataIsAvailable(
       audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec,

@@ -280,32 +280,32 @@ void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
   }
 }
 
-int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
-  if (frame.num_channels_ != 2) {
+int AudioFrameOperations::Scale(float left, float right, AudioFrame* frame) {
+  if (frame->num_channels_ != 2) {
     return -1;
   }
 
-  for (size_t i = 0; i < frame.samples_per_channel_; i++) {
-    frame.data_[2 * i] = static_cast<int16_t>(left * frame.data_[2 * i]);
-    frame.data_[2 * i + 1] =
-        static_cast<int16_t>(right * frame.data_[2 * i + 1]);
+  for (size_t i = 0; i < frame->samples_per_channel_; i++) {
+    frame->data_[2 * i] = static_cast<int16_t>(left * frame->data_[2 * i]);
+    frame->data_[2 * i + 1] =
+        static_cast<int16_t>(right * frame->data_[2 * i + 1]);
   }
   return 0;
 }
 
-int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
+int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame* frame) {
   int32_t temp_data = 0;
 
   // Ensure that the output result is saturated [-32768, +32767].
-  for (size_t i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
+  for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
        i++) {
-    temp_data = static_cast<int32_t>(scale * frame.data_[i]);
+    temp_data = static_cast<int32_t>(scale * frame->data_[i]);
     if (temp_data < -32768) {
-      frame.data_[i] = -32768;
+      frame->data_[i] = -32768;
     } else if (temp_data > 32767) {
-      frame.data_[i] = 32767;
+      frame->data_[i] = 32767;
     } else {
-      frame.data_[i] = static_cast<int16_t>(temp_data);
+      frame->data_[i] = static_cast<int16_t>(temp_data);
     }
   }
   return 0;

@@ -113,9 +113,19 @@ class AudioFrameOperations {
   // Halve samples in |frame|.
   static void ApplyHalfGain(AudioFrame* frame);
 
-  static int Scale(float left, float right, AudioFrame& frame);
-  static int ScaleWithSat(float scale, AudioFrame& frame);
+  static int Scale(float left, float right, AudioFrame* frame);
+  static int Scale(float left, float right, AudioFrame& frame) {  // NOLINT
+    // TODO(oprypin): drop this method
+    return Scale(left, right, &frame);
+  }
+
+  static int ScaleWithSat(float scale, AudioFrame* frame);
+  static int ScaleWithSat(float scale, AudioFrame& frame) {  // NOLINT
+    // TODO(oprypin): drop this method
+    return ScaleWithSat(scale, &frame);
+  }
 };
 
 }  // namespace webrtc
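
With both overloads in place, a call site compiles in either form during the
migration window. A sketch (assuming the declarations in the hunk above and
the header path named in the revert description):

    #include "webrtc/audio/utility/audio_frame_operations.h"

    void HalveVolume(webrtc::AudioFrame* frame) {
      // New, cpplint-clean form: the frame is passed by pointer.
      webrtc::AudioFrameOperations::ScaleWithSat(0.5f, frame);
    }

    void HalveVolumeLegacy(webrtc::AudioFrame& frame) {  // NOLINT
      // Old form: still compiles via the forwarding overload.
      webrtc::AudioFrameOperations::ScaleWithSat(0.5f, frame);
    }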

@@ -426,20 +426,20 @@ TEST_F(AudioFrameOperationsTest, MuteEndStereoShort) {
 // TODO(andrew): should not allow negative scales.
 TEST_F(AudioFrameOperationsTest, DISABLED_ScaleFailsWithBadParameters) {
   frame_.num_channels_ = 1;
-  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, frame_));
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_));
 
   frame_.num_channels_ = 3;
-  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, frame_));
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_));
 
   frame_.num_channels_ = 2;
-  EXPECT_EQ(-1, AudioFrameOperations::Scale(-1.0, 1.0, frame_));
-  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, -1.0, frame_));
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(-1.0, 1.0, &frame_));
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, -1.0, &frame_));
 }
 
 // TODO(andrew): fix the wraparound bug. We should always saturate.
 TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) {
   SetFrameData(4000, -4000, &frame_);
-  EXPECT_EQ(0, AudioFrameOperations::Scale(10.0, 10.0, frame_));
+  EXPECT_EQ(0, AudioFrameOperations::Scale(10.0, 10.0, &frame_));
 
   AudioFrame clipped_frame;
   clipped_frame.samples_per_channel_ = 320;
@@ -450,7 +450,7 @@ TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) {
 
 TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
   SetFrameData(1, -1, &frame_);
-  EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, frame_));
+  EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_));
 
   AudioFrame scaled_frame;
   scaled_frame.samples_per_channel_ = 320;
@@ -461,13 +461,13 @@ TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
 
 // TODO(andrew): should fail with a negative scale.
 TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) {
-  EXPECT_EQ(-1, AudioFrameOperations::ScaleWithSat(-1.0, frame_));
+  EXPECT_EQ(-1, AudioFrameOperations::ScaleWithSat(-1.0, &frame_));
 }
 
 TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
   frame_.num_channels_ = 1;
   SetFrameData(4000, &frame_);
-  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, frame_));
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, &frame_));
 
   AudioFrame clipped_frame;
   clipped_frame.samples_per_channel_ = 320;
@@ -476,7 +476,7 @@ TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
   VerifyFramesAreEqual(clipped_frame, frame_);
 
   SetFrameData(-4000, &frame_);
-  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, frame_));
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, &frame_));
   SetFrameData(-32768, &clipped_frame);
   VerifyFramesAreEqual(clipped_frame, frame_);
 }
@@ -484,7 +484,7 @@ TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
 TEST_F(AudioFrameOperationsTest, ScaleWithSatSucceeds) {
   frame_.num_channels_ = 1;
   SetFrameData(1, &frame_);
-  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, frame_));
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, &frame_));
 
   AudioFrame scaled_frame;
   scaled_frame.samples_per_channel_ = 320;

@@ -107,7 +107,8 @@ class ResampleConverter : public AudioConverter {
 // converters must be provided.
 class CompositionConverter : public AudioConverter {
  public:
-  CompositionConverter(std::vector<std::unique_ptr<AudioConverter>> converters)
+  explicit CompositionConverter(
+      std::vector<std::unique_ptr<AudioConverter>> converters)
       : converters_(std::move(converters)) {
     RTC_CHECK_GE(converters_.size(), 2);
     // We need an intermediate buffer after every converter.

@@ -31,7 +31,7 @@ class AudioConverter {
                                                 size_t src_frames,
                                                 size_t dst_channels,
                                                 size_t dst_frames);
-  virtual ~AudioConverter() {};
+  virtual ~AudioConverter() {}
 
   // Convert |src|, containing |src_size| samples, to |dst|, having a sample
   // capacity of |dst_capacity|. Both point to a series of buffers containing

@@ -8,8 +8,8 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_INTERNAL_BEAMFORMER_BLOCKER_H_
-#define WEBRTC_INTERNAL_BEAMFORMER_BLOCKER_H_
+#ifndef WEBRTC_COMMON_AUDIO_BLOCKER_H_
+#define WEBRTC_COMMON_AUDIO_BLOCKER_H_
 
 #include <memory>
@@ -124,4 +124,4 @@ class Blocker {
 
 }  // namespace webrtc
 
-#endif  // WEBRTC_INTERNAL_BEAMFORMER_BLOCKER_H_
+#endif  // WEBRTC_COMMON_AUDIO_BLOCKER_H_

@@ -8,8 +8,8 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_CHANNEL_BUFFER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_CHANNEL_BUFFER_H_
+#ifndef WEBRTC_COMMON_AUDIO_CHANNEL_BUFFER_H_
+#define WEBRTC_COMMON_AUDIO_CHANNEL_BUFFER_H_
 
 #include <string.h>
@@ -183,4 +183,4 @@ class IFChannelBuffer {
 
 }  // namespace webrtc
 
-#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_CHANNEL_BUFFER_H_
+#endif  // WEBRTC_COMMON_AUDIO_CHANNEL_BUFFER_H_

@@ -11,6 +11,7 @@
 #ifndef WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
 #define WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
 
+#include <algorithm>
 #include <limits>
 #include <cstring>

@@ -35,7 +35,7 @@ class RealFourier {
   // Construct a wrapper instance for the given input order, which must be
   // between 1 and kMaxFftOrder, inclusively.
   static std::unique_ptr<RealFourier> Create(int fft_order);
-  virtual ~RealFourier() {};
+  virtual ~RealFourier() {}
 
   // Helper to compute the smallest FFT order (a power of 2) which will contain
   // the given input length.

@@ -13,8 +13,8 @@
  * A wrapper for resampling a numerous amount of sampling combinations.
  */
 
-#ifndef WEBRTC_RESAMPLER_RESAMPLER_H_
-#define WEBRTC_RESAMPLER_RESAMPLER_H_
+#ifndef WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_RESAMPLER_H_
+#define WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_RESAMPLER_H_
 
 #include <stddef.h>
@@ -23,10 +23,8 @@
 namespace webrtc {
 
 // All methods return 0 on success and -1 on failure.
-class Resampler
-{
-public:
+class Resampler {
+ public:
   Resampler();
   Resampler(int inFreq, int outFreq, size_t num_channels);
   ~Resampler();
@@ -39,11 +37,10 @@ public:
 
   // Resample samplesIn to samplesOut.
   int Push(const int16_t* samplesIn, size_t lengthIn, int16_t* samplesOut,
-           size_t maxLen, size_t &outLen);
+           size_t maxLen, size_t& outLen);  // NOLINT: to avoid changing APIs
 
-private:
-  enum ResamplerMode
-  {
+ private:
+  enum ResamplerMode {
     kResamplerMode1To1,
     kResamplerMode1To2,
     kResamplerMode1To3,
@@ -92,4 +89,4 @@ private:
 
 }  // namespace webrtc
 
-#endif  // WEBRTC_RESAMPLER_RESAMPLER_H_
+#endif  // WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_RESAMPLER_H_

@@ -53,7 +53,7 @@ void CheckExpectedBufferSizes(size_t src_length,
   RTC_DCHECK_GE(dst_capacity, dst_size_10ms);
 #endif
 }
-}
+}  // namespace
 
 template <typename T>
 PushResampler<T>::PushResampler()

@@ -8,6 +8,7 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <algorithm>
 #include <cmath>
 #include <cstring>
 #include <memory>

(File diff suppressed because it is too large.)

@@ -16,6 +16,7 @@
 #include <math.h>
 
+#include <algorithm>
 #include <memory>
 
 #include "webrtc/base/timeutils.h"

@@ -58,7 +58,8 @@ static void RandomStressTest(int** data_ptr) {
   printf("seed=%u\n", seed);
   srand(seed);
   for (int i = 0; i < kNumTests; i++) {
-    const int buffer_size = std::max(rand() % kMaxBufferSize, 1);
+    // rand_r is not supported on many platforms, so rand is used.
+    const int buffer_size = std::max(rand() % kMaxBufferSize, 1);  // NOLINT
     std::unique_ptr<int[]> write_data(new int[buffer_size]);
     std::unique_ptr<int[]> read_data(new int[buffer_size]);
     scoped_ring_buffer buffer(WebRtc_CreateBuffer(buffer_size, sizeof(int)));
@@ -68,8 +69,8 @@ static void RandomStressTest(int** data_ptr) {
     int write_element = 0;
     int read_element = 0;
     for (int j = 0; j < kNumOps; j++) {
-      const bool write = rand() % 2 == 0 ? true : false;
-      const int num_elements = rand() % buffer_size;
+      const bool write = rand() % 2 == 0 ? true : false;  // NOLINT
+      const int num_elements = rand() % buffer_size;  // NOLINT
       if (write) {
         const int buffer_available = buffer_size - buffer_consumed;
         ASSERT_EQ(static_cast<size_t>(buffer_available),

@@ -15,8 +15,8 @@
  * For specific function calls, see bottom of file.
  */
 
-#ifndef WEBRTC_SPL_SIGNAL_PROCESSING_LIBRARY_H_
-#define WEBRTC_SPL_SIGNAL_PROCESSING_LIBRARY_H_
+#ifndef WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SIGNAL_PROCESSING_LIBRARY_H_
+#define WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SIGNAL_PROCESSING_LIBRARY_H_
 
 #include <string.h>
 #include "webrtc/common_audio/signal_processing/dot_product_with_scale.h"
@@ -77,7 +77,7 @@
 // C + the 32 most significant bits of A * B
 #define WEBRTC_SPL_SCALEDIFF32(A, B, C) \
-  (C + (B >> 16) * A + (((uint32_t)(0x0000FFFF & B) * A) >> 16))
+  (C + (B >> 16) * A + (((uint32_t)(B & 0x0000FFFF) * A) >> 16))
 
 #define WEBRTC_SPL_SAT(a, b, c) (b > a ? a : b < c ? c : b)
@@ -906,7 +906,7 @@ void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
 #ifdef __cplusplus
 }
 #endif  // __cplusplus
-#endif  // WEBRTC_SPL_SIGNAL_PROCESSING_LIBRARY_H_
+#endif  // WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SIGNAL_PROCESSING_LIBRARY_H_
 
 //
 // WebRtcSpl_AddSatW16(...)

@@ -12,8 +12,8 @@
 // This header file includes the inline functions in
 // the fix point signal processing library.
 
-#ifndef WEBRTC_SPL_SPL_INL_H_
-#define WEBRTC_SPL_SPL_INL_H_
+#ifndef WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_H_
+#define WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_H_
 
 #include "webrtc/system_wrappers/include/compile_assert_c.h"
@@ -56,7 +56,7 @@ static __inline int WebRtcSpl_CountLeadingZeros32(uint32_t n) {
 // Returns the number of leading zero bits in the argument.
 static __inline int WebRtcSpl_CountLeadingZeros64(uint64_t n) {
 #ifdef __GNUC__
-  COMPILE_ASSERT(sizeof(unsigned long long) == sizeof(uint64_t));
+  COMPILE_ASSERT(sizeof(unsigned long long) == sizeof(uint64_t));  // NOLINT
   return n == 0 ? 64 : __builtin_clzll(n);
 #else
   return WebRtcSpl_CountLeadingZeros64_NotBuiltin(n);
@@ -151,4 +151,4 @@ static __inline int32_t WebRtc_MulAccumW16(int16_t a, int16_t b, int32_t c) {
 
 #endif  // WEBRTC_ARCH_ARM_V7
 
-#endif  // WEBRTC_SPL_SPL_INL_H_
+#endif  // WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_H_

@@ -13,8 +13,8 @@
  * the fix point signal processing library.
  */
 
-#ifndef WEBRTC_SPL_SPL_INL_ARMV7_H_
-#define WEBRTC_SPL_SPL_INL_ARMV7_H_
+#ifndef WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_ARMV7_H_
+#define WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_ARMV7_H_
 
 /* TODO(kma): Replace some assembly code with GCC intrinsics
  * (e.g. __builtin_clz).
@@ -88,8 +88,7 @@ static __inline int16_t WebRtcSpl_NormW32(int32_t a) {
 
   if (a == 0) {
     return 0;
-  }
-  else if (a < 0) {
+  } else if (a < 0) {
     a ^= 0xFFFFFFFF;
   }
 
@@ -114,8 +113,7 @@ static __inline int16_t WebRtcSpl_NormW16(int16_t a) {
 
   if (a_32 == 0) {
     return 0;
-  }
-  else if (a_32 < 0) {
+  } else if (a_32 < 0) {
     a_32 ^= 0xFFFFFFFF;
   }
 
@@ -133,4 +131,4 @@ static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) {
   return (int16_t)out;
 }
 
-#endif  // WEBRTC_SPL_SPL_INL_ARMV7_H_
+#endif  // WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_ARMV7_H_

@@ -12,8 +12,8 @@
 // This header file includes the inline functions in
 // the fix point signal processing library.
 
-#ifndef WEBRTC_SPL_SPL_INL_MIPS_H_
-#define WEBRTC_SPL_SPL_INL_MIPS_H_
+#ifndef WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_MIPS_H_
+#define WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_MIPS_H_
 
 static __inline int32_t WEBRTC_SPL_MUL_16_16(int32_t a,
                                              int32_t b) {
@@ -33,8 +33,7 @@ static __inline int32_t WEBRTC_SPL_MUL_16_16(int32_t a,
     "mul %[value32], %[a1], %[b1] \n\t"
     : [value32] "=r" (value32), [a1] "=&r" (a1), [b1] "=&r" (b1)
     : [a] "r" (a), [b] "r" (b)
-    : "hi", "lo"
-  );
+    : "hi", "lo");
   return value32;
 }
@@ -61,8 +60,7 @@ static __inline int32_t WEBRTC_SPL_MUL_16_32_RSFT16(int16_t a,
     : [value32] "=&r" (value32), [b1] "=&r" (b1), [b2] "=&r" (b2),
       [a1] "=&r" (a1)
     : [a] "r" (a), [b] "r" (b)
-    : "hi", "lo"
-  );
+    : "hi", "lo");
   return value32;
 }
@@ -72,8 +70,7 @@ static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) {
     "shll_s.w %[value32], %[value32], 16 \n\t"
     "sra %[value32], %[value32], 16 \n\t"
     : [value32] "+r" (value32)
-    :
-  );
+    :);
   int16_t out16 = (int16_t)value32;
   return out16;
 }
@@ -84,8 +81,7 @@ static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) {
   __asm __volatile(
     "addq_s.ph %[value32], %[a], %[b] \n\t"
     : [value32] "=r" (value32)
-    : [a] "r" (a), [b] "r" (b)
-  );
+    : [a] "r" (a), [b] "r" (b) );
   return (int16_t)value32;
 }
@@ -95,8 +91,7 @@ static __inline int32_t WebRtcSpl_AddSatW32(int32_t l_var1, int32_t l_var2) {
   __asm __volatile(
     "addq_s.w %[l_sum], %[l_var1], %[l_var2] \n\t"
     : [l_sum] "=r" (l_sum)
-    : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2)
-  );
+    : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2) );
   return l_sum;
 }
@@ -107,8 +102,7 @@ static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) {
   __asm __volatile(
     "subq_s.ph %[value32], %[var1], %[var2] \n\t"
    : [value32] "=r" (value32)
-    : [var1] "r" (var1), [var2] "r" (var2)
-  );
+    : [var1] "r" (var1), [var2] "r" (var2) );
   return (int16_t)value32;
 }
@@ -119,8 +113,7 @@ static __inline int32_t WebRtcSpl_SubSatW32(int32_t l_var1, int32_t l_var2) {
   __asm __volatile(
     "subq_s.w %[l_diff], %[l_var1], %[l_var2] \n\t"
     : [l_diff] "=r" (l_diff)
-    : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2)
-  );
+    : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2) );
   return l_diff;
 }
@@ -134,8 +127,7 @@ static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) {
     "clz %[bits], %[n] \n\t"
     "subu %[bits], %[i32], %[bits] \n\t"
     : [bits] "=&r" (bits)
-    : [n] "r" (n), [i32] "r" (i32)
-  );
+    : [n] "r" (n), [i32] "r" (i32) );
   return (int16_t)bits;
 }
@@ -157,8 +149,7 @@ static __inline int16_t WebRtcSpl_NormW32(int32_t a) {
     "2: \n\t"
     ".set pop \n\t"
     : [zeros]"=&r"(zeros)
-    : [a] "r" (a)
-  );
+    : [a] "r" (a) );
  return (int16_t)zeros;
 }
@@ -169,8 +160,7 @@ static __inline int16_t WebRtcSpl_NormU32(uint32_t a) {
   __asm __volatile(
     "clz %[zeros], %[a] \n\t"
     : [zeros] "=r" (zeros)
-    : [a] "r" (a)
-  );
+    : [a] "r" (a) );
   return (int16_t)(zeros & 0x1f);
 }
@@ -193,8 +183,7 @@ static __inline int16_t WebRtcSpl_NormW16(int16_t a) {
     "2: \n\t"
     ".set pop \n\t"
     : [zeros]"=&r"(zeros)
-    : [a0] "r" (a0)
-  );
+    : [a0] "r" (a0) );
   return (int16_t)zeros;
 }
@@ -217,9 +206,8 @@ static __inline int32_t WebRtc_MulAccumW16(int16_t a,
     "addu %[c1], %[c], %[res] \n\t"
     : [c1] "=r" (c1), [res] "=&r" (res)
     : [a] "r" (a), [b] "r" (b), [c] "r" (c)
-    : "hi", "lo"
-  );
+    : "hi", "lo");
   return (c1);
 }
 
-#endif  // WEBRTC_SPL_SPL_INL_MIPS_H_
+#endif  // WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_MIPS_H_

@@ -60,7 +60,7 @@ TEST_F(RealFFTTest, RealAndComplexMatch) {
   for (i = 0, j = 0; i < kTimeDataLength; i += 1, j += 2) {
     complex_fft_buff[j] = kRefData[i];
     complex_fft_buff[j + 1] = 0;  // Insert zero's to imaginary parts.
-  };
+  }
 
   // Create and run real forward FFT.
   RealFFT* fft = WebRtcSpl_CreateRealFFT(kOrder);

@@ -14,8 +14,8 @@
  *
  */
 
-#ifndef WEBRTC_SPL_RESAMPLE_BY_2_INTERNAL_H_
-#define WEBRTC_SPL_RESAMPLE_BY_2_INTERNAL_H_
+#ifndef WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_RESAMPLE_BY_2_INTERNAL_H_
+#define WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_RESAMPLE_BY_2_INTERNAL_H_
 
 #include "webrtc/typedefs.h"
@@ -44,4 +44,4 @@ void WebRtcSpl_LPBy2ShortToInt(const int16_t* in, int32_t len,
 void WebRtcSpl_LPBy2IntToInt(const int32_t* in, int32_t len, int32_t* out,
                              int32_t* state);
 
-#endif  // WEBRTC_SPL_RESAMPLE_BY_2_INTERNAL_H_
+#endif  // WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_RESAMPLE_BY_2_INTERNAL_H_

@@ -328,7 +328,8 @@ TEST_F(SplTest, VectorOperationsTest) {
     for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((B[kk]*3+7)>>2, bTmp16[kk]);
     }
-    WebRtcSpl_ScaleAndAddVectorsWithRound(b16, 3, b16, 2, 2, bTmp16, kVectorSize);
+    WebRtcSpl_ScaleAndAddVectorsWithRound(b16, 3, b16, 2, 2, bTmp16,
+                                          kVectorSize);
     for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((B[kk]*3+B[kk]*2+2)>>2, bTmp16[kk]);
     }
@@ -355,7 +356,8 @@ TEST_F(SplTest, VectorOperationsTest) {
     for (size_t kk = 0; kk < kVectorSize; ++kk) {
        EXPECT_EQ(B[kk] >> 1, bTmp16[kk]);
     }
-    WebRtcSpl_ReverseOrderMultArrayElements(bTmp16, a16, &b16[3], kVectorSize, 2);
+    WebRtcSpl_ReverseOrderMultArrayElements(bTmp16, a16, &b16[3],
+                                            kVectorSize, 2);
     for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((a16[kk]*b16[3-kk])>>2, bTmp16[kk]);
     }
@@ -558,7 +560,7 @@ TEST_F(SplTest, FFTTest) {
 //    }
     WebRtcSpl_ComplexBitReverse(B, 3);
     for (int kk = 0; kk < 16; ++kk) {
-      //EXPECT_EQ(A[kk], B[kk]);
+      // EXPECT_EQ(A[kk], B[kk]);
     }
 }

@@ -24,9 +24,7 @@ enum { kNumGaussians = 2 };  // Number of Gaussians per channel in the GMM.
 enum { kTableSize = kNumChannels * kNumGaussians };
 enum { kMinEnergy = 10 };  // Minimum energy required to trigger audio signal.
 
-typedef struct VadInstT_
-{
-
+typedef struct VadInstT_ {
   int vad;
   int32_t downsampling_filter_states[4];
   WebRtcSpl_State48khzTo8khz state_48_to_8;
@@ -52,7 +50,6 @@ typedef struct VadInstT_
   int16_t total[3];
 
   int init_flag;
-
 } VadInstT;
 
 // Initializes the core VAD component. The default aggressiveness mode is

@@ -18,7 +18,8 @@ extern "C" {
 #include "webrtc/common_audio/vad/vad_core.h"
 }
 
-namespace {
+namespace webrtc {
+namespace test {
 
 TEST_F(VadTest, InitCore) {
   // Test WebRtcVad_InitCore().
@@ -102,4 +103,5 @@ TEST_F(VadTest, CalcVad) {
 
   free(self);
 }
-}  // namespace
+}  // namespace test
+}  // namespace webrtc

@@ -19,7 +19,8 @@ extern "C" {
 #include "webrtc/common_audio/vad/vad_filterbank.h"
 }
 
-namespace {
+namespace webrtc {
+namespace test {
 
 const int kNumValidFrameLengths = 3;
@@ -89,4 +90,5 @@ TEST_F(VadTest, vad_filterbank) {
 
   free(self);
 }
-}  // namespace
+}  // namespace test
+}  // namespace webrtc

@@ -16,7 +16,8 @@ extern "C" {
 #include "webrtc/common_audio/vad/vad_gmm.h"
 }
 
-namespace {
+namespace webrtc {
+namespace test {
 
 TEST_F(VadTest, vad_gmm) {
   int16_t delta = 0;
@@ -40,4 +41,5 @@ TEST_F(VadTest, vad_gmm) {
   EXPECT_EQ(0, WebRtcVad_GaussianProbability(105, 0, 128, &delta));
   EXPECT_EQ(13440, delta);
 }
-}  // namespace
+}  // namespace test
+}  // namespace webrtc

@@ -19,7 +19,8 @@ extern "C" {
 #include "webrtc/common_audio/vad/vad_sp.h"
 }
 
-namespace {
+namespace webrtc {
+namespace test {
 
 TEST_F(VadTest, vad_sp) {
   VadInstT* self = reinterpret_cast<VadInstT*>(malloc(sizeof(VadInstT)));
@@ -71,4 +72,5 @@ TEST_F(VadTest, vad_sp) {
 
   free(self);
 }
-}  // namespace
+}  // namespace test
+}  // namespace webrtc

@@ -52,7 +52,8 @@ bool VadTest::ValidRatesAndFrameLengths(int rate, size_t frame_length) {
   return false;
 }
 
-namespace {
+namespace webrtc {
+namespace test {
 
 TEST_F(VadTest, ApiTest) {
   // This API test runs through the APIs for all possible valid and invalid
@@ -152,4 +153,5 @@ TEST_F(VadTest, ValidRatesFrameLengths) {
 
 // TODO(bjornv): Add a process test, run on file.
 
-}  // namespace
+}  // namespace test
+}  // namespace webrtc

@@ -8,15 +8,16 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_COMMON_AUDIO_VAD_VAD_UNITTEST_H
-#define WEBRTC_COMMON_AUDIO_VAD_VAD_UNITTEST_H
+#ifndef WEBRTC_COMMON_AUDIO_VAD_VAD_UNITTEST_H_
+#define WEBRTC_COMMON_AUDIO_VAD_VAD_UNITTEST_H_
 
 #include <stddef.h>  // size_t
 
 #include "webrtc/test/gtest.h"
 #include "webrtc/typedefs.h"
 
-namespace {
+namespace webrtc {
+namespace test {
 
 // Modes we support
 const int kModes[] = { 0, 1, 2, 3 };
@@ -32,7 +33,8 @@ const size_t kFrameLengths[] = { 80, 120, 160, 240, 320, 480, 640, 960,
                                  kMaxFrameLength };
 const size_t kFrameLengthsSize = sizeof(kFrameLengths) / sizeof(*kFrameLengths);
 
-}  // namespace
+}  // namespace test
+}  // namespace webrtc
 
 class VadTest : public ::testing::Test {
  protected:
@@ -44,4 +46,4 @@ class VadTest : public ::testing::Test {
   bool ValidRatesAndFrameLengths(int rate, size_t frame_length);
 };
 
-#endif  // WEBRTC_COMMON_AUDIO_VAD_VAD_UNITTEST_H
+#endif  // WEBRTC_COMMON_AUDIO_VAD_VAD_UNITTEST_H_

@@ -675,7 +675,7 @@ MixerParticipant::AudioFrameInfo Channel::GetAudioFrameWithMuted(
   // Output volume scaling
   if (output_gain < 0.99f || output_gain > 1.01f) {
     // TODO(solenberg): Combine with mute state - this can cause clicks!
-    AudioFrameOperations::ScaleWithSat(output_gain, *audioFrame);
+    AudioFrameOperations::ScaleWithSat(output_gain, audioFrame);
   }
 
   // Mix decoded PCM output with file if file mixing is enabled