Delete modules/video_processing

Reasons:
1) It is not used by `PeerConnection` (only in tests)
2) We have no plans to use it
3) The code has been functionally untouched for many years

Bug: b/249972434
Change-Id: I1d30edd34231f25d86e8495ff71f1786ba2b0a1c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/277445
Commit-Queue: Rasmus Brandt <brandtr@webrtc.org>
Reviewed-by: Tomas Gunnarsson <tommi@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#38260}
This commit is contained in:
Rasmus Brandt 2022-09-30 10:30:13 +02:00 committed by WebRTC LUCI CQ
parent 9a92b8a546
commit 48912451d4
21 changed files with 0 additions and 1738 deletions

View file

@ -73,9 +73,6 @@
'video_coding': {
'filepath': 'modules/video_coding/.*',
},
'video_processing': {
'filepath': 'modules/video_processing/.*',
},
'bitrate_controller': {
'filepath': 'modules/bitrate_controller/.*'
},
@ -161,9 +158,6 @@
'stefan@webrtc.org',
'video-team@agora.io',
'zhengzhonghou@agora.io'],
'video_processing': ['stefan@webrtc.org',
'video-team@agora.io',
'zhengzhonghou@agora.io'],
'bitrate_controller': ['mflodman@webrtc.org',
'stefan@webrtc.org',
'zhuangzesen@agora.io'],

View file

@ -21,7 +21,6 @@ group("modules") {
"rtp_rtcp",
"utility",
"video_coding",
"video_processing",
]
if (rtc_desktop_capture_supported) {
@ -230,7 +229,6 @@ if (rtc_include_tests && !build_with_chromium) {
"rtp_rtcp:rtp_rtcp_unittests",
"video_coding:video_coding_unittests",
"video_coding/timing:timing_unittests",
"video_processing:video_processing_unittests",
]
if (rtc_desktop_capture_supported) {

View file

@ -1,105 +0,0 @@
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../webrtc.gni")
# The SSE2 kernels are only built for x86/x64 targets.
build_video_processing_sse2 = current_cpu == "x86" || current_cpu == "x64"

# Video denoiser with runtime-dispatched scalar/SSE2/NEON filter kernels.
rtc_library("video_processing") {
  visibility = [ "*" ]
  sources = [
    "util/denoiser_filter.cc",
    "util/denoiser_filter_c.cc",
    "util/denoiser_filter_c.h",
    "util/noise_estimation.cc",
    "util/noise_estimation.h",
    "util/skin_detection.cc",
    "util/skin_detection.h",
    "video_denoiser.cc",
    "video_denoiser.h",
  ]
  deps = [
    ":denoiser_filter",
    "../../api:scoped_refptr",
    "../../api/video:video_frame",
    "../../api/video:video_rtp_headers",
    "../../common_audio",
    "../../common_video",
    "../../rtc_base:checks",
    "../../rtc_base/system:arch",
    "../../system_wrappers",
    "//third_party/libyuv",
  ]
  # SIMD variants live in separate targets so that each can be compiled with
  # its architecture-specific flags (-msse2 / -mfpu=neon).
  if (build_video_processing_sse2) {
    deps += [ ":video_processing_sse2" ]
  }
  if (rtc_build_with_neon) {
    deps += [ ":video_processing_neon" ]
  }
}

rtc_source_set("denoiser_filter") {
  # Target that only exists to avoid cyclic dependency errors for the SSE2 and
  # Neon implementations below.
  sources = [ "util/denoiser_filter.h" ]
}

if (build_video_processing_sse2) {
  rtc_library("video_processing_sse2") {
    sources = [
      "util/denoiser_filter_sse2.cc",
      "util/denoiser_filter_sse2.h",
    ]
    deps = [
      ":denoiser_filter",
      "../../system_wrappers",
    ]
    # MSVC enables SSE2 by default; gcc/clang need the explicit flag.
    if (is_posix || is_fuchsia) {
      cflags = [ "-msse2" ]
    }
  }
}

if (rtc_build_with_neon) {
  rtc_library("video_processing_neon") {
    sources = [
      "util/denoiser_filter_neon.cc",
      "util/denoiser_filter_neon.h",
    ]
    deps = [ ":denoiser_filter" ]
    # On 32-bit ARM, NEON must be requested explicitly; arm64 always has it.
    if (current_cpu != "arm64") {
      suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
      cflags = [ "-mfpu=neon" ]
    }
  }
}

if (rtc_include_tests) {
  # Unit tests comparing the C kernels against the SIMD kernels.
  rtc_library("video_processing_unittests") {
    testonly = true
    sources = [ "test/denoiser_test.cc" ]
    deps = [
      ":denoiser_filter",
      ":video_processing",
      "../../api:scoped_refptr",
      "../../api/video:video_frame",
      "../../api/video:video_rtp_headers",
      "../../common_video",
      "../../test:fileutils",
      "../../test:frame_utils",
      "../../test:test_support",
      "../../test:video_test_common",
    ]
  }
}

View file

@ -1,6 +0,0 @@
include_rules = [
"+common_audio",
"+common_video",
"+system_wrappers",
"+third_party/libyuv",
]

View file

@ -1,2 +0,0 @@
stefan@webrtc.org
marpan@webrtc.org

View file

@ -1,148 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <memory>
#include <string>
#include "api/scoped_refptr.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame_buffer.h"
#include "modules/video_processing/util/denoiser_filter.h"
#include "modules/video_processing/util/skin_detection.h"
#include "modules/video_processing/video_denoiser.h"
#include "test/frame_utils.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"
namespace webrtc {
// Checks that both the C and the SIMD (SSE2/NEON, when available) filters
// compute the same Variance16x8 value as a direct reference computation on a
// synthetic ramp block.
TEST(VideoDenoiserTest, Variance) {
  // `false` forces the plain C kernel; `true` enables runtime CPU detection
  // which may pick SSE2/NEON (and falls back to C otherwise).
  std::unique_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false, nullptr));
  std::unique_ptr<DenoiserFilter> df_sse_neon(
      DenoiserFilter::Create(true, nullptr));
  uint8_t src[16 * 16], dst[16 * 16];
  uint32_t sum = 0, sse = 0, var;
  // Fill the source with a ramp: src[i][j] = i * 16 + j.
  for (int i = 0; i < 16; ++i) {
    for (int j = 0; j < 16; ++j) {
      src[i * 16 + j] = i * 16 + j;
    }
  }
  // Compute the 16x8 variance of the 16x16 block.
  // Variance16x8 doubles the stride internally (samples every other row),
  // hence the `i * 32` term below.
  for (int i = 0; i < 8; ++i) {
    for (int j = 0; j < 16; ++j) {
      sum += (i * 32 + j);
      sse += (i * 32 + j) * (i * 32 + j);
    }
  }
  // var = sum(x^2) - (sum(x))^2 / 128 over the 128 sampled pixels.
  var = sse - ((sum * sum) >> 7);
  // `dst` is all zeros, so the difference equals `src` itself.
  memset(dst, 0, 16 * 16);
  EXPECT_EQ(var, df_c->Variance16x8(src, 16, dst, 16, &sse));
  EXPECT_EQ(var, df_sse_neon->Variance16x8(src, 16, dst, 16, &sse));
}
// Verifies that the C and SIMD MbDenoise kernels produce bit-identical output
// for inputs landing in each adjustment level, and that both decide to copy
// (COPY_BLOCK) when the per-pixel differences are large.
TEST(VideoDenoiserTest, MbDenoise) {
  std::unique_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false, nullptr));
  std::unique_ptr<DenoiserFilter> df_sse_neon(
      DenoiserFilter::Create(true, nullptr));
  uint8_t running_src[16 * 16], src[16 * 16];
  uint8_t dst[16 * 16], dst_sse_neon[16 * 16];
  // Test case: `diff` <= |3 + shift_inc1| — small differences, the denoiser
  // keeps the running-average pixel.
  for (int i = 0; i < 16; ++i) {
    for (int j = 0; j < 16; ++j) {
      running_src[i * 16 + j] = i * 11 + j;
      src[i * 16 + j] = i * 11 + j + 2;
    }
  }
  memset(dst, 0, 16 * 16);
  df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
  memset(dst_sse_neon, 0, 16 * 16);
  df_sse_neon->MbDenoise(running_src, 16, dst_sse_neon, 16, src, 16, 0, 1);
  // C and SIMD must agree byte for byte.
  EXPECT_EQ(0, memcmp(dst, dst_sse_neon, 16 * 16));
  // Test case: `diff` >= |4 + shift_inc1| — level-1 adjustment range.
  for (int i = 0; i < 16; ++i) {
    for (int j = 0; j < 16; ++j) {
      running_src[i * 16 + j] = i * 11 + j;
      src[i * 16 + j] = i * 11 + j + 5;
    }
  }
  memset(dst, 0, 16 * 16);
  df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
  memset(dst_sse_neon, 0, 16 * 16);
  df_sse_neon->MbDenoise(running_src, 16, dst_sse_neon, 16, src, 16, 0, 1);
  EXPECT_EQ(0, memcmp(dst, dst_sse_neon, 16 * 16));
  // Test case: `diff` >= 8 — level-2 adjustment range.
  for (int i = 0; i < 16; ++i) {
    for (int j = 0; j < 16; ++j) {
      running_src[i * 16 + j] = i * 11 + j;
      src[i * 16 + j] = i * 11 + j + 8;
    }
  }
  memset(dst, 0, 16 * 16);
  df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
  memset(dst_sse_neon, 0, 16 * 16);
  df_sse_neon->MbDenoise(running_src, 16, dst_sse_neon, 16, src, 16, 0, 1);
  EXPECT_EQ(0, memcmp(dst, dst_sse_neon, 16 * 16));
  // Test case: `diff` > 15 — accumulated adjustment exceeds the threshold,
  // so both implementations must refuse to filter.
  for (int i = 0; i < 16; ++i) {
    for (int j = 0; j < 16; ++j) {
      running_src[i * 16 + j] = i * 11 + j;
      src[i * 16 + j] = i * 11 + j + 16;
    }
  }
  memset(dst, 0, 16 * 16);
  DenoiserDecision decision =
      df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
  EXPECT_EQ(COPY_BLOCK, decision);
  decision = df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
  EXPECT_EQ(COPY_BLOCK, decision);
}
// End-to-end check on real video: denoise every frame of foreman_cif with
// both the C denoiser and the SSE/NEON denoiser, and require the two outputs
// to be identical frame by frame.
TEST(VideoDenoiserTest, Denoiser) {
  const int kWidth = 352;
  const int kHeight = 288;
  const std::string video_file =
      webrtc::test::ResourcePath("foreman_cif", "yuv");
  FILE* source_file = fopen(video_file.c_str(), "rb");
  ASSERT_TRUE(source_file != nullptr)
      << "Cannot open source file: " << video_file;
  // Create pure C denoiser.
  VideoDenoiser denoiser_c(false);
  // Create SSE or NEON denoiser.
  VideoDenoiser denoiser_sse_neon(true);
  for (;;) {
    // ReadI420Buffer returns null once the file is exhausted.
    rtc::scoped_refptr<I420BufferInterface> video_frame_buffer(
        test::ReadI420Buffer(kWidth, kHeight, source_file));
    if (!video_frame_buffer)
      break;
    rtc::scoped_refptr<I420BufferInterface> denoised_frame_c(
        denoiser_c.DenoiseFrame(video_frame_buffer, false));
    rtc::scoped_refptr<I420BufferInterface> denoised_frame_sse_neon(
        denoiser_sse_neon.DenoiseFrame(video_frame_buffer, false));
    // Denoising results should be the same for C and SSE/NEON denoiser.
    ASSERT_TRUE(
        test::FrameBufsEqual(denoised_frame_c, denoised_frame_sse_neon));
  }
  // Capture the EOF state before closing so the ASSERT below cannot leak the
  // handle by returning early (fix: `source_file` was previously never
  // fclose()d).
  const int reached_eof = feof(source_file);
  fclose(source_file);
  ASSERT_NE(0, reached_eof) << "Error reading source file";
}
} // namespace webrtc

View file

@ -1,65 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_processing/util/denoiser_filter.h"
#include "modules/video_processing/util/denoiser_filter_c.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/arch.h"
#include "system_wrappers/include/cpu_features_wrapper.h"
#if defined(WEBRTC_ARCH_X86_FAMILY)
#include "modules/video_processing/util/denoiser_filter_sse2.h"
#elif defined(WEBRTC_HAS_NEON)
#include "modules/video_processing/util/denoiser_filter_neon.h"
#endif
namespace webrtc {
// Motion magnitude at or below which MbDenoise treats a block as low-motion
// and applies stronger adjustments.
const int kMotionMagnitudeThreshold = 8 * 3;
// Limits on the absolute sum of per-block adjustments before MbDenoise gives
// up and returns COPY_BLOCK. The "High" value is used when the caller asks
// for increased denoising.
const int kSumDiffThreshold = 96;
const int kSumDiffThresholdHigh = 448;

// Factory for the per-macroblock filter kernel.
// `runtime_cpu_detection`: when false, always returns the plain C filter.
// When true, selects SSE2/NEON based on compile-time macros and, on x86
// builds without __SSE2__, a runtime CPU check.
// `cpu_type` (optional out): set to CPU_NEON only when the NEON filter is
// returned; CPU_NOT_NEON otherwise.
std::unique_ptr<DenoiserFilter> DenoiserFilter::Create(
    bool runtime_cpu_detection,
    CpuType* cpu_type) {
  std::unique_ptr<DenoiserFilter> filter;
  if (cpu_type != nullptr)
    *cpu_type = CPU_NOT_NEON;
  if (runtime_cpu_detection) {
// If we know the minimum architecture at compile time, avoid CPU detection.
#if defined(WEBRTC_ARCH_X86_FAMILY)
#if defined(__SSE2__)
    filter.reset(new DenoiserFilterSSE2());
#else
    // x86 CPU detection required.
    if (GetCPUInfo(kSSE2)) {
      filter.reset(new DenoiserFilterSSE2());
    } else {
      filter.reset(new DenoiserFilterC());
    }
#endif
#elif defined(WEBRTC_HAS_NEON)
    filter.reset(new DenoiserFilterNEON());
    if (cpu_type != nullptr)
      *cpu_type = CPU_NEON;
#else
    filter.reset(new DenoiserFilterC());
#endif
  } else {
    filter.reset(new DenoiserFilterC());
  }
  // Every branch above assigns a filter, so this can never fire; kept as a
  // guard against future edits.
  RTC_DCHECK(filter.get() != nullptr);
  return filter;
}
} // namespace webrtc

View file

@ -1,50 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
#define MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
#include <stdint.h>
#include <memory>
namespace webrtc {
// Shared tuning constants, defined in denoiser_filter.cc.
extern const int kMotionMagnitudeThreshold;
extern const int kSumDiffThreshold;
extern const int kSumDiffThresholdHigh;
// Outcome of MbDenoise: either the caller should copy the original block, or
// the filtered block written to `running_avg_y` should be used.
enum DenoiserDecision { COPY_BLOCK, FILTER_BLOCK };
// Reported by Create() so callers can tell whether the NEON path was picked.
enum CpuType { CPU_NEON, CPU_NOT_NEON };
// Interface for the per-macroblock denoiser kernels (C / SSE2 / NEON
// implementations exist).
class DenoiserFilter {
 public:
  // Selects and constructs an implementation; see denoiser_filter.cc.
  // `cpu_type` may be null; when non-null it receives CPU_NEON iff the NEON
  // filter was chosen.
  static std::unique_ptr<DenoiserFilter> Create(bool runtime_cpu_detection,
                                                CpuType* cpu_type);
  virtual ~DenoiserFilter() {}
  // Returns the variance of a 16x8 region of the difference between `a` and
  // `b`. Implementations double the strides internally, i.e. they sample
  // every other row of a 16x16 block. `sse` receives the sum of squared
  // differences.
  virtual uint32_t Variance16x8(const uint8_t* a,
                                int a_stride,
                                const uint8_t* b,
                                int b_stride,
                                unsigned int* sse) = 0;
  // Denoises one 16x16 macroblock `sig` against the motion-compensated
  // running average, writing the result to `running_avg_y`. Returns
  // COPY_BLOCK if the accumulated adjustment was too large to trust.
  virtual DenoiserDecision MbDenoise(const uint8_t* mc_running_avg_y,
                                     int mc_avg_y_stride,
                                     uint8_t* running_avg_y,
                                     int avg_y_stride,
                                     const uint8_t* sig,
                                     int sig_stride,
                                     uint8_t motion_magnitude,
                                     int increase_denoising) = 0;
};
} // namespace webrtc
#endif // MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_

View file

@ -1,126 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_processing/util/denoiser_filter_c.h"
#include <stdlib.h>
#include <string.h>
namespace webrtc {
// Reference (scalar) implementation: variance of a 16x8 region sampled from
// every other row of a 16x16 block (strides are doubled before iterating).
// `sse` receives the raw sum of squared differences; the return value is the
// variance, i.e. sse - (sum^2 / 128).
uint32_t DenoiserFilterC::Variance16x8(const uint8_t* a,
                                       int a_stride,
                                       const uint8_t* b,
                                       int b_stride,
                                       uint32_t* sse) {
  const int step_a = a_stride * 2;  // Skip every other row.
  const int step_b = b_stride * 2;
  int64_t diff_sum = 0;
  uint32_t sq_sum = 0;
  for (int row = 0; row < 8; ++row) {
    const uint8_t* pa = a + row * step_a;
    const uint8_t* pb = b + row * step_b;
    for (int col = 0; col < 16; ++col) {
      const int d = pa[col] - pb[col];
      diff_sum += d;
      sq_sum += d * d;
    }
  }
  *sse = sq_sum;
  // 128 samples, so >> 7 divides the squared sum by the sample count.
  return *sse - ((diff_sum * diff_sum) >> 7);
}
// Filters one 16x16 luma macroblock against its motion-compensated running
// average. Pixels with a small difference keep the averaged value; larger
// differences are nudged toward the average by a level-dependent adjustment.
// Returns COPY_BLOCK when the accumulated adjustment exceeds the threshold,
// telling the caller to use the unfiltered source block instead.
DenoiserDecision DenoiserFilterC::MbDenoise(const uint8_t* mc_running_avg_y,
                                            int mc_avg_y_stride,
                                            uint8_t* running_avg_y,
                                            int avg_y_stride,
                                            const uint8_t* sig,
                                            int sig_stride,
                                            uint8_t motion_magnitude,
                                            int increase_denoising) {
  int sum_diff_thresh = 0;
  int sum_diff = 0;
  // Adjustment magnitudes for absolute differences in the ranges
  // [4 + shift_inc1, 7], [8, 15] and [16, 255] respectively.
  int adj_val[3] = {3, 4, 6};
  int shift_inc1 = 0;
  int shift_inc2 = 1;
  // Per-column signed adjustment totals, summed at the end to decide whether
  // the block was altered too much to keep.
  int col_sum[16] = {0};
  // Low-motion blocks are denoised more aggressively.
  if (motion_magnitude <= kMotionMagnitudeThreshold) {
    if (increase_denoising) {
      shift_inc1 = 1;
      shift_inc2 = 2;
    }
    adj_val[0] += shift_inc2;
    adj_val[1] += shift_inc2;
    adj_val[2] += shift_inc2;
  }
  for (int r = 0; r < 16; ++r) {
    for (int c = 0; c < 16; ++c) {
      int diff = 0;
      int adjustment = 0;
      int absdiff = 0;
      diff = mc_running_avg_y[c] - sig[c];
      absdiff = abs(diff);
      // When `diff` <= |3 + shift_inc1|, use pixel value from
      // last denoised raw.
      if (absdiff <= 3 + shift_inc1) {
        running_avg_y[c] = mc_running_avg_y[c];
        col_sum[c] += diff;
      } else {
        // Pick the adjustment level for this difference magnitude.
        if (absdiff >= 4 + shift_inc1 && absdiff <= 7)
          adjustment = adj_val[0];
        else if (absdiff >= 8 && absdiff <= 15)
          adjustment = adj_val[1];
        else
          adjustment = adj_val[2];
        // Move the output toward the running average, clamped to [0, 255].
        if (diff > 0) {
          if ((sig[c] + adjustment) > 255)
            running_avg_y[c] = 255;
          else
            running_avg_y[c] = sig[c] + adjustment;
          col_sum[c] += adjustment;
        } else {
          if ((sig[c] - adjustment) < 0)
            running_avg_y[c] = 0;
          else
            running_avg_y[c] = sig[c] - adjustment;
          col_sum[c] -= adjustment;
        }
      }
    }
    // Update pointers for next iteration.
    sig += sig_stride;
    mc_running_avg_y += mc_avg_y_stride;
    running_avg_y += avg_y_stride;
  }
  // Saturate each column total so one extreme column cannot dominate the
  // copy/filter decision.
  for (int c = 0; c < 16; ++c) {
    if (col_sum[c] >= 128) {
      col_sum[c] = 127;
    }
    sum_diff += col_sum[c];
  }
  sum_diff_thresh =
      increase_denoising ? kSumDiffThresholdHigh : kSumDiffThreshold;
  // Too much total adjustment => the caller should copy the original block.
  if (abs(sum_diff) > sum_diff_thresh)
    return COPY_BLOCK;
  return FILTER_BLOCK;
}
} // namespace webrtc

View file

@ -1,40 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
#define MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
#include <stdint.h>
#include "modules/video_processing/util/denoiser_filter.h"
namespace webrtc {
// Portable scalar implementation of DenoiserFilter; the reference against
// which the SSE2 and NEON kernels are tested.
class DenoiserFilterC : public DenoiserFilter {
 public:
  DenoiserFilterC() {}
  uint32_t Variance16x8(const uint8_t* a,
                        int a_stride,
                        const uint8_t* b,
                        int b_stride,
                        unsigned int* sse) override;
  DenoiserDecision MbDenoise(const uint8_t* mc_running_avg_y,
                             int mc_avg_y_stride,
                             uint8_t* running_avg_y,
                             int avg_y_stride,
                             const uint8_t* sig,
                             int sig_stride,
                             uint8_t motion_magnitude,
                             int increase_denoising) override;
};
} // namespace webrtc
#endif // MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_

View file

@ -1,182 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_processing/util/denoiser_filter_neon.h"
#include <arm_neon.h>
namespace webrtc {
// NOTE(review): the NEON path uses 600 here while the C/SSE2 paths use
// kSumDiffThresholdHigh = 448 (denoiser_filter.cc) — so NEON and C can
// disagree on borderline blocks when increase_denoising is set. Confirm this
// divergence is intentional.
const int kSumDiffThresholdHighNeon = 600;

// Reduces the eight signed 16-bit lanes of `v_16x8` to one scalar sum.
static int HorizontalAddS16x8(const int16x8_t v_16x8) {
  const int32x4_t a = vpaddlq_s16(v_16x8);
  const int64x2_t b = vpaddlq_s32(a);
  const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
                               vreinterpret_s32_s64(vget_high_s64(b)));
  return vget_lane_s32(c, 0);
}

// Reduces the four signed 32-bit lanes of `v_32x4` to one scalar sum.
static int HorizontalAddS32x4(const int32x4_t v_32x4) {
  const int64x2_t b = vpaddlq_s32(v_32x4);
  const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
                               vreinterpret_s32_s64(vget_high_s64(b)));
  return vget_lane_s32(c, 0);
}

// Accumulates the sum and sum-of-squares of (a - b) over a w x h region.
// `w` must be a multiple of 8 (pixels are processed 8 at a time).
static void VarianceNeonW8(const uint8_t* a,
                           int a_stride,
                           const uint8_t* b,
                           int b_stride,
                           int w,
                           int h,
                           uint32_t* sse,
                           int64_t* sum) {
  int16x8_t v_sum = vdupq_n_s16(0);
  int32x4_t v_sse_lo = vdupq_n_s32(0);
  int32x4_t v_sse_hi = vdupq_n_s32(0);
  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < w; j += 8) {
      const uint8x8_t v_a = vld1_u8(&a[j]);
      const uint8x8_t v_b = vld1_u8(&b[j]);
      // Widen to 16 bits so the signed per-pixel difference is exact.
      const uint16x8_t v_diff = vsubl_u8(v_a, v_b);
      const int16x8_t sv_diff = vreinterpretq_s16_u16(v_diff);
      v_sum = vaddq_s16(v_sum, sv_diff);
      // Accumulate squared differences in two 32-bit lanesets.
      v_sse_lo =
          vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
      v_sse_hi =
          vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
    }
    a += a_stride;
    b += b_stride;
  }
  *sum = HorizontalAddS16x8(v_sum);
  *sse =
      static_cast<uint32_t>(HorizontalAddS32x4(vaddq_s32(v_sse_lo, v_sse_hi)));
}

// Variance of a 16x8 region; strides are doubled, so every other row of a
// 16x16 block is sampled (matches the C implementation).
uint32_t DenoiserFilterNEON::Variance16x8(const uint8_t* a,
                                          int a_stride,
                                          const uint8_t* b,
                                          int b_stride,
                                          uint32_t* sse) {
  int64_t sum = 0;
  VarianceNeonW8(a, a_stride << 1, b, b_stride << 1, 16, 8, sse, &sum);
  // 128 samples, so >> 7 divides the squared sum by the sample count.
  return *sse - ((sum * sum) >> 7);
}
// NEON implementation of the 16x16 macroblock denoiser. Processes a full
// 16-pixel row per iteration and mirrors the level logic of the C version.
DenoiserDecision DenoiserFilterNEON::MbDenoise(const uint8_t* mc_running_avg_y,
                                               int mc_running_avg_y_stride,
                                               uint8_t* running_avg_y,
                                               int running_avg_y_stride,
                                               const uint8_t* sig,
                                               int sig_stride,
                                               uint8_t motion_magnitude,
                                               int increase_denoising) {
  // If motion_magnitude is small, making the denoiser more aggressive by
  // increasing the adjustment for each level, level1 adjustment is
  // increased, the deltas stay the same.
  int shift_inc =
      (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
                                                                            : 0;
  int sum_diff_thresh = 0;
  const uint8x16_t v_level1_adjustment = vmovq_n_u8(
      (motion_magnitude <= kMotionMagnitudeThreshold) ? 4 + shift_inc : 3);
  const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
  const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
  const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
  const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
  const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
  int64x2_t v_sum_diff_total = vdupq_n_s64(0);
  // Go over lines.
  for (int r = 0; r < 16; ++r) {
    // Load inputs.
    const uint8x16_t v_sig = vld1q_u8(sig);
    const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
    // Calculate absolute difference and sign masks.
    const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
    const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
    const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
    // Figure out which level that put us in.
    const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
    const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
    const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
    // Calculate absolute adjustments for level 1, 2 and 3.
    const uint8x16_t v_level2_adjustment =
        vandq_u8(v_level2_mask, v_delta_level_1_and_2);
    const uint8x16_t v_level3_adjustment =
        vandq_u8(v_level3_mask, v_delta_level_2_and_3);
    const uint8x16_t v_level1and2_adjustment =
        vaddq_u8(v_level1_adjustment, v_level2_adjustment);
    const uint8x16_t v_level1and2and3_adjustment =
        vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
    // Figure adjustment absolute value by selecting between the absolute
    // difference if in level0 or the value for level 1, 2 and 3.
    const uint8x16_t v_abs_adjustment =
        vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
    // Calculate positive and negative adjustments. Apply them to the signal
    // and accumulate them. Adjustments are less than eight and the maximum
    // sum of them (7 * 16) can fit in a signed char.
    const uint8x16_t v_pos_adjustment =
        vandq_u8(v_diff_pos_mask, v_abs_adjustment);
    const uint8x16_t v_neg_adjustment =
        vandq_u8(v_diff_neg_mask, v_abs_adjustment);
    // Saturating add/subtract keeps the result in [0, 255].
    uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
    v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
    // Store results.
    vst1q_u8(running_avg_y, v_running_avg_y);
    // Sum all the accumulators to have the sum of all pixel differences
    // for this macroblock.
    {
      const int8x16_t v_sum_diff =
          vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
                    vreinterpretq_s8_u8(v_neg_adjustment));
      const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
      const int32x4_t fedc_ba98_7654_3210 =
          vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
      const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
      v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
    }
    // Update pointers for next iteration.
    sig += sig_stride;
    mc_running_avg_y += mc_running_avg_y_stride;
    running_avg_y += running_avg_y_stride;
  }
  // Too much adjustments => copy block.
  int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                          vget_low_s64(v_sum_diff_total));
  int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
  sum_diff_thresh =
      increase_denoising ? kSumDiffThresholdHighNeon : kSumDiffThreshold;
  if (sum_diff > sum_diff_thresh)
    return COPY_BLOCK;
  // Tell above level that block was filtered.
  // NOTE(review): these rewinds modify by-value parameter copies and have no
  // effect on the caller — effectively dead code, kept for upstream parity.
  running_avg_y -= running_avg_y_stride * 16;
  sig -= sig_stride * 16;
  return FILTER_BLOCK;
}
} // namespace webrtc

View file

@ -1,38 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
#define MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
#include "modules/video_processing/util/denoiser_filter.h"
namespace webrtc {
// NEON (ARM SIMD) implementation of DenoiserFilter; selected by
// DenoiserFilter::Create() when built with WEBRTC_HAS_NEON.
class DenoiserFilterNEON : public DenoiserFilter {
 public:
  DenoiserFilterNEON() {}
  uint32_t Variance16x8(const uint8_t* a,
                        int a_stride,
                        const uint8_t* b,
                        int b_stride,
                        unsigned int* sse) override;
  DenoiserDecision MbDenoise(const uint8_t* mc_running_avg_y,
                             int mc_avg_y_stride,
                             uint8_t* running_avg_y,
                             int avg_y_stride,
                             const uint8_t* sig,
                             int sig_stride,
                             uint8_t motion_magnitude,
                             int increase_denoising) override;
};
} // namespace webrtc
#endif // MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_

View file

@ -1,200 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_processing/util/denoiser_filter_sse2.h"
#include <emmintrin.h>
#include <stdlib.h>
#include <string.h>
namespace webrtc {
// Computes the sum and sum-of-squares of (src - ref) over one 8x8 block,
// two rows per iteration.
static void Get8x8varSse2(const uint8_t* src,
                          int src_stride,
                          const uint8_t* ref,
                          int ref_stride,
                          unsigned int* sse,
                          int* sum) {
  const __m128i zero = _mm_setzero_si128();
  __m128i vsum = _mm_setzero_si128();
  __m128i vsse = _mm_setzero_si128();
  for (int i = 0; i < 8; i += 2) {
    // Load 8 pixels of each row and widen to 16 bits before differencing.
    const __m128i src0 = _mm_unpacklo_epi8(
        _mm_loadl_epi64((const __m128i*)(src + i * src_stride)), zero);
    const __m128i ref0 = _mm_unpacklo_epi8(
        _mm_loadl_epi64((const __m128i*)(ref + i * ref_stride)), zero);
    const __m128i diff0 = _mm_sub_epi16(src0, ref0);
    const __m128i src1 = _mm_unpacklo_epi8(
        _mm_loadl_epi64((const __m128i*)(src + (i + 1) * src_stride)), zero);
    const __m128i ref1 = _mm_unpacklo_epi8(
        _mm_loadl_epi64((const __m128i*)(ref + (i + 1) * ref_stride)), zero);
    const __m128i diff1 = _mm_sub_epi16(src1, ref1);
    vsum = _mm_add_epi16(vsum, diff0);
    vsum = _mm_add_epi16(vsum, diff1);
    vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff0, diff0));
    vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff1, diff1));
  }
  // sum: horizontal reduction of the eight 16-bit lanes.
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 2));
  *sum = static_cast<int16_t>(_mm_extract_epi16(vsum, 0));
  // sse: horizontal reduction of the four 32-bit lanes.
  vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 8));
  vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 4));
  *sse = _mm_cvtsi128_si32(vsse);
}

// Tiles Get8x8varSse2 over a w x h region in block_size steps, accumulating
// the total sum and sum-of-squares.
static void VarianceSSE2(const unsigned char* src,
                         int src_stride,
                         const unsigned char* ref,
                         int ref_stride,
                         int w,
                         int h,
                         uint32_t* sse,
                         int64_t* sum,
                         int block_size) {
  *sse = 0;
  *sum = 0;
  for (int i = 0; i < h; i += block_size) {
    for (int j = 0; j < w; j += block_size) {
      uint32_t sse0 = 0;
      int32_t sum0 = 0;
      Get8x8varSse2(src + src_stride * i + j, src_stride,
                    ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
      *sse += sse0;
      *sum += sum0;
    }
  }
}

// Compute the sum of all pixel differences of this MB.
// `acc_diff` holds 16 signed 8-bit per-column accumulators.
static uint32_t AbsSumDiff16x1(__m128i acc_diff) {
  const __m128i k_1 = _mm_set1_epi16(1);
  // Sign-extend the 8-bit lanes to 16 bits, then reduce.
  const __m128i acc_diff_lo =
      _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8);
  const __m128i acc_diff_hi =
      _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8);
  const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
  const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
  const __m128i hgfe_dcba =
      _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
  const __m128i hgfedcba =
      _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
  unsigned int sum_diff = abs(_mm_cvtsi128_si32(hgfedcba));
  return sum_diff;
}

// Variance of a 16x8 region; strides are doubled, so every other row of a
// 16x16 block is sampled (matches the C implementation).
uint32_t DenoiserFilterSSE2::Variance16x8(const uint8_t* src,
                                          int src_stride,
                                          const uint8_t* ref,
                                          int ref_stride,
                                          uint32_t* sse) {
  int64_t sum = 0;
  VarianceSSE2(src, src_stride << 1, ref, ref_stride << 1, 16, 8, sse, &sum, 8);
  // 128 samples, so >> 7 divides the squared sum by the sample count.
  return *sse - ((sum * sum) >> 7);
}
// SSE2 implementation of the 16x16 macroblock denoiser. Processes a full
// 16-pixel row per iteration and mirrors the level logic of the C version.
DenoiserDecision DenoiserFilterSSE2::MbDenoise(const uint8_t* mc_running_avg_y,
                                               int mc_avg_y_stride,
                                               uint8_t* running_avg_y,
                                               int avg_y_stride,
                                               const uint8_t* sig,
                                               int sig_stride,
                                               uint8_t motion_magnitude,
                                               int increase_denoising) {
  DenoiserDecision decision = FILTER_BLOCK;
  unsigned int sum_diff_thresh = 0;
  int shift_inc =
      (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
                                                                            : 0;
  __m128i acc_diff = _mm_setzero_si128();
  const __m128i k_0 = _mm_setzero_si128();
  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
  const __m128i k_8 = _mm_set1_epi8(8);
  const __m128i k_16 = _mm_set1_epi8(16);
  // Modify each level's adjustment according to motion_magnitude.
  const __m128i l3 = _mm_set1_epi8(
      (motion_magnitude <= kMotionMagnitudeThreshold) ? 7 + shift_inc : 6);
  // Difference between level 3 and level 2 is 2.
  const __m128i l32 = _mm_set1_epi8(2);
  // Difference between level 2 and level 1 is 1.
  const __m128i l21 = _mm_set1_epi8(1);
  for (int r = 0; r < 16; ++r) {
    // Calculate differences.
    const __m128i v_sig =
        _mm_loadu_si128(reinterpret_cast<const __m128i*>(&sig[0]));
    const __m128i v_mc_running_avg_y =
        _mm_loadu_si128(reinterpret_cast<const __m128i*>(&mc_running_avg_y[0]));
    __m128i v_running_avg_y;
    const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
    const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
    // Obtain the sign. FF if diff is negative.
    const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
    // Clamp absolute difference to 16 to be used to get mask. Doing this
    // allows us to use _mm_cmpgt_epi8, which operates on signed byte.
    const __m128i clamped_absdiff =
        _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16);
    // Get masks for l2 l1 and l0 adjustments.
    const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff);
    const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff);
    const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff);
    // Get adjustments for l2, l1, and l0.
    __m128i adj2 = _mm_and_si128(mask2, l32);
    const __m128i adj1 = _mm_and_si128(mask1, l21);
    const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
    __m128i adj, padj, nadj;
    // Combine the adjustments and get absolute adjustments.
    adj2 = _mm_add_epi8(adj2, adj1);
    adj = _mm_sub_epi8(l3, adj2);
    adj = _mm_andnot_si128(mask0, adj);
    adj = _mm_or_si128(adj, adj0);
    // Restore the sign and get positive and negative adjustments.
    padj = _mm_andnot_si128(diff_sign, adj);
    nadj = _mm_and_si128(diff_sign, adj);
    // Calculate filtered value. Saturating ops keep the result in [0, 255].
    v_running_avg_y = _mm_adds_epu8(v_sig, padj);
    v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
    _mm_storeu_si128(reinterpret_cast<__m128i*>(running_avg_y),
                     v_running_avg_y);
    // Adjustments <=7, and each element in acc_diff can fit in signed
    // char.
    acc_diff = _mm_adds_epi8(acc_diff, padj);
    acc_diff = _mm_subs_epi8(acc_diff, nadj);
    // Update pointers for next iteration.
    sig += sig_stride;
    mc_running_avg_y += mc_avg_y_stride;
    running_avg_y += avg_y_stride;
  }
  // Compute the sum of all pixel differences of this MB.
  unsigned int abs_sum_diff = AbsSumDiff16x1(acc_diff);
  sum_diff_thresh =
      increase_denoising ? kSumDiffThresholdHigh : kSumDiffThreshold;
  // Too much total adjustment => the caller should copy the original block.
  if (abs_sum_diff > sum_diff_thresh)
    decision = COPY_BLOCK;
  return decision;
}
} // namespace webrtc

View file

@ -1,40 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
#define MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
#include <stdint.h>
#include "modules/video_processing/util/denoiser_filter.h"
namespace webrtc {
// SSE2 (x86 SIMD) implementation of DenoiserFilter; selected by
// DenoiserFilter::Create() on x86/x64 builds.
class DenoiserFilterSSE2 : public DenoiserFilter {
 public:
  DenoiserFilterSSE2() {}
  uint32_t Variance16x8(const uint8_t* a,
                        int a_stride,
                        const uint8_t* b,
                        int b_stride,
                        unsigned int* sse) override;
  DenoiserDecision MbDenoise(const uint8_t* mc_running_avg_y,
                             int mc_avg_y_stride,
                             uint8_t* running_avg_y,
                             int avg_y_stride,
                             const uint8_t* sig,
                             int sig_stride,
                             uint8_t motion_magnitude,
                             int increase_denoising) override;
};
} // namespace webrtc
#endif // MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_

View file

@ -1,113 +0,0 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_processing/util/noise_estimation.h"
#if DISPLAYNEON
#include <android/log.h>
#endif
namespace webrtc {
void NoiseEstimation::Init(int width, int height, CpuType cpu_type) {
int mb_cols = width >> 4;
int mb_rows = height >> 4;
consec_low_var_.reset(new uint32_t[mb_cols * mb_rows]());
width_ = width;
height_ = height;
mb_cols_ = width_ >> 4;
mb_rows_ = height_ >> 4;
cpu_type_ = cpu_type;
}
// Accumulates noise statistics from one sampled macroblock. Every call
// counts toward the static-block total; only blocks that have stayed
// low-variance long enough and whose average luma lies in the mid range
// contribute to the noise-variance sum.
void NoiseEstimation::GetNoise(int mb_index, uint32_t var, uint32_t luma) {
  ++consec_low_var_[mb_index];
  ++num_static_block_;
  const uint32_t avg_luma = luma >> 6;
  // Reject blocks that became static only recently or are too dark/bright.
  if (consec_low_var_[mb_index] < kConsecLowVarFrame ||
      avg_luma >= kAverageLumaMax || avg_luma <= kAverageLumaMin) {
    return;
  }
  // Normalize the variance by the average luma value; this gives more
  // weight to darker blocks. Clamp each contribution so a single outlier
  // cannot dominate the estimate.
  const int nor_var = var / (luma >> 10);
  noise_var_ +=
      nor_var > kBlockSelectionVarMax ? kBlockSelectionVarMax : nor_var;
  ++num_noisy_block_;
}
// Marks the block as moving again: a block must stay low-variance for
// kConsecLowVarFrame consecutive samples before GetNoise() will let it
// contribute noise data, so this restarts that countdown.
void NoiseEstimation::ResetConsecLowVar(int mb_index) {
  consec_low_var_[mb_index] = 0;
}
// Folds the statistics gathered by GetNoise() over the last frame into the
// running noise estimate `noise_var_accum_`, then resets the per-frame
// counters. If too few static blocks were sampled (camera motion or many
// moving objects), the whole estimate is discarded and restarted.
void NoiseEstimation::UpdateNoiseLevel() {
  // TODO(jackychen): Tune a threshold for numb_noisy_block > T to make the
  // condition more reasonable.
  // Not enough samples implies motion of the camera or too many moving
  // objects in the frame.
  if (num_static_block_ <
          (0.65 * mb_cols_ * mb_rows_ / NOISE_SUBSAMPLE_INTERVAL) ||
      !num_noisy_block_) {
#if DISPLAY
    printf("Not enough samples. %d \n", num_static_block_);
#elif DISPLAYNEON
    __android_log_print(ANDROID_LOG_DEBUG, "DISPLAY",
                        "Not enough samples. %d \n", num_static_block_);
#endif
    // Discard the running estimate entirely and start over.
    noise_var_ = 0;
    noise_var_accum_ = 0;
    num_noisy_block_ = 0;
    num_static_block_ = 0;
    return;
  } else {
#if DISPLAY
    // Note: percent_static_block_ is printed before being recomputed below,
    // so this is the previous interval's value.
    printf("%d %d fraction = %.3f\n", num_static_block_,
           mb_cols_ * mb_rows_ / NOISE_SUBSAMPLE_INTERVAL,
           percent_static_block_);
#elif DISPLAYNEON
    __android_log_print(ANDROID_LOG_DEBUG, "DISPLAY", "%d %d fraction = %.3f\n",
                        num_static_block_,
                        mb_cols_ * mb_rows_ / NOISE_SUBSAMPLE_INTERVAL,
                        percent_static_block_);
#endif
    // Normalized by the number of noisy blocks.
    noise_var_ /= num_noisy_block_;
    // Get the percentage of static blocks.
    percent_static_block_ = static_cast<double>(num_static_block_) /
                            (mb_cols_ * mb_rows_ / NOISE_SUBSAMPLE_INTERVAL);
    num_noisy_block_ = 0;
    num_static_block_ = 0;
  }
  // For the first frame just update the value with current noise_var_,
  // otherwise, use the averaging window (IIR with a 15/16 decay).
  if (noise_var_accum_ == 0) {
    noise_var_accum_ = noise_var_;
  } else {
    noise_var_accum_ = (noise_var_accum_ * 15 + noise_var_) / 16;
  }
#if DISPLAY
  printf("noise_var_accum_ = %.1f, noise_var_ = %d.\n", noise_var_accum_,
         noise_var_);
#elif DISPLAYNEON
  __android_log_print(ANDROID_LOG_DEBUG, "DISPLAY",
                      "noise_var_accum_ = %.1f, noise_var_ = %d.\n",
                      noise_var_accum_, noise_var_);
#endif
  // Reset noise_var_ for the next frame.
  noise_var_ = 0;
}
// Refreshes the running estimate and classifies the stream: returns 1 for
// high noise, 0 for low noise. The threshold depends on the CPU class that
// was detected at Init() time.
uint8_t NoiseEstimation::GetNoiseLevel() {
  const int noise_thr = cpu_type_ ? kNoiseThreshold : kNoiseThresholdNeon;
  UpdateNoiseLevel();
  return noise_var_accum_ > noise_thr ? 1 : 0;
}
} // namespace webrtc

View file

@ -1,63 +0,0 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_PROCESSING_UTIL_NOISE_ESTIMATION_H_
#define MODULES_VIDEO_PROCESSING_UTIL_NOISE_ESTIMATION_H_
#include <cstdint>
#include <memory>
#include "modules/video_processing/util/denoiser_filter.h"
namespace webrtc {
#define DISPLAY 0      // Rectangle diagnostics
#define DISPLAYNEON 0  // Rectangle diagnostics on NEON
const int kNoiseThreshold = 150;
const int kNoiseThresholdNeon = 70;
const int kConsecLowVarFrame = 6;
const int kAverageLumaMin = 20;
const int kAverageLumaMax = 220;
const int kBlockSelectionVarMax = kNoiseThreshold << 1;
// TODO(jackychen): To test different sampling strategy.
// Collect noise data every NOISE_SUBSAMPLE_INTERVAL blocks.
#define NOISE_SUBSAMPLE_INTERVAL 41
// Estimates the camera noise level of a video stream from per-macroblock
// variance samples. Usage: Init() once per resolution, feed samples via
// GetNoise()/ResetConsecLowVar(), and poll GetNoiseLevel() per frame.
class NoiseEstimation {
 public:
  // Sets the frame geometry and CPU class, and clears all statistics.
  void Init(int width, int height, CpuType cpu_type);
  // Collect noise data from one qualified block.
  void GetNoise(int mb_index, uint32_t var, uint32_t luma);
  // Reset the counter for consecutive low-var blocks.
  void ResetConsecLowVar(int mb_index);
  // Update noise level for current frame.
  void UpdateNoiseLevel();
  // 0: low noise, 1: high noise
  uint8_t GetNoiseLevel();

 private:
  // Bug fix: members are now default-initialized. Previously they had no
  // initializers and Init() only set the geometry/CPU fields, so the
  // counters and accumulators below were read/incremented while still
  // indeterminate (e.g. num_static_block_++ in GetNoise(), and the
  // noise_var_accum_ == 0 first-frame check in UpdateNoiseLevel()).
  int width_ = 0;
  int height_ = 0;
  int mb_rows_ = 0;
  int mb_cols_ = 0;
  int num_noisy_block_ = 0;
  int num_static_block_ = 0;
  // Set by Init(); selects the noise threshold in GetNoiseLevel().
  CpuType cpu_type_;
  uint32_t noise_var_ = 0;
  double noise_var_accum_ = 0.0;
  double percent_static_block_ = 0.0;
  // Per-macroblock count of consecutive low-variance observations.
  std::unique_ptr<uint32_t[]> consec_low_var_;
};
}  // namespace webrtc
#endif  // MODULES_VIDEO_PROCESSING_UTIL_NOISE_ESTIMATION_H_

View file

@ -1,96 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_processing/util/skin_detection.h"
namespace webrtc {
// Fixed-point skin color model parameters.
// Fixed-point skin color model parameters.
static const int skin_mean[5][2] = {{7463, 9614},
                                    {6400, 10240},
                                    {7040, 10240},
                                    {8320, 9280},
                                    {6800, 9614}};
static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157};  // q16
static const int skin_threshold[6] = {1570636, 1400000, 800000,
                                      800000,  800000,  800000};  // q18
// Thresholds on luminance.
static const int y_low = 40;
static const int y_high = 220;
// Evaluates the Mahalanobis distance measure for the input CbCr values
// against skin model `idx`. Smaller means closer to the model mean.
static int EvaluateSkinColorDifference(int cb, int cr, int idx) {
  // Center the chroma samples (promoted to q6) on the model mean.
  const int dcb_q6 = (cb << 6) - skin_mean[idx][0];
  const int dcr_q6 = (cr << 6) - skin_mean[idx][1];
  // Squared and cross terms are q12; round and shift down to q2.
  const int rounding = 1 << 9;
  const int cb_diff_q2 = (dcb_q6 * dcb_q6 + rounding) >> 10;
  const int cbcr_diff_q2 = (dcb_q6 * dcr_q6 + rounding) >> 10;
  const int cr_diff_q2 = (dcr_q6 * dcr_q6 + rounding) >> 10;
  // Weight by the (symmetric) inverse covariance in q16.
  return skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
         skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
}
// Classifies one (y, cb, cr) sample: returns 1 for skin, 0 otherwise.
// MODEL_MODE 0 uses a single skin model; otherwise all five models are
// consulted with early-exit heuristics.
static int SkinPixel(const uint8_t y, const uint8_t cb, const uint8_t cr) {
  // Reject samples outside the valid luminance band.
  if (y < y_low || y > y_high)
    return 0;
  if (MODEL_MODE == 0)
    return EvaluateSkinColorDifference(cb, cr, 0) < skin_threshold[0];
  // Exit on grey.
  if (cb == 128 && cr == 128)
    return 0;
  // Exit on very strong cb.
  if (cb > 150 && cr < 110)
    return 0;
  // Exit on (another) low luminance threshold if either color is high.
  if (y < 50 && (cb > 140 || cr > 140))
    return 0;
  for (int model = 0; model < 5; ++model) {
    const int diff = EvaluateSkinColorDifference(cb, cr, model);
    if (diff < skin_threshold[model + 1])
      return 1;
    // Exit if the difference is much larger than the threshold.
    if (diff > (skin_threshold[model + 1] << 3))
      return 0;
  }
  return 0;
}
// Returns true if the center of the 16x16 macroblock at (mb_row, mb_col)
// is skin colored, judged from a 2x2 average around the block center in
// each of the three I420 planes.
bool MbHasSkinColor(const uint8_t* y_src,
                    const uint8_t* u_src,
                    const uint8_t* v_src,
                    const int stride_y,
                    const int stride_u,
                    const int stride_v,
                    const int mb_row,
                    const int mb_col) {
  // Luma blocks are 16x16; chroma blocks are subsampled to 8x8.
  const uint8_t* y_center =
      y_src + ((mb_row << 4) + 8) * stride_y + (mb_col << 4) + 8;
  const uint8_t* u_center =
      u_src + ((mb_row << 3) + 4) * stride_u + (mb_col << 3) + 4;
  const uint8_t* v_center =
      v_src + ((mb_row << 3) + 4) * stride_v + (mb_col << 3) + 4;
  // Use a 2x2 average of the center pixel to compute the skin area.
  const uint8_t y_avg = (y_center[0] + y_center[1] + y_center[stride_y] +
                         y_center[stride_y + 1]) >>
                        2;
  const uint8_t u_avg = (u_center[0] + u_center[1] + u_center[stride_u] +
                         u_center[stride_u + 1]) >>
                        2;
  const uint8_t v_avg = (v_center[0] + v_center[1] + v_center[stride_v] +
                         v_center[stride_v + 1]) >>
                        2;
  return SkinPixel(y_avg, u_avg, v_avg) == 1;
}

View file

@ -1,30 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
#define MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_

// Bug fix: this header used to declare `typedef unsigned char uint8_t;`
// inside namespace webrtc, redefining a reserved standard typedef name.
// Use the standard fixed-width type instead.
#include <stdint.h>

namespace webrtc {

// 0: single skin model; non-zero: multi-model classification.
#define MODEL_MODE 0

// Returns true if the center of the 16x16 macroblock at (mb_row, mb_col)
// of an I420 frame is skin colored. Strides are per-plane row strides.
bool MbHasSkinColor(const uint8_t* y_src,
                    const uint8_t* u_src,
                    const uint8_t* v_src,
                    int stride_y,
                    int stride_u,
                    int stride_v,
                    int mb_row,
                    int mb_col);

}  // namespace webrtc
#endif  // MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_

View file

@ -1,339 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_processing/video_denoiser.h"
#include <stdint.h>
#include <string.h>
#include "api/video/i420_buffer.h"
#include "third_party/libyuv/include/libyuv/planar_functions.h"
namespace webrtc {
#if DISPLAY || DISPLAYNEON
// Debug-only visualization: paints each macroblock's chroma so that
// moving-edge blocks (d_status == 1) show as red and moving-object blocks
// show as blue, leaving all other blocks' chroma untouched. Compiled in
// only when the DISPLAY/DISPLAYNEON diagnostics macros are non-zero.
static void ShowRect(const std::unique_ptr<DenoiserFilter>& filter,
                     const std::unique_ptr<uint8_t[]>& d_status,
                     const std::unique_ptr<uint8_t[]>& moving_edge_red,
                     const std::unique_ptr<uint8_t[]>& x_density,
                     const std::unique_ptr<uint8_t[]>& y_density,
                     const uint8_t* u_src,
                     int stride_u_src,
                     const uint8_t* v_src,
                     int stride_v_src,
                     uint8_t* u_dst,
                     int stride_u_dst,
                     uint8_t* v_dst,
                     int stride_v_dst,
                     int mb_rows_,
                     int mb_cols_) {
  for (int mb_row = 0; mb_row < mb_rows_; ++mb_row) {
    for (int mb_col = 0; mb_col < mb_cols_; ++mb_col) {
      int mb_index = mb_row * mb_cols_ + mb_col;
      // Chroma blocks are 8x8 (4:2:0 subsampling of a 16x16 luma block).
      const uint8_t* mb_src_u =
          u_src + (mb_row << 3) * stride_u_src + (mb_col << 3);
      const uint8_t* mb_src_v =
          v_src + (mb_row << 3) * stride_v_src + (mb_col << 3);
      uint8_t* mb_dst_u = u_dst + (mb_row << 3) * stride_u_dst + (mb_col << 3);
      uint8_t* mb_dst_v = v_dst + (mb_row << 3) * stride_v_dst + (mb_col << 3);
      // Constant chroma block used to tint the painted macroblocks.
      uint8_t uv_tmp[8 * 8];
      memset(uv_tmp, 200, 8 * 8);
      if (d_status[mb_index] == 1) {
        // Paint to red.
        libyuv::CopyPlane(mb_src_u, stride_u_src, mb_dst_u, stride_u_dst, 8, 8);
        libyuv::CopyPlane(uv_tmp, 8, mb_dst_v, stride_v_dst, 8, 8);
      } else if (moving_edge_red[mb_row * mb_cols_ + mb_col] &&
                 x_density[mb_col] * y_density[mb_row]) {
        // Paint to blue.
        libyuv::CopyPlane(uv_tmp, 8, mb_dst_u, stride_u_dst, 8, 8);
        libyuv::CopyPlane(mb_src_v, stride_v_src, mb_dst_v, stride_v_dst, 8, 8);
      } else {
        // Pass the original chroma through unchanged.
        libyuv::CopyPlane(mb_src_u, stride_u_src, mb_dst_u, stride_u_dst, 8, 8);
        libyuv::CopyPlane(mb_src_v, stride_v_src, mb_dst_v, stride_v_dst, 8, 8);
      }
    }
  }
}
#endif
// Constructs the denoiser with zeroed geometry; per-resolution buffers are
// allocated lazily by DenoiserReset() on the first DenoiseFrame() call.
// NOTE(review): DenoiserFilter::Create presumably picks an optimized filter
// backend based on `runtime_cpu_detection` and reports the detected CPU
// class through `cpu_type_` — confirm against denoiser_filter.h.
VideoDenoiser::VideoDenoiser(bool runtime_cpu_detection)
    : width_(0),
      height_(0),
      filter_(DenoiserFilter::Create(runtime_cpu_detection, &cpu_type_)),
      ne_(new NoiseEstimation()) {}
// Reinitializes all per-resolution state (noise estimator and per-macroblock
// bookkeeping buffers) for the dimensions of `frame`.
void VideoDenoiser::DenoiserReset(
    rtc::scoped_refptr<I420BufferInterface> frame) {
  width_ = frame->width();
  height_ = frame->height();
  mb_cols_ = width_ >> 4;
  mb_rows_ = height_ >> 4;
  const int num_mbs = mb_cols_ * mb_rows_;
  // Restart noise estimation for the new dimensions.
  ne_->Init(width_, height_, cpu_type_);
  // (Re)allocate the per-macroblock bookkeeping buffers.
  moving_edge_.reset(new uint8_t[num_mbs]);
  mb_filter_decision_.reset(new DenoiserDecision[num_mbs]);
  x_density_.reset(new uint8_t[mb_cols_]);
  y_density_.reset(new uint8_t[mb_rows_]);
  moving_object_.reset(new uint8_t[num_mbs]);
}
// Maps a macroblock position to a variance-threshold multiplier: 3 near the
// frame border (outside 1/16..15/16), 2 in the intermediate band (outside
// 1/8..7/8), and 1 in the center or whenever the noise level is zero.
int VideoDenoiser::PositionCheck(int mb_row, int mb_col, int noise_level) {
  if (noise_level == 0)
    return 1;
  const bool near_border = (mb_row <= (mb_rows_ >> 4)) ||
                           (mb_col <= (mb_cols_ >> 4)) ||
                           (mb_col >= (15 * mb_cols_ >> 4));
  if (near_border)
    return 3;
  const bool outer_band = (mb_row <= (mb_rows_ >> 3)) ||
                          (mb_col <= (mb_cols_ >> 3)) ||
                          (mb_col >= (7 * mb_cols_ >> 3));
  return outer_band ? 2 : 1;
}
// Clears moving-object flags (in `moving_edge_red`) for blocks that can be
// reached from each of the four frame corners without crossing a
// moving-edge block in `d_status`. Each scan walks rows away from its
// corner; within a row it stops at the first moving edge and tightens the
// column limit (`mb_col_stop`) for all subsequent rows, so the cleared
// region cannot extend past edges seen in earlier rows.
// `noise_level` is currently unused.
void VideoDenoiser::ReduceFalseDetection(
    const std::unique_ptr<uint8_t[]>& d_status,
    std::unique_ptr<uint8_t[]>* moving_edge_red,
    int noise_level) {
  // From up left corner.
  int mb_col_stop = mb_cols_ - 1;
  for (int mb_row = 0; mb_row <= mb_rows_ - 1; ++mb_row) {
    for (int mb_col = 0; mb_col <= mb_col_stop; ++mb_col) {
      if (d_status[mb_row * mb_cols_ + mb_col]) {
        mb_col_stop = mb_col - 1;
        break;
      }
      (*moving_edge_red)[mb_row * mb_cols_ + mb_col] = 0;
    }
  }
  // From bottom left corner.
  mb_col_stop = mb_cols_ - 1;
  for (int mb_row = mb_rows_ - 1; mb_row >= 0; --mb_row) {
    for (int mb_col = 0; mb_col <= mb_col_stop; ++mb_col) {
      if (d_status[mb_row * mb_cols_ + mb_col]) {
        mb_col_stop = mb_col - 1;
        break;
      }
      (*moving_edge_red)[mb_row * mb_cols_ + mb_col] = 0;
    }
  }
  // From up right corner.
  mb_col_stop = 0;
  for (int mb_row = 0; mb_row <= mb_rows_ - 1; ++mb_row) {
    for (int mb_col = mb_cols_ - 1; mb_col >= mb_col_stop; --mb_col) {
      if (d_status[mb_row * mb_cols_ + mb_col]) {
        mb_col_stop = mb_col + 1;
        break;
      }
      (*moving_edge_red)[mb_row * mb_cols_ + mb_col] = 0;
    }
  }
  // From bottom right corner.
  mb_col_stop = 0;
  for (int mb_row = mb_rows_ - 1; mb_row >= 0; --mb_row) {
    for (int mb_col = mb_cols_ - 1; mb_col >= mb_col_stop; --mb_col) {
      if (d_status[mb_row * mb_cols_ + mb_col]) {
        mb_col_stop = mb_col + 1;
        break;
      }
      (*moving_edge_red)[mb_row * mb_cols_ + mb_col] = 0;
    }
  }
}
// Returns whether the block might cause a trailing artifact, i.e. whether
// any of its four direct neighbors is a moving-edge block in `d_status`.
// Blocks on the frame border are never flagged.
bool VideoDenoiser::IsTrailingBlock(const std::unique_ptr<uint8_t[]>& d_status,
                                    int mb_row,
                                    int mb_col) {
  if (mb_row == 0 || mb_col == 0 || mb_row == mb_rows_ - 1 ||
      mb_col == mb_cols_ - 1) {
    return false;
  }
  const int mb_index = mb_row * mb_cols_ + mb_col;
  return d_status[mb_index + 1] || d_status[mb_index - 1] ||
         d_status[mb_index + mb_cols_] || d_status[mb_index - mb_cols_];
}
// Overwrites the denoised luma with the original source for every 16x16
// block that was not cleanly filtered: non-FILTER_BLOCK decisions, blocks
// adjacent to a moving edge (potential trailing artifacts), and blocks
// inside a detected moving object region.
void VideoDenoiser::CopySrcOnMOB(const uint8_t* y_src,
                                 int stride_src,
                                 uint8_t* y_dst,
                                 int stride_dst) {
  for (int mb_row = 0; mb_row < mb_rows_; ++mb_row) {
    const int row_index_base = mb_row * mb_cols_;
    const uint8_t* row_src = y_src + (mb_row << 4) * stride_src;
    uint8_t* row_dst = y_dst + (mb_row << 4) * stride_dst;
    for (int mb_col = 0; mb_col < mb_cols_; ++mb_col) {
      const int mb_index = row_index_base + mb_col;
      // Decide whether this block must revert to the unfiltered source.
      const bool copy_source =
          mb_filter_decision_[mb_index] != FILTER_BLOCK ||
          IsTrailingBlock(moving_edge_, mb_row, mb_col) ||
          (x_density_[mb_col] * y_density_[mb_row] &&
           moving_object_[mb_index]);
      if (copy_source) {
        const int offset_col = mb_col << 4;
        // Copy y source.
        libyuv::CopyPlane(row_src + offset_col, stride_src,
                          row_dst + offset_col, stride_dst, 16, 16);
      }
    }
  }
}
// Copies the luma margins straight from the source when the frame
// dimensions are not divisible by 16: the bottom margin over the full
// width, and the right margin over the macroblock-covered rows (the
// bottom-right corner is covered by the first copy).
void VideoDenoiser::CopyLumaOnMargin(const uint8_t* y_src,
                                     int stride_src,
                                     uint8_t* y_dst,
                                     int stride_dst) {
  const int covered_height = mb_rows_ << 4;
  const int covered_width = mb_cols_ << 4;
  if (height_ > covered_height) {
    libyuv::CopyPlane(y_src + covered_height * stride_src, stride_src,
                      y_dst + covered_height * stride_dst, stride_dst, width_,
                      height_ - covered_height);
  }
  if (width_ > covered_width) {
    libyuv::CopyPlane(y_src + covered_width, stride_src, y_dst + covered_width,
                      stride_dst, width_ - covered_width, covered_height);
  }
}
// Temporally denoises the luma plane of `frame` against the previously
// denoised frame and returns the result; chroma is copied through
// unmodified. On a resolution change (or the very first frame) internal
// state is reset and the input frame is returned as-is. As a side effect,
// per-block noise statistics are fed to the noise estimator when
// `noise_estimation_enabled` is true.
rtc::scoped_refptr<I420BufferInterface> VideoDenoiser::DenoiseFrame(
    rtc::scoped_refptr<I420BufferInterface> frame,
    bool noise_estimation_enabled) {
  // If previous width and height are different from current frame's, need to
  // reallocate the buffers and no denoising for the current frame.
  if (!prev_buffer_ || width_ != frame->width() || height_ != frame->height()) {
    DenoiserReset(frame);
    prev_buffer_ = frame;
    return frame;
  }
  // Set buffer pointers.
  const uint8_t* y_src = frame->DataY();
  int stride_y_src = frame->StrideY();
  rtc::scoped_refptr<I420Buffer> dst =
      buffer_pool_.CreateI420Buffer(width_, height_);
  uint8_t* y_dst = dst->MutableDataY();
  int stride_y_dst = dst->StrideY();
  const uint8_t* y_dst_prev = prev_buffer_->DataY();
  int stride_prev = prev_buffer_->StrideY();
  // Reset the moving-object detection state: all blocks start out marked as
  // potential moving objects and get cleared by ReduceFalseDetection().
  memset(x_density_.get(), 0, mb_cols_);
  memset(y_density_.get(), 0, mb_rows_);
  memset(moving_object_.get(), 1, mb_cols_ * mb_rows_);
  uint8_t noise_level = noise_estimation_enabled ? ne_->GetNoiseLevel() : 0;
  // Base variance threshold per 16x16 block for moving-edge detection;
  // scaled by the PositionCheck() factor below.
  int thr_var_base = 16 * 16 * 2;
  // Loop over blocks to accumulate/extract noise level and update x/y_density
  // factors for moving object detection.
  for (int mb_row = 0; mb_row < mb_rows_; ++mb_row) {
    const int mb_index_base = mb_row * mb_cols_;
    const uint8_t* mb_src_base = y_src + (mb_row << 4) * stride_y_src;
    uint8_t* mb_dst_base = y_dst + (mb_row << 4) * stride_y_dst;
    const uint8_t* mb_dst_prev_base = y_dst_prev + (mb_row << 4) * stride_prev;
    for (int mb_col = 0; mb_col < mb_cols_; ++mb_col) {
      const int mb_index = mb_index_base + mb_col;
      // Noise statistics are only sampled every NOISE_SUBSAMPLE_INTERVAL-th
      // block.
      const bool ne_enable = (mb_index % NOISE_SUBSAMPLE_INTERVAL == 0);
      const int pos_factor = PositionCheck(mb_row, mb_col, noise_level);
      const uint32_t thr_var_adp = thr_var_base * pos_factor;
      const uint32_t offset_col = mb_col << 4;
      const uint8_t* mb_src = mb_src_base + offset_col;
      uint8_t* mb_dst = mb_dst_base + offset_col;
      const uint8_t* mb_dst_prev = mb_dst_prev_base + offset_col;
      // TODO(jackychen): Need SSE2/NEON opt.
      // Sum of the central 8x8 luma samples; used by the noise estimator to
      // normalize the variance by brightness.
      int luma = 0;
      if (ne_enable) {
        for (int i = 4; i < 12; ++i) {
          for (int j = 4; j < 12; ++j) {
            luma += mb_src[i * stride_y_src + j];
          }
        }
      }
      // Get the filtered block and filter_decision.
      mb_filter_decision_[mb_index] =
          filter_->MbDenoise(mb_dst_prev, stride_prev, mb_dst, stride_y_dst,
                             mb_src, stride_y_src, 0, noise_level);
      // If filter decision is FILTER_BLOCK, no need to check moving edge.
      // It is unlikely for a moving edge block to be filtered in current
      // setting.
      if (mb_filter_decision_[mb_index] == FILTER_BLOCK) {
        uint32_t sse_t = 0;
        if (ne_enable) {
          // The variance used in noise estimation is based on the src block in
          // time t (mb_src) and filtered block in time t-1 (mb_dst_prev).
          uint32_t noise_var = filter_->Variance16x8(
              mb_dst_prev, stride_y_dst, mb_src, stride_y_src, &sse_t);
          ne_->GetNoise(mb_index, noise_var, luma);
        }
        moving_edge_[mb_index] = 0;  // Not a moving edge block.
      } else {
        uint32_t sse_t = 0;
        // The variance used in MOD is based on the filtered blocks in time
        // T (mb_dst) and T-1 (mb_dst_prev).
        uint32_t noise_var = filter_->Variance16x8(
            mb_dst_prev, stride_prev, mb_dst, stride_y_dst, &sse_t);
        if (noise_var > thr_var_adp) {  // Moving edge checking.
          if (ne_enable) {
            // A moving block breaks this block's consecutive-static streak.
            ne_->ResetConsecLowVar(mb_index);
          }
          moving_edge_[mb_index] = 1;  // Mark as moving edge block.
          // Border blocks (pos_factor == 3) do not contribute to the
          // row/column density histograms used by moving-object detection.
          x_density_[mb_col] += (pos_factor < 3);
          y_density_[mb_row] += (pos_factor < 3);
        } else {
          moving_edge_[mb_index] = 0;
          if (ne_enable) {
            // The variance used in noise estimation is based on the src block
            // in time t (mb_src) and filtered block in time t-1 (mb_dst_prev).
            uint32_t noise_var = filter_->Variance16x8(
                mb_dst_prev, stride_prev, mb_src, stride_y_src, &sse_t);
            ne_->GetNoise(mb_index, noise_var, luma);
          }
        }
      }
    }  // End of for loop
  }    // End of for loop
  // Prune moving-object marks reachable from the frame corners, then revert
  // unfiltered/moving blocks to the original source.
  ReduceFalseDetection(moving_edge_, &moving_object_, noise_level);
  CopySrcOnMOB(y_src, stride_y_src, y_dst, stride_y_dst);
  // When frame width/height not divisible by 16, copy the margin to
  // denoised_frame.
  if ((mb_rows_ << 4) != height_ || (mb_cols_ << 4) != width_)
    CopyLumaOnMargin(y_src, stride_y_src, y_dst, stride_y_dst);
  // Copy u/v planes.
  libyuv::CopyPlane(frame->DataU(), frame->StrideU(), dst->MutableDataU(),
                    dst->StrideU(), (width_ + 1) >> 1, (height_ + 1) >> 1);
  libyuv::CopyPlane(frame->DataV(), frame->StrideV(), dst->MutableDataV(),
                    dst->StrideV(), (width_ + 1) >> 1, (height_ + 1) >> 1);
#if DISPLAY || DISPLAYNEON
  // Show rectangular region
  ShowRect(filter_, moving_edge_, moving_object_, x_density_, y_density_,
           frame->DataU(), frame->StrideU(), frame->DataV(), frame->StrideV(),
           dst->MutableDataU(), dst->StrideU(), dst->MutableDataV(),
           dst->StrideV(), mb_rows_, mb_cols_);
#endif
  // The denoised frame becomes the temporal reference for the next call.
  prev_buffer_ = dst;
  return dst;
}
} // namespace webrtc

View file

@ -1,86 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
#define MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
#include <memory>
#include "api/scoped_refptr.h"
#include "api/video/video_frame_buffer.h"
#include "common_video/include/video_frame_buffer_pool.h"
#include "modules/video_processing/util/denoiser_filter.h"
#include "modules/video_processing/util/noise_estimation.h"
#include "modules/video_processing/util/skin_detection.h"
namespace webrtc {
// Temporal denoiser for I420 frames. Each call to DenoiseFrame() filters
// the luma plane against the previously returned (denoised) frame on a
// 16x16 macroblock grid; chroma is passed through. Not thread-safe: all
// calls must come from the same sequence.
class VideoDenoiser {
 public:
  // `runtime_cpu_detection` selects the filter backend via
  // DenoiserFilter::Create().
  explicit VideoDenoiser(bool runtime_cpu_detection);

  // Returns the denoised frame; on a resolution change the input frame is
  // returned unmodified and internal state is reset.
  rtc::scoped_refptr<I420BufferInterface> DenoiseFrame(
      rtc::scoped_refptr<I420BufferInterface> frame,
      bool noise_estimation_enabled);

 private:
  // Reallocates per-resolution buffers and reinitializes the estimator.
  void DenoiserReset(rtc::scoped_refptr<I420BufferInterface> frame);

  // Check the mb position, return 1: close to the frame center (between 1/8
  // and 7/8 of width/height), 3: close to the border (out of 1/16 and 15/16
  // of width/height), 2: in between.
  int PositionCheck(int mb_row, int mb_col, int noise_level);

  // To reduce false detection in moving object detection (MOD).
  void ReduceFalseDetection(const std::unique_ptr<uint8_t[]>& d_status,
                            std::unique_ptr<uint8_t[]>* d_status_red,
                            int noise_level);

  // Return whether a block might cause trailing artifact by checking if one of
  // its neighbor blocks is a moving edge block.
  bool IsTrailingBlock(const std::unique_ptr<uint8_t[]>& d_status,
                       int mb_row,
                       int mb_col);

  // Copy input blocks to dst buffer on moving object blocks (MOB).
  void CopySrcOnMOB(const uint8_t* y_src,
                    int stride_src,
                    uint8_t* y_dst,
                    int stride_dst);

  // Copy luma margin blocks when frame width/height not divisible by 16.
  void CopyLumaOnMargin(const uint8_t* y_src,
                        int stride_src,
                        uint8_t* y_dst,
                        int stride_dst);

  int width_;
  int height_;
  // Frame size in 16x16 macroblocks; set in DenoiserReset().
  int mb_rows_;
  int mb_cols_;
  // CPU class detected by DenoiserFilter::Create(); forwarded to the noise
  // estimator.
  CpuType cpu_type_;
  std::unique_ptr<DenoiserFilter> filter_;
  std::unique_ptr<NoiseEstimation> ne_;
  // 1 for moving edge block, 0 for static block.
  std::unique_ptr<uint8_t[]> moving_edge_;
  // 1 for moving object block, 0 for static block.
  std::unique_ptr<uint8_t[]> moving_object_;
  // x_density_ and y_density_ are used in MOD process: per-column/per-row
  // counts of non-border moving-edge blocks.
  std::unique_ptr<uint8_t[]> x_density_;
  std::unique_ptr<uint8_t[]> y_density_;
  // Save the return values by MbDenoise for each block.
  std::unique_ptr<DenoiserDecision[]> mb_filter_decision_;
  VideoFrameBufferPool buffer_pool_;
  // Previously denoised frame; the temporal reference for the next call.
  rtc::scoped_refptr<I420BufferInterface> prev_buffer_;
};
}  // namespace webrtc
#endif  // MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_

View file

@ -13,6 +13,5 @@ include_rules = [
"+modules/rtp_rtcp",
"+modules/utility",
"+modules/video_coding",
"+modules/video_processing",
"+system_wrappers",
]