Merge branch 'm118'

inaqui-signal 2023-11-02 11:08:33 -05:00 committed by Iñaqui
commit fa4fd71354
678 changed files with 24102 additions and 28509 deletions

.gn

@ -78,7 +78,4 @@ default_args = {
# Chromium fix resolves the problem.
fuchsia_sdk_readelf_exec =
"//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
fuchsia_target_api_level = 9
use_cxx17 = true
}


@ -80,6 +80,7 @@ Maksim Sisov <msisov@igalia.com>
Maxim Pavlov <pavllovmax@gmail.com>
Maxim Potapov <vopatop.skam@gmail.com>
Michael Iedema <michael@kapsulate.com>
Michael Klingbeil <klingm@amazon.com>
Michał Zarach <michalzaq12@gmail.com>
Michel Promonet <michel.promonet.1@gmail.com>
Miguel Paris <mparisdiaz@gmail.com>
@ -88,6 +89,7 @@ Mike Wei <Mike.WeiB@gmail.com>
Min Wang <mingewang@gmail.com>
Mike Woodworth <mike@divergentmedia.com>
Mo Zanaty <mzanaty@cisco.com>
Mohamed Risaldar UT <mohamed.ut@travancoreanalytics.com>
Nico Schlumprecht <me@github.nico.onl>
Niek van der Maas <mail@niekvandermaas.nl>
Olivier Crête <olivier.crete@ocrete.ca>


@ -64,12 +64,12 @@ if (!build_with_chromium) {
"modules/video_capture:video_capture_internal_impl",
"modules/video_coding:video_codec_perf_tests",
"net/dcsctp:dcsctp_unittests",
"pc:peer_connection_mediachannel_split_unittests",
"pc:peerconnection_unittests",
"pc:rtc_pc_unittests",
"pc:slow_peer_connection_unittests",
"pc:svc_tests",
"rtc_tools:rtp_generator",
"rtc_tools:video_encoder",
"rtc_tools:video_replay",
"stats:rtc_stats_unittests",
"system_wrappers:system_wrappers_unittests",
@ -138,6 +138,14 @@ config("common_inherited_config") {
cflags = []
ldflags = []
if (rtc_jni_generator_legacy_symbols) {
defines += [ "RTC_JNI_GENERATOR_LEGACY_SYMBOLS" ]
}
if (rtc_objc_prefix != "") {
defines += [ "RTC_OBJC_TYPE_PREFIX=${rtc_objc_prefix}" ]
}
if (rtc_dlog_always_on) {
defines += [ "DLOG_ALWAYS_ON" ]
}
@ -313,6 +321,10 @@ config("common_config") {
defines += [ "WEBRTC_ABSL_MUTEX" ]
}
if (rtc_enable_libevent) {
defines += [ "WEBRTC_ENABLE_LIBEVENT" ]
}
if (rtc_disable_logging) {
defines += [ "RTC_DISABLE_LOGGING" ]
}
@ -596,6 +608,7 @@ if (rtc_include_tests && !build_with_chromium) {
"call:fake_network_pipe_unittests",
"p2p:libstunprober_unittests",
"p2p:rtc_p2p_unittests",
"rtc_base:async_dns_resolver_unittests",
"rtc_base:callback_list_unittests",
"rtc_base:rtc_base_approved_unittests",
"rtc_base:rtc_base_unittests",

DEPS

@ -10,7 +10,7 @@ vars = {
# chromium waterfalls. More info at: crbug.com/570091.
'checkout_configuration': 'default',
'checkout_instrumented_libraries': 'checkout_linux and checkout_configuration == "default"',
'chromium_revision': '8603a0cee25d1cc4d701ef742b3df7c775440524',
'chromium_revision': '6ac79291669656814b2c66e66ea296caac6652fd',
# Fetch the prebuilt binaries for llvm-cov and llvm-profdata. Needed to
# process the raw profiles produced by instrumented targets (built with
@ -25,7 +25,7 @@ vars = {
# By default, download the fuchsia sdk from the public sdk directory.
'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/core/',
'fuchsia_version': 'version:13.20230615.1.1',
'fuchsia_version': 'version:14.20230826.1.1',
# By default, download the fuchsia images from the fuchsia GCS bucket.
'fuchsia_images_bucket': 'fuchsia',
'checkout_fuchsia': False,
@ -35,8 +35,12 @@ vars = {
'checkout_fuchsia_boot_images': "terminal.qemu-x64",
'checkout_fuchsia_product_bundles': '"{checkout_fuchsia_boot_images}" != ""',
# Fetch configuration files required for the 'use_remoteexec' gn arg
'download_remoteexec_cfg': False,
# RBE instance to use for running remote builds
'rbe_instance': 'projects/rbe-webrtc-developer/instances/default_instance',
# reclient CIPD package version
'reclient_version': 're_client_version:0.108.0.7cdbbe9-gomaip',
'reclient_version': 're_client_version:0.113.0.8b45b89-gomaip',
# ninja CIPD package version
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
@ -50,30 +54,30 @@ deps = {
# TODO(kjellander): Move this to be Android-only.
'src/base':
'https://chromium.googlesource.com/chromium/src/base@ca4474373784d15364b5d190970e5bdfa1544c2a',
'https://chromium.googlesource.com/chromium/src/base@609cafa975c8a29d3b2f686c9a42530a556835fe',
'src/build':
'https://chromium.googlesource.com/chromium/src/build@6c0e0e0c84aa581f9bfa042e511dc9aaffa8fd82',
'https://chromium.googlesource.com/chromium/src/build@115a7079919c25462a7fd8c1d22900378bbc6585',
'src/buildtools':
'https://chromium.googlesource.com/chromium/src/buildtools@3739a3619309af3b788379ad0936ca00b981616e',
'https://chromium.googlesource.com/chromium/src/buildtools@b2043d4f435131d0a1bdd5342c17753ef9236572',
# Gradle 6.6.1. Used for testing Android Studio project generation for WebRTC.
'src/examples/androidtests/third_party/gradle': {
'url': 'https://chromium.googlesource.com/external/github.com/gradle/gradle.git@f2d1fb54a951d8b11d25748e4711bec8d128d7e3',
'condition': 'checkout_android',
},
'src/ios': {
'url': 'https://chromium.googlesource.com/chromium/src/ios@0df9bead2936138bd3853fdf826b29470cfa517e',
'url': 'https://chromium.googlesource.com/chromium/src/ios@17864bdc8fb2f78060ea4109d61a9144f64f4d67',
'condition': 'checkout_ios',
},
'src/testing':
'https://chromium.googlesource.com/chromium/src/testing@f3b8f1d8c1d7ca49f9a77b8e669c357572f4447c',
'https://chromium.googlesource.com/chromium/src/testing@ff8dee88bc0b49f8337cee6e82151c245a63b98c',
'src/third_party':
'https://chromium.googlesource.com/chromium/src/third_party@770155421d251b9541301084d0db46812540c251',
'https://chromium.googlesource.com/chromium/src/third_party@ee6367daea550c5845a6079cec5fd6555f39144f',
'src/buildtools/linux64': {
'packages': [
{
'package': 'gn/gn/linux-${{arch}}',
'version': 'git_revision:4bd1a77e67958fb7f6739bd4542641646f264e5d',
'version': 'git_revision:cc56a0f98bb34accd5323316e0292575ff17a5d4',
}
],
'dep_type': 'cipd',
@ -83,7 +87,7 @@ deps = {
'packages': [
{
'package': 'gn/gn/mac-${{arch}}',
'version': 'git_revision:4bd1a77e67958fb7f6739bd4542641646f264e5d',
'version': 'git_revision:cc56a0f98bb34accd5323316e0292575ff17a5d4',
}
],
'dep_type': 'cipd',
@ -93,7 +97,7 @@ deps = {
'packages': [
{
'package': 'gn/gn/windows-amd64',
'version': 'git_revision:4bd1a77e67958fb7f6739bd4542641646f264e5d',
'version': 'git_revision:cc56a0f98bb34accd5323316e0292575ff17a5d4',
}
],
'dep_type': 'cipd',
@ -113,13 +117,13 @@ deps = {
},
'src/third_party/clang-format/script':
'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/clang/tools/clang-format.git@f97059df7f8b205064625cdb5f97b56668a125ef',
'src/buildtools/third_party/libc++/trunk':
'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx.git@055b2e17ae4f0e2c025ad0c7508b01787df17758',
'src/buildtools/third_party/libc++abi/trunk':
'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git@c2a35d1b2cf4b6ca85f5235c76ad9b1aff97e801',
'src/buildtools/third_party/libunwind/trunk':
'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind.git@b5a43ecdac82a248f8a700a68c722b4d98708377',
'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/clang/tools/clang-format.git@e5337933f2951cacd3aeacd238ce4578163ca0b9',
'src/third_party/libc++/src':
'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx.git@84fb809dd6dae36d556dc0bb702c6cc2ce9d4b80',
'src/third_party/libc++abi/src':
'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git@3d83ca7bd2ab81f042bafe6996da08c9cd57c119',
'src/third_party/libunwind/src':
'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind.git@76e621a89787516da745489245d8b65a48ad60d8',
'src/third_party/ninja': {
'packages': [
@ -135,7 +139,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_system_sdk/public',
'version': 'RGY8Vyf8jjszRIJRFxZj7beXSUEHTQM90MtYejUvdMgC',
'version': '4QeolYaSKWBtVTgzJU4tHUfzA9OJTDM8YUcD426IctwC',
},
],
'condition': 'checkout_android',
@ -166,7 +170,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/bundletool',
'version': 'LbB0aRQ3VKjRJZmc_PD0VTZ1O34_zD92rh71aOEOEcEC',
'version': '2RPwohwtc6on0_96oFxokeEvnC1LbLrGuyCAw00k62AC',
},
],
'condition': 'checkout_android',
@ -174,11 +178,11 @@ deps = {
},
'src/third_party/boringssl/src':
'https://boringssl.googlesource.com/boringssl.git@ae88f198a49d77993e9c44b017d0e69c810dc668',
'https://boringssl.googlesource.com/boringssl.git@b8e012e1ff736cc794273af4a7db521e6b18bcd5',
'src/third_party/breakpad/breakpad':
'https://chromium.googlesource.com/breakpad/breakpad.git@8988364bcddd9b194b0bf931c10bc125987330ed',
'src/third_party/catapult':
'https://chromium.googlesource.com/catapult.git@89fad9023d62d7031789a904b2aa6bd1d4d0a3e2',
'https://chromium.googlesource.com/catapult.git@b8c4f2d99ac66fe47cb8cceec0dd1a1da5d1b51e',
'src/third_party/ced/src': {
'url': 'https://chromium.googlesource.com/external/github.com/google/compact_enc_det.git@ba412eaaacd3186085babcd901679a48863c7dd5',
},
@ -191,21 +195,21 @@ deps = {
'src/third_party/crc32c/src':
'https://chromium.googlesource.com/external/github.com/google/crc32c.git@fa5ade41ee480003d9c5af6f43567ba22e4e17e6',
'src/third_party/depot_tools':
'https://chromium.googlesource.com/chromium/tools/depot_tools.git@3ffad8166e1c233624dcac4e5a12a59944f1231a',
'https://chromium.googlesource.com/chromium/tools/depot_tools.git@427f0f43ad0ceb08399561ab9cc60e45931059d3',
'src/third_party/ffmpeg':
'https://chromium.googlesource.com/chromium/third_party/ffmpeg.git@881c5c3f6412020c37e97e178e0f5da9ddd2ae90',
'https://chromium.googlesource.com/chromium/third_party/ffmpeg.git@0ba37733400593b162e5ae9ff26b384cff49c250',
'src/third_party/flatbuffers/src':
'https://chromium.googlesource.com/external/github.com/google/flatbuffers.git@13fc75cb6b7b44793f3f5b4ba025ff403d012c9f',
'https://chromium.googlesource.com/external/github.com/google/flatbuffers.git@28861d1d7d5ec6ce34d4bbdc10bec4aace341167',
'src/third_party/grpc/src': {
'url': 'https://chromium.googlesource.com/external/github.com/grpc/grpc.git@822dab21d9995c5cf942476b35ca12a1aa9d2737',
},
# Used for embedded builds. CrOS & Linux use the system version.
'src/third_party/fontconfig/src': {
'url': 'https://chromium.googlesource.com/external/fontconfig.git@06929a556fdc39c8fe12965b69070c8df520a33e',
'url': 'https://chromium.googlesource.com/external/fontconfig.git@2fb3419a92156569bc1ec707401258c922cd0d99',
'condition': 'checkout_linux',
},
'src/third_party/freetype/src':
'https://chromium.googlesource.com/chromium/src/third_party/freetype2.git@5c00a46805d6423fc45b4ba2c0f2e22dd0450d73',
'https://chromium.googlesource.com/chromium/src/third_party/freetype2.git@dd1ced4ee37b375686a1e0fb6e3a6966b195f4ab',
'src/third_party/harfbuzz-ng/src':
'https://chromium.googlesource.com/external/github.com/harfbuzz/harfbuzz.git@db700b5670d9475cc8ed4880cc9447b232c5e432',
'src/third_party/google_benchmark/src': {
@ -227,13 +231,13 @@ deps = {
'src/third_party/googletest/src':
'https://chromium.googlesource.com/external/github.com/google/googletest.git@af29db7ec28d6df1c7f0f745186884091e602e07',
'src/third_party/icu': {
'url': 'https://chromium.googlesource.com/chromium/deps/icu.git@e8c3bc9ea97d4423ad0515e5f1c064f486dae8b1',
'url': 'https://chromium.googlesource.com/chromium/deps/icu.git@985b9a6f70e13f3db741fed121e4dcc3046ad494',
},
'src/third_party/jdk': {
'packages': [
{
'package': 'chromium/third_party/jdk',
'version': '2Of9Pe_OdO4xoAATuiLDiMVNebKTNO3WrwJGqil4RosC',
'version': '0yjD6s5XYtcGAQoObIys7xs2ThkudwxJwS-2ZNP0SFEC',
},
],
'condition': 'host_os == "linux" and checkout_android',
@ -262,7 +266,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/kotlin_stdlib',
'version': 'z4_AYYz2Tw5GKikuiDLTuxxf0NJVGLkC3CVcyiIpc-gC',
'version': '6cGkpHi3fSRhpRfq2b1mjmzfFmShvtQe6gy4g2nFQd0C',
},
],
'condition': 'checkout_android',
@ -273,7 +277,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/kotlinc',
'version': 'QSwE30iq_KLKxImEnQEwDMQM_cU10eZSAwNobs8BEsoC',
'version': '6Hdj5fkzcomS1cNTWnXoeTZj0wvCG4zdyLtZ23eK-U4C',
},
],
'condition': 'checkout_android',
@ -283,23 +287,23 @@ deps = {
'src/third_party/libFuzzer/src':
'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/compiler-rt/lib/fuzzer.git@26cc39e59b2bf5cbc20486296248a842c536878d',
'src/third_party/libjpeg_turbo':
'https://chromium.googlesource.com/chromium/deps/libjpeg_turbo.git@aa4075f116e4312537d0d3e9dbd5e31096539f94',
'https://chromium.googlesource.com/chromium/deps/libjpeg_turbo.git@30bdb85e302ecfc52593636b2f44af438e05e784',
'src/third_party/libsrtp':
'https://chromium.googlesource.com/chromium/deps/libsrtp.git@5b7c744eb8310250ccc534f3f86a2015b3887a0a',
'src/third_party/dav1d/libdav1d':
'https://chromium.googlesource.com/external/github.com/videolan/dav1d.git@f8ae94eca0f53502a2cddd29a263c1edea4822a0',
'src/third_party/libaom/source/libaom':
'https://aomedia.googlesource.com/aom.git@233000f66e9ff0bb09226a2f222a029bb4c89de6',
'https://aomedia.googlesource.com/aom.git@5f8db64abce68a3698fb732697ae50880bc9cac4',
'src/third_party/libunwindstack': {
'url': 'https://chromium.googlesource.com/chromium/src/third_party/libunwindstack.git@4dbfa0e8c844c8e243b297bc185e54a99ff94f9e',
'condition': 'checkout_android',
},
'src/third_party/perfetto':
'https://android.googlesource.com/platform/external/perfetto.git@0ba4c2cd12264c4d33787fb700b93c67ee9fbc11',
'https://android.googlesource.com/platform/external/perfetto.git@00427277dd1728c836d92f78006c60430c04d6bc',
'src/third_party/libvpx/source/libvpx':
'https://chromium.googlesource.com/webm/libvpx.git@278d0acd32c3fd544e6f99cbfd714df3430a6442',
'https://chromium.googlesource.com/webm/libvpx.git@24c0dcc8513b8c1ba4ffbf934a399f89de646ffe',
'src/third_party/libyuv':
'https://chromium.googlesource.com/libyuv/libyuv.git@552571e8b24b2619c39ec176e6cb8e75d3e7fdd3',
'https://chromium.googlesource.com/libyuv/libyuv.git@04821d1e7d60845525e8db55c7bcd41ef5be9406',
'src/third_party/lss': {
'url': 'https://chromium.googlesource.com/linux-syscall-support.git@ce877209e11aa69dcfffbd53ef90ea1d07136521',
'condition': 'checkout_android or checkout_linux',
@ -320,7 +324,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': 'vw5kLlW3-suSlCKSO9OQpFWpR8oDnvQ8k1RgKNUapQYC',
'version': 'TBaeKaSTY2ttKx2JSFuWiQ8Na80KHZwLEgSAvT1DBJ0C',
},
],
'condition': 'checkout_android',
@ -333,18 +337,18 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': 'PwglNZFRNPkBBXdnY9NfrZFk2ULWDTRxhV9rl2kvkpUC',
'version': 'vw5kLlW3-suSlCKSO9OQpFWpR8oDnvQ8k1RgKNUapQYC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/requests/src': {
'url': 'https://chromium.googlesource.com/external/github.com/kennethreitz/requests.git@refs/tags/v2.23.0',
'url': 'https://chromium.googlesource.com/external/github.com/kennethreitz/requests.git@c7e0fc087ceeadb8b4c84a0953a422c474093d6d',
'condition': 'checkout_android',
},
'src/tools':
'https://chromium.googlesource.com/chromium/src/tools@eb2e55cf816468d0b8899ce5d8429f7eb8c42f01',
'https://chromium.googlesource.com/chromium/src/tools@3e78ed797e9e5308cb90f319c7330a6d44dac2c7',
'src/third_party/accessibility_test_framework': {
'packages': [
@ -401,11 +405,11 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_toolchain': {
'src/third_party/android_toolchain/ndk': {
'packages': [
{
'package': 'chromium/third_party/android_toolchain/android_toolchain',
'version': 'version:2@r25c.cr1',
'version': 'R_8suM8m0oHbZ1awdxGXvKEFpAOETscbfZxkkMthyk8C',
},
],
'condition': 'checkout_android',
@ -416,7 +420,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/androidx',
'version': 'MqkmMx1Ct4Fk2Vb_FY05yLzXxVnH9evr2OqP6tpU9MEC',
'version': '2n47PFweHFzGxPWjh9RANTrGhmSDWowZ-YhkOV4j11MC',
},
],
'condition': 'checkout_android',
@ -427,7 +431,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/manifest_merger',
'version': 'UNXioFXYvz7k7UmE2WYAaXuYIK3Ky0aSQ0IuDEdS9soC',
'version': 'kkbYOGsVRXhtxBiXuTufY0puTnG5QAfyxvFTBHFWL08C',
},
],
'condition': 'checkout_android',
@ -437,8 +441,8 @@ deps = {
'src/third_party/android_sdk/public': {
'packages': [
{
'package': 'chromium/third_party/android_sdk/public/build-tools/33.0.0',
'version': '-VRKr36Uw8L_iFqqo9nevIBgNMggND5iWxjidyjnCgsC',
'package': 'chromium/third_party/android_sdk/public/build-tools/34.0.0',
'version': 'YK9Rzw3fDzMHVzatNN6VlyoD_81amLZpN1AbmkdOd6AC',
},
{
'package': 'chromium/third_party/android_sdk/public/emulator',
@ -450,11 +454,11 @@ deps = {
},
{
'package': 'chromium/third_party/android_sdk/public/platform-tools',
'version': 'RSI3iwryh7URLGRgJHsCvUxj092woTPnKt4pwFcJ6L8C',
'version': 'HWVsGs2HCKgSVv41FsOcsfJbNcB0UFiNrF6Tc4yRArYC',
},
{
'package': 'chromium/third_party/android_sdk/public/platforms/android-33',
'version': 'eo5KvW6UVor92LwZai8Zulc624BQZoCu-yn7wa1z_YcC',
'package': 'chromium/third_party/android_sdk/public/platforms/android-34',
'version': 'u-bhWbTME6u-DjypTgr3ZikCyeAeU6txkR9ET6Uudc8C',
},
{
'package': 'chromium/third_party/android_sdk/public/platforms/android-tiramisuprivacysandbox',
@ -466,7 +470,7 @@ deps = {
},
{
'package': 'chromium/third_party/android_sdk/public/cmdline-tools',
'version': 'EWnL2r7oV5GtE9Ef7GyohyFam42wtMtEKYU4dCb3U1YC',
'version': 'Sy00LuyBIUJdRGYKwg0zjWH8eAIUvgnnNiPkI8etaZYC',
},
],
'condition': 'checkout_android',
@ -521,7 +525,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/turbine',
'version': 'G8Cku1fztaDd9to_8gk3PNWM2iRacAxD9zcUAgUPUEAC',
'version': 'ZlMS4BOYyYmbU8BuBDGyW7QrkvZ_-pTkm4lH4jKjTi4C',
},
],
'condition': 'checkout_android',
@ -532,11 +536,11 @@ deps = {
'packages': [
{
'package': 'infra/tools/luci/isolate/${{platform}}',
'version': 'git_revision:39f255d5875293d3e1d978888b819ac124a8b0cc',
'version': 'git_revision:fe3cfd422b1012c2c8cf00d65cdb11aa2c26cd66',
},
{
'package': 'infra/tools/luci/swarming/${{platform}}',
'version': 'git_revision:39f255d5875293d3e1d978888b819ac124a8b0cc',
'version': 'git_revision:fe3cfd422b1012c2c8cf00d65cdb11aa2c26cd66',
},
],
'dep_type': 'cipd',
@ -1752,7 +1756,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_squareup_okio_okio_jvm',
'version': 'version:2@3.0.0.cr1',
'version': 'version:2@3.3.0.cr1',
},
],
'condition': 'checkout_android',
@ -1763,7 +1767,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_squareup_wire_wire_runtime_jvm',
'version': 'version:2@4.5.1.cr1',
'version': 'version:2@4.7.0.cr1',
},
],
'condition': 'checkout_android',
@ -1895,7 +1899,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy',
'version': 'version:2@1.14.4.cr1',
'version': 'version:2@1.14.5.cr1',
},
],
'condition': 'checkout_android',
@ -1906,7 +1910,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy_agent',
'version': 'version:2@1.14.4.cr1',
'version': 'version:2@1.14.5.cr1',
},
],
'condition': 'checkout_android',
@ -2115,7 +2119,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_android',
'version': 'version:2@5.3.1.cr1',
'version': 'version:2@5.4.0.cr1',
},
],
'condition': 'checkout_android',
@ -2126,7 +2130,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_core',
'version': 'version:2@5.3.1.cr1',
'version': 'version:2@5.4.0.cr1',
},
],
'condition': 'checkout_android',
@ -2137,7 +2141,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_subclass',
'version': 'version:2@5.3.1.cr1',
'version': 'version:2@5.4.0.cr1',
},
],
'condition': 'checkout_android',
@ -2697,6 +2701,20 @@ hooks = [
'-vpython-tool', 'install',
],
},
# Download remote exec cfg files
{
'name': 'fetch_reclient_cfgs',
'pattern': '.',
'condition': 'download_remoteexec_cfg',
'action': ['python3',
'src/buildtools/reclient_cfgs/fetch_reclient_cfgs.py',
'--rbe_instance',
Var('rbe_instance'),
'--reproxy_cfg_template',
'reproxy.cfg.template',
'--quiet',
],
},
]
recursedeps = []


@ -4,6 +4,7 @@ Version: 90
CPEPrefix: cpe:/a:webrtc_project:webrtc:90
License: BSD
License File: LICENSE
Shipped: yes
Description:
WebRTC provides real time voice and video processing


@ -270,7 +270,7 @@ rtc_library("libjingle_peerconnection_api") {
"dtmf_sender_interface.h",
"rtp_sender_interface.h",
]
public_deps = [ # no-presubmit-check TODO(webrtc:8603)
public_deps += [ # no-presubmit-check TODO(webrtc:8603)
# Remove when downstream has been updated
":dtmf_sender_interface",
":rtp_sender_interface",
@ -412,6 +412,7 @@ rtc_source_set("async_dns_resolver") {
"../rtc_base:socket_address",
"../rtc_base/system:rtc_export",
]
absl_deps = [ "//third_party/abseil-cpp/absl/functional:any_invocable" ]
}
rtc_source_set("wrapping_async_dns_resolver") {
@ -798,6 +799,7 @@ rtc_library("transport_api") {
"call/transport.h",
]
deps = [
":array_view",
":refcountedbase",
":scoped_refptr",
]


@ -14,6 +14,7 @@
#include <functional>
#include <memory>
#include "absl/functional/any_invocable.h"
#include "rtc_base/checks.h"
#include "rtc_base/socket_address.h"
#include "rtc_base/system/rtc_export.h"
@ -63,11 +64,11 @@ class RTC_EXPORT AsyncDnsResolverInterface {
// Start address resolution of the hostname in `addr`.
virtual void Start(const rtc::SocketAddress& addr,
std::function<void()> callback) = 0;
absl::AnyInvocable<void()> callback) = 0;
// Start address resolution of the hostname in `addr` matching `family`.
virtual void Start(const rtc::SocketAddress& addr,
int family,
std::function<void()> callback) = 0;
absl::AnyInvocable<void()> callback) = 0;
virtual const AsyncDnsResolverResult& result() const = 0;
};
@ -83,7 +84,7 @@ class AsyncDnsResolverFactoryInterface {
// The callback will be called on the sequence that the caller runs on.
virtual std::unique_ptr<webrtc::AsyncDnsResolverInterface> CreateAndResolve(
const rtc::SocketAddress& addr,
std::function<void()> callback) = 0;
absl::AnyInvocable<void()> callback) = 0;
// Creates an AsyncDnsResolver and starts resolving the name to an address
// matching the specified family. The callback will be called when resolution
// is finished. The callback will be called on the sequence that the caller
@ -91,7 +92,7 @@ class AsyncDnsResolverFactoryInterface {
virtual std::unique_ptr<webrtc::AsyncDnsResolverInterface> CreateAndResolve(
const rtc::SocketAddress& addr,
int family,
std::function<void()> callback) = 0;
absl::AnyInvocable<void()> callback) = 0;
// Creates an AsyncDnsResolver and does not start it.
// For backwards compatibility, will be deprecated and removed.
// One has to do a separate Start() call on the

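Because the callback type is now absl::AnyInvocable<void()>, resolution callbacks can capture move-only state, which std::function could not hold. A minimal caller-side sketch, assuming a factory instance is available; the function name and captured state below are illustrative, not part of this change:

#include <memory>

#include "api/async_dns_resolver.h"
#include "rtc_base/socket_address.h"

void ResolveExample(webrtc::AsyncDnsResolverFactoryInterface& factory) {
  auto context = std::make_unique<int>(0);  // move-only capture, now possible
  auto resolver = factory.CreateAndResolve(
      rtc::SocketAddress("example.org", 443),
      [context = std::move(context)]() mutable {
        // Resolution finished; the result is read from the resolver that
        // owns this callback.
      });
  // In real code, keep `resolver` alive until the callback has fired.
}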

@ -18,6 +18,7 @@ namespace webrtc {
// An abstract factory for creating AsyncResolverInterfaces. This allows
// client applications to provide WebRTC with their own mechanism for
// performing DNS resolution.
// TODO(bugs.webrtc.org/12598): Deprecate and remove.
class AsyncResolverFactory {
public:
AsyncResolverFactory() = default;


@ -14,6 +14,7 @@
#include <stddef.h>
#include <stdint.h>
#include "api/array_view.h"
#include "api/ref_counted_base.h"
#include "api/scoped_refptr.h"
@ -44,10 +45,29 @@ struct PacketOptions {
class Transport {
public:
virtual bool SendRtp(const uint8_t* packet,
size_t length,
const PacketOptions& options) = 0;
virtual bool SendRtcp(const uint8_t* packet, size_t length) = 0;
// New style functions. Default implementations are to accommodate
// subclasses that haven't been converted to new style yet.
// TODO(bugs.webrtc.org/14870): Deprecate and remove old functions.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
virtual bool SendRtp(rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
return SendRtp(packet.data(), packet.size(), options);
}
virtual bool SendRtcp(rtc::ArrayView<const uint8_t> packet) {
return SendRtcp(packet.data(), packet.size());
}
#pragma clang diagnostic pop
// Old style functions.
[[deprecated("Use ArrayView version")]] virtual bool
SendRtp(const uint8_t* packet, size_t length, const PacketOptions& options) {
return SendRtp(rtc::MakeArrayView(packet, length), options);
}
[[deprecated("Use ArrayView version")]] virtual bool SendRtcp(
const uint8_t* packet,
size_t length) {
return SendRtcp(rtc::MakeArrayView(packet, length));
}
protected:
virtual ~Transport() {}

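For downstream implementers, overriding only the ArrayView overloads is now enough, since the deprecated pointer/length virtuals forward to them by default. A minimal sketch of a conforming subclass; the class name and send logic are placeholders:

#include "api/array_view.h"
#include "api/call/transport.h"

class NullTransport : public webrtc::Transport {
 public:
  bool SendRtp(rtc::ArrayView<const uint8_t> packet,
               const webrtc::PacketOptions& options) override {
    // A real transport would hand packet.data() / packet.size() to a socket.
    return !packet.empty();
  }
  bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override {
    return !packet.empty();
  }
};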

@ -78,11 +78,12 @@ std::string Candidate::ToStringInternal(bool sensitive) const {
rtc::StringBuilder ost;
std::string address =
sensitive ? address_.ToSensitiveString() : address_.ToString();
std::string related_address = sensitive ? related_address_.ToSensitiveString()
: related_address_.ToString();
ost << "Cand[" << transport_name_ << ":" << foundation_ << ":" << component_
<< ":" << protocol_ << ":" << priority_ << ":" << address << ":" << type_
<< ":" << related_address_.ToString() << ":" << username_ << ":"
<< password_ << ":" << network_id_ << ":" << network_cost_ << ":"
<< generation_ << "]";
<< ":" << related_address << ":" << username_ << ":" << password_ << ":"
<< network_id_ << ":" << network_cost_ << ":" << generation_ << "]";
return ost.Release();
}


@ -26,14 +26,14 @@ struct CryptoParams {
absl::string_view cs,
absl::string_view kp,
absl::string_view sp)
: tag(t), cipher_suite(cs), key_params(kp), session_params(sp) {}
: tag(t), crypto_suite(cs), key_params(kp), session_params(sp) {}
bool Matches(const CryptoParams& params) const {
return (tag == params.tag && cipher_suite == params.cipher_suite);
return (tag == params.tag && crypto_suite == params.crypto_suite);
}
int tag;
std::string cipher_suite;
std::string crypto_suite;
std::string key_params;
std::string session_params;
};

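Downstream code referring to the old cipher_suite member has to move to crypto_suite; construction is unchanged. A small sketch, with placeholder suite and key strings:

#include "api/crypto_params.h"

bool SelfMatch() {
  cricket::CryptoParams params(/*t=*/1,
                               /*cs=*/"AES_CM_128_HMAC_SHA1_80",
                               /*kp=*/"inline:<key>",
                               /*sp=*/"");
  return params.Matches(params);  // compares tag and crypto_suite
}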

@ -36,6 +36,8 @@ class TransformableFrameInterface {
virtual uint8_t GetPayloadType() const = 0;
virtual uint32_t GetSsrc() const = 0;
virtual uint32_t GetTimestamp() const = 0;
virtual void SetRTPTimestamp(uint32_t timestamp) = 0;
// TODO(https://bugs.webrtc.org/14878): Change this to pure virtual after it
// is implemented everywhere.
virtual absl::optional<Timestamp> GetCaptureTimeIdentifier() const {
@ -68,14 +70,6 @@ class TransformableAudioFrameInterface : public TransformableFrameInterface {
public:
virtual ~TransformableAudioFrameInterface() = default;
virtual void SetRTPTimestamp(uint32_t timestamp) = 0;
// Exposes the frame header, enabling the interface clients to use the
// information in the header as needed, for example to compile the list of
// csrcs.
// TODO(crbug.com/1453226): Deprecate and remove once callers have migrated to
// the getters for specific fields.
virtual const RTPHeader& GetHeader() const = 0;
virtual rtc::ArrayView<const uint32_t> GetContributingSources() const = 0;
// TODO(crbug.com/1453226): Change this to pure virtual after it
@ -83,6 +77,18 @@ class TransformableAudioFrameInterface : public TransformableFrameInterface {
virtual const absl::optional<uint16_t> SequenceNumber() const {
return absl::nullopt;
}
// TODO(crbug.com/1456628): Change this to pure virtual after it
// is implemented everywhere.
virtual absl::optional<uint64_t> AbsoluteCaptureTimestamp() const {
return absl::nullopt;
}
enum class FrameType { kEmptyFrame, kAudioFrameSpeech, kAudioFrameCN };
// TODO(crbug.com/1456628): Change this to pure virtual after it
// is implemented everywhere.
virtual FrameType Type() const { return FrameType::kEmptyFrame; }
};
// Objects implement this interface to be notified with the transformed frame.

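With SetRTPTimestamp promoted from the audio interface to the base TransformableFrameInterface, a transform can retimestamp audio and video frames uniformly. A caller-side sketch; the free function is illustrative:

#include "api/frame_transformer_interface.h"

void Retimestamp(webrtc::TransformableFrameInterface& frame,
                 uint32_t rtp_timestamp) {
  // Valid for audio and video frames alike after this change.
  frame.SetRTPTimestamp(rtp_timestamp);
}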

@ -64,7 +64,8 @@ struct IceTransportInit final {
RTC_DCHECK(!async_resolver_factory_);
async_dns_resolver_factory_ = async_dns_resolver_factory;
}
AsyncResolverFactory* async_resolver_factory() {
[[deprecated("Use async_dns_resolver_factory")]] AsyncResolverFactory*
async_resolver_factory() {
return async_resolver_factory_;
}
ABSL_DEPRECATED("bugs.webrtc.org/12598")


@ -88,8 +88,13 @@ PeerConnectionDependencies::PeerConnectionDependencies(
PeerConnectionObserver* observer_in)
: observer(observer_in) {}
// TODO(bugs.webrtc.org/12598): remove pragma once async_resolver_factory
// is removed from PeerConnectionDependencies
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
PeerConnectionDependencies::PeerConnectionDependencies(
PeerConnectionDependencies&&) = default;
#pragma clang diagnostic pop
PeerConnectionDependencies::~PeerConnectionDependencies() = default;


@ -1453,6 +1453,9 @@ struct RTC_EXPORT PeerConnectionDependencies final {
std::unique_ptr<webrtc::AsyncDnsResolverFactoryInterface>
async_dns_resolver_factory;
// Deprecated - use async_dns_resolver_factory
// Deprecation is in abeyance until Chromium is updated.
// TODO(crbug.com/1475925): Deprecate once Chromium is updated
// [[deprecated("Use async_dns_resolver_factory")]]
std::unique_ptr<webrtc::AsyncResolverFactory> async_resolver_factory;
std::unique_ptr<webrtc::IceTransportFactory> ice_transport_factory;
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator;


@ -516,11 +516,15 @@ struct RTC_EXPORT RtpEncodingParameters {
// Value to use for RID RTP header extension.
// Called "encodingId" in ORTC.
std::string rid;
bool request_key_frame = false;
// Allow dynamic frame length changes for audio:
// https://w3c.github.io/webrtc-extensions/#dom-rtcrtpencodingparameters-adaptiveptime
bool adaptive_ptime = false;
// Allow changing the used codec for this encoding.
absl::optional<RtpCodec> codec;
bool operator==(const RtpEncodingParameters& o) const {
return ssrc == o.ssrc && bitrate_priority == o.bitrate_priority &&
network_priority == o.network_priority &&
@ -531,7 +535,7 @@ struct RTC_EXPORT RtpEncodingParameters {
scale_resolution_down_by == o.scale_resolution_down_by &&
active == o.active && rid == o.rid &&
adaptive_ptime == o.adaptive_ptime &&
requested_resolution == o.requested_resolution;
requested_resolution == o.requested_resolution && codec == o.codec;
}
bool operator!=(const RtpEncodingParameters& o) const {
return !(*this == o);

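The new codec member enables per-encoding codec selection through the existing parameter round-trip. A hedged sketch, assuming `sender` is a connected RtpSenderInterface and `preferred` was obtained elsewhere (e.g. from codec capabilities):

#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"

void PreferCodec(webrtc::RtpSenderInterface* sender,
                 const webrtc::RtpCodec& preferred) {
  webrtc::RtpParameters params = sender->GetParameters();
  if (!params.encodings.empty()) {
    params.encodings[0].codec = preferred;  // absl::optional<RtpCodec>
    webrtc::RTCError result = sender->SetParameters(params);
    (void)result;  // a real caller would check result.ok()
  }
}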

@ -109,11 +109,6 @@ class RTC_EXPORT RtpSenderInterface : public rtc::RefCountInterface {
std::unique_ptr<VideoEncoderFactory::EncoderSelectorInterface>
encoder_selector) = 0;
// TODO(crbug.com/1354101): make pure virtual again after Chrome roll.
virtual RTCError GenerateKeyFrame(const std::vector<std::string>& rids) {
return RTCError::OK();
}
protected:
~RtpSenderInterface() override = default;
};


@ -1,2 +1,3 @@
set noparent
hbos@webrtc.org
hta@webrtc.org


@ -248,13 +248,18 @@ class RTC_EXPORT RTCInboundRtpStreamStats final
RTCStatsMember<uint32_t> packets_received;
RTCStatsMember<uint64_t> packets_discarded;
RTCStatsMember<uint64_t> fec_packets_received;
RTCStatsMember<uint64_t> fec_bytes_received;
RTCStatsMember<uint64_t> fec_packets_discarded;
// Inbound FEC SSRC. Only present if a mechanism like FlexFEC is negotiated.
RTCStatsMember<uint32_t> fec_ssrc;
RTCStatsMember<uint64_t> bytes_received;
RTCStatsMember<uint64_t> header_bytes_received;
// Inbound RTX stats. Only defined when RTX is used and it is therefore
// possible to distinguish retransmissions.
RTCStatsMember<uint64_t> retransmitted_packets_received;
RTCStatsMember<uint64_t> retransmitted_bytes_received;
RTCStatsMember<uint32_t> rtx_ssrc;
RTCStatsMember<double> last_packet_received_timestamp;
RTCStatsMember<double> jitter_buffer_delay;
RTCStatsMember<double> jitter_buffer_target_delay;
@ -367,6 +372,9 @@ class RTC_EXPORT RTCOutboundRtpStreamStats final
// In JavaScript, this is only exposed if HW exposure is allowed.
RTCStatsMember<bool> power_efficient_encoder;
RTCStatsMember<std::string> scalability_mode;
// RTX ssrc. Only present if RTX is negotiated.
RTCStatsMember<uint32_t> rtx_ssrc;
};
// https://w3c.github.io/webrtc-stats/#remoteinboundrtpstats-dict*

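A sketch of reading the newly added FEC members from a gathered report; `report` is assumed to come from a getStats callback, and the logging is illustrative:

#include "api/stats/rtc_stats_report.h"
#include "api/stats/rtcstats_objects.h"
#include "rtc_base/logging.h"

void LogFecStats(const webrtc::RTCStatsReport& report) {
  for (const auto* s :
       report.GetStatsOfType<webrtc::RTCInboundRtpStreamStats>()) {
    if (s->fec_ssrc.is_defined() && s->fec_bytes_received.is_defined()) {
      RTC_LOG(LS_INFO) << "fec ssrc " << *s->fec_ssrc << ", bytes "
                       << *s->fec_bytes_received;
    }
  }
}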

@ -130,6 +130,7 @@ rtc_library("pending_task_safety_flag") {
"../../api:sequence_checker",
"../../rtc_base:checks",
"../../rtc_base/system:no_unique_address",
"../../rtc_base/system:rtc_export",
]
absl_deps = [ "//third_party/abseil-cpp/absl/functional:any_invocable" ]
}


@ -19,6 +19,7 @@
#include "api/sequence_checker.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
@ -58,7 +59,7 @@ namespace webrtc {
//
// my_task_queue_->PostTask(SafeTask(safety_flag_, [this] { MyMethod(); }));
//
class PendingTaskSafetyFlag final
class RTC_EXPORT PendingTaskSafetyFlag final
: public rtc::RefCountedNonVirtual<PendingTaskSafetyFlag> {
public:
static rtc::scoped_refptr<PendingTaskSafetyFlag> Create();
@ -116,7 +117,7 @@ class PendingTaskSafetyFlag final
// This should be used by the class that wants tasks dropped after destruction.
// The requirement is that the instance has to be constructed and destructed on
// the same thread as the potentially dropped tasks would be running on.
class ScopedTaskSafety final {
class RTC_EXPORT ScopedTaskSafety final {
public:
ScopedTaskSafety() = default;
explicit ScopedTaskSafety(rtc::scoped_refptr<PendingTaskSafetyFlag> flag)
@ -140,7 +141,7 @@ class ScopedTaskSafety final {
// Like ScopedTaskSafety, but allows construction on a different thread than
// where the flag will be used.
class ScopedTaskSafetyDetached final {
class RTC_EXPORT ScopedTaskSafetyDetached final {
public:
ScopedTaskSafetyDetached() = default;
~ScopedTaskSafetyDetached() { flag_->SetNotAlive(); }


@ -32,11 +32,13 @@ class MockAsyncDnsResolver : public AsyncDnsResolverInterface {
public:
MOCK_METHOD(void,
Start,
(const rtc::SocketAddress&, std::function<void()>),
(const rtc::SocketAddress&, absl::AnyInvocable<void()>),
(override));
MOCK_METHOD(void,
Start,
(const rtc::SocketAddress&, int family, std::function<void()>),
(const rtc::SocketAddress&,
int family,
absl::AnyInvocable<void()>),
(override));
MOCK_METHOD(AsyncDnsResolverResult&, result, (), (const, override));
};
@ -45,11 +47,11 @@ class MockAsyncDnsResolverFactory : public AsyncDnsResolverFactoryInterface {
public:
MOCK_METHOD(std::unique_ptr<webrtc::AsyncDnsResolverInterface>,
CreateAndResolve,
(const rtc::SocketAddress&, std::function<void()>),
(const rtc::SocketAddress&, absl::AnyInvocable<void()>),
(override));
MOCK_METHOD(std::unique_ptr<webrtc::AsyncDnsResolverInterface>,
CreateAndResolve,
(const rtc::SocketAddress&, int, std::function<void()>),
(const rtc::SocketAddress&, int, absl::AnyInvocable<void()>),
(override));
MOCK_METHOD(std::unique_ptr<webrtc::AsyncDnsResolverInterface>,
Create,


@ -24,7 +24,6 @@ class MockTransformableAudioFrame : public TransformableAudioFrameInterface {
MOCK_METHOD(uint8_t, GetPayloadType, (), (const, override));
MOCK_METHOD(uint32_t, GetSsrc, (), (const, override));
MOCK_METHOD(uint32_t, GetTimestamp, (), (const, override));
MOCK_METHOD(RTPHeader&, GetHeader, (), (const override));
MOCK_METHOD(rtc::ArrayView<const uint32_t>,
GetContributingSources,
(),
@ -37,6 +36,14 @@ class MockTransformableAudioFrame : public TransformableAudioFrameInterface {
GetDirection,
(),
(const, override));
MOCK_METHOD(absl::optional<uint64_t>,
AbsoluteCaptureTimestamp,
(),
(const, override));
MOCK_METHOD(TransformableAudioFrameInterface::FrameType,
Type,
(),
(const, override));
};
} // namespace webrtc


@ -24,6 +24,7 @@ class MockTransformableVideoFrame
MOCK_METHOD(rtc::ArrayView<const uint8_t>, GetData, (), (const, override));
MOCK_METHOD(void, SetData, (rtc::ArrayView<const uint8_t> data), (override));
MOCK_METHOD(uint32_t, GetTimestamp, (), (const, override));
MOCK_METHOD(void, SetRTPTimestamp, (uint32_t), (override));
MOCK_METHOD(uint32_t, GetSsrc, (), (const, override));
MOCK_METHOD(bool, IsKeyFrame, (), (const, override));
MOCK_METHOD(void,
@ -36,6 +37,10 @@ class MockTransformableVideoFrame
(),
(const, override));
MOCK_METHOD(VideoFrameMetadata, Metadata, (), (const, override));
MOCK_METHOD(absl::optional<Timestamp>,
GetCaptureTimeIdentifier,
(),
(const, override));
};
static_assert(!std::is_abstract_v<MockTransformableVideoFrame>, "");


@ -18,6 +18,9 @@
namespace webrtc {
using testing::_;
using testing::Invoke;
class MockDecodedImageCallback : public DecodedImageCallback {
public:
MOCK_METHOD(int32_t,
@ -43,6 +46,14 @@ class MockVideoDecoder : public VideoDecoder {
// Make `Configure` succeed by default, so that individual tests that
// verify other methods wouldn't need to stub `Configure`.
ON_CALL(*this, Configure).WillByDefault(testing::Return(true));
// TODO(bugs.webrtc.org/15444): Remove once all tests have been migrated to
// expecting calls to Decode without a missing_frames param.
ON_CALL(*this, Decode(_, _))
.WillByDefault(Invoke([this](const EncodedImage& input_image,
int64_t render_time_ms) {
return Decode(input_image, /*missing_frames=*/false, render_time_ms);
}));
}
~MockVideoDecoder() override { Destruct(); }
@ -51,9 +62,13 @@ class MockVideoDecoder : public VideoDecoder {
MOCK_METHOD(int32_t,
Decode,
(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms),
(override));
MOCK_METHOD(int32_t,
Decode,
(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms));
MOCK_METHOD(int32_t,
RegisterDecodeCompleteCallback,
(DecodedImageCallback * callback),


@ -82,6 +82,33 @@ NetworkEmulationManager::SimulatedNetworkNode::Builder::packet_queue_length(
return *this;
}
NetworkEmulationManager::SimulatedNetworkNode::Builder&
NetworkEmulationManager::SimulatedNetworkNode::Builder::
delay_standard_deviation_ms(int delay_standard_deviation_ms) {
config_.delay_standard_deviation_ms = delay_standard_deviation_ms;
return *this;
}
NetworkEmulationManager::SimulatedNetworkNode::Builder&
NetworkEmulationManager::SimulatedNetworkNode::Builder::allow_reordering() {
config_.allow_reordering = true;
return *this;
}
NetworkEmulationManager::SimulatedNetworkNode::Builder&
NetworkEmulationManager::SimulatedNetworkNode::Builder::avg_burst_loss_length(
int avg_burst_loss_length) {
config_.avg_burst_loss_length = avg_burst_loss_length;
return *this;
}
NetworkEmulationManager::SimulatedNetworkNode::Builder&
NetworkEmulationManager::SimulatedNetworkNode::Builder::packet_overhead(
int packet_overhead) {
config_.packet_overhead = packet_overhead;
return *this;
}
NetworkEmulationManager::SimulatedNetworkNode
NetworkEmulationManager::SimulatedNetworkNode::Builder::Build(
uint64_t random_seed) const {


@ -184,6 +184,10 @@ class NetworkEmulationManager {
Builder& capacity_Mbps(int link_capacity_Mbps);
Builder& loss(double loss_rate);
Builder& packet_queue_length(int max_queue_length_in_packets);
Builder& delay_standard_deviation_ms(int delay_standard_deviation_ms);
Builder& allow_reordering();
Builder& avg_burst_loss_length(int avg_burst_loss_length);
Builder& packet_overhead(int packet_overhead);
SimulatedNetworkNode Build(uint64_t random_seed = 1) const;
SimulatedNetworkNode Build(NetworkEmulationManager* net,
uint64_t random_seed = 1) const;

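The added knobs compose with the existing builder chain. A sketch, assuming `net` points at a live NetworkEmulationManager; the numbers are arbitrary:

#include "api/test/network_emulation_manager.h"

webrtc::NetworkEmulationManager::SimulatedNetworkNode MakeLossyNode(
    webrtc::NetworkEmulationManager* net) {
  return webrtc::NetworkEmulationManager::SimulatedNetworkNode::Builder()
      .delay_standard_deviation_ms(15)
      .allow_reordering()
      .loss(0.02)
      .avg_burst_loss_length(3)
      .packet_overhead(28)
      .Build(net);
}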

@ -65,6 +65,7 @@ rtc_library("media_quality_test_params") {
deps = [
":media_configuration",
"../..:async_dns_resolver",
"../../../api:callfactory_api",
"../../../api:fec_controller_api",
"../../../api:field_trials_view",
@ -94,6 +95,7 @@ rtc_library("peer_configurer") {
deps = [
":media_configuration",
":media_quality_test_params",
"../..:async_dns_resolver",
"../../../api:callfactory_api",
"../../../api:create_peer_connection_quality_test_frame_generator",
"../../../api:fec_controller_api",


@ -15,7 +15,7 @@
#include <string>
#include <vector>
#include "api/async_resolver_factory.h"
#include "api/async_dns_resolver.h"
#include "api/audio/audio_mixer.h"
#include "api/call/call_factory_interface.h"
#include "api/fec_controller.h"
@ -85,7 +85,8 @@ struct PeerConnectionComponents {
rtc::NetworkManager* const network_manager;
rtc::PacketSocketFactory* const packet_socket_factory;
std::unique_ptr<webrtc::AsyncResolverFactory> async_resolver_factory;
std::unique_ptr<webrtc::AsyncDnsResolverFactoryInterface>
async_dns_resolver_factory;
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator;
std::unique_ptr<rtc::SSLCertificateVerifier> tls_cert_verifier;
std::unique_ptr<IceTransportFactory> ice_transport_factory;


@ -86,10 +86,11 @@ PeerConfigurer* PeerConfigurer::SetAudioDecoderFactory(
components_->pcf_dependencies->audio_decoder_factory = audio_decoder_factory;
return this;
}
PeerConfigurer* PeerConfigurer::SetAsyncResolverFactory(
std::unique_ptr<webrtc::AsyncResolverFactory> async_resolver_factory) {
components_->pc_dependencies->async_resolver_factory =
std::move(async_resolver_factory);
PeerConfigurer* PeerConfigurer::SetAsyncDnsResolverFactory(
std::unique_ptr<webrtc::AsyncDnsResolverFactoryInterface>
async_dns_resolver_factory) {
components_->pc_dependencies->async_dns_resolver_factory =
std::move(async_dns_resolver_factory);
return this;
}
PeerConfigurer* PeerConfigurer::SetRTCCertificateGenerator(


@ -16,7 +16,7 @@
#include <vector>
#include "absl/strings/string_view.h"
#include "api/async_resolver_factory.h"
#include "api/async_dns_resolver.h"
#include "api/audio/audio_mixer.h"
#include "api/call/call_factory_interface.h"
#include "api/fec_controller.h"
@ -88,8 +88,9 @@ class PeerConfigurer {
// The parameters of the following 4 methods will be passed to the
// PeerConnectionInterface implementation that will be created for this
// peer.
PeerConfigurer* SetAsyncResolverFactory(
std::unique_ptr<webrtc::AsyncResolverFactory> async_resolver_factory);
PeerConfigurer* SetAsyncDnsResolverFactory(
std::unique_ptr<webrtc::AsyncDnsResolverFactoryInterface>
async_resolver_factory);
PeerConfigurer* SetRTCCertificateGenerator(
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator);
PeerConfigurer* SetSSLCertificateVerifier(


@ -9,6 +9,9 @@
*/
#include "api/test/video/test_video_track_source.h"
#include <utility>
#include "absl/types/optional.h"
#include "api/media_stream_interface.h"
#include "api/sequence_checker.h"
#include "api/video/video_frame.h"
@ -19,8 +22,12 @@
namespace webrtc {
namespace test {
TestVideoTrackSource::TestVideoTrackSource(bool remote)
: state_(kInitializing), remote_(remote) {
TestVideoTrackSource::TestVideoTrackSource(
bool remote,
absl::optional<std::string> stream_label)
: stream_label_(std::move(stream_label)),
state_(kInitializing),
remote_(remote) {
worker_thread_checker_.Detach();
signaling_thread_checker_.Detach();
}


@ -11,6 +11,8 @@
#ifndef API_TEST_VIDEO_TEST_VIDEO_TRACK_SOURCE_H_
#define API_TEST_VIDEO_TEST_VIDEO_TRACK_SOURCE_H_
#include <string>
#include "absl/types/optional.h"
#include "api/media_stream_interface.h"
#include "api/notifier.h"
@ -28,7 +30,9 @@ namespace test {
// Video source that can be used as input for tests.
class TestVideoTrackSource : public Notifier<VideoTrackSourceInterface> {
public:
explicit TestVideoTrackSource(bool remote);
explicit TestVideoTrackSource(
bool remote,
absl::optional<std::string> stream_label = absl::nullopt);
~TestVideoTrackSource() override = default;
void SetState(SourceState new_state);
@ -72,10 +76,15 @@ class TestVideoTrackSource : public Notifier<VideoTrackSourceInterface> {
int height,
const absl::optional<int>& max_fps) {}
// Returns stream label for this video source if present. Implementations
// may override this method to increase debuggability and testability.
virtual absl::optional<std::string> GetStreamLabel() { return stream_label_; }
protected:
virtual rtc::VideoSourceInterface<VideoFrame>* source() = 0;
private:
const absl::optional<std::string> stream_label_;
RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_thread_checker_;
RTC_NO_UNIQUE_ADDRESS SequenceChecker signaling_thread_checker_;
SourceState state_ RTC_GUARDED_BY(&signaling_thread_checker_);


@ -14,6 +14,7 @@ rtc_source_set("rtp_source") {
deps = [
"../../../api:rtp_headers",
"../../../api/units:time_delta",
"../../../api/units:timestamp",
"../../../rtc_base:checks",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]


@ -16,6 +16,7 @@
#include "absl/types/optional.h"
#include "api/rtp_headers.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "rtc_base/checks.h"
namespace webrtc {
@ -44,12 +45,25 @@ class RtpSource {
RtpSource() = delete;
RtpSource(Timestamp timestamp,
uint32_t source_id,
RtpSourceType source_type,
uint32_t rtp_timestamp,
const RtpSource::Extensions& extensions)
: timestamp_(timestamp),
source_id_(source_id),
source_type_(source_type),
extensions_(extensions),
rtp_timestamp_(rtp_timestamp) {}
// TODO(bugs.webrtc.org/13757): deprecate when chromium stops using this
// and remove after 2023-09-18
RtpSource(int64_t timestamp_ms,
uint32_t source_id,
RtpSourceType source_type,
uint32_t rtp_timestamp,
const RtpSource::Extensions& extensions)
: timestamp_ms_(timestamp_ms),
: timestamp_(Timestamp::Millis(timestamp_ms)),
source_id_(source_id),
source_type_(source_type),
extensions_(extensions),
@ -59,10 +73,14 @@ class RtpSource {
RtpSource& operator=(const RtpSource&) = default;
~RtpSource() = default;
int64_t timestamp_ms() const { return timestamp_ms_; }
void update_timestamp_ms(int64_t timestamp_ms) {
RTC_DCHECK_LE(timestamp_ms_, timestamp_ms);
timestamp_ms_ = timestamp_ms;
Timestamp timestamp() const { return timestamp_; }
// TODO(bugs.webrtc.org/13757): deprecate when chromium stops using this
// and remove after 2023-09-18
int64_t timestamp_ms() const { return timestamp_.ms(); }
[[deprecated]] void update_timestamp_ms(int64_t timestamp_ms) {
RTC_DCHECK_LE(timestamp_.ms(), timestamp_ms);
timestamp_ = Timestamp::Millis(timestamp_ms);
}
// The identifier of the source can be the CSRC or the SSRC.
@ -90,7 +108,7 @@ class RtpSource {
}
bool operator==(const RtpSource& o) const {
return timestamp_ms_ == o.timestamp_ms() && source_id_ == o.source_id() &&
return timestamp_ == o.timestamp() && source_id_ == o.source_id() &&
source_type_ == o.source_type() &&
extensions_.audio_level == o.extensions_.audio_level &&
extensions_.absolute_capture_time ==
@ -99,7 +117,7 @@ class RtpSource {
}
private:
int64_t timestamp_ms_;
Timestamp timestamp_;
uint32_t source_id_;
RtpSourceType source_type_;
RtpSource::Extensions extensions_;

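Construction migrates from raw milliseconds to the Timestamp unit type. A sketch with arbitrary values:

#include "api/transport/rtp/rtp_source.h"
#include "api/units/timestamp.h"

webrtc::RtpSource MakeSource() {
  return webrtc::RtpSource(webrtc::Timestamp::Millis(1234),
                           /*source_id=*/0x11223344,
                           webrtc::RtpSourceType::SSRC,
                           /*rtp_timestamp=*/90000,
                           webrtc::RtpSource::Extensions{});
}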

@ -722,6 +722,10 @@ StunAttributeValueType StunMessage::GetAttributeValueType(int type) const {
return STUN_VALUE_BYTE_STRING;
case STUN_ATTR_GOOG_MISC_INFO:
return STUN_VALUE_UINT16_LIST;
case STUN_ATTR_GOOG_DELTA:
return STUN_VALUE_BYTE_STRING;
case STUN_ATTR_GOOG_DELTA_ACK:
return STUN_VALUE_UINT64;
default:
return STUN_VALUE_UNKNOWN;
}


@ -119,16 +119,6 @@ enum AddIceCandidateResult {
kAddIceCandidateMax
};
// Metric for recording which api surface was used to enable simulcast.
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
enum SimulcastApiVersion {
kSimulcastApiVersionNone = 0,
kSimulcastApiVersionLegacy = 1,
kSimulcastApiVersionSpecCompliant = 2,
kSimulcastApiVersionMax
};
// Metrics for reporting usage of BUNDLE.
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.


@ -28,6 +28,7 @@ rtc_library("video_rtp_headers") {
deps = [
"..:array_view",
"../../rtc_base:checks",
"../../rtc_base:logging",
"../../rtc_base:safe_conversions",
"../../rtc_base:stringutils",
@ -126,6 +127,7 @@ rtc_source_set("video_frame_type") {
visibility = [ "*" ]
sources = [ "video_frame_type.h" ]
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
deps = [ "../../rtc_base:checks" ]
}
rtc_source_set("render_resolution") {
@ -155,6 +157,7 @@ rtc_library("encoded_image") {
"../../rtc_base:checks",
"../../rtc_base:refcount",
"../../rtc_base/system:rtc_export",
"../units:timestamp",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
@ -167,7 +170,10 @@ rtc_library("encoded_frame") {
]
deps = [
"../../modules/video_coding:encoded_frame",
":encoded_image",
":video_frame",
"../../modules/rtp_rtcp:rtp_video_header",
"../../modules/video_coding:video_codec_interface",
"../units:timestamp",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]


@ -1,11 +1,9 @@
specific_include_rules = {
# Until the new VideoStreamDecoder is implemented the current decoding
# pipeline will be used, and therefore EncodedFrame needs to inherit
# VCMEncodedFrame.
"encoded_frame.h": [
"+modules/video_coding/encoded_frame.h",
"encoded_frame.h" : [
"+modules/rtp_rtcp/source/rtp_video_header.h",
"+modules/video_coding/include/video_codec_interface.h",
"+modules/video_coding/include/video_coding_defines.h",
],
"encoded_image\.h" : [
"+rtc_base/ref_count.h",
],


@ -101,7 +101,7 @@ class RTC_EXPORT ColorSpace {
kInvalid = 0,
// Limited Rec. 709 color range with RGB values ranging from 16 to 235.
kLimited = 1,
// Full RGB color range with RGB valees from 0 to 255.
// Full RGB color range with RGB values from 0 to 255.
kFull = 2,
// Range is defined by MatrixCoefficients/TransferCharacteristics.
kDerived = 3,


@ -11,6 +11,7 @@
#include "api/video/encoded_frame.h"
#include "absl/types/optional.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"
namespace webrtc {
@ -30,4 +31,100 @@ bool EncodedFrame::delayed_by_retransmission() const {
return false;
}
void EncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
if (header) {
switch (header->codec) {
case kVideoCodecVP8: {
const auto& vp8_header =
absl::get<RTPVideoHeaderVP8>(header->video_type_header);
if (_codecSpecificInfo.codecType != kVideoCodecVP8) {
// This is the first packet for this frame.
_codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
_codecSpecificInfo.codecSpecific.VP8.layerSync = false;
_codecSpecificInfo.codecSpecific.VP8.keyIdx = -1;
_codecSpecificInfo.codecType = kVideoCodecVP8;
}
_codecSpecificInfo.codecSpecific.VP8.nonReference =
vp8_header.nonReference;
if (vp8_header.temporalIdx != kNoTemporalIdx) {
_codecSpecificInfo.codecSpecific.VP8.temporalIdx =
vp8_header.temporalIdx;
_codecSpecificInfo.codecSpecific.VP8.layerSync = vp8_header.layerSync;
}
if (vp8_header.keyIdx != kNoKeyIdx) {
_codecSpecificInfo.codecSpecific.VP8.keyIdx = vp8_header.keyIdx;
}
break;
}
case kVideoCodecVP9: {
const auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(header->video_type_header);
if (_codecSpecificInfo.codecType != kVideoCodecVP9) {
// This is the first packet for this frame.
_codecSpecificInfo.codecSpecific.VP9.temporal_idx = 0;
_codecSpecificInfo.codecSpecific.VP9.gof_idx = 0;
_codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted = false;
_codecSpecificInfo.codecType = kVideoCodecVP9;
}
_codecSpecificInfo.codecSpecific.VP9.inter_pic_predicted =
vp9_header.inter_pic_predicted;
_codecSpecificInfo.codecSpecific.VP9.flexible_mode =
vp9_header.flexible_mode;
_codecSpecificInfo.codecSpecific.VP9.num_ref_pics =
vp9_header.num_ref_pics;
for (uint8_t r = 0; r < vp9_header.num_ref_pics; ++r) {
_codecSpecificInfo.codecSpecific.VP9.p_diff[r] =
vp9_header.pid_diff[r];
}
_codecSpecificInfo.codecSpecific.VP9.ss_data_available =
vp9_header.ss_data_available;
if (vp9_header.temporal_idx != kNoTemporalIdx) {
_codecSpecificInfo.codecSpecific.VP9.temporal_idx =
vp9_header.temporal_idx;
_codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
vp9_header.temporal_up_switch;
}
if (vp9_header.spatial_idx != kNoSpatialIdx) {
_codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted =
vp9_header.inter_layer_predicted;
SetSpatialIndex(vp9_header.spatial_idx);
}
if (vp9_header.gof_idx != kNoGofIdx) {
_codecSpecificInfo.codecSpecific.VP9.gof_idx = vp9_header.gof_idx;
}
if (vp9_header.ss_data_available) {
_codecSpecificInfo.codecSpecific.VP9.num_spatial_layers =
vp9_header.num_spatial_layers;
_codecSpecificInfo.codecSpecific.VP9
.spatial_layer_resolution_present =
vp9_header.spatial_layer_resolution_present;
if (vp9_header.spatial_layer_resolution_present) {
for (size_t i = 0; i < vp9_header.num_spatial_layers; ++i) {
_codecSpecificInfo.codecSpecific.VP9.width[i] =
vp9_header.width[i];
_codecSpecificInfo.codecSpecific.VP9.height[i] =
vp9_header.height[i];
}
}
_codecSpecificInfo.codecSpecific.VP9.gof.CopyGofInfoVP9(
vp9_header.gof);
}
break;
}
case kVideoCodecH264: {
_codecSpecificInfo.codecType = kVideoCodecH264;
break;
}
case kVideoCodecAV1: {
_codecSpecificInfo.codecType = kVideoCodecAV1;
break;
}
default: {
_codecSpecificInfo.codecType = kVideoCodecGeneric;
break;
}
}
}
}
} // namespace webrtc


@ -16,14 +16,17 @@
#include "absl/types/optional.h"
#include "api/units/timestamp.h"
#include "modules/video_coding/encoded_frame.h"
#include "api/video/encoded_image.h"
#include "api/video/video_codec_type.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_coding_defines.h"
namespace webrtc {
// TODO(philipel): Remove webrtc::VCMEncodedFrame inheritance.
// TODO(philipel): Move transport specific info out of EncodedFrame.
// NOTE: This class is still under development and may change without notice.
class EncodedFrame : public webrtc::VCMEncodedFrame {
class EncodedFrame : public EncodedImage {
public:
static const uint8_t kMaxFrameReferences = 5;
@ -33,14 +36,16 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {
// When this frame was received.
// TODO(bugs.webrtc.org/13756): Use Timestamp instead of int.
virtual int64_t ReceivedTime() const = 0;
virtual int64_t ReceivedTime() const { return -1; }
// Returns a Timestamp from `ReceivedTime`, or nullopt if there is no receive
// time.
absl::optional<webrtc::Timestamp> ReceivedTimestamp() const;
// When this frame should be rendered.
// TODO(bugs.webrtc.org/13756): Use Timestamp instead of int.
virtual int64_t RenderTime() const = 0;
virtual int64_t RenderTime() const { return _renderTimeMs; }
// TODO(bugs.webrtc.org/13756): Migrate to ReceivedTimestamp.
int64_t RenderTimeMs() const { return _renderTimeMs; }
// Returns a Timestamp from `RenderTime`, or nullopt if there is no
// render time.
absl::optional<webrtc::Timestamp> RenderTimestamp() const;
@ -55,6 +60,21 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {
void SetId(int64_t id) { id_ = id; }
int64_t Id() const { return id_; }
uint8_t PayloadType() const { return _payloadType; }
void SetRenderTime(const int64_t renderTimeMs) {
_renderTimeMs = renderTimeMs;
}
const webrtc::EncodedImage& EncodedImage() const {
return static_cast<const webrtc::EncodedImage&>(*this);
}
const CodecSpecificInfo* CodecSpecific() const { return &_codecSpecificInfo; }
void SetCodecSpecific(const CodecSpecificInfo* codec_specific) {
_codecSpecificInfo = *codec_specific;
}
// TODO(philipel): Add simple modify/access functions to prevent adding too
// many `references`.
size_t num_references = 0;
@ -63,6 +83,18 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {
// mean that the last packet has a marker bit set).
bool is_last_spatial_layer = true;
protected:
// TODO(https://bugs.webrtc.org/9378): Move RTP specifics down into a
// transport-aware subclass, eg RtpFrameObject.
void CopyCodecSpecific(const RTPVideoHeader* header);
// TODO(https://bugs.webrtc.org/9378): Make fields private with
// getters/setters as needed.
int64_t _renderTimeMs = -1;
uint8_t _payloadType = 0;
CodecSpecificInfo _codecSpecificInfo;
VideoCodecType _codec = kVideoCodecGeneric;
private:
// The ID of the frame is determined from RTP level information. The IDs are
// used to describe order and dependencies between frames.

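With the VCMEncodedFrame base removed, the members EncodedFrame used to inherit are exposed through its own accessors. A reader-side sketch:

#include "api/video/encoded_frame.h"

void Describe(const webrtc::EncodedFrame& frame) {
  const int64_t render_ms = frame.RenderTimeMs();   // previously inherited
  const uint8_t payload_type = frame.PayloadType();
  const webrtc::CodecSpecificInfo* info = frame.CodecSpecific();
  (void)render_ms;
  (void)payload_type;
  (void)info;  // a real caller would branch on info->codecType etc.
}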

@ -75,6 +75,11 @@ void EncodedImage::SetEncodeTime(int64_t encode_start_ms,
timing_.encode_finish_ms = encode_finish_ms;
}
webrtc::Timestamp EncodedImage::CaptureTime() const {
return capture_time_ms_ > 0 ? Timestamp::Millis(capture_time_ms_)
: Timestamp::MinusInfinity();
}
absl::optional<size_t> EncodedImage::SpatialLayerFrameSize(
int spatial_index) const {
RTC_DCHECK_GE(spatial_index, 0);

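The new CaptureTime() maps an unset capture_time_ms_ (<= 0) to Timestamp::MinusInfinity(), so callers can branch on finiteness. A sketch:

#include "api/video/encoded_image.h"

bool HasCaptureTime(const webrtc::EncodedImage& image) {
  return image.CaptureTime().IsFinite();  // false when capture time is unset
}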

@ -19,6 +19,7 @@
#include "absl/types/optional.h"
#include "api/rtp_packet_infos.h"
#include "api/scoped_refptr.h"
#include "api/units/timestamp.h"
#include "api/video/color_space.h"
#include "api/video/video_codec_constants.h"
#include "api/video/video_content_type.h"
@ -87,6 +88,8 @@ class RTC_EXPORT EncodedImage {
void SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms);
webrtc::Timestamp CaptureTime() const;
int64_t NtpTimeMs() const { return ntp_time_ms_; }
// Every simulcast layer (= encoding) has its own encoder and RTP stream.
@ -135,6 +138,14 @@ class RTC_EXPORT EncodedImage {
color_space_ = color_space;
}
absl::optional<VideoPlayoutDelay> PlayoutDelay() const {
return playout_delay_;
}
void SetPlayoutDelay(absl::optional<VideoPlayoutDelay> playout_delay) {
playout_delay_ = playout_delay;
}
// These methods along with the private member video_frame_tracking_id_ are
// meant for media quality testing purpose only.
absl::optional<uint16_t> VideoFrameTrackingId() const {
@ -190,6 +201,14 @@ class RTC_EXPORT EncodedImage {
at_target_quality_ = at_target_quality;
}
webrtc::VideoFrameType FrameType() const { return _frameType; }
void SetFrameType(webrtc::VideoFrameType frame_type) {
_frameType = frame_type;
}
VideoContentType contentType() const { return content_type_; }
VideoRotation rotation() const { return rotation_; }
uint32_t _encodedWidth = 0;
uint32_t _encodedHeight = 0;
// NTP time of the capture time in local timebase in milliseconds.
@ -201,11 +220,6 @@ class RTC_EXPORT EncodedImage {
VideoContentType content_type_ = VideoContentType::UNSPECIFIED;
int qp_ = -1; // Quantizer value.
// When an application indicates non-zero values here, it is taken as an
// indication that all future frames will be constrained with those limits
// until the application indicates a change again.
VideoPlayoutDelay playout_delay_;
struct Timing {
uint8_t flags = VideoSendTiming::kInvalid;
int64_t encode_start_ms = 0;
@ -217,10 +231,16 @@ class RTC_EXPORT EncodedImage {
int64_t receive_start_ms = 0;
int64_t receive_finish_ms = 0;
} timing_;
EncodedImage::Timing video_timing() const { return timing_; }
EncodedImage::Timing* video_timing_mutable() { return &timing_; }
private:
size_t capacity() const { return encoded_data_ ? encoded_data_->size() : 0; }
// When set, indicates that all future frames will be constrained by these
// limits until the application indicates a change again.
absl::optional<VideoPlayoutDelay> playout_delay_;
rtc::scoped_refptr<EncodedImageBufferInterface> encoded_data_;
size_t size_ = 0; // Size of encoded frame data.
uint32_t timestamp_rtp_ = 0;

View file

@ -10,21 +10,7 @@
#include "api/video/video_content_type.h"
// VideoContentType stored as a single byte, which is sent over the network.
// Structure:
//
// 0 1 2 3 4 5 6 7
// +---------------+
// |r r e e e s s c|
//
// where:
// r - reserved bits.
// e - 3-bit number of an experiment group counted from 1. 0 means there's no
// experiment ongoing.
// s - 2-bit simulcast stream id or spatial layer, counted from 1. 0 means that
// no simulcast information is set.
// c - content type. 0 means real-time video, 1 means screenshare.
//
#include "rtc_base/checks.h"
namespace webrtc {
namespace videocontenttypehelpers {
@ -33,57 +19,21 @@ namespace {
static constexpr uint8_t kScreenshareBitsSize = 1;
static constexpr uint8_t kScreenshareBitsMask =
(1u << kScreenshareBitsSize) - 1;
static constexpr uint8_t kSimulcastShift = 1;
static constexpr uint8_t kSimulcastBitsSize = 2;
static constexpr uint8_t kSimulcastBitsMask = ((1u << kSimulcastBitsSize) - 1)
<< kSimulcastShift; // 0b00000110
static constexpr uint8_t kExperimentShift = 3;
static constexpr uint8_t kExperimentBitsSize = 3;
static constexpr uint8_t kExperimentBitsMask =
((1u << kExperimentBitsSize) - 1) << kExperimentShift; // 0b00111000
static constexpr uint8_t kTotalBitsSize =
kScreenshareBitsSize + kSimulcastBitsSize + kExperimentBitsSize;
} // namespace
bool SetExperimentId(VideoContentType* content_type, uint8_t experiment_id) {
// Store in bits 2-4.
if (experiment_id >= (1 << kExperimentBitsSize))
return false;
*content_type = static_cast<VideoContentType>(
(static_cast<uint8_t>(*content_type) & ~kExperimentBitsMask) |
((experiment_id << kExperimentShift) & kExperimentBitsMask));
return true;
}
bool SetSimulcastId(VideoContentType* content_type, uint8_t simulcast_id) {
// Store in bits 5-6.
if (simulcast_id >= (1 << kSimulcastBitsSize))
return false;
*content_type = static_cast<VideoContentType>(
(static_cast<uint8_t>(*content_type) & ~kSimulcastBitsMask) |
((simulcast_id << kSimulcastShift) & kSimulcastBitsMask));
return true;
}
uint8_t GetExperimentId(const VideoContentType& content_type) {
return (static_cast<uint8_t>(content_type) & kExperimentBitsMask) >>
kExperimentShift;
}
uint8_t GetSimulcastId(const VideoContentType& content_type) {
return (static_cast<uint8_t>(content_type) & kSimulcastBitsMask) >>
kSimulcastShift;
}
bool IsScreenshare(const VideoContentType& content_type) {
// Ensure no bits apart from the screenshare bit are set.
// This CHECK is a temporary measure to detect code that introduces
// values according to old versions.
RTC_CHECK((static_cast<uint8_t>(content_type) & ~kScreenshareBitsMask) == 0);
return (static_cast<uint8_t>(content_type) & kScreenshareBitsMask) > 0;
}
bool IsValidContentType(uint8_t value) {
// Any 6-bit value is allowed.
return value < (1 << kTotalBitsSize);
// Only the screenshare bit is allowed.
// However, due to previous usage of the next 5 bits, we allow
// the lower 6 bits to be set.
return value < (1 << 6);
}
const char* ToString(const VideoContentType& content_type) {

View file

@ -15,18 +15,15 @@
namespace webrtc {
// VideoContentType stored as a single byte, which is sent over the network
// in the rtp-hdrext/video-content-type extension.
// Only the lowest bit is used, per the enum.
enum class VideoContentType : uint8_t {
UNSPECIFIED = 0,
SCREENSHARE = 1,
};
namespace videocontenttypehelpers {
bool SetExperimentId(VideoContentType* content_type, uint8_t experiment_id);
bool SetSimulcastId(VideoContentType* content_type, uint8_t simulcast_id);
uint8_t GetExperimentId(const VideoContentType& content_type);
uint8_t GetSimulcastId(const VideoContentType& content_type);
bool IsScreenshare(const VideoContentType& content_type);
bool IsValidContentType(uint8_t value);
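Since only the lowest bit of the wire byte is meaningful now, a receiver-side check reduces to a validity test plus a bit test. A small sketch, where `raw` is a hypothetical byte taken straight from the rtp-hdrext/video-content-type header extension:

#include "api/video/video_content_type.h"
#include "rtc_base/checks.h"

// Sketch: only the lowest bit of the wire byte carries meaning now.
bool IsScreenshareByte(uint8_t raw) {
  RTC_DCHECK(webrtc::videocontenttypehelpers::IsValidContentType(raw));
  return (raw & 0x01) != 0;  // bit 0: 0 = real-time video, 1 = screenshare
}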

View file

@ -12,6 +12,7 @@
#define API_VIDEO_VIDEO_FRAME_TYPE_H_
#include "absl/strings/string_view.h"
#include "rtc_base/checks.h"
namespace webrtc {
@ -25,15 +26,15 @@ enum class VideoFrameType {
inline constexpr absl::string_view VideoFrameTypeToString(
VideoFrameType frame_type) {
if (frame_type == VideoFrameType::kEmptyFrame) {
return "empty";
}
if (frame_type == VideoFrameType::kVideoFrameKey) {
return "key";
}
if (frame_type == VideoFrameType::kVideoFrameDelta) {
return "delta";
switch (frame_type) {
case VideoFrameType::kEmptyFrame:
return "empty";
case VideoFrameType::kVideoFrameKey:
return "key";
case VideoFrameType::kVideoFrameDelta:
return "delta";
}
RTC_CHECK_NOTREACHED();
return "";
}

View file

@ -10,6 +10,8 @@
#include "api/video/video_timing.h"
#include <algorithm>
#include "api/array_view.h"
#include "api/units/time_delta.h"
#include "rtc_base/logging.h"
@ -98,4 +100,23 @@ std::string TimingFrameInfo::ToString() const {
return sb.str();
}
VideoPlayoutDelay::VideoPlayoutDelay(TimeDelta min, TimeDelta max)
: min_(std::clamp(min, TimeDelta::Zero(), kMax)),
max_(std::clamp(max, min_, kMax)) {
if (!(TimeDelta::Zero() <= min && min <= max && max <= kMax)) {
RTC_LOG(LS_ERROR) << "Invalid video playout delay: [" << min << "," << max
<< "]. Clamped to [" << this->min() << "," << this->max()
<< "]";
}
}
bool VideoPlayoutDelay::Set(TimeDelta min, TimeDelta max) {
if (TimeDelta::Zero() <= min && min <= max && max <= kMax) {
min_ = min;
max_ = max;
return true;
}
return false;
}
} // namespace webrtc

View file

@ -17,12 +17,13 @@
#include <string>
#include "api/units/time_delta.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Video timing timestamps in ms counted from capture_time_ms of a frame.
// This structure represents data sent in video-timing RTP header extension.
struct VideoSendTiming {
struct RTC_EXPORT VideoSendTiming {
enum TimingFrameFlags : uint8_t {
kNotTriggered = 0, // Timing info valid, but not to be transmitted.
// Used on send-side only.
@ -51,7 +52,7 @@ struct VideoSendTiming {
// timestamps for the lifetime of that specific frame. Reported as a string via
// GetStats(). Only the frame which took the longest between two GetStats calls
// is reported.
struct TimingFrameInfo {
struct RTC_EXPORT TimingFrameInfo {
TimingFrameInfo();
// Returns end-to-end delay of a frame, if sender and receiver timestamps are
@ -106,22 +107,42 @@ struct TimingFrameInfo {
// Minimum and maximum playout delay values from capture to render.
// These are best effort values.
//
// A value < 0 indicates no change from previous valid value.
//
// min = max = 0 indicates that the receiver should try to render the
// frame as soon as possible.
//
// min = x, max = y indicates that the receiver is free to adapt
// in the range (x, y) based on network jitter.
struct VideoPlayoutDelay {
VideoPlayoutDelay() = default;
VideoPlayoutDelay(int min_ms, int max_ms) : min_ms(min_ms), max_ms(max_ms) {}
int min_ms = -1;
int max_ms = -1;
// This class ensures the invariant 0 <= min <= max <= kMax.
class RTC_EXPORT VideoPlayoutDelay {
public:
// Maximum supported value for the delay limit.
static constexpr TimeDelta kMax = TimeDelta::Millis(10) * 0xFFF;
bool operator==(const VideoPlayoutDelay& rhs) const {
return min_ms == rhs.min_ms && max_ms == rhs.max_ms;
// Creates delay limits indicating that the receiver should try to render
// the frame as soon as possible.
static VideoPlayoutDelay Minimal() {
return VideoPlayoutDelay(TimeDelta::Zero(), TimeDelta::Zero());
}
// Creates valid but unspecified limits.
VideoPlayoutDelay() = default;
VideoPlayoutDelay(const VideoPlayoutDelay&) = default;
VideoPlayoutDelay& operator=(const VideoPlayoutDelay&) = default;
VideoPlayoutDelay(TimeDelta min, TimeDelta max);
bool Set(TimeDelta min, TimeDelta max);
TimeDelta min() const { return min_; }
TimeDelta max() const { return max_; }
friend bool operator==(const VideoPlayoutDelay& lhs,
const VideoPlayoutDelay& rhs) {
return lhs.min_ == rhs.min_ && lhs.max_ == rhs.max_;
}
private:
TimeDelta min_ = TimeDelta::Zero();
TimeDelta max_ = kMax;
};
} // namespace webrtc
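The constructor clamps while Set() validates; a short sketch (values are illustrative) of how the two behave on the same invalid input:

#include "api/video/video_timing.h"

void PlayoutDelayExamples() {
  using webrtc::TimeDelta;
  using webrtc::VideoPlayoutDelay;

  // The constructor clamps and logs on invalid input: min > max here, so
  // max is pulled up to min. Result: [40 ms, 40 ms].
  VideoPlayoutDelay clamped(TimeDelta::Millis(40), TimeDelta::Millis(20));

  // Set() rejects invalid input instead of clamping and keeps the old
  // values: `ok` is false and `d` still holds its defaults [0, kMax].
  VideoPlayoutDelay d;
  bool ok = d.Set(TimeDelta::Millis(40), TimeDelta::Millis(20));

  (void)clamped;  // silence unused-variable warnings in this sketch
  (void)ok;
}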

View file

@ -68,6 +68,13 @@ rtc_library("video_codecs_api") {
"vp9_profile.h",
]
if (rtc_use_h265) {
sources += [
"h265_profile_tier_level.cc",
"h265_profile_tier_level.h",
]
}
deps = [
":scalability_mode",
"..:fec_controller_api",
@ -291,6 +298,7 @@ rtc_library("rtc_software_fallback_wrappers") {
deps = [
":video_codecs_api",
"..:fec_controller_api",
"../../api/transport:field_trial_based_config",
"../../api/video:video_frame",
"../../media:rtc_media_base",
"../../modules/video_coding:video_codec_interface",
@ -298,6 +306,7 @@ rtc_library("rtc_software_fallback_wrappers") {
"../../rtc_base:checks",
"../../rtc_base:event_tracer",
"../../rtc_base:logging",
"../../rtc_base/experiments:field_trial_parser",
"../../rtc_base/system:rtc_export",
"../../system_wrappers:field_trial",
"../../system_wrappers:metrics",

View file

@ -238,7 +238,8 @@ absl::optional<std::string> H264ProfileLevelIdToString(
}
char str[7];
snprintf(str, 7u, "%s%02x", profile_idc_iop_string, profile_level_id.level);
snprintf(str, 7u, "%s%02x", profile_idc_iop_string,
static_cast<unsigned>(profile_level_id.level));
return {str};
}

View file

@ -0,0 +1,248 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/h265_profile_tier_level.h"
#include <string>
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace {
const char kH265FmtpProfile[] = "profile-id";
const char kH265FmtpTier[] = "tier-flag";
const char kH265FmtpLevel[] = "level-id";
} // anonymous namespace
// Annex A of https://www.itu.int/rec/T-REC-H.265 (08/21), section A.3.
absl::optional<H265Profile> StringToH265Profile(const std::string& profile) {
absl::optional<int> i = rtc::StringToNumber<int>(profile);
if (!i.has_value()) {
return absl::nullopt;
}
switch (i.value()) {
case 1:
return H265Profile::kProfileMain;
case 2:
return H265Profile::kProfileMain10;
case 3:
return H265Profile::kProfileMainStill;
case 4:
return H265Profile::kProfileRangeExtensions;
case 5:
return H265Profile::kProfileHighThroughput;
case 6:
return H265Profile::kProfileMultiviewMain;
case 7:
return H265Profile::kProfileScalableMain;
case 8:
return H265Profile::kProfile3dMain;
case 9:
return H265Profile::kProfileScreenContentCoding;
case 10:
return H265Profile::kProfileScalableRangeExtensions;
case 11:
return H265Profile::kProfileHighThroughputScreenContentCoding;
default:
return absl::nullopt;
}
}
// Annex A of https://www.itu.int/rec/T-REC-H.265 (08/21), section A.4,
// tiers and levels.
absl::optional<H265Tier> StringToH265Tier(const std::string& tier) {
absl::optional<int> i = rtc::StringToNumber<int>(tier);
if (!i.has_value()) {
return absl::nullopt;
}
switch (i.value()) {
case 0:
return H265Tier::kTier0;
case 1:
return H265Tier::kTier1;
default:
return absl::nullopt;
}
}
absl::optional<H265Level> StringToH265Level(const std::string& level) {
const absl::optional<int> i = rtc::StringToNumber<int>(level);
if (!i.has_value())
return absl::nullopt;
switch (i.value()) {
case 30:
return H265Level::kLevel1;
case 60:
return H265Level::kLevel2;
case 63:
return H265Level::kLevel2_1;
case 90:
return H265Level::kLevel3;
case 93:
return H265Level::kLevel3_1;
case 120:
return H265Level::kLevel4;
case 123:
return H265Level::kLevel4_1;
case 150:
return H265Level::kLevel5;
case 153:
return H265Level::kLevel5_1;
case 156:
return H265Level::kLevel5_2;
case 180:
return H265Level::kLevel6;
case 183:
return H265Level::kLevel6_1;
case 186:
return H265Level::kLevel6_2;
default:
return absl::nullopt;
}
}
std::string H265ProfileToString(H265Profile profile) {
switch (profile) {
case H265Profile::kProfileMain:
return "1";
case H265Profile::kProfileMain10:
return "2";
case H265Profile::kProfileMainStill:
return "3";
case H265Profile::kProfileRangeExtensions:
return "4";
case H265Profile::kProfileHighThroughput:
return "5";
case H265Profile::kProfileMultiviewMain:
return "6";
case H265Profile::kProfileScalableMain:
return "7";
case H265Profile::kProfile3dMain:
return "8";
case H265Profile::kProfileScreenContentCoding:
return "9";
case H265Profile::kProfileScalableRangeExtensions:
return "10";
case H265Profile::kProfileHighThroughputScreenContentCoding:
return "11";
}
}
std::string H265TierToString(H265Tier tier) {
switch (tier) {
case H265Tier::kTier0:
return "0";
case H265Tier::kTier1:
return "1";
}
}
std::string H265LevelToString(H265Level level) {
switch (level) {
case H265Level::kLevel1:
return "30";
case H265Level::kLevel2:
return "60";
case H265Level::kLevel2_1:
return "63";
case H265Level::kLevel3:
return "90";
case H265Level::kLevel3_1:
return "93";
case H265Level::kLevel4:
return "120";
case H265Level::kLevel4_1:
return "123";
case H265Level::kLevel5:
return "150";
case H265Level::kLevel5_1:
return "153";
case H265Level::kLevel5_2:
return "156";
case H265Level::kLevel6:
return "180";
case H265Level::kLevel6_1:
return "183";
case H265Level::kLevel6_2:
return "186";
}
}
absl::optional<H265ProfileTierLevel> ParseSdpForH265ProfileTierLevel(
const SdpVideoFormat::Parameters& params) {
static const H265ProfileTierLevel kDefaultProfileTierLevel(
H265Profile::kProfileMain, H265Tier::kTier0, H265Level::kLevel3_1);
bool profile_tier_level_specified = false;
absl::optional<H265Profile> profile;
const auto profile_it = params.find(kH265FmtpProfile);
if (profile_it != params.end()) {
profile_tier_level_specified = true;
const std::string& profile_str = profile_it->second;
profile = StringToH265Profile(profile_str);
if (!profile) {
return absl::nullopt;
}
} else {
profile = H265Profile::kProfileMain;
}
absl::optional<H265Tier> tier;
const auto tier_it = params.find(kH265FmtpTier);
if (tier_it != params.end()) {
profile_tier_level_specified = true;
const std::string& tier_str = tier_it->second;
tier = StringToH265Tier(tier_str);
if (!tier) {
return absl::nullopt;
}
} else {
tier = H265Tier::kTier0;
}
absl::optional<H265Level> level;
const auto level_it = params.find(kH265FmtpLevel);
if (level_it != params.end()) {
profile_tier_level_specified = true;
const std::string& level_str = level_it->second;
level = StringToH265Level(level_str);
if (!level) {
return absl::nullopt;
}
} else {
level = H265Level::kLevel3_1;
}
// Per spec Table A.9, levels 1 through 3.1 do not allow the high tier.
if (level <= H265Level::kLevel3_1 && tier == H265Tier::kTier1) {
return absl::nullopt;
}
return !profile_tier_level_specified
? kDefaultProfileTierLevel
: H265ProfileTierLevel(profile.value(), tier.value(),
level.value());
}
bool H265IsSameProfileTierLevel(const SdpVideoFormat::Parameters& params1,
const SdpVideoFormat::Parameters& params2) {
const absl::optional<H265ProfileTierLevel> ptl1 =
ParseSdpForH265ProfileTierLevel(params1);
const absl::optional<H265ProfileTierLevel> ptl2 =
ParseSdpForH265ProfileTierLevel(params2);
return ptl1 && ptl2 && ptl1->profile == ptl2->profile &&
ptl1->tier == ptl2->tier && ptl1->level == ptl2->level;
}
} // namespace webrtc

View file

@ -0,0 +1,109 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_H265_PROFILE_TIER_LEVEL_H_
#define API_VIDEO_CODECS_H265_PROFILE_TIER_LEVEL_H_
#include <string>
#include "absl/types/optional.h"
#include "api/video_codecs/sdp_video_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Profiles can be found at:
// https://www.itu.int/rec/T-REC-H.265
// The enum values match the number specified in the SDP.
enum class H265Profile {
kProfileMain = 1,
kProfileMain10 = 2,
kProfileMainStill = 3,
kProfileRangeExtensions = 4,
kProfileHighThroughput = 5,
kProfileMultiviewMain = 6,
kProfileScalableMain = 7,
kProfile3dMain = 8,
kProfileScreenContentCoding = 9,
kProfileScalableRangeExtensions = 10,
kProfileHighThroughputScreenContentCoding = 11,
};
// Tiers can be found at https://www.itu.int/rec/T-REC-H.265
enum class H265Tier {
kTier0,
kTier1,
};
// All values are equal to 30 times the level number.
enum class H265Level {
kLevel1 = 30,
kLevel2 = 60,
kLevel2_1 = 63,
kLevel3 = 90,
kLevel3_1 = 93,
kLevel4 = 120,
kLevel4_1 = 123,
kLevel5 = 150,
kLevel5_1 = 153,
kLevel5_2 = 156,
kLevel6 = 180,
kLevel6_1 = 183,
kLevel6_2 = 186,
};
struct H265ProfileTierLevel {
constexpr H265ProfileTierLevel(H265Profile profile,
H265Tier tier,
H265Level level)
: profile(profile), tier(tier), level(level) {}
H265Profile profile;
H265Tier tier;
H265Level level;
};
// Helper function to convert H265Profile to std::string.
RTC_EXPORT std::string H265ProfileToString(H265Profile profile);
// Helper function to convert H265Tier to std::string.
RTC_EXPORT std::string H265TierToString(H265Tier tier);
// Helper function to convert H265Level to std::string.
RTC_EXPORT std::string H265LevelToString(H265Level level);
// Helper function to get H265Profile from profile string.
RTC_EXPORT absl::optional<H265Profile> StringToH265Profile(
const std::string& profile);
// Helper function to get H265Tier from tier string.
RTC_EXPORT absl::optional<H265Tier> StringToH265Tier(const std::string& tier);
// Helper function to get H265Level from level string.
RTC_EXPORT absl::optional<H265Level> StringToH265Level(
const std::string& level);
// Parses an SDP key-value map of format parameters to retrieve an H265
// profile/tier/level. Returns an H265ProfileTierLevel with its members set.
// The profile defaults to `kProfileMain` if no profile-id is specified,
// the tier defaults to `kTier0` if no tier-flag is specified, and
// the level defaults to `kLevel3_1` if no level-id is specified.
// Returns an empty value if any of the profile/tier/level keys is present
// but contains an invalid value.
RTC_EXPORT absl::optional<H265ProfileTierLevel> ParseSdpForH265ProfileTierLevel(
const SdpVideoFormat::Parameters& params);
// Returns true if both sets of parameters parse to the same H265 profile,
// tier and level (unspecified values take the defaults above); otherwise
// false, including when either set fails to parse.
bool H265IsSameProfileTierLevel(const SdpVideoFormat::Parameters& params1,
const SdpVideoFormat::Parameters& params2);
} // namespace webrtc
#endif // API_VIDEO_CODECS_H265_PROFILE_TIER_LEVEL_H_
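A brief usage sketch of the parser, consistent with the defaults documented above and with the unit tests (parameter values are illustrative):

#include "api/video_codecs/h265_profile_tier_level.h"

void ParseExample() {
  webrtc::SdpVideoFormat::Parameters params;
  params["profile-id"] = "2";  // Main10
  params["level-id"] = "120";  // level 4
  // tier-flag omitted: defaults to kTier0.
  absl::optional<webrtc::H265ProfileTierLevel> ptl =
      webrtc::ParseSdpForH265ProfileTierLevel(params);
  // ptl->profile == H265Profile::kProfileMain10
  // ptl->tier    == H265Tier::kTier0
  // ptl->level   == H265Level::kLevel4
}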

View file

@ -19,6 +19,10 @@ if (rtc_include_tests) {
"video_encoder_software_fallback_wrapper_unittest.cc",
]
if (rtc_use_h265) {
sources += [ "h265_profile_tier_level_unittest.cc" ]
}
deps = [
":video_decoder_factory_template_tests",
":video_encoder_factory_template_tests",
@ -35,6 +39,7 @@ if (rtc_include_tests) {
"../../../modules/video_coding:webrtc_vp8",
"../../../rtc_base:checks",
"../../../rtc_base:rtc_base_tests_utils",
"../../../test:fake_video_codecs",
"../../../test:field_trial",
"../../../test:test_support",
"../../../test:video_test_common",

View file

@ -0,0 +1,248 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/h265_profile_tier_level.h"
#include <string>
#include "absl/types/optional.h"
#include "test/gtest.h"
namespace webrtc {
TEST(H265ProfileTierLevel, TestLevelToString) {
EXPECT_EQ(H265LevelToString(H265Level::kLevel1), "30");
EXPECT_EQ(H265LevelToString(H265Level::kLevel2), "60");
EXPECT_EQ(H265LevelToString(H265Level::kLevel2_1), "63");
EXPECT_EQ(H265LevelToString(H265Level::kLevel3), "90");
EXPECT_EQ(H265LevelToString(H265Level::kLevel3_1), "93");
EXPECT_EQ(H265LevelToString(H265Level::kLevel4), "120");
EXPECT_EQ(H265LevelToString(H265Level::kLevel4_1), "123");
EXPECT_EQ(H265LevelToString(H265Level::kLevel5), "150");
EXPECT_EQ(H265LevelToString(H265Level::kLevel5_1), "153");
EXPECT_EQ(H265LevelToString(H265Level::kLevel5_2), "156");
EXPECT_EQ(H265LevelToString(H265Level::kLevel6), "180");
EXPECT_EQ(H265LevelToString(H265Level::kLevel6_1), "183");
EXPECT_EQ(H265LevelToString(H265Level::kLevel6_2), "186");
}
TEST(H265ProfileTierLevel, TestProfileToString) {
EXPECT_EQ(H265ProfileToString(H265Profile::kProfileMain), "1");
EXPECT_EQ(H265ProfileToString(H265Profile::kProfileMain10), "2");
EXPECT_EQ(H265ProfileToString(H265Profile::kProfileMainStill), "3");
EXPECT_EQ(H265ProfileToString(H265Profile::kProfileRangeExtensions), "4");
EXPECT_EQ(H265ProfileToString(H265Profile::kProfileHighThroughput), "5");
EXPECT_EQ(H265ProfileToString(H265Profile::kProfileMultiviewMain), "6");
EXPECT_EQ(H265ProfileToString(H265Profile::kProfileScalableMain), "7");
EXPECT_EQ(H265ProfileToString(H265Profile::kProfile3dMain), "8");
EXPECT_EQ(H265ProfileToString(H265Profile::kProfileScreenContentCoding), "9");
EXPECT_EQ(H265ProfileToString(H265Profile::kProfileScalableRangeExtensions),
"10");
EXPECT_EQ(H265ProfileToString(
H265Profile::kProfileHighThroughputScreenContentCoding),
"11");
}
TEST(H265ProfileTierLevel, TestTierToString) {
EXPECT_EQ(H265TierToString(H265Tier::kTier0), "0");
EXPECT_EQ(H265TierToString(H265Tier::kTier1), "1");
}
TEST(H265ProfileTierLevel, TestStringToProfile) {
// Invalid profiles.
EXPECT_FALSE(StringToH265Profile("0"));
EXPECT_FALSE(StringToH265Profile("12"));
// Malformed profiles.
EXPECT_FALSE(StringToH265Profile(""));
EXPECT_FALSE(StringToH265Profile(" 1"));
EXPECT_FALSE(StringToH265Profile("12x"));
EXPECT_FALSE(StringToH265Profile("x12"));
EXPECT_FALSE(StringToH265Profile("gggg"));
// Valid profiles.
EXPECT_EQ(StringToH265Profile("1"), H265Profile::kProfileMain);
EXPECT_EQ(StringToH265Profile("2"), H265Profile::kProfileMain10);
EXPECT_EQ(StringToH265Profile("4"), H265Profile::kProfileRangeExtensions);
}
TEST(H265ProfileTierLevel, TestStringToLevel) {
// Invalid levels.
EXPECT_FALSE(StringToH265Level("0"));
EXPECT_FALSE(StringToH265Level("200"));
// Malformed levels.
EXPECT_FALSE(StringToH265Level(""));
EXPECT_FALSE(StringToH265Level(" 30"));
EXPECT_FALSE(StringToH265Level("30x"));
EXPECT_FALSE(StringToH265Level("x30"));
EXPECT_FALSE(StringToH265Level("ggggg"));
// Valid levels.
EXPECT_EQ(StringToH265Level("30"), H265Level::kLevel1);
EXPECT_EQ(StringToH265Level("93"), H265Level::kLevel3_1);
EXPECT_EQ(StringToH265Level("183"), H265Level::kLevel6_1);
}
TEST(H265ProfileTierLevel, TestStringToTier) {
// Invalid tiers.
EXPECT_FALSE(StringToH265Tier("4"));
EXPECT_FALSE(StringToH265Tier("-1"));
// Malformed tiers.
EXPECT_FALSE(StringToH265Tier(""));
EXPECT_FALSE(StringToH265Tier(" 1"));
EXPECT_FALSE(StringToH265Tier("t1"));
// Valid tiers.
EXPECT_EQ(StringToH265Tier("0"), H265Tier::kTier0);
EXPECT_EQ(StringToH265Tier("1"), H265Tier::kTier1);
}
TEST(H265ProfileTierLevel, TestParseSdpProfileTierLevelAllEmpty) {
const absl::optional<H265ProfileTierLevel> profile_tier_level =
ParseSdpForH265ProfileTierLevel(SdpVideoFormat::Parameters());
EXPECT_TRUE(profile_tier_level);
EXPECT_EQ(H265Profile::kProfileMain, profile_tier_level->profile);
EXPECT_EQ(H265Level::kLevel3_1, profile_tier_level->level);
EXPECT_EQ(H265Tier::kTier0, profile_tier_level->tier);
}
TEST(H265ProfileTierLevel, TestParseSdpProfileTierLevelPartialEmpty) {
SdpVideoFormat::Parameters params;
params["profile-id"] = "1";
params["tier-flag"] = "0";
absl::optional<H265ProfileTierLevel> profile_tier_level =
ParseSdpForH265ProfileTierLevel(params);
EXPECT_TRUE(profile_tier_level);
EXPECT_EQ(H265Profile::kProfileMain, profile_tier_level->profile);
EXPECT_EQ(H265Level::kLevel3_1, profile_tier_level->level);
EXPECT_EQ(H265Tier::kTier0, profile_tier_level->tier);
params.clear();
params["profile-id"] = "2";
profile_tier_level = ParseSdpForH265ProfileTierLevel(params);
EXPECT_TRUE(profile_tier_level);
EXPECT_EQ(H265Profile::kProfileMain10, profile_tier_level->profile);
EXPECT_EQ(H265Level::kLevel3_1, profile_tier_level->level);
EXPECT_EQ(H265Tier::kTier0, profile_tier_level->tier);
params.clear();
params["level-id"] = "180";
profile_tier_level = ParseSdpForH265ProfileTierLevel(params);
EXPECT_TRUE(profile_tier_level);
EXPECT_EQ(H265Profile::kProfileMain, profile_tier_level->profile);
EXPECT_EQ(H265Level::kLevel6, profile_tier_level->level);
EXPECT_EQ(H265Tier::kTier0, profile_tier_level->tier);
}
TEST(H265ProfileTierLevel, TestParseSdpProfileTierLevelInvalid) {
SdpVideoFormat::Parameters params;
// Invalid profile-tier-level combination.
params["profile-id"] = "1";
params["tier-flag"] = "1";
params["level-id"] = "93";
absl::optional<H265ProfileTierLevel> profile_tier_level =
ParseSdpForH265ProfileTierLevel(params);
EXPECT_FALSE(profile_tier_level);
params.clear();
params["profile-id"] = "1";
params["tier-flag"] = "4";
params["level-id"] = "180";
profile_tier_level = ParseSdpForH265ProfileTierLevel(params);
EXPECT_FALSE(profile_tier_level);
// Valid profile-tier-level combination.
params.clear();
params["profile-id"] = "1";
params["tier-flag"] = "0";
params["level-id"] = "153";
profile_tier_level = ParseSdpForH265ProfileTierLevel(params);
EXPECT_TRUE(profile_tier_level);
}
TEST(H265ProfileTierLevel, TestToStringRoundTrip) {
SdpVideoFormat::Parameters params;
params["profile-id"] = "1";
params["tier-flag"] = "0";
params["level-id"] = "93";
absl::optional<H265ProfileTierLevel> profile_tier_level =
ParseSdpForH265ProfileTierLevel(params);
EXPECT_TRUE(profile_tier_level);
EXPECT_EQ("1", H265ProfileToString(profile_tier_level->profile));
EXPECT_EQ("0", H265TierToString(profile_tier_level->tier));
EXPECT_EQ("93", H265LevelToString(profile_tier_level->level));
params.clear();
params["profile-id"] = "2";
params["tier-flag"] = "1";
params["level-id"] = "180";
profile_tier_level = ParseSdpForH265ProfileTierLevel(params);
EXPECT_TRUE(profile_tier_level);
EXPECT_EQ("2", H265ProfileToString(profile_tier_level->profile));
EXPECT_EQ("1", H265TierToString(profile_tier_level->tier));
EXPECT_EQ("180", H265LevelToString(profile_tier_level->level));
}
TEST(H265ProfileTierLevel, TestProfileTierLevelCompare) {
SdpVideoFormat::Parameters params1;
SdpVideoFormat::Parameters params2;
// None of profile-id/tier-flag/level-id is specified.
EXPECT_TRUE(H265IsSameProfileTierLevel(params1, params2));
// Same non-empty PTL.
params1["profile-id"] = "1";
params1["tier-flag"] = "0";
params1["level-id"] = "120";
params2["profile-id"] = "1";
params2["tier-flag"] = "0";
params2["level-id"] = "120";
EXPECT_TRUE(H265IsSameProfileTierLevel(params1, params2));
// Different profiles.
params1.clear();
params2.clear();
params1["profile-id"] = "1";
params2["profile-id"] = "2";
EXPECT_FALSE(H265IsSameProfileTierLevel(params1, params2));
// Different levels.
params1.clear();
params2.clear();
params1["profile-id"] = "1";
params2["profile-id"] = "1";
params1["level-id"] = "93";
params2["level-id"] = "183";
EXPECT_FALSE(H265IsSameProfileTierLevel(params1, params2));
// Different tiers.
params1.clear();
params2.clear();
params1["profile-id"] = "1";
params2["profile-id"] = "1";
params1["level-id"] = "93";
params2["level-id"] = "93";
params1["tier-flag"] = "0";
params2["tier-flag"] = "1";
EXPECT_FALSE(H265IsSameProfileTierLevel(params1, params2));
// One of the SdpVideoFormat::Parameters is invalid.
params1.clear();
params2.clear();
params1["profile-id"] = "1";
params2["profile-id"] = "1";
params1["tier-flag"] = "0";
params2["tier-flag"] = "4";
EXPECT_FALSE(H265IsSameProfileTierLevel(params1, params2));
}
} // namespace webrtc

View file

@ -45,7 +45,6 @@ class VideoDecoderSoftwareFallbackWrapperTest : public ::testing::Test {
}
int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override {
++decode_count_;
return decode_return_code_;
@ -84,7 +83,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, InitializesDecoder) {
EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->configure_count_)
<< "Initialized decoder should not be reinitialized.";
EXPECT_EQ(1, fake_decoder_->decode_count_);
@ -98,7 +97,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->configure_count_)
<< "Should not have attempted reinitializing the fallback decoder on "
"keyframe.";
@ -113,12 +112,12 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, IsSoftwareFallbackSticky) {
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->decode_count_);
// Software fallback should be sticky; fake_decoder_ shouldn't be used.
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->decode_count_)
<< "Decoder shouldn't be used after failure.";
@ -131,10 +130,10 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, DoesNotFallbackOnEveryError) {
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
EncodedImage encoded_image;
EXPECT_EQ(fake_decoder_->decode_return_code_,
fallback_wrapper_->Decode(encoded_image, false, -1));
fallback_wrapper_->Decode(encoded_image, -1));
EXPECT_EQ(1, fake_decoder_->decode_count_);
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(2, fake_decoder_->decode_count_)
<< "Decoder should be active even though previous decode failed.";
}
@ -144,14 +143,14 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, UsesHwDecoderAfterReinit) {
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, fake_decoder_->decode_count_);
fallback_wrapper_->Release();
fallback_wrapper_->Configure({});
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(2, fake_decoder_->decode_count_)
<< "Should not be using fallback after reinit.";
}
@ -164,7 +163,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, ForwardsReleaseCall) {
fallback_wrapper_->Configure({});
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(2, fake_decoder_->release_count_)
<< "Decoder should be released during fallback.";
fallback_wrapper_->Release();
@ -200,7 +199,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
EncodedImage encoded_image;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
// Hard coded expected value since libvpx is the software implementation name
// for VP8. Change accordingly if the underlying implementation does.
EXPECT_STREQ("libvpx (fallback from: fake-decoder)",
@ -215,13 +214,13 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, FallbacksOnTooManyErrors) {
EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
// Doesn't fallback from a single error.
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
// However, after many frames with the same error, fallback should happen.
const int kNumFramesToEncode = 10;
for (int i = 0; i < kNumFramesToEncode; ++i) {
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
}
// Hard coded expected value since libvpx is the software implementation name
// for VP8. Change accordingly if the underlying implementation does.
@ -241,7 +240,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
// Many decoded frames with the same error.
const int kNumFramesToEncode = 10;
for (int i = 0; i < kNumFramesToEncode; ++i) {
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
}
EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
@ -259,9 +258,9 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
for (int i = 0; i < kNumFramesToEncode; ++i) {
// Interleaved errors and successful decodes.
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
}
EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
fallback_wrapper_->Release();
@ -289,7 +288,7 @@ TEST_F(ForcedSoftwareDecoderFallbackTest, UsesForcedFallback) {
EncodedImage encoded_image;
encoded_image._frameType = VideoFrameType::kVideoFrameKey;
fallback_wrapper_->Decode(encoded_image, false, -1);
fallback_wrapper_->Decode(encoded_image, -1);
EXPECT_EQ(1, sw_fallback_decoder_->configure_count_);
EXPECT_EQ(1, sw_fallback_decoder_->decode_count_);

View file

@ -34,6 +34,7 @@
#include "modules/video_coding/include/video_error_codes.h"
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "rtc_base/fake_clock.h"
#include "test/fake_encoder.h"
#include "test/fake_texture_frame.h"
#include "test/field_trial.h"
#include "test/gmock.h"
@ -42,6 +43,7 @@
namespace webrtc {
using ::testing::_;
using ::testing::Return;
using ::testing::ValuesIn;
namespace {
const int kWidth = 320;
@ -1083,4 +1085,60 @@ TEST_F(PreferTemporalLayersFallbackTest, PrimesEncoderOnSwitch) {
EXPECT_EQ(wrapper_->GetEncoderInfo().implementation_name, "hw");
}
struct ResolutionBasedFallbackTestParams {
std::string test_name;
std::string field_trials = "";
VideoCodecType codec_type = kVideoCodecGeneric;
int width = 16;
int height = 16;
std::string expect_implementation_name;
};
using ResolutionBasedFallbackTest =
::testing::TestWithParam<ResolutionBasedFallbackTestParams>;
INSTANTIATE_TEST_SUITE_P(
VideoEncoderFallbackTest,
ResolutionBasedFallbackTest,
ValuesIn<ResolutionBasedFallbackTestParams>(
{{.test_name = "FallbackNotConfigured",
.expect_implementation_name = "primary"},
{.test_name = "ResolutionAboveFallbackThreshold",
.field_trials = "WebRTC-Video-EncoderFallbackSettings/"
"resolution_threshold_px:255/",
.expect_implementation_name = "primary"},
{.test_name = "ResolutionEqualFallbackThreshold",
.field_trials = "WebRTC-Video-EncoderFallbackSettings/"
"resolution_threshold_px:256/",
.expect_implementation_name = "fallback"},
{.test_name = "GenericFallbackSettingsTakePrecedence",
.field_trials =
"WebRTC-Video-EncoderFallbackSettings/"
"resolution_threshold_px:255/"
"WebRTC-VP8-Forced-Fallback-Encoder-v2/Enabled-1,256,1/",
.codec_type = kVideoCodecVP8,
.expect_implementation_name = "primary"}}),
[](const testing::TestParamInfo<ResolutionBasedFallbackTest::ParamType>&
info) { return info.param.test_name; });
TEST_P(ResolutionBasedFallbackTest, VerifyForcedEncoderFallback) {
const ResolutionBasedFallbackTestParams& params = GetParam();
test::ScopedFieldTrials field_trials(params.field_trials);
auto primary = new test::FakeEncoder(Clock::GetRealTimeClock());
auto fallback = new test::FakeEncoder(Clock::GetRealTimeClock());
auto encoder = CreateVideoEncoderSoftwareFallbackWrapper(
std::unique_ptr<VideoEncoder>(fallback),
std::unique_ptr<VideoEncoder>(primary),
/*prefer_temporal_support=*/false);
primary->SetImplementationName("primary");
fallback->SetImplementationName("fallback");
VideoCodec codec;
codec.codecType = params.codec_type;
codec.width = params.width;
codec.height = params.height;
encoder->InitEncode(&codec, kSettings);
EXPECT_EQ(encoder->GetEncoderInfo().implementation_name,
params.expect_implementation_name);
}
} // namespace webrtc

View file

@ -98,9 +98,20 @@ class RTC_EXPORT VideoDecoder {
// times, in such case only latest `settings` are in effect.
virtual bool Configure(const Settings& settings) = 0;
// TODO(bugs.webrtc.org/15444): Make pure virtual once all subclasses have
// migrated to implementing this overload.
virtual int32_t Decode(const EncodedImage& input_image,
int64_t render_time_ms) {
return Decode(input_image, /*missing_frames=*/false, render_time_ms);
}
// TODO(bugs.webrtc.org/15444): Migrate all subclasses to Decode() without
// missing_frames and delete this.
virtual int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) = 0;
int64_t render_time_ms) {
return Decode(input_image, render_time_ms);
}
virtual int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) = 0;
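Each default forwards to the other overload, so a subclass must override at least one of them (overriding neither would recurse). A minimal sketch of a decoder already migrated to the new two-argument signature; FakeNullDecoder is a hypothetical name, and it assumes Configure, RegisterDecodeCompleteCallback and Release remain the only other pure-virtual members:

#include "api/video/encoded_image.h"
#include "api/video_codecs/video_decoder.h"
#include "modules/video_coding/include/video_error_codes.h"

class FakeNullDecoder : public webrtc::VideoDecoder {
 public:
  bool Configure(const Settings& settings) override { return true; }
  int32_t Decode(const webrtc::EncodedImage& input_image,
                 int64_t render_time_ms) override {
    return WEBRTC_VIDEO_CODEC_OK;  // legacy 3-arg calls forward here
  }
  int32_t RegisterDecodeCompleteCallback(
      webrtc::DecodedImageCallback* callback) override {
    return WEBRTC_VIDEO_CODEC_OK;
  }
  int32_t Release() override { return WEBRTC_VIDEO_CODEC_OK; }
};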

View file

@ -41,7 +41,6 @@ class VideoDecoderSoftwareFallbackWrapper final : public VideoDecoder {
bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback(
@ -176,7 +175,6 @@ void VideoDecoderSoftwareFallbackWrapper::UpdateFallbackDecoderHistograms() {
int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) {
TRACE_EVENT0("webrtc", "VideoDecoderSoftwareFallbackWrapper::Decode");
switch (decoder_type_) {
@ -184,7 +182,7 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
case DecoderType::kHardware: {
int32_t ret = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
ret = hw_decoder_->Decode(input_image, missing_frames, render_time_ms);
ret = hw_decoder_->Decode(input_image, render_time_ms);
if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) {
if (ret != WEBRTC_VIDEO_CODEC_ERROR) {
++hw_decoded_frames_since_last_fallback_;
@ -212,8 +210,7 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
[[fallthrough]];
}
case DecoderType::kFallback:
return fallback_decoder_->Decode(input_image, missing_frames,
render_time_ms);
return fallback_decoder_->Decode(input_image, render_time_ms);
default:
RTC_DCHECK_NOTREACHED();
return WEBRTC_VIDEO_CODEC_ERROR;

View file

@ -20,6 +20,7 @@
#include "absl/strings/match.h"
#include "absl/types/optional.h"
#include "api/fec_controller_override.h"
#include "api/transport/field_trial_based_config.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video/video_frame.h"
@ -29,6 +30,7 @@
#include "modules/video_coding/include/video_error_codes.h"
#include "modules/video_coding/utility/simulcast_utility.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/field_trial.h"
@ -48,10 +50,18 @@ namespace {
struct ForcedFallbackParams {
public:
bool SupportsResolutionBasedSwitch(const VideoCodec& codec) const {
return enable_resolution_based_switch &&
codec.codecType == kVideoCodecVP8 &&
codec.numberOfSimulcastStreams <= 1 &&
codec.width * codec.height <= max_pixels;
if (!enable_resolution_based_switch ||
codec.width * codec.height > max_pixels) {
return false;
}
if (vp8_specific_resolution_switch &&
(codec.codecType != kVideoCodecVP8 ||
codec.numberOfSimulcastStreams > 1)) {
return false;
}
return true;
}
bool SupportsTemporalBasedSwitch(const VideoCodec& codec) const {
@ -61,7 +71,8 @@ struct ForcedFallbackParams {
bool enable_temporal_based_switch = false;
bool enable_resolution_based_switch = false;
int min_pixels = 320 * 180;
bool vp8_specific_resolution_switch = false;
int min_pixels = kDefaultMinPixelsPerFrame;
int max_pixels = 320 * 240;
};
@ -70,6 +81,19 @@ const char kVp8ForceFallbackEncoderFieldTrial[] =
absl::optional<ForcedFallbackParams> ParseFallbackParamsFromFieldTrials(
const VideoEncoder& main_encoder) {
// Ignore WebRTC-VP8-Forced-Fallback-Encoder-v2 if
// WebRTC-Video-EncoderFallbackSettings is present.
FieldTrialOptional<int> resolution_threshold_px("resolution_threshold_px");
ParseFieldTrial(
{&resolution_threshold_px},
FieldTrialBasedConfig().Lookup("WebRTC-Video-EncoderFallbackSettings"));
if (resolution_threshold_px) {
ForcedFallbackParams params;
params.enable_resolution_based_switch = true;
params.max_pixels = resolution_threshold_px.Value();
return params;
}
const std::string field_trial =
webrtc::field_trial::FindFullName(kVp8ForceFallbackEncoderFieldTrial);
if (!absl::StartsWith(field_trial, "Enabled")) {
@ -95,6 +119,7 @@ absl::optional<ForcedFallbackParams> ParseFallbackParamsFromFieldTrials(
return absl::nullopt;
}
params.vp8_specific_resolution_switch = true;
return params;
}
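For reference, the two trial strings exercised by the ResolutionBasedFallbackTest parameters above, in precedence order; when the first is present, the VP8-only trial is ignored:

// Codec-agnostic threshold: force fallback when width * height <= 256.
"WebRTC-Video-EncoderFallbackSettings/resolution_threshold_px:256/"
// Legacy VP8-only trial; consulted only when the setting above is absent.
"WebRTC-VP8-Forced-Fallback-Encoder-v2/Enabled-1,256,1/"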
@ -107,7 +132,7 @@ absl::optional<ForcedFallbackParams> GetForcedFallbackParams(
if (!params.has_value()) {
params.emplace();
}
params->enable_temporal_based_switch = prefer_temporal_support;
params->enable_temporal_based_switch = true;
}
return params;
}
@ -421,18 +446,8 @@ VideoEncoder::EncoderInfo VideoEncoderSoftwareFallbackWrapper::GetEncoderInfo()
fallback_encoder_info.apply_alignment_to_all_simulcast_layers ||
default_encoder_info.apply_alignment_to_all_simulcast_layers;
if (fallback_params_.has_value()) {
const auto settings = (encoder_state_ == EncoderState::kForcedFallback)
? fallback_encoder_info.scaling_settings
: default_encoder_info.scaling_settings;
info.scaling_settings =
settings.thresholds
? VideoEncoder::ScalingSettings(settings.thresholds->low,
settings.thresholds->high,
fallback_params_->min_pixels)
: VideoEncoder::ScalingSettings::kOff;
} else {
info.scaling_settings = default_encoder_info.scaling_settings;
if (fallback_params_ && fallback_params_->vp8_specific_resolution_switch) {
info.scaling_settings.min_pixels_per_frame = fallback_params_->min_pixels;
}
return info;

View file

@ -67,7 +67,7 @@ class RTC_EXPORT WrappingAsyncDnsResolver : public AsyncDnsResolverInterface,
}
void Start(const rtc::SocketAddress& addr,
std::function<void()> callback) override {
absl::AnyInvocable<void()> callback) override {
RTC_DCHECK_RUN_ON(&sequence_checker_);
PrepareToResolve(std::move(callback));
wrapped_->Start(addr);
@ -75,7 +75,7 @@ class RTC_EXPORT WrappingAsyncDnsResolver : public AsyncDnsResolverInterface,
void Start(const rtc::SocketAddress& addr,
int family,
std::function<void()> callback) override {
absl::AnyInvocable<void()> callback) override {
RTC_DCHECK_RUN_ON(&sequence_checker_);
PrepareToResolve(std::move(callback));
wrapped_->Start(addr, family);
@ -97,7 +97,7 @@ class RTC_EXPORT WrappingAsyncDnsResolver : public AsyncDnsResolverInterface,
return wrapped_.get();
}
void PrepareToResolve(std::function<void()> callback) {
void PrepareToResolve(absl::AnyInvocable<void()> callback) {
RTC_DCHECK_RUN_ON(&sequence_checker_);
RTC_DCHECK_EQ(State::kNotStarted, state_);
state_ = State::kStarted;
@ -118,7 +118,7 @@ class RTC_EXPORT WrappingAsyncDnsResolver : public AsyncDnsResolverInterface,
// The class variables need to be accessed on a single thread.
SequenceChecker sequence_checker_;
std::function<void()> callback_ RTC_GUARDED_BY(sequence_checker_);
absl::AnyInvocable<void()> callback_ RTC_GUARDED_BY(sequence_checker_);
std::unique_ptr<rtc::AsyncResolverInterface> wrapped_
RTC_GUARDED_BY(sequence_checker_);
State state_ RTC_GUARDED_BY(sequence_checker_) = State::kNotStarted;

View file
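absl::AnyInvocable, unlike std::function, accepts move-only callables, so a resolution callback can now own move-only state. A small sketch of a callback the new signature accepts but the old one could not:

#include <memory>

#include "absl/functional/any_invocable.h"

void AnyInvocableExample() {
  auto state = std::make_unique<int>(42);
  // The lambda owns the unique_ptr and is therefore move-only; it cannot
  // be stored in a std::function (which requires copyable targets), but
  // absl::AnyInvocable accepts it.
  absl::AnyInvocable<void()> cb = [s = std::move(state)]() {
    // ... use *s when resolution completes ...
  };
  cb();
}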

@ -49,9 +49,9 @@ TEST_F(AudioSendStreamCallTest, SupportsCName) {
CNameObserver() = default;
private:
Action OnSendRtcp(const uint8_t* packet, size_t length) override {
Action OnSendRtcp(rtc::ArrayView<const uint8_t> packet) override {
RtcpPacketParser parser;
EXPECT_TRUE(parser.Parse(packet, length));
EXPECT_TRUE(parser.Parse(packet));
if (parser.sdes()->num_packets() > 0) {
EXPECT_EQ(1u, parser.sdes()->chunks().size());
EXPECT_EQ(kCName, parser.sdes()->chunks()[0].cname);
@ -82,9 +82,9 @@ TEST_F(AudioSendStreamCallTest, NoExtensionsByDefault) {
NoExtensionsObserver() = default;
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length)); // rtp packet is valid.
EXPECT_TRUE(rtp_packet.Parse(packet)); // rtp packet is valid.
EXPECT_EQ(packet[0] & 0b0001'0000, 0); // extension bit not set.
observation_complete_.Set();
@ -112,9 +112,9 @@ TEST_F(AudioSendStreamCallTest, SupportsAudioLevel) {
extensions_.Register<AudioLevel>(kAudioLevelExtensionId);
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
RtpPacket rtp_packet(&extensions_);
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
uint8_t audio_level = 0;
bool voice = false;
@ -158,9 +158,9 @@ class TransportWideSequenceNumberObserver : public AudioSendTest {
}
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
RtpPacket rtp_packet(&extensions_);
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
EXPECT_EQ(rtp_packet.HasExtension<TransportSequenceNumber>(),
expect_sequence_number_);
@ -204,9 +204,9 @@ TEST_F(AudioSendStreamCallTest, SendDtmf) {
DtmfObserver() = default;
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
if (rtp_packet.PayloadType() == kDtmfPayloadType) {
EXPECT_EQ(rtp_packet.headers_size(), 12u);

View file

@ -70,7 +70,8 @@ acm2::AcmReceiver::Config AcmConfig(
rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
absl::optional<AudioCodecPairId> codec_pair_id,
size_t jitter_buffer_max_packets,
bool jitter_buffer_fast_playout) {
bool jitter_buffer_fast_playout,
int jitter_buffer_min_delay_ms) {
acm2::AcmReceiver::Config acm_config;
acm_config.neteq_factory = neteq_factory;
acm_config.decoder_factory = decoder_factory;
@ -78,6 +79,7 @@ acm2::AcmReceiver::Config AcmConfig(
acm_config.neteq_config.max_packets_in_buffer = jitter_buffer_max_packets;
acm_config.neteq_config.enable_fast_accelerate = jitter_buffer_fast_playout;
acm_config.neteq_config.enable_muted_state = true;
acm_config.neteq_config.min_delay_ms = jitter_buffer_min_delay_ms;
return acm_config;
}
@ -298,7 +300,8 @@ class ChannelReceive : public ChannelReceiveInterface,
webrtc::AbsoluteCaptureTimeInterpolator absolute_capture_time_interpolator_
RTC_GUARDED_BY(worker_thread_checker_);
webrtc::CaptureClockOffsetUpdater capture_clock_offset_updater_;
webrtc::CaptureClockOffsetUpdater capture_clock_offset_updater_
RTC_GUARDED_BY(ts_stats_lock_);
rtc::scoped_refptr<ChannelReceiveFrameTransformerDelegate>
frame_transformer_delegate_;
@ -480,6 +483,7 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
for (auto& packet_info : audio_frame->packet_infos_) {
absl::optional<int64_t> local_capture_clock_offset_q32x32;
if (packet_info.absolute_capture_time().has_value()) {
MutexLock lock(&ts_stats_lock_);
local_capture_clock_offset_q32x32 =
capture_clock_offset_updater_.AdjustEstimatedCaptureClockOffset(
packet_info.absolute_capture_time()
@ -559,7 +563,8 @@ ChannelReceive::ChannelReceive(
decoder_factory,
codec_pair_id,
jitter_buffer_max_packets,
jitter_buffer_fast_playout)),
jitter_buffer_fast_playout,
jitter_buffer_min_delay_ms)),
_outputAudioLevel(),
clock_(clock),
ntp_estimator_(clock),

View file

@ -40,7 +40,6 @@ class TransformableIncomingAudioFrame
uint8_t GetPayloadType() const override { return header_.payloadType; }
uint32_t GetSsrc() const override { return ssrc_; }
uint32_t GetTimestamp() const override { return header_.timestamp; }
const RTPHeader& GetHeader() const override { return header_; }
rtc::ArrayView<const uint32_t> GetContributingSources() const override {
return rtc::ArrayView<const uint32_t>(header_.arrOfCSRCs, header_.numCSRCs);
}
@ -50,6 +49,18 @@ class TransformableIncomingAudioFrame
return header_.sequenceNumber;
}
absl::optional<uint64_t> AbsoluteCaptureTimestamp() const override {
// This could be extracted from received header extensions plus extrapolation,
// if required in the future, e.g. for re-sending received frames.
return absl::nullopt;
}
const RTPHeader& Header() const { return header_; }
FrameType Type() const override {
return header_.extension.voiceActivity ? FrameType::kAudioFrameSpeech
: FrameType::kAudioFrameCN;
}
private:
rtc::Buffer payload_;
RTPHeader header_;
@ -101,11 +112,30 @@ void ChannelReceiveFrameTransformerDelegate::ReceiveFrame(
RTC_DCHECK_RUN_ON(&sequence_checker_);
if (!receive_frame_callback_)
return;
RTC_CHECK_EQ(frame->GetDirection(),
TransformableFrameInterface::Direction::kReceiver);
auto* transformed_frame =
static_cast<TransformableIncomingAudioFrame*>(frame.get());
receive_frame_callback_(transformed_frame->GetData(),
transformed_frame->GetHeader());
RTPHeader header;
if (frame->GetDirection() ==
TransformableFrameInterface::Direction::kSender) {
auto* transformed_frame =
static_cast<TransformableAudioFrameInterface*>(frame.get());
header.payloadType = transformed_frame->GetPayloadType();
header.timestamp = transformed_frame->GetTimestamp();
header.ssrc = transformed_frame->GetSsrc();
if (transformed_frame->AbsoluteCaptureTimestamp().has_value()) {
header.extension.absolute_capture_time = AbsoluteCaptureTime();
header.extension.absolute_capture_time->absolute_capture_timestamp =
transformed_frame->AbsoluteCaptureTimestamp().value();
}
} else {
auto* transformed_frame =
static_cast<TransformableIncomingAudioFrame*>(frame.get());
header = transformed_frame->Header();
}
// TODO(crbug.com/1464860): Take an explicit struct with the required
// information rather than the RTPHeader, to make it easier to construct
// that information when injecting transformed frames that did not
// originate from this receiver.
receive_frame_callback_(frame->GetData(), header);
}
} // namespace webrtc

View file

@ -13,6 +13,7 @@
#include <memory>
#include <utility>
#include "audio/channel_send_frame_transformer_delegate.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/mock_frame_transformer.h"
@ -21,6 +22,8 @@
namespace webrtc {
namespace {
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::NiceMock;
using ::testing::SaveArg;
@ -94,6 +97,39 @@ TEST(ChannelReceiveFrameTransformerDelegateTest,
rtc::ThreadManager::ProcessAllMessageQueuesForTesting();
}
// Test that when the delegate receives an outgoing frame from the frame
// transformer, it passes it to the channel using the ReceiveFrameCallback.
TEST(ChannelReceiveFrameTransformerDelegateTest,
TransformRunsChannelReceiveCallbackForSenderFrame) {
rtc::AutoThread main_thread;
rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
MockChannelReceive mock_channel;
rtc::scoped_refptr<ChannelReceiveFrameTransformerDelegate> delegate =
rtc::make_ref_counted<ChannelReceiveFrameTransformerDelegate>(
mock_channel.callback(), mock_frame_transformer,
rtc::Thread::Current());
rtc::scoped_refptr<TransformedFrameCallback> callback;
EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameCallback)
.WillOnce(SaveArg<0>(&callback));
delegate->Init();
ASSERT_TRUE(callback);
const uint8_t data[] = {1, 2, 3, 4};
rtc::ArrayView<const uint8_t> packet(data, sizeof(data));
RTPHeader header;
EXPECT_CALL(mock_channel, ReceiveFrame(ElementsAre(1, 2, 3, 4), _));
ON_CALL(*mock_frame_transformer, Transform)
.WillByDefault([&callback](
std::unique_ptr<TransformableFrameInterface> frame) {
auto* transformed_frame =
static_cast<TransformableAudioFrameInterface*>(frame.get());
callback->OnTransformedFrame(CloneSenderAudioFrame(transformed_frame));
});
delegate->Transform(packet, header, 1111 /*ssrc*/);
rtc::ThreadManager::ProcessAllMessageQueuesForTesting();
}
// Test that if the delegate receives a transformed frame after it has been
// reset, it does not run the ReceiveFrameCallback, as the channel is destroyed
// after resetting the delegate.
@ -110,7 +146,7 @@ TEST(ChannelReceiveFrameTransformerDelegateTest,
delegate->Reset();
EXPECT_CALL(mock_channel, ReceiveFrame).Times(0);
delegate->OnTransformedFrame(std::make_unique<MockTransformableFrame>());
delegate->OnTransformedFrame(std::make_unique<MockTransformableAudioFrame>());
rtc::ThreadManager::ProcessAllMessageQueuesForTesting();
}

View file

@ -172,8 +172,9 @@ TEST_F(ChannelReceiveTest, ReceiveReportGeneratedOnTime) {
bool receiver_report_sent = false;
EXPECT_CALL(transport_, SendRtcp)
.WillRepeatedly([&](const uint8_t* packet, size_t length) {
if (length >= 2 && packet[1] == rtcp::ReceiverReport::kPacketType) {
.WillRepeatedly([&](rtc::ArrayView<const uint8_t> packet) {
if (packet.size() >= 2 &&
packet[1] == rtcp::ReceiverReport::kPacketType) {
receiver_report_sent = true;
}
return true;
@ -189,8 +190,8 @@ TEST_F(ChannelReceiveTest, CaptureStartTimeBecomesValid) {
auto channel = CreateTestChannelReceive();
EXPECT_CALL(transport_, SendRtcp)
.WillRepeatedly([&](const uint8_t* packet, size_t length) {
HandleGeneratedRtcp(*channel, rtc::MakeArrayView(packet, length));
.WillRepeatedly([&](rtc::ArrayView<const uint8_t> packet) {
HandleGeneratedRtcp(*channel, packet);
return true;
});
// Before any packets are sent, CaptureStartTime is invalid.

View file

@ -165,7 +165,7 @@ class ChannelSend : public ChannelSendInterface,
int32_t SendRtpAudio(AudioFrameType frameType,
uint8_t payloadType,
uint32_t rtp_timestamp,
uint32_t rtp_timestamp_without_offset,
rtc::ArrayView<const uint8_t> payload,
int64_t absolute_capture_timestamp_ms)
RTC_RUN_ON(encoder_queue_);
@ -280,7 +280,7 @@ int32_t ChannelSend::SendData(AudioFrameType frameType,
// Asynchronously transform the payload before sending it. After the payload
// is transformed, the delegate will call SendRtpAudio to send it.
frame_transformer_delegate_->Transform(
frameType, payloadType, rtp_timestamp, rtp_rtcp_->StartTimestamp(),
frameType, payloadType, rtp_timestamp + rtp_rtcp_->StartTimestamp(),
payloadData, payloadSize, absolute_capture_timestamp_ms,
rtp_rtcp_->SSRC());
return 0;
@ -291,16 +291,9 @@ int32_t ChannelSend::SendData(AudioFrameType frameType,
int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType,
uint8_t payloadType,
uint32_t rtp_timestamp,
uint32_t rtp_timestamp_without_offset,
rtc::ArrayView<const uint8_t> payload,
int64_t absolute_capture_timestamp_ms) {
if (include_audio_level_indication_.load()) {
// Store current audio level in the RTP sender.
// The level will be used in combination with voice-activity state
// (frameType) to add an RTP header extension
rtp_sender_audio_->SetAudioLevel(rms_level_.Average());
}
// E2EE Custom Audio Frame Encryption (This is optional).
// Keep this buffer around for the lifetime of the send call.
rtc::Buffer encrypted_audio_payload;
@ -341,7 +334,7 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType,
// Push data from ACM to RTP/RTCP-module to deliver audio frame for
// packetization.
if (!rtp_rtcp_->OnSendingRtpFrame(rtp_timestamp,
if (!rtp_rtcp_->OnSendingRtpFrame(rtp_timestamp_without_offset,
// Leaving the time when this frame was
// received from the capture device as
// undefined for voice for now.
@ -357,9 +350,19 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType,
// knowledge of the offset to a single place.
// This call will trigger Transport::SendPacket() from the RTP/RTCP module.
if (!rtp_sender_audio_->SendAudio(
frameType, payloadType, rtp_timestamp + rtp_rtcp_->StartTimestamp(),
payload.data(), payload.size(), absolute_capture_timestamp_ms)) {
RTPSenderAudio::RtpAudioFrame frame = {
.type = frameType,
.payload = payload,
.payload_id = payloadType,
.rtp_timestamp =
rtp_timestamp_without_offset + rtp_rtcp_->StartTimestamp()};
if (absolute_capture_timestamp_ms > 0) {
frame.capture_time = Timestamp::Millis(absolute_capture_timestamp_ms);
}
if (include_audio_level_indication_.load()) {
frame.audio_level_dbov = rms_level_.Average();
}
if (!rtp_sender_audio_->SendAudio(frame)) {
RTC_DLOG(LS_ERROR)
<< "ChannelSend::SendData() failed to send data to RTP/RTCP module";
return -1;
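SendAudio() now takes a single RTPSenderAudio::RtpAudioFrame parameter struct instead of a long positional argument list; the same call style appears in AudioEgress::SendData() further down. A simplified sketch of the pattern, with stand-in types (std::optional instead of the absl/unit types) and hypothetical names:

#include <cstdint>
#include <optional>

// Field names mirror the call site above; types are simplified stand-ins.
struct RtpAudioFrameSketch {
  int type = 0;
  uint8_t payload_id = 0;
  uint32_t rtp_timestamp = 0;
  std::optional<int64_t> capture_time_ms;   // only set when known
  std::optional<uint8_t> audio_level_dbov;  // only set when measured
};

bool SendAudioSketch(const RtpAudioFrameSketch& frame) {
  // Optional members default to "absent" instead of magic sentinel values.
  return frame.capture_time_ms.has_value() || frame.rtp_timestamp != 0;
}

int main() {
  // Designated initializers (C++20, accepted earlier by Clang as an
  // extension) keep the call site self-describing and let optional fields be
  // added later without touching existing callers.
  return SendAudioSketch({.type = 1, .payload_id = 96, .rtp_timestamp = 1234})
             ? 0
             : 1;
}
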
@ -841,11 +844,14 @@ void ChannelSend::InitFrameTransformerDelegate(
// to send the transformed audio.
ChannelSendFrameTransformerDelegate::SendFrameCallback send_audio_callback =
[this](AudioFrameType frameType, uint8_t payloadType,
uint32_t rtp_timestamp, rtc::ArrayView<const uint8_t> payload,
uint32_t rtp_timestamp_with_offset,
rtc::ArrayView<const uint8_t> payload,
int64_t absolute_capture_timestamp_ms) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
return SendRtpAudio(frameType, payloadType, rtp_timestamp, payload,
absolute_capture_timestamp_ms);
return SendRtpAudio(
frameType, payloadType,
rtp_timestamp_with_offset - rtp_rtcp_->StartTimestamp(), payload,
absolute_capture_timestamp_ms);
};
frame_transformer_delegate_ =
rtc::make_ref_counted<ChannelSendFrameTransformerDelegate>(

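The convention this change settles on: frames handed to the frame transformer carry the on-the-wire RTP timestamp (random start offset included), and the send callback strips the offset again before the RTP module re-applies it. A small self-contained sketch of why the round trip is exact in unsigned 32-bit arithmetic, even across a wrap:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t start_timestamp = 0xFFFFFF00;  // StartTimestamp(): random offset
  const uint32_t media_timestamp = 0x00000200;  // timestamp without offset
  const uint32_t on_wire = media_timestamp + start_timestamp;  // wraps, OK
  const uint32_t recovered = on_wire - start_timestamp;        // wraps back
  assert(recovered == media_timestamp);
  return 0;
}
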
View file

@ -15,21 +15,49 @@
namespace webrtc {
namespace {
using IfaceFrameType = TransformableAudioFrameInterface::FrameType;
IfaceFrameType InternalFrameTypeToInterfaceFrameType(
const AudioFrameType frame_type) {
switch (frame_type) {
case AudioFrameType::kEmptyFrame:
return IfaceFrameType::kEmptyFrame;
case AudioFrameType::kAudioFrameSpeech:
return IfaceFrameType::kAudioFrameSpeech;
case AudioFrameType::kAudioFrameCN:
return IfaceFrameType::kAudioFrameCN;
}
RTC_DCHECK_NOTREACHED();
return IfaceFrameType::kEmptyFrame;
}
AudioFrameType InterfaceFrameTypeToInternalFrameType(
const IfaceFrameType frame_type) {
switch (frame_type) {
case IfaceFrameType::kEmptyFrame:
return AudioFrameType::kEmptyFrame;
case IfaceFrameType::kAudioFrameSpeech:
return AudioFrameType::kAudioFrameSpeech;
case IfaceFrameType::kAudioFrameCN:
return AudioFrameType::kAudioFrameCN;
}
RTC_DCHECK_NOTREACHED();
return AudioFrameType::kEmptyFrame;
}
class TransformableOutgoingAudioFrame
: public TransformableAudioFrameInterface {
public:
TransformableOutgoingAudioFrame(AudioFrameType frame_type,
uint8_t payload_type,
uint32_t rtp_timestamp,
uint32_t rtp_start_timestamp,
uint32_t rtp_timestamp_with_offset,
const uint8_t* payload_data,
size_t payload_size,
int64_t absolute_capture_timestamp_ms,
uint32_t ssrc)
: frame_type_(frame_type),
payload_type_(payload_type),
rtp_timestamp_(rtp_timestamp),
rtp_start_timestamp_(rtp_start_timestamp),
rtp_timestamp_with_offset_(rtp_timestamp_with_offset),
payload_(payload_data, payload_size),
absolute_capture_timestamp_ms_(absolute_capture_timestamp_ms),
ssrc_(ssrc) {}
@ -38,22 +66,15 @@ class TransformableOutgoingAudioFrame
void SetData(rtc::ArrayView<const uint8_t> data) override {
payload_.SetData(data.data(), data.size());
}
uint32_t GetTimestamp() const override {
return rtp_timestamp_ + rtp_start_timestamp_;
}
uint32_t GetStartTimestamp() const { return rtp_start_timestamp_; }
uint32_t GetTimestamp() const override { return rtp_timestamp_with_offset_; }
uint32_t GetSsrc() const override { return ssrc_; }
AudioFrameType GetFrameType() const { return frame_type_; }
uint8_t GetPayloadType() const override { return payload_type_; }
int64_t GetAbsoluteCaptureTimestampMs() const {
return absolute_capture_timestamp_ms_;
IfaceFrameType Type() const override {
return InternalFrameTypeToInterfaceFrameType(frame_type_);
}
Direction GetDirection() const override { return Direction::kSender; }
// TODO(crbug.com/1453226): Remove once GetHeader() is removed from
// TransformableAudioFrameInterface.
const RTPHeader& GetHeader() const override { return empty_header_; }
uint8_t GetPayloadType() const override { return payload_type_; }
Direction GetDirection() const override { return Direction::kSender; }
rtc::ArrayView<const uint32_t> GetContributingSources() const override {
return {};
@ -63,22 +84,21 @@ class TransformableOutgoingAudioFrame
return absl::nullopt;
}
void SetRTPTimestamp(uint32_t timestamp) override {
rtp_timestamp_ = timestamp - rtp_start_timestamp_;
void SetRTPTimestamp(uint32_t rtp_timestamp_with_offset) override {
rtp_timestamp_with_offset_ = rtp_timestamp_with_offset;
}
absl::optional<uint64_t> AbsoluteCaptureTimestamp() const override {
return absolute_capture_timestamp_ms_;
}
private:
AudioFrameType frame_type_;
uint8_t payload_type_;
uint32_t rtp_timestamp_;
uint32_t rtp_start_timestamp_;
uint32_t rtp_timestamp_with_offset_;
rtc::Buffer payload_;
int64_t absolute_capture_timestamp_ms_;
uint32_t ssrc_;
// TODO(crbug.com/1453226): Remove once GetHeader() is removed from
// TransformableAudioFrameInterface.
RTPHeader empty_header_;
};
} // namespace
@ -107,15 +127,14 @@ void ChannelSendFrameTransformerDelegate::Transform(
AudioFrameType frame_type,
uint8_t payload_type,
uint32_t rtp_timestamp,
uint32_t rtp_start_timestamp,
const uint8_t* payload_data,
size_t payload_size,
int64_t absolute_capture_timestamp_ms,
uint32_t ssrc) {
frame_transformer_->Transform(
std::make_unique<TransformableOutgoingAudioFrame>(
frame_type, payload_type, rtp_timestamp, rtp_start_timestamp,
payload_data, payload_size, absolute_capture_timestamp_ms, ssrc));
frame_type, payload_type, rtp_timestamp, payload_data, payload_size,
absolute_capture_timestamp_ms, ssrc));
}
void ChannelSendFrameTransformerDelegate::OnTransformedFrame(
@ -134,33 +153,27 @@ void ChannelSendFrameTransformerDelegate::SendFrame(
std::unique_ptr<TransformableFrameInterface> frame) const {
MutexLock lock(&send_lock_);
RTC_DCHECK_RUN_ON(encoder_queue_);
RTC_CHECK_EQ(frame->GetDirection(),
TransformableFrameInterface::Direction::kSender);
if (!send_frame_callback_)
return;
auto* transformed_frame =
static_cast<TransformableOutgoingAudioFrame*>(frame.get());
send_frame_callback_(transformed_frame->GetFrameType(),
transformed_frame->GetPayloadType(),
transformed_frame->GetTimestamp() -
transformed_frame->GetStartTimestamp(),
transformed_frame->GetData(),
transformed_frame->GetAbsoluteCaptureTimestampMs());
static_cast<TransformableAudioFrameInterface*>(frame.get());
send_frame_callback_(
InterfaceFrameTypeToInternalFrameType(transformed_frame->Type()),
transformed_frame->GetPayloadType(), transformed_frame->GetTimestamp(),
transformed_frame->GetData(),
transformed_frame->AbsoluteCaptureTimestamp()
? *transformed_frame->AbsoluteCaptureTimestamp()
: 0);
}
std::unique_ptr<TransformableAudioFrameInterface> CloneSenderAudioFrame(
TransformableAudioFrameInterface* original) {
AudioFrameType audio_frame_type =
original->GetHeader().extension.voiceActivity
? AudioFrameType::kAudioFrameSpeech
: AudioFrameType::kAudioFrameCN;
// TODO(crbug.com/webrtc/14949): Ensure the correct timestamps are passed.
return std::make_unique<TransformableOutgoingAudioFrame>(
audio_frame_type, original->GetPayloadType(), original->GetTimestamp(),
/*rtp_start_timestamp=*/0u, original->GetData().data(),
original->GetData().size(), original->GetTimestamp(),
original->GetSsrc());
InterfaceFrameTypeToInternalFrameType(original->Type()),
original->GetPayloadType(), original->GetTimestamp(),
original->GetData().data(), original->GetData().size(),
original->GetTimestamp(), original->GetSsrc());
}
} // namespace webrtc

View file

@ -32,7 +32,7 @@ class ChannelSendFrameTransformerDelegate : public TransformedFrameCallback {
using SendFrameCallback =
std::function<int32_t(AudioFrameType frameType,
uint8_t payloadType,
uint32_t rtp_timestamp,
uint32_t rtp_timestamp_with_offset,
rtc::ArrayView<const uint8_t> payload,
int64_t absolute_capture_timestamp_ms)>;
ChannelSendFrameTransformerDelegate(
@ -54,7 +54,6 @@ class ChannelSendFrameTransformerDelegate : public TransformedFrameCallback {
void Transform(AudioFrameType frame_type,
uint8_t payload_type,
uint32_t rtp_timestamp,
uint32_t rtp_start_timestamp,
const uint8_t* payload_data,
size_t payload_size,
int64_t absolute_capture_timestamp_ms,

View file

@ -22,7 +22,10 @@
namespace webrtc {
namespace {
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::SaveArg;
class MockChannelSend {
@ -48,6 +51,18 @@ class MockChannelSend {
}
};
std::unique_ptr<MockTransformableAudioFrame> CreateMockReceiverFrame() {
const uint8_t mock_data[] = {1, 2, 3, 4};
std::unique_ptr<MockTransformableAudioFrame> mock_frame =
std::make_unique<MockTransformableAudioFrame>();
rtc::ArrayView<const uint8_t> payload(mock_data);
ON_CALL(*mock_frame, GetData).WillByDefault(Return(payload));
ON_CALL(*mock_frame, GetPayloadType).WillByDefault(Return(0));
ON_CALL(*mock_frame, GetDirection)
.WillByDefault(Return(TransformableFrameInterface::Direction::kReceiver));
return mock_frame;
}
// Test that the delegate registers itself with the frame transformer on Init().
TEST(ChannelSendFrameTransformerDelegateTest,
RegisterTransformedFrameCallbackOnInit) {
@ -99,8 +114,38 @@ TEST(ChannelSendFrameTransformerDelegateTest,
[&callback](std::unique_ptr<TransformableFrameInterface> frame) {
callback->OnTransformedFrame(std::move(frame));
});
delegate->Transform(AudioFrameType::kEmptyFrame, 0, 0, 0, data, sizeof(data),
0, 0);
delegate->Transform(AudioFrameType::kEmptyFrame, 0, 0, data, sizeof(data), 0,
0);
channel_queue.WaitForPreviouslyPostedTasks();
}
// Test that when the delegate receives an incoming frame from the frame
// transformer, it passes it to the channel using the SendFrameCallback.
TEST(ChannelSendFrameTransformerDelegateTest,
TransformRunsChannelSendCallbackForIncomingFrame) {
TaskQueueForTest channel_queue("channel_queue");
rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
MockChannelSend mock_channel;
rtc::scoped_refptr<ChannelSendFrameTransformerDelegate> delegate =
rtc::make_ref_counted<ChannelSendFrameTransformerDelegate>(
mock_channel.callback(), mock_frame_transformer, &channel_queue);
rtc::scoped_refptr<TransformedFrameCallback> callback;
EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameCallback)
.WillOnce(SaveArg<0>(&callback));
delegate->Init();
ASSERT_TRUE(callback);
const uint8_t data[] = {1, 2, 3, 4};
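  // GoogleMock matches the most recently declared expectation first, so the
  // specific expectation below captures the expected call and the catch-all
  // Times(0) only fails the test on SendFrame calls with other arguments.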
EXPECT_CALL(mock_channel, SendFrame).Times(0);
EXPECT_CALL(mock_channel, SendFrame(_, 0, 0, ElementsAre(1, 2, 3, 4), _));
ON_CALL(*mock_frame_transformer, Transform)
.WillByDefault(
[&callback](std::unique_ptr<TransformableFrameInterface> frame) {
callback->OnTransformedFrame(CreateMockReceiverFrame());
});
delegate->Transform(AudioFrameType::kEmptyFrame, 0, 0, data, sizeof(data), 0,
0);
channel_queue.WaitForPreviouslyPostedTasks();
}
@ -119,7 +164,7 @@ TEST(ChannelSendFrameTransformerDelegateTest,
delegate->Reset();
EXPECT_CALL(mock_channel, SendFrame).Times(0);
delegate->OnTransformedFrame(std::make_unique<MockTransformableFrame>());
delegate->OnTransformedFrame(std::make_unique<MockTransformableAudioFrame>());
channel_queue.WaitForPreviouslyPostedTasks();
}

View file

@ -19,7 +19,9 @@
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "call/rtp_transport_controller_send.h"
#include "rtc_base/gunit.h"
#include "test/gtest.h"
#include "test/mock_frame_transformer.h"
#include "test/mock_transport.h"
#include "test/scoped_key_value_config.h"
#include "test/time_controller/simulated_time_controller.h"
@ -31,6 +33,7 @@ namespace {
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::SaveArg;
constexpr int kRtcpIntervalMs = 1000;
constexpr int kSsrc = 333;
@ -120,11 +123,11 @@ TEST_F(ChannelSendTest, IncreaseRtpTimestampByPauseDuration) {
channel_->StartSend();
uint32_t timestamp;
int sent_packets = 0;
auto send_rtp = [&](const uint8_t* data, size_t length,
auto send_rtp = [&](rtc::ArrayView<const uint8_t> data,
const PacketOptions& options) {
++sent_packets;
RtpPacketReceived packet;
packet.Parse(data, length);
packet.Parse(data);
timestamp = packet.Timestamp();
return true;
};
@ -145,6 +148,46 @@ TEST_F(ChannelSendTest, IncreaseRtpTimestampByPauseDuration) {
EXPECT_EQ(timestamp_gap_ms, 10020);
}
TEST_F(ChannelSendTest, FrameTransformerGetsCorrectTimestamp) {
rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
rtc::make_ref_counted<MockFrameTransformer>();
channel_->SetEncoderToPacketizerFrameTransformer(mock_frame_transformer);
rtc::scoped_refptr<TransformedFrameCallback> callback;
EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameCallback)
.WillOnce(SaveArg<0>(&callback));
EXPECT_CALL(*mock_frame_transformer, UnregisterTransformedFrameCallback);
absl::optional<uint32_t> sent_timestamp;
auto send_rtp = [&](rtc::ArrayView<const uint8_t> data,
const PacketOptions& options) {
RtpPacketReceived packet;
packet.Parse(data);
if (!sent_timestamp) {
sent_timestamp = packet.Timestamp();
}
return true;
};
EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(send_rtp));
channel_->StartSend();
int64_t transformable_frame_timestamp = -1;
EXPECT_CALL(*mock_frame_transformer, Transform)
.WillOnce([&](std::unique_ptr<TransformableFrameInterface> frame) {
transformable_frame_timestamp = frame->GetTimestamp();
callback->OnTransformedFrame(std::move(frame));
});
// Insert two frames which should trigger a new packet.
ProcessNextFrame();
ProcessNextFrame();
// Ensure the RTP timestamp on the frame passed to the transformer
// includes the RTP offset and matches the actual RTP timestamp on the sent
// packet.
EXPECT_EQ_WAIT(transformable_frame_timestamp,
0 + channel_->GetRtpRtcp()->StartTimestamp(), 1000);
EXPECT_TRUE_WAIT(sent_timestamp, 1000);
EXPECT_EQ(*sent_timestamp, transformable_frame_timestamp);
}
} // namespace
} // namespace voe
} // namespace webrtc

View file

@ -132,8 +132,10 @@ int32_t AudioEgress::SendData(AudioFrameType frame_type,
const uint32_t rtp_timestamp = timestamp + rtp_rtcp_->StartTimestamp();
// This call will trigger Transport::SendPacket() from the RTP/RTCP module.
if (!rtp_sender_audio_.SendAudio(frame_type, payload_type, rtp_timestamp,
payload.data(), payload.size())) {
if (!rtp_sender_audio_.SendAudio({.type = frame_type,
.payload = payload,
.payload_id = payload_type,
.rtp_timestamp = rtp_timestamp})) {
RTC_DLOG(LS_ERROR)
<< "AudioEgress::SendData() failed to send data to RTP/RTCP module";
return -1;

View file

@ -104,9 +104,8 @@ class AudioChannelTest : public ::testing::Test {
// Resulted RTP packet is looped back into AudioChannel and gets decoded into
// audio frame to see if it has some signal to indicate its validity.
TEST_F(AudioChannelTest, PlayRtpByLocalLoop) {
auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
audio_channel_->ReceivedRTPPacket(
rtc::ArrayView<const uint8_t>(packet, length));
auto loop_rtp = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
audio_channel_->ReceivedRTPPacket(packet);
return true;
};
EXPECT_CALL(transport_, SendRtp).WillOnce(Invoke(loop_rtp));
@ -130,8 +129,8 @@ TEST_F(AudioChannelTest, PlayRtpByLocalLoop) {
// Validate assigned local SSRC is resulted in RTP packet.
TEST_F(AudioChannelTest, VerifyLocalSsrcAsAssigned) {
RtpPacketReceived rtp;
auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
rtp.Parse(packet, length);
auto loop_rtp = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
rtp.Parse(packet);
return true;
};
EXPECT_CALL(transport_, SendRtp).WillOnce(Invoke(loop_rtp));
@ -145,9 +144,8 @@ TEST_F(AudioChannelTest, VerifyLocalSsrcAsAssigned) {
// Check metrics after processing an RTP packet.
TEST_F(AudioChannelTest, TestIngressStatistics) {
auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
audio_channel_->ReceivedRTPPacket(
rtc::ArrayView<const uint8_t>(packet, length));
auto loop_rtp = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
audio_channel_->ReceivedRTPPacket(packet);
return true;
};
EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(loop_rtp));
@ -223,14 +221,12 @@ TEST_F(AudioChannelTest, TestIngressStatistics) {
// Check ChannelStatistics metric after processing RTP and RTCP packets.
TEST_F(AudioChannelTest, TestChannelStatistics) {
auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
audio_channel_->ReceivedRTPPacket(
rtc::ArrayView<const uint8_t>(packet, length));
auto loop_rtp = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
audio_channel_->ReceivedRTPPacket(packet);
return true;
};
auto loop_rtcp = [&](const uint8_t* packet, size_t length) {
audio_channel_->ReceivedRTCPPacket(
rtc::ArrayView<const uint8_t>(packet, length));
auto loop_rtcp = [&](rtc::ArrayView<const uint8_t> packet) {
audio_channel_->ReceivedRTCPPacket(packet);
return true;
};
EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(loop_rtp));
@ -294,8 +290,8 @@ TEST_F(AudioChannelTest, RttIsAvailableAfterChangeOfRemoteSsrc) {
auto send_recv_rtp = [&](rtc::scoped_refptr<AudioChannel> rtp_sender,
rtc::scoped_refptr<AudioChannel> rtp_receiver) {
// Setup routing logic via transport_.
auto route_rtp = [&](const uint8_t* packet, size_t length, Unused) {
rtp_receiver->ReceivedRTPPacket(rtc::MakeArrayView(packet, length));
auto route_rtp = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
rtp_receiver->ReceivedRTPPacket(packet);
return true;
};
ON_CALL(transport_, SendRtp).WillByDefault(route_rtp);
@ -316,8 +312,8 @@ TEST_F(AudioChannelTest, RttIsAvailableAfterChangeOfRemoteSsrc) {
auto send_recv_rtcp = [&](rtc::scoped_refptr<AudioChannel> rtcp_sender,
rtc::scoped_refptr<AudioChannel> rtcp_receiver) {
// Setup routing logic via transport_.
auto route_rtcp = [&](const uint8_t* packet, size_t length) {
rtcp_receiver->ReceivedRTCPPacket(rtc::MakeArrayView(packet, length));
auto route_rtcp = [&](rtc::ArrayView<const uint8_t> packet) {
rtcp_receiver->ReceivedRTCPPacket(packet);
return true;
};
ON_CALL(transport_, SendRtcp).WillByDefault(route_rtcp);

View file

@ -122,8 +122,8 @@ TEST_F(AudioEgressTest, ProcessAudioWithMute) {
rtc::Event event;
int rtp_count = 0;
RtpPacketReceived rtp;
auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
rtp.Parse(packet, length);
auto rtp_sent = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
rtp.Parse(packet);
if (++rtp_count == kExpected) {
event.Set();
}
@ -160,8 +160,8 @@ TEST_F(AudioEgressTest, ProcessAudioWithSineWave) {
rtc::Event event;
int rtp_count = 0;
RtpPacketReceived rtp;
auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
rtp.Parse(packet, length);
auto rtp_sent = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
rtp.Parse(packet);
if (++rtp_count == kExpected) {
event.Set();
}
@ -195,7 +195,7 @@ TEST_F(AudioEgressTest, SkipAudioEncodingAfterStopSend) {
constexpr int kExpected = 10;
rtc::Event event;
int rtp_count = 0;
auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
auto rtp_sent = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
if (++rtp_count == kExpected) {
event.Set();
}
@ -269,9 +269,9 @@ TEST_F(AudioEgressTest, SendDTMF) {
// It's possible that we may have actual audio RTP packets along with
  // DTMF packets. We are only interested in the exact number of DTMF
  // packets the RTP stack is emitting.
auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
auto rtp_sent = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
RtpPacketReceived rtp;
rtp.Parse(packet, length);
rtp.Parse(packet);
if (is_dtmf(rtp) && ++dtmf_count == kExpected) {
event.Set();
}
@ -296,7 +296,7 @@ TEST_F(AudioEgressTest, TestAudioInputLevelAndEnergyDuration) {
constexpr int kExpected = 6;
rtc::Event event;
int rtp_count = 0;
auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
auto rtp_sent = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
if (++rtp_count == kExpected) {
event.Set();
}

View file

@ -114,8 +114,8 @@ TEST_F(AudioIngressTest, PlayingAfterStartAndStop) {
TEST_F(AudioIngressTest, GetAudioFrameAfterRtpReceived) {
rtc::Event event;
auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
auto handle_rtp = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
ingress_->ReceivedRTPPacket(packet);
event.Set();
return true;
};
@ -144,8 +144,8 @@ TEST_F(AudioIngressTest, TestSpeechOutputLevelAndEnergyDuration) {
constexpr int kNumRtp = 6;
int rtp_count = 0;
rtc::Event event;
auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
auto handle_rtp = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
ingress_->ReceivedRTPPacket(packet);
if (++rtp_count == kNumRtp) {
event.Set();
}
@ -175,8 +175,8 @@ TEST_F(AudioIngressTest, TestSpeechOutputLevelAndEnergyDuration) {
TEST_F(AudioIngressTest, PreferredSampleRate) {
rtc::Event event;
auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
auto handle_rtp = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
ingress_->ReceivedRTPPacket(packet);
event.Set();
return true;
};
@ -204,8 +204,8 @@ TEST_F(AudioIngressTest, GetMutedAudioFrameAfterRtpReceivedAndStopPlay) {
constexpr int kNumRtp = 6;
int rtp_count = 0;
rtc::Event event;
auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
auto handle_rtp = [&](rtc::ArrayView<const uint8_t> packet, Unused) {
ingress_->ReceivedRTPPacket(packet);
if (++rtp_count == kNumRtp) {
event.Set();
}

View file

@ -16,6 +16,8 @@ enable_safe_libcxx = true
# only needed to support both WebRTC standalone and Chromium builds.
build_with_chromium = false
use_cxx17 = !is_android
# Use our own suppressions files.
asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc"
lsan_suppressions_file = "//tools_webrtc/sanitizers/lsan_suppressions_webrtc.cc"
@ -51,6 +53,7 @@ declare_args() {
# is used for base tracing, so this feature is disabled.
enable_base_tracing = false
use_perfetto_client_library = false
use_perfetto_trace_processor = false
# Limits the defined //third_party/android_deps targets to only "buildCompile"
# and "buildCompileNoDeps" targets. This is useful for third-party

View file

@ -67,6 +67,7 @@ rtc_library("call_interfaces") {
"../modules/audio_processing",
"../modules/audio_processing:api",
"../modules/audio_processing:audio_processing_statistics",
"../modules/rtp_rtcp",
"../modules/rtp_rtcp:rtp_rtcp_format",
"../rtc_base:audio_format_to_string",
"../rtc_base:checks",

View file

@ -186,6 +186,7 @@ class BitrateEstimatorTest : public test::CallTest {
test::VideoTestConstants::kDefaultFramerate,
*test->task_queue_factory_);
frame_generator_capturer_->Init();
frame_generator_capturer_->Start();
send_stream_->SetSource(frame_generator_capturer_.get(),
DegradationPreference::MAINTAIN_FRAMERATE);
send_stream_->Start();

View file

@ -1293,7 +1293,7 @@ void Call::OnSentPacket(const rtc::SentPacket& sent_packet) {
// on a ProcessThread. This is alright as is since we forward the call to
// implementations that either just do a PostTask or use locking.
video_send_delay_stats_->OnSentPacket(sent_packet.packet_id,
clock_->TimeInMilliseconds());
clock_->CurrentTime());
transport_send_->OnSentPacket(sent_packet);
}

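OnSentPacket now receives a webrtc::Timestamp instead of a raw milliseconds value, part of the wider migration to the api/units types also visible in the ExpectedRetransmissionTime() change below. A minimal sketch of what the unit types enforce:

#include <cassert>

#include "api/units/time_delta.h"
#include "api/units/timestamp.h"

int main() {
  // The unit is part of the type, so callers cannot mix up ms and us.
  webrtc::Timestamp sent = webrtc::Timestamp::Millis(1000);
  webrtc::Timestamp now = webrtc::Timestamp::Millis(1250);
  webrtc::TimeDelta elapsed = now - sent;
  assert(elapsed.ms() == 250);
  // PlusInfinity() replaces the old "nullopt means never" convention.
  assert(webrtc::TimeDelta::PlusInfinity() > elapsed);
  return 0;
}
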
View file

@ -469,10 +469,10 @@ void CallPerfTest::TestCaptureNtpTime(
EXPECT_TRUE(std::abs(time_offset_ms) < threshold_ms_);
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
MutexLock lock(&mutex_);
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
if (!rtp_start_timestamp_set_) {
// Calculate the rtp timestamp offset in order to calculate the real
@ -695,7 +695,7 @@ void CallPerfTest::TestMinTransmitBitrate(bool pad_to_min_bitrate) {
private:
// TODO(holmer): Run this with a timer instead of once per packet.
Action OnSendRtp(const uint8_t* packet, size_t length) override {
Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
task_queue_->PostTask(SafeTask(task_safety_flag_, [this]() {
VideoSendStream::Stats stats = send_stream_->GetStats();
@ -1147,7 +1147,7 @@ void CallPerfTest::TestEncodeFramerate(VideoEncoderFactory* encoder_factory,
}
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
const Timestamp now = clock_->CurrentTime();
if (now - last_getstats_time_ > kMinGetStatsInterval) {
last_getstats_time_ = now;

View file

@ -31,18 +31,17 @@ DegradedCall::FakeNetworkPipeOnTaskQueue::FakeNetworkPipeOnTaskQueue(
pipe_(clock, std::move(network_behavior)) {}
void DegradedCall::FakeNetworkPipeOnTaskQueue::SendRtp(
const uint8_t* packet,
size_t length,
rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options,
Transport* transport) {
pipe_.SendRtp(packet, length, options, transport);
pipe_.SendRtp(packet, options, transport);
Process();
}
void DegradedCall::FakeNetworkPipeOnTaskQueue::SendRtcp(const uint8_t* packet,
size_t length,
Transport* transport) {
pipe_.SendRtcp(packet, length, transport);
void DegradedCall::FakeNetworkPipeOnTaskQueue::SendRtcp(
rtc::ArrayView<const uint8_t> packet,
Transport* transport) {
pipe_.SendRtcp(packet, transport);
Process();
}
@ -102,20 +101,19 @@ DegradedCall::FakeNetworkPipeTransportAdapter::
}
bool DegradedCall::FakeNetworkPipeTransportAdapter::SendRtp(
const uint8_t* packet,
size_t length,
rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
// A call here comes from the RTP stack (probably pacer). We intercept it and
  // put it in the fake network pipe instead, but report to Call that it has
// been sent, so that the bandwidth estimator sees the delay we add.
network_pipe_->SendRtp(packet, length, options, real_transport_);
network_pipe_->SendRtp(packet, options, real_transport_);
if (options.packet_id != -1) {
rtc::SentPacket sent_packet;
sent_packet.packet_id = options.packet_id;
sent_packet.send_time_ms = clock_->TimeInMilliseconds();
sent_packet.info.included_in_feedback = options.included_in_feedback;
sent_packet.info.included_in_allocation = options.included_in_allocation;
sent_packet.info.packet_size_bytes = length;
sent_packet.info.packet_size_bytes = packet.size();
sent_packet.info.packet_type = rtc::PacketType::kData;
call_->OnSentPacket(sent_packet);
}
@ -123,9 +121,8 @@ bool DegradedCall::FakeNetworkPipeTransportAdapter::SendRtp(
}
bool DegradedCall::FakeNetworkPipeTransportAdapter::SendRtcp(
const uint8_t* packet,
size_t length) {
network_pipe_->SendRtcp(packet, length, real_transport_);
rtc::ArrayView<const uint8_t> packet) {
network_pipe_->SendRtcp(packet, real_transport_);
return true;
}

View file

@ -128,11 +128,10 @@ class DegradedCall : public Call, private PacketReceiver {
Clock* clock,
std::unique_ptr<NetworkBehaviorInterface> network_behavior);
void SendRtp(const uint8_t* packet,
size_t length,
void SendRtp(rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options,
Transport* transport);
void SendRtcp(const uint8_t* packet, size_t length, Transport* transport);
void SendRtcp(rtc::ArrayView<const uint8_t> packet, Transport* transport);
void AddActiveTransport(Transport* transport);
void RemoveActiveTransport(Transport* transport);
@ -161,10 +160,9 @@ class DegradedCall : public Call, private PacketReceiver {
Transport* real_transport);
~FakeNetworkPipeTransportAdapter();
bool SendRtp(const uint8_t* packet,
size_t length,
bool SendRtp(rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) override;
bool SendRtcp(const uint8_t* packet, size_t length) override;
bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override;
private:
FakeNetworkPipeOnTaskQueue* const network_pipe_;

View file

@ -106,34 +106,13 @@ FakeNetworkPipe::FakeNetworkPipe(
: clock_(clock),
network_behavior_(std::move(network_behavior)),
receiver_(receiver),
global_transport_(nullptr),
clock_offset_ms_(0),
dropped_packets_(0),
sent_packets_(0),
total_packet_delay_us_(0),
last_log_time_us_(clock_->TimeInMicroseconds()) {}
FakeNetworkPipe::FakeNetworkPipe(
Clock* clock,
std::unique_ptr<NetworkBehaviorInterface> network_behavior,
Transport* transport)
: clock_(clock),
network_behavior_(std::move(network_behavior)),
receiver_(nullptr),
global_transport_(transport),
clock_offset_ms_(0),
dropped_packets_(0),
sent_packets_(0),
total_packet_delay_us_(0),
last_log_time_us_(clock_->TimeInMicroseconds()) {
RTC_DCHECK(global_transport_);
AddActiveTransport(global_transport_);
}
FakeNetworkPipe::~FakeNetworkPipe() {
if (global_transport_) {
RemoveActiveTransport(global_transport_);
}
RTC_DCHECK(active_transports_.empty());
}
@ -156,38 +135,18 @@ void FakeNetworkPipe::RemoveActiveTransport(Transport* transport) {
}
}
bool FakeNetworkPipe::SendRtp(const uint8_t* packet,
size_t length,
const PacketOptions& options) {
RTC_DCHECK(global_transport_);
EnqueuePacket(rtc::CopyOnWriteBuffer(packet, length), options, false,
global_transport_);
return true;
}
bool FakeNetworkPipe::SendRtcp(const uint8_t* packet, size_t length) {
RTC_DCHECK(global_transport_);
EnqueuePacket(rtc::CopyOnWriteBuffer(packet, length), absl::nullopt, true,
global_transport_);
return true;
}
bool FakeNetworkPipe::SendRtp(const uint8_t* packet,
size_t length,
bool FakeNetworkPipe::SendRtp(rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options,
Transport* transport) {
RTC_DCHECK(transport);
EnqueuePacket(rtc::CopyOnWriteBuffer(packet, length), options, false,
transport);
EnqueuePacket(rtc::CopyOnWriteBuffer(packet), options, false, transport);
return true;
}
bool FakeNetworkPipe::SendRtcp(const uint8_t* packet,
size_t length,
bool FakeNetworkPipe::SendRtcp(rtc::ArrayView<const uint8_t> packet,
Transport* transport) {
RTC_DCHECK(transport);
EnqueuePacket(rtc::CopyOnWriteBuffer(packet, length), absl::nullopt, true,
transport);
EnqueuePacket(rtc::CopyOnWriteBuffer(packet), absl::nullopt, true, transport);
return true;
}
@ -356,10 +315,12 @@ void FakeNetworkPipe::DeliverNetworkPacket(NetworkPacket* packet) {
return;
}
if (packet->is_rtcp()) {
transport->SendRtcp(packet->data(), packet->data_length());
transport->SendRtcp(
rtc::MakeArrayView(packet->data(), packet->data_length()));
} else {
transport->SendRtp(packet->data(), packet->data_length(),
packet->packet_options());
transport->SendRtp(
rtc::MakeArrayView(packet->data(), packet->data_length()),
packet->packet_options());
}
} else if (receiver_) {
int64_t packet_time_us = packet->packet_time_us().value_or(-1);

View file

@ -113,11 +113,6 @@ class FakeNetworkPipe : public SimulatedPacketReceiverInterface {
PacketReceiver* receiver,
uint64_t seed);
// Use this constructor if you plan to insert packets using SendRt[c?]p().
FakeNetworkPipe(Clock* clock,
std::unique_ptr<NetworkBehaviorInterface> network_behavior,
Transport* transport);
~FakeNetworkPipe() override;
FakeNetworkPipe(const FakeNetworkPipe&) = delete;
@ -134,23 +129,13 @@ class FakeNetworkPipe : public SimulatedPacketReceiverInterface {
void AddActiveTransport(Transport* transport);
void RemoveActiveTransport(Transport* transport);
// Implements Transport interface. When/if packets are delivered, they will
// be passed to the transport instance given in SetReceiverTransport(). These
// methods should only be called if a Transport instance was provided in the
// constructor.
bool SendRtp(const uint8_t* packet,
size_t length,
const PacketOptions& options);
bool SendRtcp(const uint8_t* packet, size_t length);
// Methods for use with Transport interface. When/if packets are delivered,
// they will be passed to the instance specified by the `transport` parameter.
// Note that that instance must be in the map of active transports.
bool SendRtp(const uint8_t* packet,
size_t length,
bool SendRtp(rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options,
Transport* transport);
bool SendRtcp(const uint8_t* packet, size_t length, Transport* transport);
bool SendRtcp(rtc::ArrayView<const uint8_t> packet, Transport* transport);
// Implements the PacketReceiver interface. When/if packets are delivered,
// they will be passed directly to the receiver instance given in
@ -218,7 +203,6 @@ class FakeNetworkPipe : public SimulatedPacketReceiverInterface {
mutable Mutex config_lock_;
const std::unique_ptr<NetworkBehaviorInterface> network_behavior_;
PacketReceiver* receiver_ RTC_GUARDED_BY(config_lock_);
Transport* const global_transport_;
// `process_lock` guards the data structures involved in delay and loss
// processes, such as the packet queues.

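With the global-transport constructor gone, FakeNetworkPipe callers register transports up front and name the destination on every send. A toy sketch of the usage contract, with simplified signatures rather than the real class:

#include <cstdint>
#include <set>
#include <vector>

struct Transport {
  int id;
};

class PipeSketch {
 public:
  void AddActiveTransport(Transport* t) { active_.insert(t); }
  void RemoveActiveTransport(Transport* t) { active_.erase(t); }
  // The transport is now a per-call argument and must be registered.
  bool SendRtp(const std::vector<uint8_t>& packet, Transport* t) {
    return !packet.empty() && active_.count(t) > 0;  // mirrors the RTC_DCHECK
  }

 private:
  std::set<Transport*> active_;
};

int main() {
  PipeSketch pipe;
  Transport t{1};
  pipe.AddActiveTransport(&t);
  bool ok = pipe.SendRtp({1, 2, 3}, &t);
  pipe.RemoveActiveTransport(&t);
  return ok ? 0 : 1;
}
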
View file

@ -21,6 +21,7 @@
#include "api/rtp_parameters.h"
#include "call/receive_stream.h"
#include "call/rtp_packet_sink_interface.h"
#include "modules/rtp_rtcp/include/receive_statistics.h"
namespace webrtc {
@ -69,6 +70,8 @@ class FlexfecReceiveStream : public RtpPacketSinkInterface,
// Called to change the payload type after initialization.
virtual void SetPayloadType(int payload_type) = 0;
virtual int payload_type() const = 0;
virtual const ReceiveStatistics* GetStats() const = 0;
};
} // namespace webrtc

View file

@ -21,7 +21,6 @@
#include "api/rtp_parameters.h"
#include "call/rtp_stream_receiver_controller_interface.h"
#include "modules/rtp_rtcp/include/flexfec_receiver.h"
#include "modules/rtp_rtcp/include/receive_statistics.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"

View file

@ -70,6 +70,10 @@ class FlexfecReceiveStreamImpl : public FlexfecReceiveStream {
rtp_rtcp_->SetRTCPStatus(mode);
}
const ReceiveStatistics* GetStats() const override {
return rtp_receive_statistics_.get();
}
private:
RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_;

View file

@ -207,7 +207,7 @@ RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader(
rtp_video_header.frame_type = image._frameType;
rtp_video_header.rotation = image.rotation_;
rtp_video_header.content_type = image.content_type_;
rtp_video_header.playout_delay = image.playout_delay_;
rtp_video_header.playout_delay = image.PlayoutDelay();
rtp_video_header.width = image._encodedWidth;
rtp_video_header.height = image._encodedHeight;
rtp_video_header.color_space = image.ColorSpace()
@ -542,7 +542,8 @@ void RtpPayloadParams::Vp9ToGeneric(const CodecSpecificInfoVP9& vp9_info,
const auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(rtp_video_header.video_type_header);
const int num_spatial_layers = kMaxSimulatedSpatialLayers;
const int num_active_spatial_layers = vp9_header.num_spatial_layers;
const int first_active_spatial_id = vp9_header.first_active_layer;
const int last_active_spatial_id = vp9_header.num_spatial_layers - 1;
const int num_temporal_layers = kMaxTemporalStreams;
static_assert(num_spatial_layers <=
RtpGenericFrameDescriptor::kMaxSpatialLayers);
@ -556,10 +557,16 @@ void RtpPayloadParams::Vp9ToGeneric(const CodecSpecificInfoVP9& vp9_info,
int temporal_index =
vp9_header.temporal_idx != kNoTemporalIdx ? vp9_header.temporal_idx : 0;
if (spatial_index >= num_spatial_layers ||
temporal_index >= num_temporal_layers ||
num_active_spatial_layers > num_spatial_layers) {
if (!(temporal_index < num_temporal_layers &&
first_active_spatial_id <= spatial_index &&
spatial_index <= last_active_spatial_id &&
last_active_spatial_id < num_spatial_layers)) {
// Prefer to generate no generic layering than an inconsistent one.
RTC_LOG(LS_ERROR) << "Inconsistent layer id sid=" << spatial_index
<< ",tid=" << temporal_index
<< " in VP9 header. Active spatial ids: ["
<< first_active_spatial_id << ","
<< last_active_spatial_id << "]";
return;
}
@ -642,28 +649,39 @@ void RtpPayloadParams::Vp9ToGeneric(const CodecSpecificInfoVP9& vp9_info,
}
result.active_decode_targets =
((uint32_t{1} << num_temporal_layers * num_active_spatial_layers) - 1);
((uint32_t{1} << num_temporal_layers * (last_active_spatial_id + 1)) -
1) ^
((uint32_t{1} << num_temporal_layers * first_active_spatial_id) - 1);
  // Calculate chains, assuming the chain includes all frames with temporal_id = 0.
if (!vp9_header.inter_pic_predicted && !vp9_header.inter_layer_predicted) {
// Assume frames without dependencies also reset chains.
for (int sid = spatial_index; sid < num_spatial_layers; ++sid) {
for (int sid = spatial_index; sid <= last_active_spatial_id; ++sid) {
chain_last_frame_id_[sid] = -1;
}
}
result.chain_diffs.resize(num_spatial_layers, 0);
for (int sid = 0; sid < num_active_spatial_layers; ++sid) {
for (int sid = first_active_spatial_id; sid <= last_active_spatial_id;
++sid) {
if (chain_last_frame_id_[sid] == -1) {
result.chain_diffs[sid] = 0;
continue;
}
result.chain_diffs[sid] = shared_frame_id - chain_last_frame_id_[sid];
int64_t chain_diff = shared_frame_id - chain_last_frame_id_[sid];
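    // The dependency descriptor encodes chain diffs as 8-bit values, so a
    // gap of 256 or more frames cannot be represented on the wire; treat the
    // chain as broken and restart it instead.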
if (chain_diff >= 256) {
RTC_LOG(LS_ERROR)
<< "Too many frames since last VP9 T0 frame for spatial layer #"
<< sid << " at frame#" << shared_frame_id;
chain_last_frame_id_[sid] = -1;
chain_diff = 0;
}
result.chain_diffs[sid] = chain_diff;
}
if (temporal_index == 0) {
chain_last_frame_id_[spatial_index] = shared_frame_id;
if (!vp9_header.non_ref_for_inter_layer_pred) {
for (int sid = spatial_index + 1; sid < num_spatial_layers; ++sid) {
for (int sid = spatial_index + 1; sid <= last_active_spatial_id; ++sid) {
chain_last_frame_id_[sid] = shared_frame_id;
}
}

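The active_decode_targets mask above now covers only the decode targets between the first and last active spatial layers, instead of always starting at layer 0. A worked sketch with hypothetical values:

#include <cassert>
#include <cstdint>

int main() {
  // With 4 temporal layers and active spatial layers 1..2, the mask must
  // cover decode targets 4..11 and exclude targets 0..3 (inactive layer S0).
  const int num_temporal_layers = 4;
  const int first_active = 1;
  const int last_active = 2;
  const uint32_t upto_last =
      (uint32_t{1} << (num_temporal_layers * (last_active + 1))) - 1;  // 0xFFF
  const uint32_t below_first =
      (uint32_t{1} << (num_temporal_layers * first_active)) - 1;  // 0xF
  const uint32_t mask = upto_last ^ below_first;
  assert(mask == 0xFF0);  // bits 4..11 set
  return 0;
}
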
View file

@ -1136,6 +1136,170 @@ TEST(RtpPayloadParamsVp9ToGenericTest,
EXPECT_EQ(headers[2].generic->chain_diffs[1], 2);
}
TEST(RtpPayloadParamsVp9ToGenericTest, ChangeFirstActiveLayer) {
// S2 4---5
//
// S1 1---3 7
//
// S0 0---2 6
RtpPayloadState state;
RtpPayloadParams params(/*ssrc=*/123, &state, FieldTrialBasedConfig());
EncodedImage image;
CodecSpecificInfo info;
info.codecType = kVideoCodecVP9;
info.codecSpecific.VP9.flexible_mode = true;
info.codecSpecific.VP9.first_frame_in_picture = true;
info.codecSpecific.VP9.inter_layer_predicted = false;
info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true;
info.codecSpecific.VP9.first_frame_in_picture = true;
info.end_of_picture = true;
RTPVideoHeader headers[8];
// S0 key frame.
info.codecSpecific.VP9.num_spatial_layers = 2;
info.codecSpecific.VP9.first_active_layer = 0;
image._frameType = VideoFrameType::kVideoFrameKey;
image.SetSpatialIndex(0);
info.codecSpecific.VP9.inter_pic_predicted = false;
info.codecSpecific.VP9.num_ref_pics = 0;
headers[0] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/0);
// S1 key frame.
image._frameType = VideoFrameType::kVideoFrameKey;
image.SetSpatialIndex(1);
info.codecSpecific.VP9.inter_pic_predicted = false;
info.codecSpecific.VP9.num_ref_pics = 0;
headers[1] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/1);
// S0 delta frame.
image._frameType = VideoFrameType::kVideoFrameDelta;
image.SetSpatialIndex(0);
info.codecSpecific.VP9.inter_pic_predicted = true;
info.codecSpecific.VP9.num_ref_pics = 1;
info.codecSpecific.VP9.p_diff[0] = 1;
headers[2] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/2);
// S1 delta frame.
image._frameType = VideoFrameType::kVideoFrameDelta;
info.codecSpecific.VP9.inter_pic_predicted = true;
info.codecSpecific.VP9.num_ref_pics = 1;
info.codecSpecific.VP9.p_diff[0] = 1;
headers[3] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/3);
// S2 key frame
info.codecSpecific.VP9.num_spatial_layers = 3;
info.codecSpecific.VP9.first_active_layer = 2;
image._frameType = VideoFrameType::kVideoFrameKey;
image.SetSpatialIndex(2);
info.codecSpecific.VP9.inter_pic_predicted = false;
info.codecSpecific.VP9.num_ref_pics = 0;
headers[4] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/4);
// S2 delta frame.
image._frameType = VideoFrameType::kVideoFrameDelta;
info.codecSpecific.VP9.inter_pic_predicted = true;
info.codecSpecific.VP9.num_ref_pics = 1;
info.codecSpecific.VP9.p_diff[0] = 1;
headers[5] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/5);
// S0 key frame after pause.
info.codecSpecific.VP9.num_spatial_layers = 2;
info.codecSpecific.VP9.first_active_layer = 0;
image._frameType = VideoFrameType::kVideoFrameKey;
image.SetSpatialIndex(0);
info.codecSpecific.VP9.inter_pic_predicted = false;
info.codecSpecific.VP9.num_ref_pics = 0;
headers[6] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/6);
// S1 key frame.
image._frameType = VideoFrameType::kVideoFrameKey;
image.SetSpatialIndex(1);
info.codecSpecific.VP9.inter_pic_predicted = false;
info.codecSpecific.VP9.num_ref_pics = 0;
headers[7] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/7);
ASSERT_TRUE(headers[0].generic);
int num_decode_targets = headers[0].generic->decode_target_indications.size();
int num_chains = headers[0].generic->chain_diffs.size();
  // Rely on the implementation detail that there are always kMaxTemporalStreams
  // temporal layers. In particular, assume Decode Target#0 matches layer S0T0
  // and Decode Target#kMaxTemporalStreams matches layer S1T0.
static constexpr int kS0T0 = 0;
static constexpr int kS1T0 = kMaxTemporalStreams;
static constexpr int kS2T0 = 2 * kMaxTemporalStreams;
ASSERT_GE(num_decode_targets, 3);
ASSERT_GE(num_chains, 3);
for (int frame_idx = 0; frame_idx < int{std::size(headers)}; ++frame_idx) {
const RTPVideoHeader& header = headers[frame_idx];
ASSERT_TRUE(header.generic);
EXPECT_EQ(header.generic->temporal_index, 0);
ASSERT_THAT(header.generic->decode_target_indications,
SizeIs(num_decode_targets));
ASSERT_THAT(header.generic->chain_diffs, SizeIs(num_chains));
EXPECT_EQ(header.generic->frame_id, frame_idx);
}
EXPECT_TRUE(headers[0].generic->active_decode_targets[kS0T0]);
EXPECT_TRUE(headers[0].generic->active_decode_targets[kS1T0]);
EXPECT_FALSE(headers[0].generic->active_decode_targets[kS2T0]);
EXPECT_FALSE(headers[4].generic->active_decode_targets[kS0T0]);
EXPECT_FALSE(headers[4].generic->active_decode_targets[kS1T0]);
EXPECT_TRUE(headers[4].generic->active_decode_targets[kS2T0]);
EXPECT_EQ(headers[1].generic->active_decode_targets,
headers[0].generic->active_decode_targets);
EXPECT_EQ(headers[2].generic->active_decode_targets,
headers[0].generic->active_decode_targets);
EXPECT_EQ(headers[3].generic->active_decode_targets,
headers[0].generic->active_decode_targets);
EXPECT_EQ(headers[5].generic->active_decode_targets,
headers[4].generic->active_decode_targets);
EXPECT_EQ(headers[6].generic->active_decode_targets,
headers[0].generic->active_decode_targets);
EXPECT_EQ(headers[7].generic->active_decode_targets,
headers[0].generic->active_decode_targets);
EXPECT_EQ(headers[0].generic->chain_diffs[0], 0);
EXPECT_EQ(headers[0].generic->chain_diffs[1], 0);
EXPECT_EQ(headers[0].generic->chain_diffs[2], 0);
EXPECT_EQ(headers[1].generic->chain_diffs[0], 1);
EXPECT_EQ(headers[1].generic->chain_diffs[1], 0);
EXPECT_EQ(headers[1].generic->chain_diffs[2], 0);
EXPECT_EQ(headers[2].generic->chain_diffs[0], 2);
EXPECT_EQ(headers[2].generic->chain_diffs[1], 1);
EXPECT_EQ(headers[2].generic->chain_diffs[2], 0);
EXPECT_EQ(headers[3].generic->chain_diffs[0], 1);
EXPECT_EQ(headers[3].generic->chain_diffs[1], 2);
EXPECT_EQ(headers[3].generic->chain_diffs[2], 0);
EXPECT_EQ(headers[4].generic->chain_diffs[0], 0);
EXPECT_EQ(headers[4].generic->chain_diffs[1], 0);
EXPECT_EQ(headers[4].generic->chain_diffs[2], 0);
EXPECT_EQ(headers[5].generic->chain_diffs[0], 0);
EXPECT_EQ(headers[5].generic->chain_diffs[1], 0);
EXPECT_EQ(headers[5].generic->chain_diffs[2], 1);
EXPECT_EQ(headers[6].generic->chain_diffs[0], 0);
EXPECT_EQ(headers[6].generic->chain_diffs[1], 0);
EXPECT_EQ(headers[6].generic->chain_diffs[2], 0);
EXPECT_EQ(headers[7].generic->chain_diffs[0], 1);
EXPECT_EQ(headers[7].generic->chain_diffs[1], 0);
EXPECT_EQ(headers[7].generic->chain_diffs[2], 0);
}
class RtpPayloadParamsH264ToGenericTest : public ::testing::Test {
public:
enum LayerSync { kNoSync, kSync };

View file

@ -21,6 +21,7 @@
#include "api/array_view.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/transport/field_trial_based_config.h"
#include "api/units/time_delta.h"
#include "api/video_codecs/video_codec.h"
#include "call/rtp_transport_controller_send_interface.h"
#include "modules/pacing/packet_router.h"
@ -279,7 +280,7 @@ std::vector<RtpStreamSender> CreateRtpStreamSenders(
crypto_options.sframe.require_frame_encryption;
video_config.field_trials = &trials;
video_config.enable_retransmit_all_layers =
video_config.field_trials->IsEnabled(
!video_config.field_trials->IsDisabled(
"WebRTC-Video-EnableRetransmitAllLayers");
const bool using_flexfec =
@ -497,7 +498,7 @@ void RtpVideoSender::SetActiveModules(const std::vector<bool>& active_modules) {
void RtpVideoSender::SetActiveModulesLocked(
const std::vector<bool>& active_modules) {
RTC_DCHECK_RUN_ON(&transport_checker_);
RTC_DCHECK_EQ(rtp_streams_.size(), active_modules.size());
RTC_CHECK_EQ(rtp_streams_.size(), active_modules.size());
active_ = false;
for (size_t i = 0; i < active_modules.size(); ++i) {
if (active_modules[i]) {
@ -591,10 +592,10 @@ EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
return Result(Result::ERROR_SEND_FAILED);
}
absl::optional<int64_t> expected_retransmission_time_ms;
TimeDelta expected_retransmission_time = TimeDelta::PlusInfinity();
if (encoded_image.RetransmissionAllowed()) {
expected_retransmission_time_ms =
rtp_streams_[simulcast_index].rtp_rtcp->ExpectedRetransmissionTimeMs();
expected_retransmission_time =
rtp_streams_[simulcast_index].rtp_rtcp->ExpectedRetransmissionTime();
}
if (IsFirstFrameOfACodedVideoSequence(encoded_image, codec_specific_info)) {
@ -623,7 +624,7 @@ EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
rtp_config_.payload_type, codec_type_, rtp_timestamp, encoded_image,
params_[simulcast_index].GetRtpVideoHeader(
encoded_image, codec_specific_info, shared_frame_id_),
expected_retransmission_time_ms);
expected_retransmission_time);
if (frame_count_observer_) {
FrameCounts& counts = frame_counts_[simulcast_index];
if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {

View file

@ -464,10 +464,10 @@ TEST(RtpVideoSenderTest, DoesNotRetrasmitAckedPackets) {
EXPECT_CALL(test.transport(), SendRtp)
.Times(2)
.WillRepeatedly([&rtp_sequence_numbers, &transport_sequence_numbers](
const uint8_t* packet, size_t length,
rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
rtp_sequence_numbers.push_back(rtp_packet.SequenceNumber());
transport_sequence_numbers.push_back(options.packet_id);
return true;
@ -491,10 +491,10 @@ TEST(RtpVideoSenderTest, DoesNotRetrasmitAckedPackets) {
EXPECT_CALL(test.transport(), SendRtp)
.Times(2)
.WillRepeatedly([&retransmitted_rtp_sequence_numbers](
const uint8_t* packet, size_t length,
rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
EXPECT_EQ(rtp_packet.Ssrc(), kRtxSsrc1);
// Capture the retransmitted sequence number from the RTX header.
rtc::ArrayView<const uint8_t> payload = rtp_packet.payload();
@ -532,10 +532,10 @@ TEST(RtpVideoSenderTest, DoesNotRetrasmitAckedPackets) {
// still be retransmitted.
test.AdvanceTime(TimeDelta::Millis(33));
EXPECT_CALL(test.transport(), SendRtp)
.WillOnce([&lost_packet_feedback](const uint8_t* packet, size_t length,
.WillOnce([&lost_packet_feedback](rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
EXPECT_EQ(rtp_packet.Ssrc(), kRtxSsrc1);
// Capture the retransmitted sequence number from the RTX header.
rtc::ArrayView<const uint8_t> payload = rtp_packet.payload();
@ -635,10 +635,10 @@ TEST(RtpVideoSenderTest, EarlyRetransmits) {
EXPECT_CALL(test.transport(), SendRtp)
.WillOnce(
[&frame1_rtp_sequence_number, &frame1_transport_sequence_number](
const uint8_t* packet, size_t length,
rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
frame1_rtp_sequence_number = rtp_packet.SequenceNumber();
frame1_transport_sequence_number = options.packet_id;
EXPECT_EQ(rtp_packet.Ssrc(), kSsrc1);
@ -655,10 +655,10 @@ TEST(RtpVideoSenderTest, EarlyRetransmits) {
EXPECT_CALL(test.transport(), SendRtp)
.WillOnce(
[&frame2_rtp_sequence_number, &frame2_transport_sequence_number](
const uint8_t* packet, size_t length,
rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
frame2_rtp_sequence_number = rtp_packet.SequenceNumber();
frame2_transport_sequence_number = options.packet_id;
EXPECT_EQ(rtp_packet.Ssrc(), kSsrc2);
@ -673,11 +673,11 @@ TEST(RtpVideoSenderTest, EarlyRetransmits) {
// Inject a transport feedback where the packet for the first frame is lost,
// expect a retransmission for it.
EXPECT_CALL(test.transport(), SendRtp)
.WillOnce([&frame1_rtp_sequence_number](const uint8_t* packet,
size_t length,
const PacketOptions& options) {
.WillOnce([&frame1_rtp_sequence_number](
rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
EXPECT_EQ(rtp_packet.Ssrc(), kRtxSsrc1);
// Retransmitted sequence number from the RTX header should match
@ -716,10 +716,10 @@ TEST(RtpVideoSenderTest, SupportsDependencyDescriptor) {
kDependencyDescriptorExtensionId);
std::vector<RtpPacket> sent_packets;
ON_CALL(test.transport(), SendRtp)
.WillByDefault([&](const uint8_t* packet, size_t length,
.WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
sent_packets.emplace_back(&extensions);
EXPECT_TRUE(sent_packets.back().Parse(packet, length));
EXPECT_TRUE(sent_packets.back().Parse(packet));
return true;
});
@ -777,9 +777,8 @@ TEST(RtpVideoSenderTest,
std::vector<RtpPacket> sent_packets;
ON_CALL(test.transport(), SendRtp)
.WillByDefault(
[&](const uint8_t* packet, size_t length, const PacketOptions&) {
EXPECT_TRUE(
sent_packets.emplace_back(&extensions).Parse(packet, length));
[&](rtc::ArrayView<const uint8_t> packet, const PacketOptions&) {
EXPECT_TRUE(sent_packets.emplace_back(&extensions).Parse(packet));
return true;
});
test.SetActiveModules({true});
@ -823,10 +822,10 @@ TEST(RtpVideoSenderTest, SupportsDependencyDescriptorForVp9) {
kDependencyDescriptorExtensionId);
std::vector<RtpPacket> sent_packets;
ON_CALL(test.transport(), SendRtp)
.WillByDefault([&](const uint8_t* packet, size_t length,
.WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
sent_packets.emplace_back(&extensions);
EXPECT_TRUE(sent_packets.back().Parse(packet, length));
EXPECT_TRUE(sent_packets.back().Parse(packet));
return true;
});
@ -879,10 +878,10 @@ TEST(RtpVideoSenderTest,
kDependencyDescriptorExtensionId);
std::vector<RtpPacket> sent_packets;
ON_CALL(test.transport(), SendRtp)
.WillByDefault([&](const uint8_t* packet, size_t length,
.WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
sent_packets.emplace_back(&extensions);
EXPECT_TRUE(sent_packets.back().Parse(packet, length));
EXPECT_TRUE(sent_packets.back().Parse(packet));
return true;
});
@ -934,10 +933,10 @@ TEST(RtpVideoSenderTest, GenerateDependecyDescriptorForGenericCodecs) {
kDependencyDescriptorExtensionId);
std::vector<RtpPacket> sent_packets;
ON_CALL(test.transport(), SendRtp)
.WillByDefault([&](const uint8_t* packet, size_t length,
.WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
sent_packets.emplace_back(&extensions);
EXPECT_TRUE(sent_packets.back().Parse(packet, length));
EXPECT_TRUE(sent_packets.back().Parse(packet));
return true;
});
@ -980,10 +979,10 @@ TEST(RtpVideoSenderTest, SupportsStoppingUsingDependencyDescriptor) {
kDependencyDescriptorExtensionId);
std::vector<RtpPacket> sent_packets;
ON_CALL(test.transport(), SendRtp)
.WillByDefault([&](const uint8_t* packet, size_t length,
.WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
sent_packets.emplace_back(&extensions);
EXPECT_TRUE(sent_packets.back().Parse(packet, length));
EXPECT_TRUE(sent_packets.back().Parse(packet));
return true;
});
@ -1092,10 +1091,10 @@ TEST(RtpVideoSenderTest, ClearsPendingPacketsOnInactivation) {
kDependencyDescriptorExtensionId);
std::vector<RtpPacket> sent_packets;
ON_CALL(test.transport(), SendRtp)
.WillByDefault([&](const uint8_t* packet, size_t length,
.WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
sent_packets.emplace_back(&extensions);
EXPECT_TRUE(sent_packets.back().Parse(packet, length));
EXPECT_TRUE(sent_packets.back().Parse(packet));
return true;
});
@ -1182,10 +1181,10 @@ TEST(RtpVideoSenderTest, RetransmitsBaseLayerOnly) {
EXPECT_CALL(test.transport(), SendRtp)
.Times(2)
.WillRepeatedly([&rtp_sequence_numbers, &transport_sequence_numbers](
const uint8_t* packet, size_t length,
rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
rtp_sequence_numbers.push_back(rtp_packet.SequenceNumber());
transport_sequence_numbers.push_back(options.packet_id);
return true;
@ -1218,10 +1217,10 @@ TEST(RtpVideoSenderTest, RetransmitsBaseLayerOnly) {
EXPECT_CALL(test.transport(), SendRtp)
.Times(1)
.WillRepeatedly([&retransmitted_rtp_sequence_numbers](
const uint8_t* packet, size_t length,
rtc::ArrayView<const uint8_t> packet,
const PacketOptions& options) {
RtpPacket rtp_packet;
EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_TRUE(rtp_packet.Parse(packet));
EXPECT_EQ(rtp_packet.Ssrc(), kRtxSsrc1);
// Capture the retransmitted sequence number from the RTX header.
rtc::ArrayView<const uint8_t> payload = rtp_packet.payload();

View file

@ -13,7 +13,7 @@
namespace webrtc {
// The timestamp is always in UTC.
const char* const kSourceTimestamp = "WebRTC source stamp 2023-06-20T04:03:02";
const char* const kSourceTimestamp = "WebRTC source stamp 2023-09-05T04:12:20";
void LoadWebRTCVersionInRegister() {
// Using volatile to instruct the compiler to not optimize `p` away even

View file

@ -88,7 +88,7 @@ class VideoReceiveStreamInterface : public MediaReceiveStreamInterface {
uint32_t frames_rendered = 0;
// Decoder stats.
std::string decoder_implementation_name = "unknown";
absl::optional<std::string> decoder_implementation_name;
absl::optional<bool> power_efficient_decoder;
FrameCounts frame_counts;
int decode_ms = 0;

View file

@ -101,7 +101,7 @@ class VideoSendStream {
Stats();
~Stats();
std::string ToString(int64_t time_ms) const;
std::string encoder_implementation_name = "unknown";
absl::optional<std::string> encoder_implementation_name;
double input_frame_rate = 0;
int encode_frame_rate = 0;
int avg_encode_time_ms = 0;

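Both stats structs drop the "unknown" sentinel string in favor of absl::optional, so "not yet known" is distinguishable from a real implementation name. A minimal sketch of the consumer side:

#include <cstdio>
#include <string>

#include "absl/types/optional.h"

int main() {
  absl::optional<std::string> encoder_implementation_name;  // unset != "unknown"
  // Callers that still want the old sentinel can reintroduce it locally.
  std::printf("%s\n", encoder_implementation_name.value_or("unknown").c_str());
  encoder_implementation_name = "libvpx";
  std::printf("%s\n", encoder_implementation_name->c_str());
  return 0;
}
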
View file

@ -25,6 +25,7 @@
*/
#include <stdlib.h>
#include <limits.h>
#include "rtc_base/checks.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
@ -67,7 +68,8 @@ int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, size_t length) {
RTC_DCHECK_GT(length, 0);
for (i = 0; i < length; i++) {
absolute = abs((int)vector[i]);
absolute =
(vector[i] != INT_MIN) ? abs((int)vector[i]) : INT_MAX + (uint32_t)1;
if (absolute > maximum) {
maximum = absolute;
}

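The guarded form above fixes undefined behavior: abs(INT_MIN) overflows because -INT_MIN is not representable in int. A self-contained sketch of the same fix, assuming a 32-bit int:

#include <cassert>
#include <climits>
#include <cstdlib>

int main() {
  int v = INT_MIN;
  // Calling abs(v) here would be UB; the guarded form widens to unsigned,
  // where INT_MAX + 1 is well defined.
  unsigned int absolute =
      (v != INT_MIN) ? (unsigned int)abs(v) : (unsigned int)INT_MAX + 1u;
  assert(absolute == 2147483648u);  // 2^31 on platforms with 32-bit int
  return 0;
}
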
View file

@ -6,6 +6,7 @@ Date: 2018-06-19
License: Custom license
License File: LICENSE
Security Critical: yes
Shipped: yes
Description:
This is a package to calculate Discrete Fourier/Cosine/Sine Transforms of

View file

@ -6,6 +6,7 @@ Date: 2018-03-22
License: Custom license
License File: LICENSE
Security Critical: yes
Shipped: yes
Description:
Sqrt routine, originally was posted to the USENET group comp.sys.arm on

View file

@ -37,6 +37,23 @@ rtc_library("common_video") {
"video_frame_buffer_pool.cc",
]
if (rtc_use_h265) {
sources += [
"h265/h265_bitstream_parser.cc",
"h265/h265_bitstream_parser.h",
"h265/h265_common.cc",
"h265/h265_common.h",
"h265/h265_inline.cc",
"h265/h265_inline.h",
"h265/h265_pps_parser.cc",
"h265/h265_pps_parser.h",
"h265/h265_sps_parser.cc",
"h265/h265_sps_parser.h",
"h265/h265_vps_parser.cc",
"h265/h265_vps_parser.h",
]
}
deps = [
"../api:array_view",
"../api:make_ref_counted",
@ -71,6 +88,12 @@ rtc_library("common_video") {
"../system_wrappers:metrics",
"//third_party/libyuv",
]
if (rtc_use_h265) {
deps += [
"../rtc_base:compile_assert_c",
"../rtc_base/containers:flat_map",
]
}
absl_deps = [
"//third_party/abseil-cpp/absl/numeric:bits",
"//third_party/abseil-cpp/absl/types:optional",
@ -110,6 +133,15 @@ if (rtc_include_tests && !build_with_chromium) {
"video_frame_unittest.cc",
]
if (rtc_use_h265) {
sources += [
"h265/h265_bitstream_parser_unittest.cc",
"h265/h265_pps_parser_unittest.cc",
"h265/h265_sps_parser_unittest.cc",
"h265/h265_vps_parser_unittest.cc",
]
}
deps = [
":common_video",
"../api:scoped_refptr",

View file

@ -0,0 +1,544 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "common_video/h265/h265_bitstream_parser.h"
#include <stdlib.h>
#include <cmath>
#include <cstdint>
#include <vector>
#include "common_video/h265/h265_common.h"
#include "rtc_base/bit_buffer.h"
#include "rtc_base/bitstream_reader.h"
#include "rtc_base/logging.h"
#define IN_RANGE_OR_RETURN(val, min, max) \
do { \
if (!slice_reader.Ok() || (val) < (min) || (val) > (max)) { \
RTC_LOG(LS_WARNING) << "Error in stream: invalid value, expected " #val \
" to be" \
<< " in range [" << (min) << ":" << (max) << "]" \
<< " found " << (val) << " instead"; \
return kInvalidStream; \
} \
} while (0)
#define IN_RANGE_OR_RETURN_NULL(val, min, max) \
do { \
if (!slice_reader.Ok() || (val) < (min) || (val) > (max)) { \
RTC_LOG(LS_WARNING) << "Error in stream: invalid value, expected " #val \
" to be" \
<< " in range [" << (min) << ":" << (max) << "]" \
<< " found " << (val) << " instead"; \
return absl::nullopt; \
} \
} while (0)
#define IN_RANGE_OR_RETURN_VOID(val, min, max) \
do { \
if (!slice_reader.Ok() || (val) < (min) || (val) > (max)) { \
RTC_LOG(LS_WARNING) << "Error in stream: invalid value, expected " #val \
" to be" \
<< " in range [" << (min) << ":" << (max) << "]" \
<< " found " << (val) << " instead"; \
return; \
} \
} while (0)
#define TRUE_OR_RETURN(a) \
do { \
if (!slice_reader.Ok() || !(a)) { \
RTC_LOG(LS_WARNING) << "Error in stream: invalid value, expected " \
<< #a; \
return kInvalidStream; \
} \
} while (0)
namespace {
constexpr int kMaxAbsQpDeltaValue = 51;
constexpr int kMinQpValue = 0;
constexpr int kMaxQpValue = 51;
constexpr int kMaxRefIdxActive = 15;
} // namespace
namespace webrtc {
H265BitstreamParser::H265BitstreamParser() = default;
H265BitstreamParser::~H265BitstreamParser() = default;
// General note: this is based on the 08/2021 version of the H.265 standard,
// section 7.3.6.1. You can find it on this page:
// http://www.itu.int/rec/T-REC-H.265
H265BitstreamParser::Result H265BitstreamParser::ParseNonParameterSetNalu(
const uint8_t* source,
size_t source_length,
uint8_t nalu_type) {
last_slice_qp_delta_ = absl::nullopt;
last_slice_pps_id_ = absl::nullopt;
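// ParseRbsp strips the 0x03 emulation-prevention bytes inserted after
// 00 00 sequences, so the BitstreamReader below walks the raw RBSP that
// the spec describes.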
const std::vector<uint8_t> slice_rbsp =
H265::ParseRbsp(source, source_length);
if (slice_rbsp.size() < H265::kNaluHeaderSize)
return kInvalidStream;
BitstreamReader slice_reader(slice_rbsp);
slice_reader.ConsumeBits(H265::kNaluHeaderSize * 8);
// first_slice_segment_in_pic_flag: u(1)
bool first_slice_segment_in_pic_flag = slice_reader.Read<bool>();
bool irap_pic = (H265::NaluType::kBlaWLp <= nalu_type &&
nalu_type <= H265::NaluType::kRsvIrapVcl23);
if (irap_pic) {
// no_output_of_prior_pics_flag: u(1)
slice_reader.ConsumeBits(1);
}
// slice_pic_parameter_set_id: ue(v)
uint32_t pps_id = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN(pps_id, 0, 63);
const H265PpsParser::PpsState* pps = GetPPS(pps_id);
TRUE_OR_RETURN(pps);
const H265SpsParser::SpsState* sps = GetSPS(pps->sps_id);
TRUE_OR_RETURN(sps);
bool dependent_slice_segment_flag = false;
if (!first_slice_segment_in_pic_flag) {
if (pps->dependent_slice_segments_enabled_flag) {
// dependent_slice_segment_flag: u(1)
dependent_slice_segment_flag = slice_reader.Read<bool>();
}
// slice_segment_address: u(v)
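// Worked example (illustrative): a 1920x1080 stream with CtbSizeY = 64
// spans 30x17 = 510 CTBs, so the address below is read with
// Log2Ceiling(510) = 9 bits.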
int32_t log2_ctb_size_y = sps->log2_min_luma_coding_block_size_minus3 + 3 +
sps->log2_diff_max_min_luma_coding_block_size;
uint32_t ctb_size_y = 1 << log2_ctb_size_y;
uint32_t pic_width_in_ctbs_y = sps->pic_width_in_luma_samples / ctb_size_y;
if (sps->pic_width_in_luma_samples % ctb_size_y)
pic_width_in_ctbs_y++;
uint32_t pic_height_in_ctbs_y =
sps->pic_height_in_luma_samples / ctb_size_y;
if (sps->pic_height_in_luma_samples % ctb_size_y)
pic_height_in_ctbs_y++;
uint32_t slice_segment_address_bits =
H265::Log2Ceiling(pic_height_in_ctbs_y * pic_width_in_ctbs_y);
slice_reader.ConsumeBits(slice_segment_address_bits);
}
if (!dependent_slice_segment_flag) {
for (uint32_t i = 0; i < pps->num_extra_slice_header_bits; i++) {
// slice_reserved_flag: u(1)
slice_reader.ConsumeBits(1);
}
// slice_type: ue(v)
uint32_t slice_type = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN(slice_type, 0, 2);
if (pps->output_flag_present_flag) {
// pic_output_flag: u(1)
slice_reader.ConsumeBits(1);
}
if (sps->separate_colour_plane_flag) {
// colour_plane_id: u(2)
slice_reader.ConsumeBits(2);
}
uint32_t num_long_term_sps = 0;
uint32_t num_long_term_pics = 0;
std::vector<bool> used_by_curr_pic_lt_flag;
bool short_term_ref_pic_set_sps_flag = false;
uint32_t short_term_ref_pic_set_idx = 0;
H265SpsParser::ShortTermRefPicSet short_term_ref_pic_set;
bool slice_temporal_mvp_enabled_flag = false;
if (nalu_type != H265::NaluType::kIdrWRadl &&
nalu_type != H265::NaluType::kIdrNLp) {
// slice_pic_order_cnt_lsb: u(v)
uint32_t slice_pic_order_cnt_lsb_bits =
sps->log2_max_pic_order_cnt_lsb_minus4 + 4;
slice_reader.ConsumeBits(slice_pic_order_cnt_lsb_bits);
// short_term_ref_pic_set_sps_flag: u(1)
short_term_ref_pic_set_sps_flag = slice_reader.Read<bool>();
if (!short_term_ref_pic_set_sps_flag) {
absl::optional<H265SpsParser::ShortTermRefPicSet> ref_pic_set =
H265SpsParser::ParseShortTermRefPicSet(
sps->num_short_term_ref_pic_sets,
sps->num_short_term_ref_pic_sets, sps->short_term_ref_pic_set,
sps->sps_max_dec_pic_buffering_minus1
[sps->sps_max_sub_layers_minus1],
slice_reader);
TRUE_OR_RETURN(ref_pic_set);
short_term_ref_pic_set = *ref_pic_set;
} else if (sps->num_short_term_ref_pic_sets > 1) {
// short_term_ref_pic_set_idx: u(v)
uint32_t short_term_ref_pic_set_idx_bits =
H265::Log2Ceiling(sps->num_short_term_ref_pic_sets);
if ((1 << short_term_ref_pic_set_idx_bits) <
sps->num_short_term_ref_pic_sets) {
short_term_ref_pic_set_idx_bits++;
}
if (short_term_ref_pic_set_idx_bits > 0) {
short_term_ref_pic_set_idx =
slice_reader.ReadBits(short_term_ref_pic_set_idx_bits);
IN_RANGE_OR_RETURN(short_term_ref_pic_set_idx, 0,
sps->num_short_term_ref_pic_sets - 1);
}
}
if (sps->long_term_ref_pics_present_flag) {
if (sps->num_long_term_ref_pics_sps > 0) {
// num_long_term_sps: ue(v)
num_long_term_sps = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN(num_long_term_sps, 0,
sps->num_long_term_ref_pics_sps);
}
// num_long_term_pics: ue(v)
num_long_term_pics = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN(num_long_term_pics, 0,
kMaxLongTermRefPicSets - num_long_term_sps);
used_by_curr_pic_lt_flag.resize(num_long_term_sps + num_long_term_pics,
0);
for (uint32_t i = 0; i < num_long_term_sps + num_long_term_pics; i++) {
if (i < num_long_term_sps) {
uint32_t lt_idx_sps = 0;
if (sps->num_long_term_ref_pics_sps > 1) {
// lt_idx_sps: u(v)
uint32_t lt_idx_sps_bits =
H265::Log2Ceiling(sps->num_long_term_ref_pics_sps);
lt_idx_sps = slice_reader.ReadBits(lt_idx_sps_bits);
IN_RANGE_OR_RETURN(lt_idx_sps, 0,
sps->num_long_term_ref_pics_sps - 1);
}
used_by_curr_pic_lt_flag[i] =
sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
} else {
// poc_lsb_lt: u(v)
uint32_t poc_lsb_lt_bits =
sps->log2_max_pic_order_cnt_lsb_minus4 + 4;
slice_reader.ConsumeBits(poc_lsb_lt_bits);
// used_by_curr_pic_lt_flag: u(1)
used_by_curr_pic_lt_flag[i] = slice_reader.Read<bool>();
}
// delta_poc_msb_present_flag: u(1)
bool delta_poc_msb_present_flag = slice_reader.Read<bool>();
if (delta_poc_msb_present_flag) {
// delta_poc_msb_cycle_lt: ue(v)
int delta_poc_msb_cycle_lt = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN(
delta_poc_msb_cycle_lt, 0,
std::pow(2, 32 - sps->log2_max_pic_order_cnt_lsb_minus4 - 4));
}
}
}
if (sps->sps_temporal_mvp_enabled_flag) {
// slice_temporal_mvp_enabled_flag: u(1)
slice_temporal_mvp_enabled_flag = slice_reader.Read<bool>();
}
}
if (sps->sample_adaptive_offset_enabled_flag) {
// slice_sao_luma_flag: u(1)
slice_reader.ConsumeBits(1);
uint32_t chroma_array_type =
sps->separate_colour_plane_flag == 0 ? sps->chroma_format_idc : 0;
if (chroma_array_type != 0) {
// slice_sao_chroma_flag: u(1)
slice_reader.ConsumeBits(1);
}
}
if (slice_type == H265::SliceType::kP ||
slice_type == H265::SliceType::kB) {
// num_ref_idx_active_override_flag: u(1)
bool num_ref_idx_active_override_flag = slice_reader.Read<bool>();
uint32_t num_ref_idx_l0_active_minus1 =
pps->num_ref_idx_l0_default_active_minus1;
uint32_t num_ref_idx_l1_active_minus1 =
pps->num_ref_idx_l1_default_active_minus1;
if (num_ref_idx_active_override_flag) {
// num_ref_idx_l0_active_minus1: ue(v)
num_ref_idx_l0_active_minus1 = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN(num_ref_idx_l0_active_minus1, 0,
kMaxRefIdxActive - 1);
if (slice_type == H265::SliceType::kB) {
// num_ref_idx_l1_active_minus1: ue(v)
num_ref_idx_l1_active_minus1 = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN(num_ref_idx_l1_active_minus1, 0,
kMaxRefIdxActive - 1);
}
}
uint32_t num_pic_total_curr = 0;
uint32_t curr_sps_idx;
if (short_term_ref_pic_set_sps_flag) {
curr_sps_idx = short_term_ref_pic_set_idx;
} else {
curr_sps_idx = sps->num_short_term_ref_pic_sets;
}
if (sps->short_term_ref_pic_set.size() <= curr_sps_idx) {
TRUE_OR_RETURN(!(curr_sps_idx != 0 || short_term_ref_pic_set_sps_flag));
}
const H265SpsParser::ShortTermRefPicSet* ref_pic_set;
if (curr_sps_idx < sps->short_term_ref_pic_set.size()) {
ref_pic_set = &(sps->short_term_ref_pic_set[curr_sps_idx]);
} else {
ref_pic_set = &short_term_ref_pic_set;
}
// Equation 7-57
IN_RANGE_OR_RETURN(ref_pic_set->num_negative_pics, 0,
kMaxShortTermRefPicSets);
IN_RANGE_OR_RETURN(ref_pic_set->num_positive_pics, 0,
kMaxShortTermRefPicSets);
for (uint32_t i = 0; i < ref_pic_set->num_negative_pics; i++) {
if (ref_pic_set->used_by_curr_pic_s0[i]) {
num_pic_total_curr++;
}
}
for (uint32_t i = 0; i < ref_pic_set->num_positive_pics; i++) {
if (ref_pic_set->used_by_curr_pic_s1[i]) {
num_pic_total_curr++;
}
}
for (uint32_t i = 0; i < num_long_term_sps + num_long_term_pics; i++) {
if (used_by_curr_pic_lt_flag[i]) {
num_pic_total_curr++;
}
}
if (pps->lists_modification_present_flag && num_pic_total_curr > 1) {
// ref_pic_lists_modification()
uint32_t list_entry_bits = H265::Log2Ceiling(num_pic_total_curr);
if ((1 << list_entry_bits) < num_pic_total_curr) {
list_entry_bits++;
}
// ref_pic_list_modification_flag_l0: u(1)
bool ref_pic_list_modification_flag_l0 = slice_reader.Read<bool>();
if (ref_pic_list_modification_flag_l0) {
for (uint32_t i = 0; i < num_ref_idx_l0_active_minus1; i++) {
// list_entry_l0: u(v)
slice_reader.ConsumeBits(list_entry_bits);
}
}
if (slice_type == H265::SliceType::kB) {
// ref_pic_list_modification_flag_l1: u(1)
bool ref_pic_list_modification_flag_l1 = slice_reader.Read<bool>();
if (ref_pic_list_modification_flag_l1) {
for (uint32_t i = 0; i < num_ref_idx_l1_active_minus1; i++) {
// list_entry_l1: u(v)
slice_reader.ConsumeBits(list_entry_bits);
}
}
}
}
if (slice_type == H265::SliceType::kB) {
// mvd_l1_zero_flag: u(1)
slice_reader.ConsumeBits(1);
}
if (pps->cabac_init_present_flag) {
// cabac_init_flag: u(1)
slice_reader.ConsumeBits(1);
}
if (slice_temporal_mvp_enabled_flag) {
bool collocated_from_l0_flag = false;
if (slice_type == H265::SliceType::kB) {
// collocated_from_l0_flag: u(1)
collocated_from_l0_flag = slice_reader.Read<bool>();
}
if ((collocated_from_l0_flag && num_ref_idx_l0_active_minus1 > 0) ||
(!collocated_from_l0_flag && num_ref_idx_l1_active_minus1 > 0)) {
// collocated_ref_idx: ue(v)
uint32_t collocated_ref_idx = slice_reader.ReadExponentialGolomb();
if ((slice_type == H265::SliceType::kP ||
slice_type == H265::SliceType::kB) &&
collocated_from_l0_flag) {
IN_RANGE_OR_RETURN(collocated_ref_idx, 0,
num_ref_idx_l0_active_minus1);
}
if (slice_type == H265::SliceType::kB && !collocated_from_l0_flag) {
IN_RANGE_OR_RETURN(collocated_ref_idx, 0,
num_ref_idx_l1_active_minus1);
}
}
}
if (!slice_reader.Ok() ||
((pps->weighted_pred_flag && slice_type == H265::SliceType::kP) ||
(pps->weighted_bipred_flag && slice_type == H265::SliceType::kB))) {
// pred_weight_table()
RTC_LOG(LS_ERROR) << "Streams with pred_weight_table unsupported.";
return kUnsupportedStream;
}
// five_minus_max_num_merge_cand: ue(v)
uint32_t five_minus_max_num_merge_cand =
slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN(5 - five_minus_max_num_merge_cand, 1, 5);
}
}
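// QP derivation, H.265 equation 7-54: SliceQpY = 26 + init_qp_minus26 +
// slice_qp_delta, constrained to [-QpBdOffsetY, 51]. Worked example
// (illustrative): 8-bit content (QpBdOffsetY = 0) with init_qp_minus26 = 0
// and slice_qp_delta = 5 gives SliceQpY = 31.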
// slice_qp_delta: se(v)
int32_t last_slice_qp_delta = slice_reader.ReadSignedExponentialGolomb();
if (!slice_reader.Ok() || (abs(last_slice_qp_delta) > kMaxAbsQpDeltaValue)) {
// Something has gone wrong, and the parsed value is invalid.
RTC_LOG(LS_ERROR) << "Parsed QP value out of range.";
return kInvalidStream;
}
// 7-54 in H265 spec.
IN_RANGE_OR_RETURN(26 + pps->init_qp_minus26 + last_slice_qp_delta,
-pps->qp_bd_offset_y, 51);
last_slice_qp_delta_ = last_slice_qp_delta;
last_slice_pps_id_ = pps_id;
if (!slice_reader.Ok()) {
return kInvalidStream;
}
return kOk;
}
const H265PpsParser::PpsState* H265BitstreamParser::GetPPS(uint32_t id) const {
auto it = pps_.find(id);
if (it == pps_.end()) {
RTC_LOG(LS_WARNING) << "Requested a nonexistent PPS id " << id;
return nullptr;
}
return &it->second;
}
const H265SpsParser::SpsState* H265BitstreamParser::GetSPS(uint32_t id) const {
auto it = sps_.find(id);
if (it == sps_.end()) {
RTC_LOG(LS_WARNING) << "Requested a nonexistent SPS id " << id;
return nullptr;
}
return &it->second;
}
void H265BitstreamParser::ParseSlice(const uint8_t* slice, size_t length) {
H265::NaluType nalu_type = H265::ParseNaluType(slice[0]);
switch (nalu_type) {
case H265::NaluType::kVps: {
absl::optional<H265VpsParser::VpsState> vps_state;
if (length >= H265::kNaluHeaderSize) {
vps_state = H265VpsParser::ParseVps(slice + H265::kNaluHeaderSize,
length - H265::kNaluHeaderSize);
}
if (!vps_state) {
RTC_LOG(LS_WARNING) << "Unable to parse VPS from H265 bitstream.";
} else {
vps_[vps_state->id] = *vps_state;
}
break;
}
case H265::NaluType::kSps: {
absl::optional<H265SpsParser::SpsState> sps_state;
if (length >= H265::kNaluHeaderSize) {
sps_state = H265SpsParser::ParseSps(slice + H265::kNaluHeaderSize,
length - H265::kNaluHeaderSize);
}
if (!sps_state) {
RTC_LOG(LS_WARNING) << "Unable to parse SPS from H265 bitstream.";
} else {
sps_[sps_state->sps_id] = *sps_state;
}
break;
}
case H265::NaluType::kPps: {
absl::optional<H265PpsParser::PpsState> pps_state;
if (length >= H265::kNaluHeaderSize) {
std::vector<uint8_t> unpacked_buffer = H265::ParseRbsp(
slice + H265::kNaluHeaderSize, length - H265::kNaluHeaderSize);
BitstreamReader slice_reader(unpacked_buffer);
// pic_parameter_set_id: ue(v)
uint32_t pps_id = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN_VOID(pps_id, 0, 63);
// seq_parameter_set_id: ue(v)
uint32_t sps_id = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN_VOID(sps_id, 0, 15);
const H265SpsParser::SpsState* sps = GetSPS(sps_id);
pps_state = H265PpsParser::ParsePps(
slice + H265::kNaluHeaderSize, length - H265::kNaluHeaderSize, sps);
}
if (!pps_state) {
RTC_LOG(LS_WARNING) << "Unable to parse PPS from H265 bitstream.";
} else {
pps_[pps_state->pps_id] = *pps_state;
}
break;
}
case H265::NaluType::kAud:
case H265::NaluType::kPrefixSei:
case H265::NaluType::kSuffixSei:
case H265::NaluType::kAP:
case H265::NaluType::kFU:
break;
default:
Result res = ParseNonParameterSetNalu(slice, length, nalu_type);
if (res != kOk) {
RTC_LOG(LS_INFO) << "Failed to parse bitstream. Error: " << res;
}
break;
}
}
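// Reads only the leading, fixed-order slice header fields, so the PPS id
// can be recovered without any cached VPS/SPS/PPS state.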
absl::optional<uint32_t>
H265BitstreamParser::ParsePpsIdFromSliceSegmentLayerRbsp(const uint8_t* data,
size_t length,
uint8_t nalu_type) {
std::vector<uint8_t> unpacked_buffer = H265::ParseRbsp(data, length);
BitstreamReader slice_reader(unpacked_buffer);
// first_slice_segment_in_pic_flag: u(1)
slice_reader.ConsumeBits(1);
if (!slice_reader.Ok()) {
return absl::nullopt;
}
if (nalu_type >= H265::NaluType::kBlaWLp &&
nalu_type <= H265::NaluType::kRsvIrapVcl23) {
// no_output_of_prior_pics_flag: u(1)
slice_reader.ConsumeBits(1);
}
// slice_pic_parameter_set_id: ue(v)
uint32_t slice_pic_parameter_set_id = slice_reader.ReadExponentialGolomb();
IN_RANGE_OR_RETURN_NULL(slice_pic_parameter_set_id, 0, 63);
if (!slice_reader.Ok()) {
return absl::nullopt;
}
return slice_pic_parameter_set_id;
}
void H265BitstreamParser::ParseBitstream(
rtc::ArrayView<const uint8_t> bitstream) {
std::vector<H265::NaluIndex> nalu_indices =
H265::FindNaluIndices(bitstream.data(), bitstream.size());
for (const H265::NaluIndex& index : nalu_indices)
ParseSlice(&bitstream[index.payload_start_offset], index.payload_size);
}
absl::optional<int> H265BitstreamParser::GetLastSliceQp() const {
if (!last_slice_qp_delta_ || !last_slice_pps_id_) {
return absl::nullopt;
}
const H265PpsParser::PpsState* pps = GetPPS(*last_slice_pps_id_);
if (!pps)
return absl::nullopt;
const int parsed_qp = 26 + pps->init_qp_minus26 + *last_slice_qp_delta_;
if (parsed_qp < kMinQpValue || parsed_qp > kMaxQpValue) {
RTC_LOG(LS_ERROR) << "Parsed invalid QP from bitstream.";
return absl::nullopt;
}
return parsed_qp;
}
} // namespace webrtc

View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef COMMON_VIDEO_H265_H265_BITSTREAM_PARSER_H_
#define COMMON_VIDEO_H265_H265_BITSTREAM_PARSER_H_
#include <stddef.h>
#include <stdint.h>
#include <vector>
#include "absl/types/optional.h"
#include "api/video_codecs/bitstream_parser.h"
#include "common_video/h265/h265_pps_parser.h"
#include "common_video/h265/h265_sps_parser.h"
#include "common_video/h265/h265_vps_parser.h"
#include "rtc_base/containers/flat_map.h"
namespace webrtc {
// Stateful H265 bitstream parser (due to VPS/SPS/PPS). Used to parse out QP
// values from the bitstream.
class H265BitstreamParser : public BitstreamParser {
public:
H265BitstreamParser();
~H265BitstreamParser() override;
// New interface.
void ParseBitstream(rtc::ArrayView<const uint8_t> bitstream) override;
absl::optional<int> GetLastSliceQp() const override;
static absl::optional<uint32_t> ParsePpsIdFromSliceSegmentLayerRbsp(
const uint8_t* data,
size_t length,
uint8_t nalu_type);
protected:
enum Result {
kOk,
kInvalidStream,
kUnsupportedStream,
};
void ParseSlice(const uint8_t* slice, size_t length);
Result ParseNonParameterSetNalu(const uint8_t* source,
size_t source_length,
uint8_t nalu_type);
const H265PpsParser::PpsState* GetPPS(uint32_t id) const;
const H265SpsParser::SpsState* GetSPS(uint32_t id) const;
// VPS/SPS/PPS state, updated when parsing new VPS/SPS/PPS, used to parse
// slices.
flat_map<uint32_t, H265VpsParser::VpsState> vps_;
flat_map<uint32_t, H265SpsParser::SpsState> sps_;
flat_map<uint32_t, H265PpsParser::PpsState> pps_;
// Last parsed slice QP.
absl::optional<int32_t> last_slice_qp_delta_;
absl::optional<uint32_t> last_slice_pps_id_;
};
} // namespace webrtc
#endif // COMMON_VIDEO_H265_H265_BITSTREAM_PARSER_H_
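A minimal usage sketch for the parser declared above, assuming an Annex B buffer that carries VPS/SPS/PPS ahead of the slices (LogSliceQp is an illustrative name, not part of this diff):

#include <cstdint>

#include "api/array_view.h"
#include "common_video/h265/h265_bitstream_parser.h"
#include "rtc_base/logging.h"

void LogSliceQp(rtc::ArrayView<const uint8_t> annex_b_buffer) {
  webrtc::H265BitstreamParser parser;
  // Caches any VPS/SPS/PPS in the buffer, then parses slice headers
  // against that state.
  parser.ParseBitstream(annex_b_buffer);
  if (absl::optional<int> qp = parser.GetLastSliceQp()) {
    RTC_LOG(LS_INFO) << "Last slice QP: " << *qp;
  }
}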

Some files were not shown because too many files have changed in this diff.