Move media configuration classes out of PeerConnectionE2EQualityTestFixture.

The goal is to remove the dependency between PeerConfigurerImpl and PeerConnectionE2EQualityTestFixture so that PeerConfigurerImpl can be used in the PeerConnectionE2EQualityTestFixture API.

Change-Id: I29ae44b9d0e39075d0c395ff9d9f8d313be12176
Bug: webrtc:14627
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/281740
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Artem Titov <titovartem@webrtc.org>
Commit-Queue: Jeremy Leconte <jleconte@google.com>
Cr-Commit-Position: refs/heads/main@{#38560}
Jeremy Leconte 2022-11-05 12:56:07 +01:00 committed by WebRTC LUCI CQ
parent d34a7ab50d
commit e91d4bc517
7 changed files with 805 additions and 669 deletions


@@ -507,10 +507,7 @@ rtc_source_set("peer_network_dependencies") {
rtc_source_set("peer_connection_quality_test_fixture_api") {
visibility = [ "*" ]
testonly = true
sources = [
"test/peerconnection_quality_test_fixture.cc",
"test/peerconnection_quality_test_fixture.h",
]
sources = [ "test/peerconnection_quality_test_fixture.h" ]
deps = [
":array_view",
@@ -540,6 +537,8 @@ rtc_source_set("peer_connection_quality_test_fixture_api") {
"audio:audio_mixer_api",
"rtc_event_log",
"task_queue",
"test/pclf:media_configuration",
"test/pclf:media_quality_test_params",
"test/video:video_frame_writer",
"transport:network_control",
"units:time_delta",
@@ -1325,6 +1324,7 @@ if (rtc_include_tests) {
"../test:rtc_expect_death",
"../test:test_support",
"task_queue:task_queue_default_factory_unittests",
"test/pclf:media_configuration",
"test/video:video_frame_writer",
"transport:field_trial_based_config",
"units:time_delta",


@@ -8,18 +8,66 @@
import("../../../webrtc.gni")
rtc_source_set("media_configuration") {
visibility = [ "*" ]
testonly = true
sources = [
"media_configuration.cc",
"media_configuration.h",
]
deps = [
"../..:array_view",
"../..:audio_options_api",
"../..:audio_quality_analyzer_api",
"../..:callfactory_api",
"../..:fec_controller_api",
"../..:frame_generator_api",
"../..:function_view",
"../..:libjingle_peerconnection_api",
"../..:media_stream_interface",
"../..:packet_socket_factory",
"../..:peer_network_dependencies",
"../..:rtp_parameters",
"../..:simulated_network_api",
"../..:stats_observer_interface",
"../..:track_id_stream_info_map",
"../..:video_quality_analyzer_api",
"../../../modules/audio_processing:api",
"../../../rtc_base:checks",
"../../../rtc_base:rtc_base",
"../../../rtc_base:stringutils",
"../../../rtc_base:threading",
"../../../test:fileutils",
"../../../test:video_test_support",
"../../../test/pc/e2e:video_dumping",
"../../audio:audio_mixer_api",
"../../rtc_event_log",
"../../task_queue",
"../../transport:network_control",
"../../units:time_delta",
"../../video_codecs:video_codecs_api",
"../video:video_frame_writer",
]
absl_deps = [
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("media_quality_test_params") {
visibility = [ "*" ]
testonly = true
sources = [ "media_quality_test_params.h" ]
deps = [
":media_configuration",
"../../../api:callfactory_api",
"../../../api:fec_controller_api",
"../../../api:field_trials_view",
"../../../api:libjingle_peerconnection_api",
"../../../api:packet_socket_factory",
"../../../api:peer_connection_quality_test_fixture_api",
"../../../api/audio:audio_mixer_api",
"../../../api/rtc_event_log",
"../../../api/task_queue",


@@ -8,9 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/test/peerconnection_quality_test_fixture.h"
#include "api/test/pclf/media_configuration.h"
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
@@ -20,26 +21,17 @@
#include "rtc_base/strings/string_builder.h"
#include "test/pc/e2e/analyzer/video/video_dumping.h"
#include "test/testsupport/file_utils.h"
#include "test/testsupport/video_frame_writer.h"
namespace webrtc {
namespace webrtc_pc_e2e {
namespace {
using VideoCodecConfig = ::webrtc::webrtc_pc_e2e::
PeerConnectionE2EQualityTestFixture::VideoCodecConfig;
using VideoSubscription = ::webrtc::webrtc_pc_e2e::
PeerConnectionE2EQualityTestFixture::VideoSubscription;
using VideoResolution = ::webrtc::webrtc_pc_e2e::
PeerConnectionE2EQualityTestFixture::VideoResolution;
std::string SpecToString(
PeerConnectionE2EQualityTestFixture::VideoResolution::VideoResolution::Spec
spec) {
std::string SpecToString(VideoResolution::Spec spec) {
switch (spec) {
case PeerConnectionE2EQualityTestFixture::VideoResolution::Spec::kNone:
case VideoResolution::Spec::kNone:
return "None";
case PeerConnectionE2EQualityTestFixture::VideoResolution::Spec::
kMaxFromSender:
case VideoResolution::Spec::kMaxFromSender:
return "MaxFromSender";
}
}
@@ -52,16 +44,42 @@ void AppendResolution(const VideoResolution& resolution,
} // namespace
PeerConnectionE2EQualityTestFixture::VideoResolution::VideoResolution(
size_t width,
size_t height,
int32_t fps)
ScrollingParams::ScrollingParams(TimeDelta duration,
size_t source_width,
size_t source_height)
: duration(duration),
source_width(source_width),
source_height(source_height) {
RTC_CHECK_GT(duration.ms(), 0);
}
ScreenShareConfig::ScreenShareConfig(TimeDelta slide_change_interval)
: slide_change_interval(slide_change_interval) {
RTC_CHECK_GT(slide_change_interval.ms(), 0);
}
VideoSimulcastConfig::VideoSimulcastConfig(int simulcast_streams_count)
: simulcast_streams_count(simulcast_streams_count) {
RTC_CHECK_GT(simulcast_streams_count, 1);
}
EmulatedSFUConfig::EmulatedSFUConfig(int target_layer_index)
: target_layer_index(target_layer_index) {
RTC_CHECK_GE(target_layer_index, 0);
}
EmulatedSFUConfig::EmulatedSFUConfig(absl::optional<int> target_layer_index,
absl::optional<int> target_temporal_index)
: target_layer_index(target_layer_index),
target_temporal_index(target_temporal_index) {
RTC_CHECK_GE(target_temporal_index.value_or(0), 0);
if (target_temporal_index)
RTC_CHECK_GE(*target_temporal_index, 0);
}
VideoResolution::VideoResolution(size_t width, size_t height, int32_t fps)
: width_(width), height_(height), fps_(fps), spec_(Spec::kNone) {}
PeerConnectionE2EQualityTestFixture::VideoResolution::VideoResolution(Spec spec)
VideoResolution::VideoResolution(Spec spec)
: width_(0), height_(0), fps_(0), spec_(spec) {}
bool PeerConnectionE2EQualityTestFixture::VideoResolution::operator==(
const VideoResolution& other) const {
bool VideoResolution::operator==(const VideoResolution& other) const {
if (spec_ != Spec::kNone && spec_ == other.spec_) {
// If there is some particular spec set, then it doesn't matter what
// values we have in other fields.
@@ -70,23 +88,153 @@ bool PeerConnectionE2EQualityTestFixture::VideoResolution::operator==(
return width_ == other.width_ && height_ == other.height_ &&
fps_ == other.fps_ && spec_ == other.spec_;
}
bool VideoResolution::operator!=(const VideoResolution& other) const {
return !(*this == other);
}
std::string PeerConnectionE2EQualityTestFixture::VideoResolution::ToString()
const {
bool VideoResolution::IsRegular() const {
return spec_ == Spec::kNone;
}
std::string VideoResolution::ToString() const {
rtc::StringBuilder out;
out << "{ width=" << width_ << ", height=" << height_ << ", fps=" << fps_
<< ", spec=" << SpecToString(spec_) << " }";
return out.Release();
}
bool PeerConnectionE2EQualityTestFixture::VideoSubscription::operator==(
const VideoSubscription& other) const {
return default_resolution_ == other.default_resolution_ &&
peers_resolution_ == other.peers_resolution_;
VideoDumpOptions::VideoDumpOptions(
absl::string_view output_directory,
int sampling_modulo,
bool export_frame_ids,
std::function<std::unique_ptr<test::VideoFrameWriter>(
absl::string_view file_name_prefix,
const VideoResolution& resolution)> video_frame_writer_factory)
: output_directory_(output_directory),
sampling_modulo_(sampling_modulo),
export_frame_ids_(export_frame_ids),
video_frame_writer_factory_(video_frame_writer_factory) {
RTC_CHECK_GT(sampling_modulo, 0);
}
absl::optional<PeerConnectionE2EQualityTestFixture::VideoResolution>
PeerConnectionE2EQualityTestFixture::VideoSubscription::GetMaxResolution(
VideoDumpOptions::VideoDumpOptions(absl::string_view output_directory,
bool export_frame_ids)
: VideoDumpOptions(output_directory,
kDefaultSamplingModulo,
export_frame_ids) {}
std::unique_ptr<test::VideoFrameWriter>
VideoDumpOptions::CreateInputDumpVideoFrameWriter(
absl::string_view stream_label,
const VideoResolution& resolution) const {
std::unique_ptr<test::VideoFrameWriter> writer = video_frame_writer_factory_(
GetInputDumpFileName(stream_label, resolution), resolution);
absl::optional<std::string> frame_ids_file =
GetInputFrameIdsDumpFileName(stream_label, resolution);
if (frame_ids_file.has_value()) {
writer = CreateVideoFrameWithIdsWriter(std::move(writer), *frame_ids_file);
}
return writer;
}
std::unique_ptr<test::VideoFrameWriter>
VideoDumpOptions::CreateOutputDumpVideoFrameWriter(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const {
std::unique_ptr<test::VideoFrameWriter> writer = video_frame_writer_factory_(
GetOutputDumpFileName(stream_label, receiver, resolution), resolution);
absl::optional<std::string> frame_ids_file =
GetOutputFrameIdsDumpFileName(stream_label, receiver, resolution);
if (frame_ids_file.has_value()) {
writer = CreateVideoFrameWithIdsWriter(std::move(writer), *frame_ids_file);
}
return writer;
}
std::unique_ptr<test::VideoFrameWriter>
VideoDumpOptions::Y4mVideoFrameWriterFactory(
absl::string_view file_name_prefix,
const VideoResolution& resolution) {
return std::make_unique<test::Y4mVideoFrameWriterImpl>(
std::string(file_name_prefix) + ".y4m", resolution.width(),
resolution.height(), resolution.fps());
}
std::string VideoDumpOptions::GetInputDumpFileName(
absl::string_view stream_label,
const VideoResolution& resolution) const {
rtc::StringBuilder file_name;
file_name << stream_label;
AppendResolution(resolution, file_name);
return test::JoinFilename(output_directory_, file_name.Release());
}
absl::optional<std::string> VideoDumpOptions::GetInputFrameIdsDumpFileName(
absl::string_view stream_label,
const VideoResolution& resolution) const {
if (!export_frame_ids_) {
return absl::nullopt;
}
return GetInputDumpFileName(stream_label, resolution) + ".frame_ids.txt";
}
std::string VideoDumpOptions::GetOutputDumpFileName(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const {
rtc::StringBuilder file_name;
file_name << stream_label << "_" << receiver;
AppendResolution(resolution, file_name);
return test::JoinFilename(output_directory_, file_name.Release());
}
absl::optional<std::string> VideoDumpOptions::GetOutputFrameIdsDumpFileName(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const {
if (!export_frame_ids_) {
return absl::nullopt;
}
return GetOutputDumpFileName(stream_label, receiver, resolution) +
".frame_ids.txt";
}
std::string VideoDumpOptions::ToString() const {
rtc::StringBuilder out;
out << "{ output_directory_=" << output_directory_
<< ", sampling_modulo_=" << sampling_modulo_
<< ", export_frame_ids_=" << export_frame_ids_ << " }";
return out.Release();
}
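
For orientation, an illustrative sketch of the naming scheme implemented by the helpers above (directory, stream label and receiver name are made-up; the exact resolution suffix comes from AppendResolution, whose body is outside this hunk):

// Illustrative only, not part of this change.
webrtc::webrtc_pc_e2e::VideoDumpOptions options("/tmp/dumps",
                                                /*export_frame_ids=*/true);
// CreateOutputDumpVideoFrameWriter("alice_video", "bob",
//                                  VideoResolution(1280, 720, 30))
// creates a writer targeting roughly
// /tmp/dumps/alice_video_bob_1280x720_30.y4m (".y4m" is appended by the Y4M
// writer factory) plus a matching ".frame_ids.txt" file with one frame id
// per line.
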
VideoConfig::VideoConfig(const VideoResolution& resolution)
: width(resolution.width()),
height(resolution.height()),
fps(resolution.fps()) {
RTC_CHECK(resolution.IsRegular());
}
VideoConfig::VideoConfig(size_t width, size_t height, int32_t fps)
: width(width), height(height), fps(fps) {}
VideoConfig::VideoConfig(std::string stream_label,
size_t width,
size_t height,
int32_t fps)
: width(width),
height(height),
fps(fps),
stream_label(std::move(stream_label)) {}
AudioConfig::AudioConfig(std::string stream_label)
: stream_label(std::move(stream_label)) {}
VideoCodecConfig::VideoCodecConfig(std::string name)
: name(std::move(name)), required_params() {}
VideoCodecConfig::VideoCodecConfig(
std::string name,
std::map<std::string, std::string> required_params)
: name(std::move(name)), required_params(std::move(required_params)) {}
absl::optional<VideoResolution> VideoSubscription::GetMaxResolution(
rtc::ArrayView<const VideoConfig> video_configs) {
std::vector<VideoResolution> resolutions;
for (const auto& video_config : video_configs) {
@@ -95,8 +243,7 @@ PeerConnectionE2EQualityTestFixture::VideoSubscription::GetMaxResolution(
return GetMaxResolution(resolutions);
}
absl::optional<PeerConnectionE2EQualityTestFixture::VideoResolution>
PeerConnectionE2EQualityTestFixture::VideoSubscription::GetMaxResolution(
absl::optional<VideoResolution> VideoSubscription::GetMaxResolution(
rtc::ArrayView<const VideoResolution> resolutions) {
if (resolutions.empty()) {
return absl::nullopt;
@@ -117,8 +264,46 @@ PeerConnectionE2EQualityTestFixture::VideoSubscription::GetMaxResolution(
return max_resolution;
}
std::string PeerConnectionE2EQualityTestFixture::VideoSubscription::ToString()
const {
bool VideoSubscription::operator==(const VideoSubscription& other) const {
return default_resolution_ == other.default_resolution_ &&
peers_resolution_ == other.peers_resolution_;
}
bool VideoSubscription::operator!=(const VideoSubscription& other) const {
return !(*this == other);
}
VideoSubscription& VideoSubscription::SubscribeToPeer(
absl::string_view peer_name,
VideoResolution resolution) {
peers_resolution_[std::string(peer_name)] = resolution;
return *this;
}
VideoSubscription& VideoSubscription::SubscribeToAllPeers(
VideoResolution resolution) {
default_resolution_ = resolution;
return *this;
}
absl::optional<VideoResolution> VideoSubscription::GetResolutionForPeer(
absl::string_view peer_name) const {
auto it = peers_resolution_.find(std::string(peer_name));
if (it == peers_resolution_.end()) {
return default_resolution_;
}
return it->second;
}
std::vector<std::string> VideoSubscription::GetSubscribedPeers() const {
std::vector<std::string> subscribed_streams;
subscribed_streams.reserve(peers_resolution_.size());
for (const auto& entry : peers_resolution_) {
subscribed_streams.push_back(entry.first);
}
return subscribed_streams;
}
std::string VideoSubscription::ToString() const {
rtc::StringBuilder out;
out << "{ default_resolution_=[";
if (default_resolution_.has_value()) {
@@ -133,125 +318,5 @@ std::string PeerConnectionE2EQualityTestFixture::VideoSubscription::ToString()
out << "} }";
return out.Release();
}
PeerConnectionE2EQualityTestFixture::VideoDumpOptions::VideoDumpOptions(
absl::string_view output_directory,
int sampling_modulo,
bool export_frame_ids,
std::function<std::unique_ptr<test::VideoFrameWriter>(
absl::string_view file_name_prefix,
const VideoResolution& resolution)> video_frame_writer_factory)
: output_directory_(output_directory),
sampling_modulo_(sampling_modulo),
export_frame_ids_(export_frame_ids),
video_frame_writer_factory_(video_frame_writer_factory) {
RTC_CHECK_GT(sampling_modulo, 0);
}
PeerConnectionE2EQualityTestFixture::VideoDumpOptions::VideoDumpOptions(
absl::string_view output_directory,
bool export_frame_ids)
: VideoDumpOptions(output_directory,
kDefaultSamplingModulo,
export_frame_ids) {}
std::unique_ptr<test::VideoFrameWriter> PeerConnectionE2EQualityTestFixture::
VideoDumpOptions::CreateInputDumpVideoFrameWriter(
absl::string_view stream_label,
const VideoResolution& resolution) const {
std::unique_ptr<test::VideoFrameWriter> writer = video_frame_writer_factory_(
GetInputDumpFileName(stream_label, resolution), resolution);
absl::optional<std::string> frame_ids_file =
GetInputFrameIdsDumpFileName(stream_label, resolution);
if (frame_ids_file.has_value()) {
writer = CreateVideoFrameWithIdsWriter(std::move(writer), *frame_ids_file);
}
return writer;
}
std::unique_ptr<test::VideoFrameWriter> PeerConnectionE2EQualityTestFixture::
VideoDumpOptions::CreateOutputDumpVideoFrameWriter(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const {
std::unique_ptr<test::VideoFrameWriter> writer = video_frame_writer_factory_(
GetOutputDumpFileName(stream_label, receiver, resolution), resolution);
absl::optional<std::string> frame_ids_file =
GetOutputFrameIdsDumpFileName(stream_label, receiver, resolution);
if (frame_ids_file.has_value()) {
writer = CreateVideoFrameWithIdsWriter(std::move(writer), *frame_ids_file);
}
return writer;
}
std::unique_ptr<test::VideoFrameWriter> PeerConnectionE2EQualityTestFixture::
VideoDumpOptions::Y4mVideoFrameWriterFactory(
absl::string_view file_name_prefix,
const VideoResolution& resolution) {
return std::make_unique<test::Y4mVideoFrameWriterImpl>(
std::string(file_name_prefix) + ".y4m", resolution.width(),
resolution.height(), resolution.fps());
}
std::string
PeerConnectionE2EQualityTestFixture::VideoDumpOptions::GetInputDumpFileName(
absl::string_view stream_label,
const VideoResolution& resolution) const {
rtc::StringBuilder file_name;
file_name << stream_label;
AppendResolution(resolution, file_name);
return test::JoinFilename(output_directory_, file_name.Release());
}
absl::optional<std::string> PeerConnectionE2EQualityTestFixture::
VideoDumpOptions::GetInputFrameIdsDumpFileName(
absl::string_view stream_label,
const VideoResolution& resolution) const {
if (!export_frame_ids_) {
return absl::nullopt;
}
return GetInputDumpFileName(stream_label, resolution) + ".frame_ids.txt";
}
std::string
PeerConnectionE2EQualityTestFixture::VideoDumpOptions::GetOutputDumpFileName(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const {
rtc::StringBuilder file_name;
file_name << stream_label << "_" << receiver;
AppendResolution(resolution, file_name);
return test::JoinFilename(output_directory_, file_name.Release());
}
absl::optional<std::string> PeerConnectionE2EQualityTestFixture::
VideoDumpOptions::GetOutputFrameIdsDumpFileName(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const {
if (!export_frame_ids_) {
return absl::nullopt;
}
return GetOutputDumpFileName(stream_label, receiver, resolution) +
".frame_ids.txt";
}
std::string PeerConnectionE2EQualityTestFixture::VideoDumpOptions::ToString()
const {
rtc::StringBuilder out;
out << "{ output_directory_=" << output_directory_
<< ", sampling_modulo_=" << sampling_modulo_
<< ", export_frame_ids_=" << export_frame_ids_ << " }";
return out.Release();
}
PeerConnectionE2EQualityTestFixture::VideoConfig::VideoConfig(
const VideoResolution& resolution)
: width(resolution.width()),
height(resolution.height()),
fps(resolution.fps()) {
RTC_CHECK(resolution.IsRegular());
}
} // namespace webrtc_pc_e2e
} // namespace webrtc


@@ -0,0 +1,485 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_TEST_PCLF_MEDIA_CONFIGURATION_H_
#define API_TEST_PCLF_MEDIA_CONFIGURATION_H_
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/async_resolver_factory.h"
#include "api/audio/audio_mixer.h"
#include "api/audio_options.h"
#include "api/call/call_factory_interface.h"
#include "api/fec_controller.h"
#include "api/function_view.h"
#include "api/media_stream_interface.h"
#include "api/peer_connection_interface.h"
#include "api/rtc_event_log/rtc_event_log_factory_interface.h"
#include "api/rtp_parameters.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/test/audio_quality_analyzer_interface.h"
#include "api/test/frame_generator_interface.h"
#include "api/test/peer_network_dependencies.h"
#include "api/test/simulated_network.h"
#include "api/test/stats_observer_interface.h"
#include "api/test/track_id_stream_info_map.h"
#include "api/test/video/video_frame_writer.h"
#include "api/test/video_quality_analyzer_interface.h"
#include "api/transport/network_control.h"
#include "api/units/time_delta.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "rtc_base/checks.h"
#include "rtc_base/network.h"
#include "rtc_base/rtc_certificate_generator.h"
#include "rtc_base/ssl_certificate.h"
#include "rtc_base/thread.h"
namespace webrtc {
namespace webrtc_pc_e2e {
// The index of the required capturing device in the OS-provided list of
// video devices. On Linux and Windows the list will be obtained via
// webrtc::VideoCaptureModule::DeviceInfo, on Mac OS via
// [RTCCameraVideoCapturer captureDevices].
enum class CapturingDeviceIndex : size_t {};
// Contains parameters for screen share scrolling.
//
// If scrolling is enabled, it will be done by putting a sliding window on
// the source video and moving this window from the top left corner to the
// bottom right corner of the picture.
//
// In that case the source dimensions must be greater than or equal to the
// sliding window dimensions. So `source_width` and `source_height` are the
// dimensions of the source frame, while `VideoConfig::width` and
// `VideoConfig::height` are the dimensions of the sliding window.
//
// Because `source_width` and `source_height` are the dimensions of the
// source frame, they have to match the width and height of the videos from
// `ScreenShareConfig::slides_yuv_file_names`.
//
// Because scrolling has to be done on a single slide, `duration` must be
// less than or equal to `ScreenShareConfig::slide_change_interval`.
struct ScrollingParams {
ScrollingParams(TimeDelta duration,
size_t source_width,
size_t source_height);
// Duration of scrolling.
TimeDelta duration;
// Width of source slides video.
size_t source_width;
// Height of source slides video.
size_t source_height;
};
// Contains screen share video stream properties.
struct ScreenShareConfig {
explicit ScreenShareConfig(TimeDelta slide_change_interval);
// Shows how long one slide should be presented on the screen during
// slide generation.
TimeDelta slide_change_interval;
// If true, slides will be generated programmatically. No scrolling params
// will be applied in that case.
bool generate_slides = false;
// If present, scrolling will be applied. Please read the extra requirements
// on `slides_yuv_file_names` for scrolling.
absl::optional<ScrollingParams> scrolling_params;
// Contains the list of YUV files with slides.
//
// If empty, the default set of slides will be used. In that case
// `VideoConfig::width` must be equal to `kDefaultSlidesWidth` and
// `VideoConfig::height` must be equal to `kDefaultSlidesHeight`, or, if
// `scrolling_params` are specified, then `ScrollingParams::source_width`
// must be equal to `kDefaultSlidesWidth` and
// `ScrollingParams::source_height` must be equal to `kDefaultSlidesHeight`.
std::vector<std::string> slides_yuv_file_names;
};
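
A minimal usage sketch of the two structs above (illustrative only, not part of this change; it assumes the kDefaultSlidesWidth/kDefaultSlidesHeight constants declared in api/test/peerconnection_quality_test_fixture.h and otherwise uses only the constructors shown here):

using webrtc::TimeDelta;
using webrtc::webrtc_pc_e2e::ScreenShareConfig;
using webrtc::webrtc_pc_e2e::ScrollingParams;

// Change slides every 10 seconds and scroll over each default-size slide for
// 2 seconds (duration must not exceed slide_change_interval).
ScreenShareConfig screen_share(
    /*slide_change_interval=*/TimeDelta::Seconds(10));
screen_share.scrolling_params = ScrollingParams(
    /*duration=*/TimeDelta::Seconds(2),
    /*source_width=*/webrtc::webrtc_pc_e2e::kDefaultSlidesWidth,
    /*source_height=*/webrtc::webrtc_pc_e2e::kDefaultSlidesHeight);
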
// Config for Vp8 simulcast or non-standard Vp9 SVC testing.
//
// To configure standard SVC setting, use `scalability_mode` in the
// `encoding_params` array.
// This configures Vp9 SVC by requesting simulcast layers, the request is
// internally converted to a request for SVC layers.
//
// SVC support is limited:
// During SVC testing there is no SFU, so the framework will try to emulate
// SFU behavior in a regular p2p call. Because of this there are the
// following limitations:
// * if `target_spatial_index` is not equal to the highest spatial layer
// then no packet/frame drops are allowed.
//
// If there are any drops that affect the requested layer, the WebRTC SVC
// implementation will continue decoding only the highest available layer and
// won't restore lower layers, so the analyzer won't receive the required
// data, which will cause wrong results or test failures.
struct VideoSimulcastConfig {
explicit VideoSimulcastConfig(int simulcast_streams_count);
// Specifies the number of simulcast streams/SVC layers, depending on which
// encoder is used.
int simulcast_streams_count;
};
// Configuration for the emulated Selective Forward Unit (SFU)
//
// The framework can optionally filter out frames that are decoded
// using an emulated SFU.
// When using simulcast or SVC, it's not always desirable to receive
// all frames. In a real-world call, an SFU will only forward a subset
// of the frames.
// The emulated SFU is not able to change its configuration dynamically;
// if adaptation happens during the call, layers may be dropped and the
// analyzer won't receive the required data, which will cause wrong results
// or test failures.
struct EmulatedSFUConfig {
EmulatedSFUConfig() = default;
explicit EmulatedSFUConfig(int target_layer_index);
EmulatedSFUConfig(absl::optional<int> target_layer_index,
absl::optional<int> target_temporal_index);
// Specifies the simulcast or spatial index of the video stream to analyze.
// There are 2 cases:
// 1. Simulcast encoding is used:
// in this case `target_layer_index` specifies the index of the
// simulcast stream that should be analyzed. Other streams will be
// dropped.
// 2. SVC encoding is used:
// in this case `target_layer_index` specifies the top spatial layer of
// interest; all layers below, including the target one, will be
// processed. All layers above the target one will be dropped.
// If not specified, then all streams will be received and analyzed.
// When set, it instructs the framework to create an emulated Selective
// Forwarding Unit (SFU) that will propagate only the requested layers.
absl::optional<int> target_layer_index;
// Specifies the index of the maximum temporal unit to keep.
// If not specified then all temporal layers will be received and analyzed.
// When set, it instructs the framework to create an emulated Selective
// Forwarding Unit (SFU) that will propagate only up to the requested layer.
absl::optional<int> target_temporal_index;
};
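
A short sketch of how these configs are meant to combine on a single video stream (illustrative only, not part of this change; VideoConfig is declared further down in this header, and the stream label is a made-up example):

using webrtc::webrtc_pc_e2e::EmulatedSFUConfig;
using webrtc::webrtc_pc_e2e::VideoConfig;
using webrtc::webrtc_pc_e2e::VideoSimulcastConfig;

// Send three simulcast streams, but let the emulated SFU forward only the
// middle one (index 1) to the receiver and the analyzer.
VideoConfig video("alice_camera", /*width=*/1280, /*height=*/720, /*fps=*/30);
video.simulcast_config = VideoSimulcastConfig(/*simulcast_streams_count=*/3);
video.emulated_sfu_config = EmulatedSFUConfig(/*target_layer_index=*/1);
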
class VideoResolution {
public:
// Determines special resolutions, which can't be expressed in terms of
// width, height and fps.
enum class Spec {
// No extra spec set. It describes a regular resolution described by
// width, height and fps.
kNone,
// Describes resolution which contains max value among all sender's
// video streams in each dimension (width, height, fps).
kMaxFromSender
};
VideoResolution(size_t width, size_t height, int32_t fps);
explicit VideoResolution(Spec spec = Spec::kNone);
bool operator==(const VideoResolution& other) const;
bool operator!=(const VideoResolution& other) const;
size_t width() const { return width_; }
void set_width(size_t width) { width_ = width; }
size_t height() const { return height_; }
void set_height(size_t height) { height_ = height; }
int32_t fps() const { return fps_; }
void set_fps(int32_t fps) { fps_ = fps; }
// Returns whether this is a regular resolution. A resolution is regular if
// its spec is `Spec::kNone`.
bool IsRegular() const;
std::string ToString() const;
private:
size_t width_ = 0;
size_t height_ = 0;
int32_t fps_ = 0;
Spec spec_ = Spec::kNone;
};
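
For illustration (sketch only, not part of this change), a regular resolution versus the special max-from-sender spec:

using webrtc::webrtc_pc_e2e::VideoResolution;

VideoResolution hd(/*width=*/1280, /*height=*/720, /*fps=*/30);
VideoResolution max_from_sender(VideoResolution::Spec::kMaxFromSender);
// hd.IsRegular() is true; max_from_sender.IsRegular() is false, and its
// width/height/fps fields carry no meaning.
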
class VideoDumpOptions {
public:
static constexpr int kDefaultSamplingModulo = 1;
// output_directory - the output directory where the stream will be dumped.
// The output files' names will be constructed as
// <stream_name>_<receiver_name>_<resolution>.<extension> for output dumps
// and <stream_name>_<resolution>.<extension> for input dumps.
// By default <extension> is "y4m". Resolution is in the format
// <width>x<height>_<fps>.
// sampling_modulo - the modulo for the video frames to be dumped. A modulo
// of X means every Xth frame will be written to the dump file. The
// value must be greater than 0. (Default: 1)
// export_frame_ids - specifies if frame ids should be exported together
// with the content of the stream. If true, an output file with the same name
// as the video dump and the suffix ".frame_ids.txt" will be created. It will
// contain the frame ids in the same order as the original frames in the
// output file with the stream content. The file will contain one frame id
// per line. (Default: false)
// `video_frame_writer_factory` - factory function to create a video frame
// writer for input and output video files. (Default: Y4M video writer
// factory).
explicit VideoDumpOptions(
absl::string_view output_directory,
int sampling_modulo = kDefaultSamplingModulo,
bool export_frame_ids = false,
std::function<std::unique_ptr<test::VideoFrameWriter>(
absl::string_view file_name_prefix,
const VideoResolution& resolution)> video_frame_writer_factory =
Y4mVideoFrameWriterFactory);
VideoDumpOptions(absl::string_view output_directory, bool export_frame_ids);
VideoDumpOptions(const VideoDumpOptions&) = default;
VideoDumpOptions& operator=(const VideoDumpOptions&) = default;
VideoDumpOptions(VideoDumpOptions&&) = default;
VideoDumpOptions& operator=(VideoDumpOptions&&) = default;
std::string output_directory() const { return output_directory_; }
int sampling_modulo() const { return sampling_modulo_; }
bool export_frame_ids() const { return export_frame_ids_; }
std::unique_ptr<test::VideoFrameWriter> CreateInputDumpVideoFrameWriter(
absl::string_view stream_label,
const VideoResolution& resolution) const;
std::unique_ptr<test::VideoFrameWriter> CreateOutputDumpVideoFrameWriter(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const;
std::string ToString() const;
private:
static std::unique_ptr<test::VideoFrameWriter> Y4mVideoFrameWriterFactory(
absl::string_view file_name_prefix,
const VideoResolution& resolution);
std::string GetInputDumpFileName(absl::string_view stream_label,
const VideoResolution& resolution) const;
// Returns file name for input frame ids dump if `export_frame_ids()` is
// true, absl::nullopt otherwise.
absl::optional<std::string> GetInputFrameIdsDumpFileName(
absl::string_view stream_label,
const VideoResolution& resolution) const;
std::string GetOutputDumpFileName(absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const;
// Returns file name for output frame ids dump if `export_frame_ids()` is
// true, absl::nullopt otherwise.
absl::optional<std::string> GetOutputFrameIdsDumpFileName(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const;
std::string output_directory_;
int sampling_modulo_ = 1;
bool export_frame_ids_ = false;
std::function<std::unique_ptr<test::VideoFrameWriter>(
absl::string_view file_name_prefix,
const VideoResolution& resolution)>
video_frame_writer_factory_;
};
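
A usage sketch (illustrative only, not part of this change; the dump directory is a made-up path, and VideoConfig is declared right below):

using webrtc::webrtc_pc_e2e::VideoConfig;
using webrtc::webrtc_pc_e2e::VideoDumpOptions;

// Dump every 10th captured frame as Y4M and also export the frame ids.
VideoDumpOptions dump_options("/tmp/pc_e2e_dumps", /*sampling_modulo=*/10,
                              /*export_frame_ids=*/true);
VideoConfig video(/*width=*/640, /*height=*/360, /*fps=*/30);
video.input_dump_options = dump_options;
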
// Contains properties of single video stream.
struct VideoConfig {
explicit VideoConfig(const VideoResolution& resolution);
VideoConfig(size_t width, size_t height, int32_t fps);
VideoConfig(std::string stream_label,
size_t width,
size_t height,
int32_t fps);
// Video stream width.
size_t width;
// Video stream height.
size_t height;
int32_t fps;
VideoResolution GetResolution() const {
return VideoResolution(width, height, fps);
}
// Has to be unique among all specified configs for all peers in the call.
// Will be auto-generated if omitted.
absl::optional<std::string> stream_label;
// Will be set for the current video track. If equal to kText or kDetailed,
// screencast is on.
absl::optional<VideoTrackInterface::ContentHint> content_hint;
// If present, video will be transferred in simulcast/SVC mode depending on
// which encoder is used.
//
// Simulcast is supported only from the 1st added peer. For VP8, only
// simulcast without RTX is supported, so RTX will be automatically disabled
// for all simulcast tracks. For VP9, simulcast enables VP9 SVC mode and
// supports RTX, but only on non-lossy networks. See more in the documentation
// of VideoSimulcastConfig.
absl::optional<VideoSimulcastConfig> simulcast_config;
// Configuration for the emulated Selective Forward Unit (SFU).
absl::optional<EmulatedSFUConfig> emulated_sfu_config;
// Encoding parameters, for both singlecast and per simulcast layer.
// If singlecast is used and this is not empty, a single value can be
// provided. If simulcast is used and this is not empty, the size of
// `encoding_params` has to be equal to
// `simulcast_config.simulcast_streams_count`. Will be used to set the
// transceiver send encoding params for each layer.
// RtpEncodingParameters::rid may be changed by fixture implementation to
// ensure signaling correctness.
std::vector<RtpEncodingParameters> encoding_params;
// Count of temporal layers for video stream. This value will be set into
// each RtpEncodingParameters of RtpParameters of corresponding
// RtpSenderInterface for this video stream.
absl::optional<int> temporal_layers_count;
// If specified, defines how the input should be dumped. It is actually one
// of the test's output files, containing a copy of what was captured during
// the test for this video stream on the sender side. It is useful when a
// generator is used as input.
absl::optional<VideoDumpOptions> input_dump_options;
// If specified, defines how the output should be dumped on the receiver side
// for this stream. The produced files contain what was rendered for this
// video stream on the receiver side, per receiver.
absl::optional<VideoDumpOptions> output_dump_options;
// If set to true, uses a fixed frame rate while dumping output video to the
// file. The requested `VideoSubscription::fps()` will be used as the frame
// rate.
bool output_dump_use_fixed_framerate = false;
// If true, will display input and output video on the user's screen.
bool show_on_screen = false;
// If specified, determines a sync group to which this video stream belongs.
// According to bugs.webrtc.org/4762 WebRTC supports synchronization only
// for a pair of a single audio and a single video stream.
absl::optional<std::string> sync_group;
// If specified, it will be set into RtpParameters of corresponding
// RtpSenderInterface for this video stream.
// Note that this setting takes precedence over `content_hint`.
absl::optional<DegradationPreference> degradation_preference;
};
// Contains properties for audio in the call.
struct AudioConfig {
enum Mode {
kGenerated,
kFile,
};
AudioConfig() = default;
explicit AudioConfig(std::string stream_label);
// Has to be unique among all specified configs for all peers in the call.
// Will be auto-generated if omitted.
absl::optional<std::string> stream_label;
Mode mode = kGenerated;
// Has to be specified only if mode == kFile.
absl::optional<std::string> input_file_name;
// If specified, the input stream will also be copied to the specified file.
absl::optional<std::string> input_dump_file_name;
// If specified, the output stream will be copied to the specified file.
absl::optional<std::string> output_dump_file_name;
// Audio options to use.
cricket::AudioOptions audio_options;
// Sampling frequency of input audio data (from file or generated).
int sampling_frequency_in_hz = 48000;
// If specified, determines a sync group to which this audio stream belongs.
// According to bugs.webrtc.org/4762 WebRTC supports synchronization only
// for a pair of a single audio and a single video stream.
absl::optional<std::string> sync_group;
};
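
A usage sketch (illustrative only, not part of this change; the input file path and sync group name are made-up examples):

using webrtc::webrtc_pc_e2e::AudioConfig;

// Play audio from a file and keep it in the same sync group as a video
// stream so that A/V sync can be measured for the pair.
AudioConfig audio("alice_audio");
audio.mode = AudioConfig::kFile;
audio.input_file_name = "/tmp/alice_speech_48k.wav";
audio.sync_group = "alice_av";
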
struct VideoCodecConfig {
explicit VideoCodecConfig(std::string name);
VideoCodecConfig(std::string name,
std::map<std::string, std::string> required_params);
// The next two fields are used to specify the concrete video codec that
// should be used in the test. The video codec will be negotiated in SDP
// during the offer/answer exchange.
// Video codec name. You can find valid names in
// media/base/media_constants.h
std::string name;
// Map of parameters that have to be specified on the SDP codec. Each
// parameter is described by key and value. Codec parameters will match the
// specified map if and only if for each key from `required_params` there is
// a parameter with a name equal to this key and a parameter value equal to
// the value from `required_params` for this key.
// If empty, then only the name will be used to match the codec.
std::map<std::string, std::string> required_params;
};
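
A usage sketch (illustrative only, not part of this change; the codec name matches cricket::kVp9CodecName from media/base/media_constants.h, and the "profile-id" fmtp key should be treated as an illustrative assumption):

using webrtc::webrtc_pc_e2e::VideoCodecConfig;

// Ask the fixture to negotiate VP9 profile 0.
VideoCodecConfig vp9("VP9", /*required_params=*/{{"profile-id", "0"}});
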
// Subscription to the remote video streams. It declares which remote streams
// the peer should receive and in which resolution (width x height x fps).
class VideoSubscription {
public:
// Returns the resolution constructed as maximum from all resolution
// dimensions: width, height and fps.
static absl::optional<VideoResolution> GetMaxResolution(
rtc::ArrayView<const VideoConfig> video_configs);
static absl::optional<VideoResolution> GetMaxResolution(
rtc::ArrayView<const VideoResolution> resolutions);
bool operator==(const VideoSubscription& other) const;
bool operator!=(const VideoSubscription& other) const;
// Subscribes the receiver to all streams sent by the specified peer with the
// specified resolution. It will override any resolution that was set with
// `SubscribeToAllPeers`, independently of the order of the method calls.
VideoSubscription& SubscribeToPeer(
absl::string_view peer_name,
VideoResolution resolution =
VideoResolution(VideoResolution::Spec::kMaxFromSender));
// Subscribes the receiver to all sent streams with the specified resolution.
// If any stream was subscribed to with the `SubscribeToPeer` method, that
// will override the resolution passed to this function, independently of the
// order of the method calls.
VideoSubscription& SubscribeToAllPeers(
VideoResolution resolution =
VideoResolution(VideoResolution::Spec::kMaxFromSender));
// Returns the resolution for the specified sender. If no specific resolution
// was set for this sender, returns the resolution used for all streams.
// If the subscription doesn't subscribe to all streams, `absl::nullopt` will
// be returned.
absl::optional<VideoResolution> GetResolutionForPeer(
absl::string_view peer_name) const;
// Returns a possibly empty list of senders to which the peer explicitly
// subscribed with a specific resolution.
std::vector<std::string> GetSubscribedPeers() const;
std::string ToString() const;
private:
absl::optional<VideoResolution> default_resolution_ = absl::nullopt;
std::map<std::string, VideoResolution> peers_resolution_;
};
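
A usage sketch (illustrative only, not part of this change; peer names are made-up examples):

using webrtc::webrtc_pc_e2e::VideoResolution;
using webrtc::webrtc_pc_e2e::VideoSubscription;

// Receive Alice's streams downscaled to 640x360@15 and everyone else's
// streams at the maximum resolution the sender produces.
VideoSubscription subscription;
subscription.SubscribeToAllPeers().SubscribeToPeer(
    "alice", VideoResolution(/*width=*/640, /*height=*/360, /*fps=*/15));
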
// Contains configuration for echo emulator.
struct EchoEmulationConfig {
// Delay which represents the echo path delay, i.e. how soon the rendered
// signal should reach the capturer.
TimeDelta echo_delay = TimeDelta::Millis(50);
};
} // namespace webrtc_pc_e2e
} // namespace webrtc
#endif // API_TEST_PCLF_MEDIA_CONFIGURATION_H_


@@ -22,7 +22,7 @@
#include "api/field_trials_view.h"
#include "api/rtc_event_log/rtc_event_log_factory_interface.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/test/peerconnection_quality_test_fixture.h"
#include "api/test/pclf/media_configuration.h"
#include "api/transport/network_control.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"
@@ -118,7 +118,7 @@ struct Params {
// Peer name. If empty, a default one will be set by the fixture.
absl::optional<std::string> name;
// If `audio_config` is set, an audio stream will be configured.
absl::optional<PeerConnectionE2EQualityTestFixture::AudioConfig> audio_config;
absl::optional<AudioConfig> audio_config;
// Flags to set on `cricket::PortAllocator`. These flags will be added
// to the default ones that are present on the port allocator.
uint32_t port_allocator_extra_flags = cricket::kDefaultPortAllocatorFlags;
@@ -142,18 +142,38 @@ struct Params {
PeerConnectionInterface::RTCConfiguration rtc_configuration;
PeerConnectionInterface::RTCOfferAnswerOptions rtc_offer_answer_options;
BitrateSettings bitrate_settings;
std::vector<PeerConnectionE2EQualityTestFixture::VideoCodecConfig>
video_codecs;
std::vector<VideoCodecConfig> video_codecs;
};
// Contains parameters that may be changed by the test writer during the test call.
struct ConfigurableParams {
// If `video_configs` is empty - no video should be added to the test call.
std::vector<PeerConnectionE2EQualityTestFixture::VideoConfig> video_configs;
std::vector<VideoConfig> video_configs;
PeerConnectionE2EQualityTestFixture::VideoSubscription video_subscription =
PeerConnectionE2EQualityTestFixture::VideoSubscription()
.SubscribeToAllPeers();
VideoSubscription video_subscription =
VideoSubscription().SubscribeToAllPeers();
};
// Contains parameters that describe how long the framework should run the
// quality test.
struct RunParams {
explicit RunParams(TimeDelta run_duration) : run_duration(run_duration) {}
// Specifies how long the test should be run. This time shows how long
// the media should flow after the connection was established and before
// it is shut down.
TimeDelta run_duration;
// If set to true, peers will be able to use FlexFEC; otherwise they won't
// be able to negotiate it even if it's enabled on a per-peer level.
bool enable_flex_fec_support = false;
// If true, will set conference mode in the SDP media section for all video
// tracks of all peers.
bool use_conference_mode = false;
// If specified, echo emulation will be done by mixing the render audio into
// the capture signal. In that case the input signal will be reduced by half
// to avoid saturation or compression in the echo path simulation.
absl::optional<EchoEmulationConfig> echo_emulation_config;
};
} // namespace webrtc_pc_e2e
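
A usage sketch of RunParams (illustrative only, not part of this change):

using webrtc::TimeDelta;
using webrtc::webrtc_pc_e2e::EchoEmulationConfig;
using webrtc::webrtc_pc_e2e::RunParams;

// Let media flow for 30 seconds, negotiate conference mode, and emulate an
// echo path with the default 50 ms delay.
RunParams run_params(TimeDelta::Seconds(30));
run_params.use_conference_mode = true;
run_params.echo_emulation_config = EchoEmulationConfig();
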


@@ -10,6 +10,10 @@
#ifndef API_TEST_PEERCONNECTION_QUALITY_TEST_FIXTURE_H_
#define API_TEST_PEERCONNECTION_QUALITY_TEST_FIXTURE_H_
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <map>
#include <memory>
#include <string>
@@ -32,6 +36,8 @@
#include "api/task_queue/task_queue_factory.h"
#include "api/test/audio_quality_analyzer_interface.h"
#include "api/test/frame_generator_interface.h"
#include "api/test/pclf/media_configuration.h"
#include "api/test/pclf/media_quality_test_params.h"
#include "api/test/peer_network_dependencies.h"
#include "api/test/simulated_network.h"
#include "api/test/stats_observer_interface.h"
@@ -45,6 +51,7 @@
#include "api/video_codecs/video_encoder_factory.h"
#include "media/base/media_constants.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "rtc_base/checks.h"
#include "rtc_base/network.h"
#include "rtc_base/rtc_certificate_generator.h"
#include "rtc_base/ssl_certificate.h"
@@ -59,471 +66,19 @@ constexpr size_t kDefaultSlidesHeight = 1110;
// API is in development. Can be changed/removed without notice.
class PeerConnectionE2EQualityTestFixture {
public:
// The index of required capturing device in OS provided list of video
// devices. On Linux and Windows the list will be obtained via
// webrtc::VideoCaptureModule::DeviceInfo, on Mac OS via
// [RTCCameraVideoCapturer captureDevices].
enum class CapturingDeviceIndex : size_t {};
// Contains parameters for screen share scrolling.
//
// If scrolling is enabled, then it will be done by putting sliding window
// on source video and moving this window from top left corner to the
// bottom right corner of the picture.
//
// In such case source dimensions must be greater or equal to the sliding
// window dimensions. So `source_width` and `source_height` are the dimensions
// of the source frame, while `VideoConfig::width` and `VideoConfig::height`
// are the dimensions of the sliding window.
//
// Because `source_width` and `source_height` are dimensions of the source
// frame, they have to be width and height of videos from
// `ScreenShareConfig::slides_yuv_file_names`.
//
// Because scrolling have to be done on single slide it also requires, that
// `duration` must be less or equal to
// `ScreenShareConfig::slide_change_interval`.
struct ScrollingParams {
ScrollingParams(TimeDelta duration,
size_t source_width,
size_t source_height)
: duration(duration),
source_width(source_width),
source_height(source_height) {
RTC_CHECK_GT(duration.ms(), 0);
}
// Duration of scrolling.
TimeDelta duration;
// Width of source slides video.
size_t source_width;
// Height of source slides video.
size_t source_height;
};
// Contains screen share video stream properties.
struct ScreenShareConfig {
explicit ScreenShareConfig(TimeDelta slide_change_interval)
: slide_change_interval(slide_change_interval) {
RTC_CHECK_GT(slide_change_interval.ms(), 0);
}
// Shows how long one slide should be presented on the screen during
// slide generation.
TimeDelta slide_change_interval;
// If true, slides will be generated programmatically. No scrolling params
// will be applied in such case.
bool generate_slides = false;
// If present scrolling will be applied. Please read extra requirement on
// `slides_yuv_file_names` for scrolling.
absl::optional<ScrollingParams> scrolling_params;
// Contains list of yuv files with slides.
//
// If empty, default set of slides will be used. In such case
// `VideoConfig::width` must be equal to `kDefaultSlidesWidth` and
// `VideoConfig::height` must be equal to `kDefaultSlidesHeight` or if
// `scrolling_params` are specified, then `ScrollingParams::source_width`
// must be equal to `kDefaultSlidesWidth` and
// `ScrollingParams::source_height` must be equal to `kDefaultSlidesHeight`.
std::vector<std::string> slides_yuv_file_names;
};
// Config for Vp8 simulcast or non-standard Vp9 SVC testing.
//
// To configure standard SVC setting, use `scalability_mode` in the
// `encoding_params` array.
// This configures Vp9 SVC by requesting simulcast layers, the request is
// internally converted to a request for SVC layers.
//
// SVC support is limited:
// During SVC testing there is no SFU, so framework will try to emulate SFU
// behavior in regular p2p call. Because of it there are such limitations:
// * if `target_spatial_index` is not equal to the highest spatial layer
// then no packet/frame drops are allowed.
//
// If there will be any drops, that will affect requested layer, then
// WebRTC SVC implementation will continue decoding only the highest
// available layer and won't restore lower layers, so analyzer won't
// receive required data which will cause wrong results or test failures.
struct VideoSimulcastConfig {
explicit VideoSimulcastConfig(int simulcast_streams_count)
: simulcast_streams_count(simulcast_streams_count) {
RTC_CHECK_GT(simulcast_streams_count, 1);
}
// Specified amount of simulcast streams/SVC layers, depending on which
// encoder is used.
int simulcast_streams_count;
};
// Configuration for the emulated Selective Forward Unit (SFU)
//
// The framework can optionally filter out frames that are decoded
// using an emulated SFU.
// When using simulcast or SVC, it's not always desirable to receive
// all frames. In a real world call, a SFU will only forward a subset
// of the frames.
// The emulated SFU is not able to change its configuration dynamically,
// if adaptation happens during the call, layers may be dropped and the
// analyzer won't receive the required data which will cause wrong results or
// test failures.
struct EmulatedSFUConfig {
EmulatedSFUConfig() {}
explicit EmulatedSFUConfig(int target_layer_index)
: target_layer_index(target_layer_index) {
RTC_CHECK_GE(target_layer_index, 0);
}
EmulatedSFUConfig(absl::optional<int> target_layer_index,
absl::optional<int> target_temporal_index)
: target_layer_index(target_layer_index),
target_temporal_index(target_temporal_index) {
RTC_CHECK_GE(target_temporal_index.value_or(0), 0);
if (target_temporal_index)
RTC_CHECK_GE(*target_temporal_index, 0);
}
// Specifies simulcast or spatial index of the video stream to analyze.
// There are 2 cases:
// 1. simulcast encoding is used:
// in such case `target_layer_index` will specify the index of
// simulcast stream, that should be analyzed. Other streams will be
// dropped.
// 2. SVC encoding is used:
// in such case `target_layer_index` will specify the top interesting
// spatial layer and all layers below, including target one will be
// processed. All layers above target one will be dropped.
// If not specified then all streams will be received and analyzed.
// When set, it instructs the framework to create an emulated Selective
// Forwarding Unit (SFU) that will propagate only the requested layers.
absl::optional<int> target_layer_index;
// Specifies the index of the maximum temporal unit to keep.
// If not specified then all temporal layers will be received and analyzed.
// When set, it instructs the framework to create an emulated Selective
// Forwarding Unit (SFU) that will propagate only up to the requested layer.
absl::optional<int> target_temporal_index;
};
class VideoResolution {
public:
// Determines special resolutions, which can't be expressed in terms of
// width, height and fps.
enum class Spec {
// No extra spec set. It describes a regular resolution described by
// width, height and fps.
kNone,
// Describes resolution which contains max value among all sender's
// video streams in each dimension (width, height, fps).
kMaxFromSender
};
VideoResolution(size_t width, size_t height, int32_t fps);
explicit VideoResolution(Spec spec = Spec::kNone);
bool operator==(const VideoResolution& other) const;
bool operator!=(const VideoResolution& other) const {
return !(*this == other);
}
size_t width() const { return width_; }
void set_width(size_t width) { width_ = width; }
size_t height() const { return height_; }
void set_height(size_t height) { height_ = height; }
int32_t fps() const { return fps_; }
void set_fps(int32_t fps) { fps_ = fps; }
// Returns if it is a regular resolution or not. The resolution is regular
// if it's spec is `Spec::kNone`.
bool IsRegular() const { return spec_ == Spec::kNone; }
std::string ToString() const;
private:
size_t width_ = 0;
size_t height_ = 0;
int32_t fps_ = 0;
Spec spec_ = Spec::kNone;
};
class VideoDumpOptions {
public:
static constexpr int kDefaultSamplingModulo = 1;
// output_directory - the output directory where stream will be dumped. The
// output files' names will be constructed as
// <stream_name>_<receiver_name>_<resolution>.<extension> for output dumps
// and <stream_name>_<resolution>.<extension> for input dumps.
// By default <extension> is "y4m". Resolution is in the format
// <width>x<height>_<fps>.
// sampling_modulo - the module for the video frames to be dumped. Modulo
// equals X means every Xth frame will be written to the dump file. The
// value must be greater than 0. (Default: 1)
// export_frame_ids - specifies if frame ids should be exported together
// with content of the stream. If true, an output file with the same name as
// video dump and suffix ".frame_ids.txt" will be created. It will contain
// the frame ids in the same order as original frames in the output
// file with stream content. File will contain one frame id per line.
// (Default: false)
// `video_frame_writer_factory` - factory function to create a video frame
// writer for input and output video files. (Default: Y4M video writer
// factory).
explicit VideoDumpOptions(
absl::string_view output_directory,
int sampling_modulo = kDefaultSamplingModulo,
bool export_frame_ids = false,
std::function<std::unique_ptr<test::VideoFrameWriter>(
absl::string_view file_name_prefix,
const VideoResolution& resolution)> video_frame_writer_factory =
Y4mVideoFrameWriterFactory);
VideoDumpOptions(absl::string_view output_directory, bool export_frame_ids);
VideoDumpOptions(const VideoDumpOptions&) = default;
VideoDumpOptions& operator=(const VideoDumpOptions&) = default;
VideoDumpOptions(VideoDumpOptions&&) = default;
VideoDumpOptions& operator=(VideoDumpOptions&&) = default;
std::string output_directory() const { return output_directory_; }
int sampling_modulo() const { return sampling_modulo_; }
bool export_frame_ids() const { return export_frame_ids_; }
std::unique_ptr<test::VideoFrameWriter> CreateInputDumpVideoFrameWriter(
absl::string_view stream_label,
const VideoResolution& resolution) const;
std::unique_ptr<test::VideoFrameWriter> CreateOutputDumpVideoFrameWriter(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const;
std::string ToString() const;
private:
static std::unique_ptr<test::VideoFrameWriter> Y4mVideoFrameWriterFactory(
absl::string_view file_name_prefix,
const VideoResolution& resolution);
std::string GetInputDumpFileName(absl::string_view stream_label,
const VideoResolution& resolution) const;
// Returns file name for input frame ids dump if `export_frame_ids()` is
// true, absl::nullopt otherwise.
absl::optional<std::string> GetInputFrameIdsDumpFileName(
absl::string_view stream_label,
const VideoResolution& resolution) const;
std::string GetOutputDumpFileName(absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const;
// Returns file name for output frame ids dump if `export_frame_ids()` is
// true, absl::nullopt otherwise.
absl::optional<std::string> GetOutputFrameIdsDumpFileName(
absl::string_view stream_label,
absl::string_view receiver,
const VideoResolution& resolution) const;
std::string output_directory_;
int sampling_modulo_ = 1;
bool export_frame_ids_ = false;
std::function<std::unique_ptr<test::VideoFrameWriter>(
absl::string_view file_name_prefix,
const VideoResolution& resolution)>
video_frame_writer_factory_;
};
// Contains properties of single video stream.
struct VideoConfig {
explicit VideoConfig(const VideoResolution& resolution);
VideoConfig(size_t width, size_t height, int32_t fps)
: width(width), height(height), fps(fps) {}
VideoConfig(std::string stream_label,
size_t width,
size_t height,
int32_t fps)
: width(width),
height(height),
fps(fps),
stream_label(std::move(stream_label)) {}
// Video stream width.
size_t width;
// Video stream height.
size_t height;
int32_t fps;
VideoResolution GetResolution() const {
return VideoResolution(width, height, fps);
}
// Have to be unique among all specified configs for all peers in the call.
// Will be auto generated if omitted.
absl::optional<std::string> stream_label;
// Will be set for current video track. If equals to kText or kDetailed -
// screencast in on.
absl::optional<VideoTrackInterface::ContentHint> content_hint;
// If presented video will be transfered in simulcast/SVC mode depending on
// which encoder is used.
//
// Simulcast is supported only from 1st added peer. For VP8 simulcast only
// without RTX is supported so it will be automatically disabled for all
// simulcast tracks. For VP9 simulcast enables VP9 SVC mode and support RTX,
// but only on non-lossy networks. See more in documentation to
// VideoSimulcastConfig.
absl::optional<VideoSimulcastConfig> simulcast_config;
// Configuration for the emulated Selective Forward Unit (SFU).
absl::optional<EmulatedSFUConfig> emulated_sfu_config;
// Encoding parameters for both singlecast and per simulcast layer.
// If singlecast is used, if not empty, a single value can be provided.
// If simulcast is used, if not empty, `encoding_params` size have to be
// equal to `simulcast_config.simulcast_streams_count`. Will be used to set
// transceiver send encoding params for each layer.
// RtpEncodingParameters::rid may be changed by fixture implementation to
// ensure signaling correctness.
std::vector<RtpEncodingParameters> encoding_params;
// Count of temporal layers for video stream. This value will be set into
// each RtpEncodingParameters of RtpParameters of corresponding
// RtpSenderInterface for this video stream.
absl::optional<int> temporal_layers_count;
// If specified defines how input should be dumped. It is actually one of
// the test's output file, which contains copy of what was captured during
// the test for this video stream on sender side. It is useful when
// generator is used as input.
absl::optional<VideoDumpOptions> input_dump_options;
// If specified defines how output should be dumped on the receiver side for
// this stream. The produced files contain what was rendered for this video
// stream on receiver side per each receiver.
absl::optional<VideoDumpOptions> output_dump_options;
// If set to true uses fixed frame rate while dumping output video to the
// file. Requested `VideoSubscription::fps()` will be used as frame rate.
bool output_dump_use_fixed_framerate = false;
// If true will display input and output video on the user's screen.
bool show_on_screen = false;
// If specified, determines a sync group to which this video stream belongs.
// According to bugs.webrtc.org/4762 WebRTC supports synchronization only
// for pair of single audio and single video stream.
absl::optional<std::string> sync_group;
// If specified, it will be set into RtpParameters of corresponding
// RtpSenderInterface for this video stream.
// Note that this setting takes precedence over `content_hint`.
absl::optional<DegradationPreference> degradation_preference;
};
// Contains properties for audio in the call.
struct AudioConfig {
enum Mode {
kGenerated,
kFile,
};
AudioConfig() = default;
explicit AudioConfig(std::string stream_label)
: stream_label(std::move(stream_label)) {}
// Has to be unique among all specified configs for all peers in the call.
// Will be auto-generated if omitted.
absl::optional<std::string> stream_label;
Mode mode = kGenerated;
// Has to be specified only if mode = kFile.
absl::optional<std::string> input_file_name;
// If specified, the input stream will also be copied to the specified file.
absl::optional<std::string> input_dump_file_name;
// If specified, the output stream will be copied to the specified file.
absl::optional<std::string> output_dump_file_name;
// Audio options to use.
cricket::AudioOptions audio_options;
// Sampling frequency of input audio data (from file or generated).
int sampling_frequency_in_hz = 48000;
// If specified, determines the sync group to which this audio stream
// belongs. According to bugs.webrtc.org/4762 WebRTC supports synchronization
// only for a pair of a single audio and a single video stream.
absl::optional<std::string> sync_group;
};
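// Example usage (illustrative sketch): an audio stream whose input is read
// from a file and whose received output is dumped; the file paths are
// placeholders.
//
//   AudioConfig alice_audio("alice_audio");
//   alice_audio.mode = AudioConfig::kFile;
//   alice_audio.input_file_name = "/tmp/alice_input.wav";
//   alice_audio.output_dump_file_name = "/tmp/alice_output.wav";
//   alice_audio.sampling_frequency_in_hz = 48000;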
struct VideoCodecConfig {
explicit VideoCodecConfig(std::string name)
: name(std::move(name)), required_params() {}
VideoCodecConfig(std::string name,
std::map<std::string, std::string> required_params)
: name(std::move(name)), required_params(std::move(required_params)) {}
// The next two fields are used to specify the concrete video codec that
// should be used in the test. The video codec will be negotiated in SDP
// during the offer/answer exchange.
// Video codec name. You can find valid names in
// media/base/media_constants.h
std::string name = cricket::kVp8CodecName;
// Map of parameters that have to be specified on the SDP codec. Each
// parameter is described by a key and a value. The codec parameters will
// match the specified map if and only if, for each key from
// `required_params`, there is a parameter whose name is equal to this key
// and whose value is equal to the value from `required_params` for this key.
// If empty, only the name will be used to match the codec.
std::map<std::string, std::string> required_params;
};
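// Example usage (illustrative sketch): require VP9 with profile 0. The
// parameter key "profile-id" is an assumption about the SDP fmtp parameter;
// see media/base/media_constants.h for the exact constants.
//
//   VideoCodecConfig vp9_config(cricket::kVp9CodecName,
//                               {{"profile-id", "0"}});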
// Subscription to the remote video streams. It declares which remote streams
// the peer should receive and in which resolution (width x height x fps).
class VideoSubscription {
public:
// Returns the resolution constructed as the maximum of all resolution
// dimensions: width, height and fps.
static absl::optional<VideoResolution> GetMaxResolution(
rtc::ArrayView<const VideoConfig> video_configs);
static absl::optional<VideoResolution> GetMaxResolution(
rtc::ArrayView<const VideoResolution> resolutions);
bool operator==(const VideoSubscription& other) const;
bool operator!=(const VideoSubscription& other) const {
return !(*this == other);
}
// Subscribes the receiver to all streams sent by the specified peer with the
// specified resolution. It will override any resolution that was set via
// `SubscribeToAllPeers` independently of the method call order.
VideoSubscription& SubscribeToPeer(
absl::string_view peer_name,
VideoResolution resolution =
VideoResolution(VideoResolution::Spec::kMaxFromSender)) {
peers_resolution_[std::string(peer_name)] = resolution;
return *this;
}
// Subscribes the receiver to all sent streams with the specified resolution.
// If any stream was subscribed to with the `SubscribeToPeer` method, that
// will override the resolution passed to this function independently of the
// method call order.
VideoSubscription& SubscribeToAllPeers(
VideoResolution resolution =
VideoResolution(VideoResolution::Spec::kMaxFromSender)) {
default_resolution_ = resolution;
return *this;
}
// Returns the resolution for the specified sender. If no specific resolution
// was set for this sender, the resolution used for all streams is returned.
// If the subscription doesn't subscribe to all streams, `absl::nullopt` will
// be returned.
absl::optional<VideoResolution> GetResolutionForPeer(
absl::string_view peer_name) const {
auto it = peers_resolution_.find(std::string(peer_name));
if (it == peers_resolution_.end()) {
return default_resolution_;
}
return it->second;
}
// Returns a possibly empty list of senders to which the peer explicitly
// subscribed with a specific resolution.
std::vector<std::string> GetSubscribedPeers() const {
std::vector<std::string> subscribed_streams;
subscribed_streams.reserve(peers_resolution_.size());
for (const auto& entry : peers_resolution_) {
subscribed_streams.push_back(entry.first);
}
return subscribed_streams;
}
std::string ToString() const;
private:
absl::optional<VideoResolution> default_resolution_ = absl::nullopt;
std::map<std::string, VideoResolution> peers_resolution_;
};
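// Example usage (illustrative sketch): receive streams from all peers at the
// resolution they send, except "alice" (a placeholder peer name), whose
// streams are requested at 640x360@15fps.
//
//   VideoSubscription subscription;
//   subscription.SubscribeToAllPeers();
//   subscription.SubscribeToPeer(
//       "alice", VideoResolution(/*width=*/640, /*height=*/360, /*fps=*/15));
//   absl::optional<VideoResolution> alice_resolution =
//       subscription.GetResolutionForPeer("alice");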
using CapturingDeviceIndex = ::webrtc::webrtc_pc_e2e::CapturingDeviceIndex;
using ScrollingParams = ::webrtc::webrtc_pc_e2e::ScrollingParams;
using ScreenShareConfig = ::webrtc::webrtc_pc_e2e::ScreenShareConfig;
using VideoSimulcastConfig = ::webrtc::webrtc_pc_e2e::VideoSimulcastConfig;
using EmulatedSFUConfig = ::webrtc::webrtc_pc_e2e::EmulatedSFUConfig;
using VideoResolution = ::webrtc::webrtc_pc_e2e::VideoResolution;
using VideoDumpOptions = ::webrtc::webrtc_pc_e2e::VideoDumpOptions;
using VideoConfig = ::webrtc::webrtc_pc_e2e::VideoConfig;
using AudioConfig = ::webrtc::webrtc_pc_e2e::AudioConfig;
using VideoCodecConfig = ::webrtc::webrtc_pc_e2e::VideoCodecConfig;
using VideoSubscription = ::webrtc::webrtc_pc_e2e::VideoSubscription;
using EchoEmulationConfig = ::webrtc::webrtc_pc_e2e::EchoEmulationConfig;
using RunParams = ::webrtc::webrtc_pc_e2e::RunParams;
// This class is used to fully configure one peer inside the call.
class PeerConfigurer {
@ -646,35 +201,6 @@ class PeerConnectionE2EQualityTestFixture {
BitrateSettings bitrate_settings) = 0;
};
// Contains the configuration for the echo emulator.
struct EchoEmulationConfig {
// Delay which represents the echo path delay, i.e. how soon the rendered
// signal should reach the capturer.
TimeDelta echo_delay = TimeDelta::Millis(50);
};
// Contains parameters that describe how long the framework should run the
// quality test.
struct RunParams {
explicit RunParams(TimeDelta run_duration) : run_duration(run_duration) {}
// Specifies how long the test should be run. This time shows how long the
// media should flow after the connection was established and before it is
// shut down.
TimeDelta run_duration;
// If set to true, peers will be able to use FlexFEC; otherwise they won't be
// able to negotiate it even if it's enabled on the per-peer level.
bool enable_flex_fec_support = false;
// If true, conference mode will be set in the SDP media section for all
// video tracks for all peers.
bool use_conference_mode = false;
// If specified, echo emulation will be done by mixing the render audio into
// the capture signal. In that case the input signal will be reduced by half
// to avoid saturation or compression in the echo path simulation.
absl::optional<EchoEmulationConfig> echo_emulation_config;
};
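// Example usage (illustrative sketch): run the test for 30 seconds with echo
// emulation at the default 50 ms echo path delay.
//
//   RunParams run_params(TimeDelta::Seconds(30));
//   run_params.echo_emulation_config = EchoEmulationConfig();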
// Represents an entity that will report quality metrics after the test.
class QualityMetricsReporter : public StatsObserverInterface {
public:

View file

@ -13,6 +13,7 @@
#include <vector>
#include "absl/types/optional.h"
#include "api/test/pclf/media_configuration.h"
#include "api/test/video/video_frame_writer.h"
#include "rtc_base/gunit.h"
#include "test/gmock.h"
@ -24,15 +25,6 @@ namespace {
using ::testing::Eq;
using VideoResolution = ::webrtc::webrtc_pc_e2e::
PeerConnectionE2EQualityTestFixture::VideoResolution;
using VideoConfig =
::webrtc::webrtc_pc_e2e::PeerConnectionE2EQualityTestFixture::VideoConfig;
using VideoSubscription = ::webrtc::webrtc_pc_e2e::
PeerConnectionE2EQualityTestFixture::VideoSubscription;
using VideoDumpOptions = ::webrtc::webrtc_pc_e2e::
PeerConnectionE2EQualityTestFixture::VideoDumpOptions;
TEST(PclfVideoSubscriptionTest,
MaxFromSenderSpecEqualIndependentOfOtherFields) {
VideoResolution r1(VideoResolution::Spec::kMaxFromSender);