Mirror of https://github.com/mollyim/webrtc.git, synced 2025-05-13 13:50:40 +01:00
Revert of flag simplification.
In order to unify WebRTC recipes with Chromium recipes, this CL reverts the old CL https://webrtc-review.googlesource.com/c/src/+/171681, which had already been partially reverted in https://webrtc-review.googlesource.com/c/src/+/171809. In upcoming CLs, the added dump_json_test_results flag will be removed so that isolated-script-test-output is used instead.

Bug: webrtc:13556
Change-Id: I3144498b9a5cbaa56c23b3b8adbac2229ad63c37
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/245602
Reviewed-by: Andrey Logvin <landrey@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Commit-Queue: Jeremy Leconte <jleconte@google.com>
Cr-Commit-Position: refs/heads/main@{#35666}
parent ba38934771
commit 994bf454ec

4 changed files with 173 additions and 145 deletions
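As a rough, hypothetical sketch of the recipe-side contract these flags serve (not part of this CL; the test binary name and paths are placeholders, only the flag names come from the diffs below): an isolated-script step passes the two flags and expects the test harness to write the corresponding result files.

import subprocess
import sys


def run_isolated_test(test_cmd, output_json, perf_output):
  # Forward the two isolated-script flags that this CL wires back up.
  cmd = list(test_cmd) + [
      '--isolated-script-test-output=%s' % output_json,
      '--isolated-script-test-perf-output=%s' % perf_output,
  ]
  return subprocess.call(cmd)


if __name__ == '__main__':
  sys.exit(run_isolated_test(['./some_webrtc_test'], 'out/output.json',
                             'out/perf_results.pb'))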
File 1 of 4:

@@ -15,6 +15,7 @@ output files will be performed.
 import argparse
 import collections
+import json
 import logging
 import os
 import re
@@ -62,6 +63,10 @@ def _ParseArgs():
       '--isolated-script-test-perf-output',
       default=None,
       help='Path to store perf results in histogram proto format.')
+  parser.add_argument(
+      '--isolated-script-test-output',
+      default=None,
+      help='Path to output an empty JSON file which Chromium infra requires.')
   parser.add_argument('--extra-test-args',
                       default=[],
                       action='append',
@@ -262,7 +267,6 @@ def _ConfigurePythonPath(args):


 def main():
-  # pylint: disable=W0101
   logging.basicConfig(level=logging.INFO)
   logging.info('Invoked with %s', str(sys.argv))

@@ -354,6 +358,10 @@ def main():
     with open(args.isolated_script_test_perf_output, 'wb') as f:
       f.write(histograms.AsProto().SerializeToString())

+  if args.isolated_script_test_output:
+    with open(args.isolated_script_test_output, 'w') as f:
+      json.dump({"version": 3}, f)
+
   return test_process.wait()

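A minimal standalone sketch of the pattern the hunks above add (illustrative only, not the actual test script): accept --isolated-script-test-output and write the empty JSON file Chromium infra requires.

import argparse
import json


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output', default=None)
  args, _ = parser.parse_known_args()
  if args.isolated_script_test_output:
    # Chromium infra only checks for this minimal, versioned JSON payload.
    with open(args.isolated_script_test_output, 'w') as f:
      json.dump({'version': 3}, f)
  return 0


if __name__ == '__main__':
  main()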
File 2 of 4:

@@ -59,6 +59,11 @@ ABSL_FLAG(

 #else

+ABSL_FLAG(std::string,
+          isolated_script_test_output,
+          "",
+          "Path to output an empty JSON file which Chromium infra requires.");
+
 ABSL_FLAG(
     std::string,
     isolated_script_test_perf_output,
@@ -232,6 +237,14 @@ class TestMainImpl : public TestMain {
     if (metrics_to_plot) {
      webrtc::test::PrintPlottableResults(*metrics_to_plot);
     }
+
+    std::string result_filename =
+        absl::GetFlag(FLAGS_isolated_script_test_output);
+    if (!result_filename.empty()) {
+      std::ofstream result_file(result_filename);
+      result_file << "{\"version\": 3}";
+      result_file.close();
+    }
 #endif

     if (capture_events) {
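The C++ change above mirrors the Python one: when --isolated_script_test_output is set, the test binary writes {"version": 3} to that path. A small, hypothetical Python check of what such a file should contain (the path is a placeholder):

import json


def is_empty_isolated_output(path):
  # True if the file matches the marker written by the test binary above.
  with open(path) as f:
    return json.load(f) == {'version': 3}

# Example: is_empty_isolated_output('out/output.json') -> True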
File 3 of 4:

@@ -15,32 +15,36 @@ import sys


 def main():
   parser = argparse.ArgumentParser()
+  parser.add_argument('--isolated-script-test-output')
   parser.add_argument('--isolated-script-test-perf-output')
   args, unrecognized_args = parser.parse_known_args()

   test_command = _ForcePythonInterpreter(unrecognized_args)
+  if args.isolated_script_test_output:
+    test_command += [
+        '--isolated_script_test_output', args.isolated_script_test_output
+    ]
   if args.isolated_script_test_perf_output:
     test_command += [
         '--isolated_script_test_perf_output=' +
         args.isolated_script_test_perf_output
     ]
   logging.info('Running %r', test_command)

   return subprocess.call(test_command)


 def _ForcePythonInterpreter(cmd):
   """Returns the fixed command line to call the right python executable."""
   out = cmd[:]
   if out[0] == 'python':
     out[0] = sys.executable
   elif out[0].endswith('.py'):
     out.insert(0, sys.executable)
   return out


 if __name__ == '__main__':
-  # pylint: disable=W0101
   logging.basicConfig(level=logging.INFO)
   sys.exit(main())
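A hedged sketch of the forwarding behaviour the compatibility wrapper has after this change (the command and paths are made up): both flags are re-emitted for the wrapped command, the output flag as two arguments and the perf flag in --flag=value form, matching the diff above.

import sys


def build_test_command(wrapped_cmd, output_path, perf_output_path):
  cmd = list(wrapped_cmd)
  if output_path:
    cmd += ['--isolated_script_test_output', output_path]
  if perf_output_path:
    cmd += ['--isolated_script_test_perf_output=' + perf_output_path]
  return cmd


print(build_test_command([sys.executable, 'some_test.py'],
                         'out/output.json', 'out/perf_results.pb'))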
File 4 of 4:

@@ -15,7 +15,8 @@ gtest-parallel, renaming options and translating environment variables into
 flags. Developers should execute gtest-parallel directly.

 In particular, this translates the GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS
-environment variables to the --shard_index and --shard_count flags
+environment variables to the --shard_index and --shard_count flags, renames
+the --isolated-script-test-output flag to --dump_json_test_results,
 and interprets e.g. --workers=2x as 2 workers per core.

 Flags before '--' will be attempted to be understood as arguments to
@@ -44,6 +45,7 @@ For example:
   --another_flag \
   --output_dir=SOME_OUTPUT_DIR \
   --store-test-artifacts
+  --isolated-script-test-output=SOME_DIR \
   --isolated-script-test-perf-output=SOME_OTHER_DIR \
   -- \
   --foo=bar \
@@ -81,169 +83,170 @@ Args = collections.namedtuple(


 def _CatFiles(file_list, output_file):
   with open(output_file, 'w') as output_file:
     for filename in file_list:
       with open(filename) as input_file:
         output_file.write(input_file.read())
       os.remove(filename)


 def _ParseWorkersOption(workers):
   """Interpret Nx syntax as N * cpu_count. Int value is left as is."""
   base = float(workers.rstrip('x'))
   if workers.endswith('x'):
     result = int(base * multiprocessing.cpu_count())
   else:
     result = int(base)
   return max(result, 1)  # Sanitize when using e.g. '0.5x'.


 class ReconstructibleArgumentGroup(object):
   """An argument group that can be converted back into a command line.

   This acts like ArgumentParser.add_argument_group, but names of arguments added
   to it are also kept in a list, so that parsed options from
   ArgumentParser.parse_args can be reconstructed back into a command line (list
   of args) based on the list of wanted keys."""

   def __init__(self, parser, *args, **kwargs):
     self._group = parser.add_argument_group(*args, **kwargs)
     self._keys = []

   def AddArgument(self, *args, **kwargs):
     arg = self._group.add_argument(*args, **kwargs)
     self._keys.append(arg.dest)

   def RemakeCommandLine(self, options):
     result = []
     for key in self._keys:
       value = getattr(options, key)
       if value is True:
         result.append('--%s' % key)
       elif value is not None:
         result.append('--%s=%s' % (key, value))
     return result


 def ParseArgs(argv=None):
   parser = argparse.ArgumentParser(argv)

   gtest_group = ReconstructibleArgumentGroup(parser,
                                              'Arguments to gtest-parallel')
   # These options will be passed unchanged to gtest-parallel.
   gtest_group.AddArgument('-d', '--output_dir')
   gtest_group.AddArgument('-r', '--repeat')
+  # TODO(webrtc:13556): use isolated-script-test-output argument instead
+  # of dump_json_test_results as it was done prior to chromium:1051927.
   gtest_group.AddArgument('--dump_json_test_results')
   gtest_group.AddArgument('--retry_failed')
   gtest_group.AddArgument('--gtest_color')
   gtest_group.AddArgument('--gtest_filter')
   gtest_group.AddArgument('--gtest_also_run_disabled_tests',
                           action='store_true',
                           default=None)
   gtest_group.AddArgument('--timeout')

   # Syntax 'Nx' will be interpreted as N * number of cpu cores.
   gtest_group.AddArgument('-w', '--workers', type=_ParseWorkersOption)

   # Needed when the test wants to store test artifacts, because it doesn't
   # know what will be the swarming output dir.
   parser.add_argument('--store-test-artifacts', action='store_true')

   # No-sandbox is a Chromium-specific flag, ignore it.
   # TODO(oprypin): Remove (bugs.webrtc.org/8115)
   parser.add_argument('--no-sandbox',
                       action='store_true',
                       help=argparse.SUPPRESS)

   parser.add_argument('executable')
   parser.add_argument('executable_args', nargs='*')

   options, unrecognized_args = parser.parse_known_args(argv)

-  args_to_pass = []
-  for arg in unrecognized_args:
-    if arg.startswith('--isolated-script-test-perf-output'):
-      arg_split = arg.split('=')
-      assert len(
-          arg_split) == 2, 'You must use the = syntax for this flag.'
-      args_to_pass.append('--isolated_script_test_perf_output=' +
-                          arg_split[1])
-    else:
-      args_to_pass.append(arg)
+  webrtc_flags_to_change = {
+      '--isolated-script-test-perf-output':
+      '--isolated_script_test_perf_output',
+      '--isolated-script-test-output': '--isolated_script_test_output',
+  }
+  args_to_pass = []
+  for arg in unrecognized_args:
+    if any(arg.startswith(k) for k in webrtc_flags_to_change.keys()):
+      arg_split = arg.split('=')
+      args_to_pass.append(webrtc_flags_to_change[arg_split[0]] + '=' +
+                          arg_split[1])
+    else:
+      args_to_pass.append(arg)

   executable_args = options.executable_args + args_to_pass

   if options.store_test_artifacts:
     assert options.output_dir, (
         '--output_dir must be specified for storing test artifacts.')
     test_artifacts_dir = os.path.join(options.output_dir, 'test_artifacts')

-    executable_args.insert(0,
-                           '--test_artifacts_dir=%s' % test_artifacts_dir)
+    executable_args.insert(0, '--test_artifacts_dir=%s' % test_artifacts_dir)
   else:
     test_artifacts_dir = None

   gtest_parallel_args = gtest_group.RemakeCommandLine(options)

   # GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS must be removed from the
   # environment. Otherwise it will be picked up by the binary, causing a bug
   # where only tests in the first shard are executed.
   test_env = os.environ.copy()
   gtest_shard_index = test_env.pop('GTEST_SHARD_INDEX', '0')
   gtest_total_shards = test_env.pop('GTEST_TOTAL_SHARDS', '1')

   gtest_parallel_args.insert(0, '--shard_index=%s' % gtest_shard_index)
   gtest_parallel_args.insert(1, '--shard_count=%s' % gtest_total_shards)

   gtest_parallel_args.append(options.executable)
   if executable_args:
     gtest_parallel_args += ['--'] + executable_args

   return Args(gtest_parallel_args, test_env, options.output_dir,
               test_artifacts_dir)


 def main():
   webrtc_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
   gtest_parallel_path = os.path.join(webrtc_root, 'third_party',
                                      'gtest-parallel', 'gtest-parallel')

   gtest_parallel_args, test_env, output_dir, test_artifacts_dir = ParseArgs()

   command = [
       sys.executable,
       gtest_parallel_path,
   ] + gtest_parallel_args

   if output_dir and not os.path.isdir(output_dir):
     os.makedirs(output_dir)
   if test_artifacts_dir and not os.path.isdir(test_artifacts_dir):
     os.makedirs(test_artifacts_dir)

   print 'gtest-parallel-wrapper: Executing command %s' % ' '.join(command)
   sys.stdout.flush()

   exit_code = subprocess.call(command, env=test_env, cwd=os.getcwd())

   if output_dir:
     for test_status in 'passed', 'failed', 'interrupted':
-      logs_dir = os.path.join(output_dir, 'gtest-parallel-logs',
-                              test_status)
+      logs_dir = os.path.join(output_dir, 'gtest-parallel-logs', test_status)
       if not os.path.isdir(logs_dir):
         continue
-      logs = [
-          os.path.join(logs_dir, log) for log in os.listdir(logs_dir)
-      ]
+      logs = [os.path.join(logs_dir, log) for log in os.listdir(logs_dir)]
       log_file = os.path.join(output_dir, '%s-tests.log' % test_status)
       _CatFiles(logs, log_file)
       os.rmdir(logs_dir)

   if test_artifacts_dir:
     shutil.make_archive(test_artifacts_dir, 'zip', test_artifacts_dir)
     shutil.rmtree(test_artifacts_dir)

   return exit_code


 if __name__ == '__main__':
   sys.exit(main())
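For reference, the flag-renaming loop introduced above can be exercised in isolation (illustrative snippet, not part of the wrapper): dashes in the two isolated-script flags are rewritten to underscores before the arguments reach the test binary.

webrtc_flags_to_change = {
    '--isolated-script-test-perf-output': '--isolated_script_test_perf_output',
    '--isolated-script-test-output': '--isolated_script_test_output',
}


def rename_flags(unrecognized_args):
  args_to_pass = []
  for arg in unrecognized_args:
    if any(arg.startswith(k) for k in webrtc_flags_to_change):
      key, value = arg.split('=', 1)
      args_to_pass.append(webrtc_flags_to_change[key] + '=' + value)
    else:
      args_to_pass.append(arg)
  return args_to_pass


print(rename_flags(['--isolated-script-test-output=/tmp/output.json',
                    '--gtest_repeat=2']))
# ['--isolated_script_test_output=/tmp/output.json', '--gtest_repeat=2']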