Revert of flag simplification.

In order to unify WebRTC recipes with Chromium recipes, this CL reverts the earlier CL https://webrtc-review.googlesource.com/c/src/+/171681.
That CL was already partially reverted in https://webrtc-review.googlesource.com/c/src/+/171809.
In upcoming CLs, the dump_json_test_results flag will be removed so that isolated-script-test-output can be used instead.

Bug: webrtc:13556
Change-Id: I3144498b9a5cbaa56c23b3b8adbac2229ad63c37
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/245602
Reviewed-by: Andrey Logvin <landrey@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Commit-Queue: Jeremy Leconte <jleconte@google.com>
Cr-Commit-Position: refs/heads/main@{#35666}
Jeremy Leconte 2022-01-12 10:51:16 +01:00 committed by WebRTC LUCI CQ
parent ba38934771
commit 994bf454ec
4 changed files with 173 additions and 145 deletions

File 1 of 4 (Python test runner script):

@@ -15,6 +15,7 @@ output files will be performed.

 import argparse
 import collections
+import json
 import logging
 import os
 import re
@@ -62,6 +63,10 @@ def _ParseArgs():
       '--isolated-script-test-perf-output',
       default=None,
       help='Path to store perf results in histogram proto format.')
+  parser.add_argument(
+      '--isolated-script-test-output',
+      default=None,
+      help='Path to output an empty JSON file which Chromium infra requires.')
   parser.add_argument('--extra-test-args',
                       default=[],
                       action='append',
@@ -262,7 +267,6 @@ def _ConfigurePythonPath(args):

 def main():
-  # pylint: disable=W0101
   logging.basicConfig(level=logging.INFO)
   logging.info('Invoked with %s', str(sys.argv))
@@ -354,6 +358,10 @@ def main():
     with open(args.isolated_script_test_perf_output, 'wb') as f:
       f.write(histograms.AsProto().SerializeToString())
+
+  if args.isolated_script_test_output:
+    with open(args.isolated_script_test_output, 'w') as f:
+      json.dump({"version": 3}, f)
   return test_process.wait()
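
The contract behind the re-added flag is small: per the flag's help text, Chromium infra only requires that the file named by --isolated-script-test-output exists and parses as version-3 JSON test results, so an empty result set is enough. A minimal standalone sketch of that contract (script structure and names are illustrative, not from this CL):

import argparse
import json


def write_empty_results(path):
  # Chromium infra only requires that the file exists and parses as
  # version-3 JSON test results; no actual results need to be present.
  with open(path, 'w') as f:
    json.dump({"version": 3}, f)


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output', default=None)
  args, _ = parser.parse_known_args()
  if args.isolated_script_test_output:
    write_empty_results(args.isolated_script_test_output)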

File 2 of 4 (C++ test main):

@@ -59,6 +59,11 @@ ABSL_FLAG(
 #else
+ABSL_FLAG(std::string,
+          isolated_script_test_output,
+          "",
+          "Path to output an empty JSON file which Chromium infra requires.");
+
 ABSL_FLAG(
     std::string,
     isolated_script_test_perf_output,
@@ -232,6 +237,14 @@ class TestMainImpl : public TestMain {
     if (metrics_to_plot) {
       webrtc::test::PrintPlottableResults(*metrics_to_plot);
     }
+
+    std::string result_filename =
+        absl::GetFlag(FLAGS_isolated_script_test_output);
+    if (!result_filename.empty()) {
+      std::ofstream result_file(result_filename);
+      result_file << "{\"version\": 3}";
+      result_file.close();
+    }
 #endif
     if (capture_events) {
File 3 of 4 (flag-translation wrapper script):

@@ -15,32 +15,36 @@ import sys


 def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--isolated-script-test-perf-output')
-    args, unrecognized_args = parser.parse_known_args()
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--isolated-script-test-output')
+  parser.add_argument('--isolated-script-test-perf-output')
+  args, unrecognized_args = parser.parse_known_args()

-    test_command = _ForcePythonInterpreter(unrecognized_args)
-    if args.isolated_script_test_perf_output:
-        test_command += [
-            '--isolated_script_test_perf_output=' +
-            args.isolated_script_test_perf_output
-        ]
-    logging.info('Running %r', test_command)
+  test_command = _ForcePythonInterpreter(unrecognized_args)
+  if args.isolated_script_test_output:
+    test_command += [
+        '--isolated_script_test_output', args.isolated_script_test_output
+    ]
+  if args.isolated_script_test_perf_output:
+    test_command += [
+        '--isolated_script_test_perf_output=' +
+        args.isolated_script_test_perf_output
+    ]
+  logging.info('Running %r', test_command)

-    return subprocess.call(test_command)
+  return subprocess.call(test_command)


 def _ForcePythonInterpreter(cmd):
-    """Returns the fixed command line to call the right python executable."""
-    out = cmd[:]
-    if out[0] == 'python':
-        out[0] = sys.executable
-    elif out[0].endswith('.py'):
-        out.insert(0, sys.executable)
-    return out
+  """Returns the fixed command line to call the right python executable."""
+  out = cmd[:]
+  if out[0] == 'python':
+    out[0] = sys.executable
+  elif out[0].endswith('.py'):
+    out.insert(0, sys.executable)
+  return out


 if __name__ == '__main__':
-    # pylint: disable=W0101
-    logging.basicConfig(level=logging.INFO)
-    sys.exit(main())
+  logging.basicConfig(level=logging.INFO)
+  sys.exit(main())
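
This wrapper exists because Chromium recipes pass dash-separated flags while the wrapped test commands expect underscore-separated ones: it splits argv with parse_known_args and re-emits only the recognized flags in underscore form. A runnable sketch of the same translation (the sample argv is made up for illustration):

import argparse


def translate(argv):
  # Recognized dashed flags are re-emitted in underscore form; everything
  # else is passed through to the wrapped command untouched.
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output')
  parser.add_argument('--isolated-script-test-perf-output')
  args, passthrough = parser.parse_known_args(argv)
  cmd = list(passthrough)
  if args.isolated_script_test_output:
    cmd += ['--isolated_script_test_output', args.isolated_script_test_output]
  if args.isolated_script_test_perf_output:
    cmd += ['--isolated_script_test_perf_output=' +
            args.isolated_script_test_perf_output]
  return cmd


# Prints: ['./t.py', '--gtest_filter=Foo.*',
#          '--isolated_script_test_output', '/tmp/out.json']
print(translate(['./t.py', '--isolated-script-test-output=/tmp/out.json',
                 '--gtest_filter=Foo.*']))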

File 4 of 4 (gtest-parallel wrapper script):

@@ -15,7 +15,8 @@ gtest-parallel, renaming options and translating environment variables into
 flags. Developers should execute gtest-parallel directly.

 In particular, this translates the GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS
-environment variables to the --shard_index and --shard_count flags
+environment variables to the --shard_index and --shard_count flags, renames
+the --isolated-script-test-output flag to --dump_json_test_results,
 and interprets e.g. --workers=2x as 2 workers per core.

 Flags before '--' will be attempted to be understood as arguments to
@@ -44,6 +45,7 @@ For example:
   --another_flag \
   --output_dir=SOME_OUTPUT_DIR \
   --store-test-artifacts
+  --isolated-script-test-output=SOME_DIR \
   --isolated-script-test-perf-output=SOME_OTHER_DIR \
   -- \
   --foo=bar \
@@ -81,169 +83,170 @@ Args = collections.namedtuple(


 def _CatFiles(file_list, output_file):
-    with open(output_file, 'w') as output_file:
-        for filename in file_list:
-            with open(filename) as input_file:
-                output_file.write(input_file.read())
-            os.remove(filename)
+  with open(output_file, 'w') as output_file:
+    for filename in file_list:
+      with open(filename) as input_file:
+        output_file.write(input_file.read())
+      os.remove(filename)


 def _ParseWorkersOption(workers):
-    """Interpret Nx syntax as N * cpu_count. Int value is left as is."""
-    base = float(workers.rstrip('x'))
-    if workers.endswith('x'):
-        result = int(base * multiprocessing.cpu_count())
-    else:
-        result = int(base)
-    return max(result, 1)  # Sanitize when using e.g. '0.5x'.
+  """Interpret Nx syntax as N * cpu_count. Int value is left as is."""
+  base = float(workers.rstrip('x'))
+  if workers.endswith('x'):
+    result = int(base * multiprocessing.cpu_count())
+  else:
+    result = int(base)
+  return max(result, 1)  # Sanitize when using e.g. '0.5x'.


 class ReconstructibleArgumentGroup(object):
-    """An argument group that can be converted back into a command line.
+  """An argument group that can be converted back into a command line.

   This acts like ArgumentParser.add_argument_group, but names of arguments added
   to it are also kept in a list, so that parsed options from
   ArgumentParser.parse_args can be reconstructed back into a command line (list
   of args) based on the list of wanted keys."""

-    def __init__(self, parser, *args, **kwargs):
-        self._group = parser.add_argument_group(*args, **kwargs)
-        self._keys = []
+  def __init__(self, parser, *args, **kwargs):
+    self._group = parser.add_argument_group(*args, **kwargs)
+    self._keys = []

-    def AddArgument(self, *args, **kwargs):
-        arg = self._group.add_argument(*args, **kwargs)
-        self._keys.append(arg.dest)
+  def AddArgument(self, *args, **kwargs):
+    arg = self._group.add_argument(*args, **kwargs)
+    self._keys.append(arg.dest)

-    def RemakeCommandLine(self, options):
-        result = []
-        for key in self._keys:
-            value = getattr(options, key)
-            if value is True:
-                result.append('--%s' % key)
-            elif value is not None:
-                result.append('--%s=%s' % (key, value))
-        return result
+  def RemakeCommandLine(self, options):
+    result = []
+    for key in self._keys:
+      value = getattr(options, key)
+      if value is True:
+        result.append('--%s' % key)
+      elif value is not None:
+        result.append('--%s=%s' % (key, value))
+    return result


 def ParseArgs(argv=None):
-    parser = argparse.ArgumentParser(argv)
+  parser = argparse.ArgumentParser(argv)

-    gtest_group = ReconstructibleArgumentGroup(parser,
-                                               'Arguments to gtest-parallel')
-    # These options will be passed unchanged to gtest-parallel.
-    gtest_group.AddArgument('-d', '--output_dir')
-    gtest_group.AddArgument('-r', '--repeat')
-    gtest_group.AddArgument('--dump_json_test_results')
-    gtest_group.AddArgument('--retry_failed')
-    gtest_group.AddArgument('--gtest_color')
-    gtest_group.AddArgument('--gtest_filter')
-    gtest_group.AddArgument('--gtest_also_run_disabled_tests',
-                            action='store_true',
-                            default=None)
-    gtest_group.AddArgument('--timeout')
+  gtest_group = ReconstructibleArgumentGroup(parser,
+                                             'Arguments to gtest-parallel')
+  # These options will be passed unchanged to gtest-parallel.
+  gtest_group.AddArgument('-d', '--output_dir')
+  gtest_group.AddArgument('-r', '--repeat')
+  # TODO(webrtc:13556): use isolated-script-test-output argument instead
+  # of dump_json_test_results as it was done prior to chromium:1051927.
+  gtest_group.AddArgument('--dump_json_test_results')
+  gtest_group.AddArgument('--retry_failed')
+  gtest_group.AddArgument('--gtest_color')
+  gtest_group.AddArgument('--gtest_filter')
+  gtest_group.AddArgument('--gtest_also_run_disabled_tests',
+                          action='store_true',
+                          default=None)
+  gtest_group.AddArgument('--timeout')

-    # Syntax 'Nx' will be interpreted as N * number of cpu cores.
-    gtest_group.AddArgument('-w', '--workers', type=_ParseWorkersOption)
+  # Syntax 'Nx' will be interpreted as N * number of cpu cores.
+  gtest_group.AddArgument('-w', '--workers', type=_ParseWorkersOption)

-    # Needed when the test wants to store test artifacts, because it doesn't
-    # know what will be the swarming output dir.
-    parser.add_argument('--store-test-artifacts', action='store_true')
+  # Needed when the test wants to store test artifacts, because it doesn't
+  # know what will be the swarming output dir.
+  parser.add_argument('--store-test-artifacts', action='store_true')

-    # No-sandbox is a Chromium-specific flag, ignore it.
-    # TODO(oprypin): Remove (bugs.webrtc.org/8115)
-    parser.add_argument('--no-sandbox',
-                        action='store_true',
-                        help=argparse.SUPPRESS)
+  # No-sandbox is a Chromium-specific flag, ignore it.
+  # TODO(oprypin): Remove (bugs.webrtc.org/8115)
+  parser.add_argument('--no-sandbox',
+                      action='store_true',
+                      help=argparse.SUPPRESS)

-    parser.add_argument('executable')
-    parser.add_argument('executable_args', nargs='*')
+  parser.add_argument('executable')
+  parser.add_argument('executable_args', nargs='*')

-    options, unrecognized_args = parser.parse_known_args(argv)
+  options, unrecognized_args = parser.parse_known_args(argv)

-    args_to_pass = []
-    for arg in unrecognized_args:
-        if arg.startswith('--isolated-script-test-perf-output'):
-            arg_split = arg.split('=')
-            assert len(
-                arg_split) == 2, 'You must use the = syntax for this flag.'
-            args_to_pass.append('--isolated_script_test_perf_output=' +
-                                arg_split[1])
-        else:
-            args_to_pass.append(arg)
+  webrtc_flags_to_change = {
+      '--isolated-script-test-perf-output':
+      '--isolated_script_test_perf_output',
+      '--isolated-script-test-output': '--isolated_script_test_output',
+  }
+  args_to_pass = []
+  for arg in unrecognized_args:
+    if any(arg.startswith(k) for k in webrtc_flags_to_change.keys()):
+      arg_split = arg.split('=')
+      args_to_pass.append(webrtc_flags_to_change[arg_split[0]] + '=' +
+                          arg_split[1])
+    else:
+      args_to_pass.append(arg)

-    executable_args = options.executable_args + args_to_pass
+  executable_args = options.executable_args + args_to_pass

-    if options.store_test_artifacts:
-        assert options.output_dir, (
-            '--output_dir must be specified for storing test artifacts.')
-        test_artifacts_dir = os.path.join(options.output_dir, 'test_artifacts')
-        executable_args.insert(0,
-                               '--test_artifacts_dir=%s' % test_artifacts_dir)
-    else:
-        test_artifacts_dir = None
+  if options.store_test_artifacts:
+    assert options.output_dir, (
+        '--output_dir must be specified for storing test artifacts.')
+    test_artifacts_dir = os.path.join(options.output_dir, 'test_artifacts')
+    executable_args.insert(0, '--test_artifacts_dir=%s' % test_artifacts_dir)
+  else:
+    test_artifacts_dir = None

-    gtest_parallel_args = gtest_group.RemakeCommandLine(options)
+  gtest_parallel_args = gtest_group.RemakeCommandLine(options)

-    # GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS must be removed from the
-    # environment. Otherwise it will be picked up by the binary, causing a bug
-    # where only tests in the first shard are executed.
-    test_env = os.environ.copy()
-    gtest_shard_index = test_env.pop('GTEST_SHARD_INDEX', '0')
-    gtest_total_shards = test_env.pop('GTEST_TOTAL_SHARDS', '1')
+  # GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS must be removed from the
+  # environment. Otherwise it will be picked up by the binary, causing a bug
+  # where only tests in the first shard are executed.
+  test_env = os.environ.copy()
+  gtest_shard_index = test_env.pop('GTEST_SHARD_INDEX', '0')
+  gtest_total_shards = test_env.pop('GTEST_TOTAL_SHARDS', '1')

-    gtest_parallel_args.insert(0, '--shard_index=%s' % gtest_shard_index)
-    gtest_parallel_args.insert(1, '--shard_count=%s' % gtest_total_shards)
+  gtest_parallel_args.insert(0, '--shard_index=%s' % gtest_shard_index)
+  gtest_parallel_args.insert(1, '--shard_count=%s' % gtest_total_shards)

-    gtest_parallel_args.append(options.executable)
-    if executable_args:
-        gtest_parallel_args += ['--'] + executable_args
+  gtest_parallel_args.append(options.executable)
+  if executable_args:
+    gtest_parallel_args += ['--'] + executable_args

-    return Args(gtest_parallel_args, test_env, options.output_dir,
-                test_artifacts_dir)
+  return Args(gtest_parallel_args, test_env, options.output_dir,
+              test_artifacts_dir)


 def main():
-    webrtc_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-    gtest_parallel_path = os.path.join(webrtc_root, 'third_party',
-                                       'gtest-parallel', 'gtest-parallel')
+  webrtc_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+  gtest_parallel_path = os.path.join(webrtc_root, 'third_party',
+                                     'gtest-parallel', 'gtest-parallel')

-    gtest_parallel_args, test_env, output_dir, test_artifacts_dir = ParseArgs()
+  gtest_parallel_args, test_env, output_dir, test_artifacts_dir = ParseArgs()

-    command = [
-        sys.executable,
-        gtest_parallel_path,
-    ] + gtest_parallel_args
+  command = [
+      sys.executable,
+      gtest_parallel_path,
+  ] + gtest_parallel_args

-    if output_dir and not os.path.isdir(output_dir):
-        os.makedirs(output_dir)
-    if test_artifacts_dir and not os.path.isdir(test_artifacts_dir):
-        os.makedirs(test_artifacts_dir)
+  if output_dir and not os.path.isdir(output_dir):
+    os.makedirs(output_dir)
+  if test_artifacts_dir and not os.path.isdir(test_artifacts_dir):
+    os.makedirs(test_artifacts_dir)

-    print 'gtest-parallel-wrapper: Executing command %s' % ' '.join(command)
-    sys.stdout.flush()
+  print 'gtest-parallel-wrapper: Executing command %s' % ' '.join(command)
+  sys.stdout.flush()

-    exit_code = subprocess.call(command, env=test_env, cwd=os.getcwd())
+  exit_code = subprocess.call(command, env=test_env, cwd=os.getcwd())

-    if output_dir:
-        for test_status in 'passed', 'failed', 'interrupted':
-            logs_dir = os.path.join(output_dir, 'gtest-parallel-logs',
-                                    test_status)
-            if not os.path.isdir(logs_dir):
-                continue
-            logs = [
-                os.path.join(logs_dir, log) for log in os.listdir(logs_dir)
-            ]
-            log_file = os.path.join(output_dir, '%s-tests.log' % test_status)
-            _CatFiles(logs, log_file)
-            os.rmdir(logs_dir)
+  if output_dir:
+    for test_status in 'passed', 'failed', 'interrupted':
+      logs_dir = os.path.join(output_dir, 'gtest-parallel-logs', test_status)
+      if not os.path.isdir(logs_dir):
+        continue
+      logs = [os.path.join(logs_dir, log) for log in os.listdir(logs_dir)]
+      log_file = os.path.join(output_dir, '%s-tests.log' % test_status)
+      _CatFiles(logs, log_file)
+      os.rmdir(logs_dir)

-    if test_artifacts_dir:
-        shutil.make_archive(test_artifacts_dir, 'zip', test_artifacts_dir)
-        shutil.rmtree(test_artifacts_dir)
+  if test_artifacts_dir:
+    shutil.make_archive(test_artifacts_dir, 'zip', test_artifacts_dir)
+    shutil.rmtree(test_artifacts_dir)

-    return exit_code
+  return exit_code


 if __name__ == '__main__':
-    sys.exit(main())
+  sys.exit(main())
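
The key mechanism in this file is ReconstructibleArgumentGroup: flags destined for gtest-parallel are parsed normally, then serialized back into a command line so the wrapper forwards exactly what it consumed, with the renames and environment-variable translation applied in between. A simplified, self-contained round-trip sketch of that idea (not the file's code verbatim):

import argparse


class ReconstructibleGroup(object):
  """Tracks added arguments so parsed values can be re-serialized."""

  def __init__(self, parser):
    self._group = parser.add_argument_group('forwarded to gtest-parallel')
    self._keys = []

  def add(self, *args, **kwargs):
    self._keys.append(self._group.add_argument(*args, **kwargs).dest)

  def remake(self, options):
    result = []
    for key in self._keys:
      value = getattr(options, key)
      if value is True:
        result.append('--%s' % key)  # store_true flags carry no value
      elif value is not None:
        result.append('--%s=%s' % (key, value))
    return result


parser = argparse.ArgumentParser()
group = ReconstructibleGroup(parser)
group.add('--repeat')
group.add('--gtest_filter')
opts, _ = parser.parse_known_args(
    ['--repeat=3', '--gtest_filter=Foo.*', '--something_else'])
# Prints: ['--repeat=3', '--gtest_filter=Foo.*']
print(group.remake(opts))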