Greatly simplify flags for test binaries.

Since we're now calling the shots of what flags get passed in the
recipes, we can just pass the right ones right away and remove all
the flag renaming.

--isolated-script-test-output is no longer passed, so we can just
remove it. The recipe is currently passing
--isolated-script-test-perf-output but I will start passing the
underscore version shortly.

Bug: chromium:1051927
Change-Id: I571090e62f79ea17c793295df7f5abb21f45d207
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/171681
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Commit-Queue: Patrik Höglund <phoglund@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30878}
This commit is contained in:
Patrik Höglund 2020-03-25 08:58:51 +01:00 committed by Commit Bot
parent 0e5527529a
commit 1b20c41dcb
7 changed files with 7 additions and 110 deletions

View file

@ -57,10 +57,8 @@ def _ParseArgs():
parser.add_argument('--adb-path', help='Path to adb binary.', default='adb')
parser.add_argument('--num-retries', default='0',
help='Number of times to retry the test on Android.')
parser.add_argument('--isolated-script-test-perf-output', default=None,
parser.add_argument('--isolated_script_test_perf_output', default=None,
help='Path to store perf results in chartjson format.')
parser.add_argument('--isolated-script-test-output', default=None,
help='Path to output an empty JSON file which Chromium infra requires.')
parser.add_argument('--extra-test-args', default=[], action='append',
help='Extra args to path to the test binary.')
@ -301,10 +299,6 @@ def main():
with open(args.isolated_script_test_perf_output, 'w') as f:
json.dump({"format_version": "1.0", "charts": charts}, f)
if args.isolated_script_test_output:
with open(args.isolated_script_test_output, 'w') as f:
json.dump({"version": 3}, f)
return test_process.wait()

View file

@ -60,11 +60,6 @@ ABSL_FLAG(
#else
ABSL_FLAG(std::string,
isolated_script_test_output,
"",
"Path to output an empty JSON file which Chromium infra requires.");
ABSL_FLAG(
std::string,
isolated_script_test_perf_output,
@ -190,14 +185,6 @@ class TestMainImpl : public TestMain {
if (metrics_to_plot) {
webrtc::test::PrintPlottableResults(*metrics_to_plot);
}
std::string result_filename =
absl::GetFlag(FLAGS_isolated_script_test_output);
if (!result_filename.empty()) {
std::ofstream result_file(result_filename);
result_file << "{\"version\": 3}";
result_file.close();
}
#endif
if (capture_events) {

View file

@ -1,48 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import argparse
import logging
import subprocess
import sys
def main():
  """Translates dashed isolated-script flags to their underscore spellings.

  Parses the two dashed flags Chromium infra passes, re-emits them with
  underscores for the test binary, and forwards everything else untouched.

  Returns:
    The exit code of the wrapped test command.
  """
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('--isolated-script-test-output')
  arg_parser.add_argument('--isolated-script-test-perf-output')
  known, passthrough = arg_parser.parse_known_args()

  command = _ForcePythonInterpreter(passthrough)
  # Re-append each recognized flag in its underscore form, preserving the
  # original order: test-output first, then perf-output.
  renamed_flags = (
      ('--isolated_script_test_output', known.isolated_script_test_output),
      ('--isolated_script_test_perf_output',
       known.isolated_script_test_perf_output),
  )
  for flag_name, flag_value in renamed_flags:
    if flag_value:
      command += [flag_name, flag_value]

  logging.info('Running %r', command)
  return subprocess.call(command)
def _ForcePythonInterpreter(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
if __name__ == '__main__':
  # pylint: disable=W0101
  # NOTE(review): W0101 is "unreachable code" — presumably silences a false
  # positive from the lint configuration; confirm it is still needed.
  # Enable INFO-level logging so the 'Running %r' command line is visible.
  logging.basicConfig(level=logging.INFO)
  sys.exit(main())

View file

@ -15,9 +15,8 @@ gtest-parallel, renaming options and translating environment variables into
flags. Developers should execute gtest-parallel directly.
In particular, this translates the GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS
environment variables to the --shard_index and --shard_count flags, renames
the --isolated-script-test-output flag to --dump_json_test_results,
and interprets e.g. --workers=2x as 2 workers per core.
environment variables to the --shard_index and --shard_count flags, and
interprets e.g. --workers=2x as 2 workers per core.
Flags before '--' will be attempted to be understood as arguments to
gtest-parallel. If gtest-parallel doesn't recognize the flag or the flag is
@ -38,8 +37,6 @@ For example:
--another_flag \
--output_dir=SOME_OUTPUT_DIR \
--store-test-artifacts
--isolated-script-test-output=SOME_DIR \
--isolated-script-test-perf-output=SOME_OTHER_DIR \
-- \
--foo=bar \
--baz
@ -50,13 +47,11 @@ Will be converted into:
--shard_index 0 \
--shard_count 1 \
--output_dir=SOME_OUTPUT_DIR \
--dump_json_test_results=SOME_DIR \
some_test \
-- \
--test_artifacts_dir=SOME_OUTPUT_DIR/test_artifacts \
--some_flag=some_value \
--another_flag \
--isolated-script-test-perf-output=SOME_OTHER_DIR \
--foo=bar \
--baz
@ -137,12 +132,6 @@ def ParseArgs(argv=None):
# Syntax 'Nx' will be interpreted as N * number of cpu cores.
gtest_group.AddArgument('-w', '--workers', type=_ParseWorkersOption)
# --isolated-script-test-output is used to upload results to the flakiness
# dashboard. This translation is made because gtest-parallel expects the flag
# to be called --dump_json_test_results instead.
gtest_group.AddArgument('--isolated-script-test-output',
dest='dump_json_test_results')
# Needed when the test wants to store test artifacts, because it doesn't know
# what will be the swarming output dir.
parser.add_argument('--store-test-artifacts', action='store_true')
@ -157,20 +146,8 @@ def ParseArgs(argv=None):
options, unrecognized_args = parser.parse_known_args(argv)
webrtc_flags_to_change = {
'--isolated-script-test-perf-output': '--isolated_script_test_perf_output',
'--isolated-script-test-output': '--isolated_script_test_output',
}
args_to_pass = []
for arg in unrecognized_args:
if any(arg.startswith(k) for k in webrtc_flags_to_change.keys()):
arg_split = arg.split('=')
args_to_pass.append(
webrtc_flags_to_change[arg_split[0]] + '=' + arg_split[1])
else:
args_to_pass.append(arg)
executable_args = options.executable_args + args_to_pass
# Just pass on flags we don't recognize to the test binary.
executable_args = options.executable_args + unrecognized_args
if options.store_test_artifacts:
assert options.output_dir, (

View file

@ -110,12 +110,6 @@ class GtestParallelWrapperTest(unittest.TestCase):
self.assertEqual(result.output_dir, '/tmp/foo')
self.assertEqual(result.test_artifacts_dir, None)
def testJsonTestResults(self):
result = gtest_parallel_wrapper.ParseArgs(
['--isolated-script-test-output', '/tmp/foo', 'exec'])
expected = self._Expected(['--dump_json_test_results=/tmp/foo', 'exec'])
self.assertEqual(result.gtest_parallel_args, expected)
def testShortArg(self):
result = gtest_parallel_wrapper.ParseArgs(['-d', '/tmp/foo', 'exec'])
expected = self._Expected(['--output_dir=/tmp/foo', 'exec'])
@ -139,13 +133,12 @@ class GtestParallelWrapperTest(unittest.TestCase):
result = gtest_parallel_wrapper.ParseArgs([
'some_test', '--some_flag=some_value', '--another_flag',
'--output_dir=' + output_dir, '--store-test-artifacts',
'--isolated-script-test-output=SOME_DIR',
'--isolated-script-test-perf-output=SOME_OTHER_DIR', '--foo=bar',
'--isolated_script_test_perf_output=SOME_OTHER_DIR', '--foo=bar',
'--baz'
])
expected_artifacts_dir = os.path.join(output_dir, 'test_artifacts')
expected = self._Expected([
'--output_dir=' + output_dir, '--dump_json_test_results=SOME_DIR',
'--output_dir=' + output_dir,
'some_test', '--', '--test_artifacts_dir=' + expected_artifacts_dir,
'--some_flag=some_value', '--another_flag',
'--isolated_script_test_perf_output=SOME_OTHER_DIR', '--foo=bar',

View file

@ -855,10 +855,6 @@ class MetaBuildWrapper(object):
'--logcat-output-file', '${ISOLATED_OUTDIR}/logcats',
'--store-tombstones']
else:
if test_type == 'raw':
cmdline.append('../../tools_webrtc/flags_compatibility.py')
extra_files.append('../../tools_webrtc/flags_compatibility.py')
if isolate_map[target].get('use_webcam', False):
cmdline.append('../../tools_webrtc/ensure_webcam_is_running.py')
extra_files.append('../../tools_webrtc/ensure_webcam_is_running.py')

View file

@ -453,11 +453,9 @@ class UnitTest(unittest.TestCase):
self.assertEqual(files, [
'../../.vpython',
'../../testing/test_env.py',
'../../tools_webrtc/flags_compatibility.py',
'base_unittests',
])
self.assertEqual(command, [
'../../tools_webrtc/flags_compatibility.py',
'../../testing/test_env.py',
'./base_unittests',
'--asan=0',