Add copy of src/ios/build/bots/scripts to unbreak iOS Simulator bots.

BUG=webrtc:6934
TBR=ehmaldonado@webrtc.org

Review-Url: https://codereview.webrtc.org/2595173003 .
Cr-Commit-Position: refs/heads/master@{#15756}
This commit is contained in:
Henrik Kjellander 2016-12-22 12:01:17 +01:00
parent 49d2f26859
commit 0779e8fa45
9 changed files with 2068 additions and 0 deletions

View file

@ -437,6 +437,7 @@ def _CommonChecks(input_api, output_api):
black_list=(r'^base[\\\/].*\.py$', black_list=(r'^base[\\\/].*\.py$',
r'^build[\\\/].*\.py$', r'^build[\\\/].*\.py$',
r'^buildtools[\\\/].*\.py$', r'^buildtools[\\\/].*\.py$',
r'^ios[\\\/].*\.py$',
r'^out.*[\\\/].*\.py$', r'^out.*[\\\/].*\.py$',
r'^testing[\\\/].*\.py$', r'^testing[\\\/].*\.py$',
r'^third_party[\\\/].*\.py$', r'^third_party[\\\/].*\.py$',

2
ios/README Normal file
View file

@ -0,0 +1,2 @@
This directory is only temporary, it will be removed
when https://bugs.chromium.org/p/webrtc/issues/detail?id=6934 has made progress.

View file

@ -0,0 +1,2 @@
set noparent
smut@google.com

View file

@ -0,0 +1,195 @@
#!/usr/bin/python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds Xcode installations, optionally switching to a desired version.
Usage:
./find_xcode.py -j /tmp/out.json -v 6.0.1
Finds Xcode 6.0.1 and switches to it. Writes a summary to /tmp/out.json
that includes the Xcode installations that were found, the Xcode version
that was active before running this script, and the Xcode version that
is active after running this script.
e.g. {
"installations": {
"/Applications/Xcode5.app": "5.1.1 (5B1008)",
"/Applications/Xcode6.app": "6.0 (6A313)",
"/Applications/Xcode6.0.1.app": "6.0.1 (6A317)",
"/Applications/Xcode6.1.app": "6.1 (6A1046a)",
},
"matches": {
"/Applications/Xcode6.0.1.app": "6.0.1 (6A317)",
},
"previous version": {
    "path": "/Applications/Xcode5.app",
"version": "5.1.1",
"build": "(5B1008)",
},
"current version": {
"path": "/Applications/Xcode6.0.1.app",
"version": "6.0.1",
"build": "6A317",
},
"found": true,
}
"""
import argparse
import json
import os
import subprocess
import sys
def get_xcodebuild_path(xcode_app):
  """Returns the path to xcodebuild inside the given Xcode app bundle.

  Args:
    xcode_app: The path to an installed Xcode.app. e.g. /Applications/Xcode.app.

  Returns:
    The absolute path to the xcodebuild binary under the given Xcode app.
  """
  # xcodebuild lives at a fixed location inside every Xcode app bundle.
  return os.path.join(
      xcode_app, 'Contents', 'Developer', 'usr', 'bin', 'xcodebuild')
def get_xcode_version(xcodebuild):
  """Returns the Xcode version and build version reported by xcodebuild.

  Args:
    xcodebuild: The absolute path to the xcodebuild binary.

  Returns:
    A tuple of (version string, build version string).
    e.g. ("6.0.1", "6A317")
  """
  # `xcodebuild -version` prints two lines, e.g.:
  #   Xcode 6.0.1
  #   Build version 6A317
  # The interesting value is the last space-separated token of each line.
  output = subprocess.check_output([xcodebuild, '-version']).splitlines()
  last_token = lambda text: text.split(' ')[-1]
  return last_token(output[0]), last_token(output[1])
def get_current_xcode_info():
  """Returns the currently selected Xcode's path, version, and build number.

  Returns:
    A dict with 'path', 'version', and 'build' keys.
      'path': The absolute path to the Xcode installation.
      'version': The Xcode version.
      'build': The Xcode build version.
  """
  # A bare 'xcodebuild' resolves to whichever Xcode xcode-select points at.
  version, build = get_xcode_version('xcodebuild')
  path = subprocess.check_output(['xcode-select', '--print-path']).rstrip()
  return {
      'path': path,
      'version': version,
      'build': build,
  }
def find_xcode(target_version=None):
  """Finds all Xcode installations, optionally switching to a given version.

  Args:
    target_version: The version of Xcode to switch to, or None if the
      Xcode version should not be switched.

  Returns:
    A summary dict as described in the module docstring.
  """
  summary = {
      'installations': {
      },
      'current version': {
      },
  }
  if target_version:
    summary['found'] = False
    summary['matches'] = {}
    summary['previous version'] = get_current_xcode_info()
    # If the requested version is already selected, no switch is needed,
    # but installations are still enumerated below.
    if summary['previous version']['version'] == target_version:
      summary['found'] = True
  applications = os.path.join('/', 'Applications')
  for app in os.listdir(applications):
    # Only app bundles named "Xcode*" are considered candidate installations.
    if not app.startswith('Xcode'):
      continue
    installation_path = os.path.join(applications, app)
    xcodebuild = get_xcodebuild_path(installation_path)
    if not os.path.exists(xcodebuild):
      continue
    version, build_version = get_xcode_version(xcodebuild)
    pretty_version = "%s (%s)" % (
        version,
        build_version,
    )
    summary['installations'][installation_path] = pretty_version
    if target_version and version == target_version:
      summary['matches'][installation_path] = pretty_version
      # Switch only to the first match; later matches are just recorded.
      if not summary['found']:
        subprocess.check_call([
            'sudo',
            'xcode-select',
            '-switch',
            installation_path,
        ])
        summary['found'] = True
  summary['current version'] = get_current_xcode_info()
  if target_version and not summary['found']:
    # Flush buffers to ensure correct output ordering for buildbot.
    sys.stdout.flush()
    sys.stderr.write('Target Xcode version not found: %s\n' % target_version)
    sys.stderr.flush()
  return summary
def main(args):
  """Runs the Xcode search and optionally writes a JSON summary.

  Args:
    args: Parsed argparse namespace with 'version' and 'json_file'.

  Returns:
    0 on success; 1 if a requested Xcode version could not be found.
  """
  summary = find_xcode(args.version)
  if args.json_file:
    with open(args.json_file, 'w') as json_file:
      json.dump(summary, json_file)
  return 1 if (args.version and not summary['found']) else 0
if __name__ == '__main__':
  # Command-line entry point; see the module docstring for example usage.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-j',
      '--json-file',
      help='Location to write a JSON summary.',
      metavar='file',
  )
  parser.add_argument(
      '-v',
      '--version',
      help='Xcode version to find and switch to.',
      metavar='ver',
  )
  sys.exit(main(parser.parse_args()))

View file

@ -0,0 +1,473 @@
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import re
# These labels should match the ones output by gtest's JSON.
# They are mapped from internal parser statuses by
# GTestLogParser.TEST_STATUS_MAP and reported via TriesForTest().
TEST_UNKNOWN_LABEL = 'UNKNOWN'
TEST_SUCCESS_LABEL = 'SUCCESS'
TEST_FAILURE_LABEL = 'FAILURE'
TEST_TIMEOUT_LABEL = 'TIMEOUT'
TEST_WARNING_LABEL = 'WARNING'
class GTestResult(object):
  """A result of gtest.

  Properties:
    command: The command argv.
    crashed: Whether or not the test crashed.
    crashed_test: The name of the test during which execution crashed, or
      None if a particular test didn't crash.
    failed_tests: A dict mapping the names of failed tests to a list of
      lines of output from those tests.
    flaked_tests: A dict mapping the names of failed flaky tests to a list
      of lines of output from those tests.
    passed_tests: A list of passed tests.
    perf_links: A dict mapping the names of perf data points collected
      to links to view those graphs.
    return_code: The return code of the command.
    success: Whether or not this run of the command was considered a
      successful GTest execution.
  """

  @property
  def crashed(self):
    return self._crashed

  @property
  def crashed_test(self):
    return self._crashed_test

  @property
  def command(self):
    return self._command

  @property
  def failed_tests(self):
    # Once finalized, hand out deep copies so callers cannot mutate the
    # recorded result; before finalization, callers fill these in place.
    if self.__finalized:
      return copy.deepcopy(self._failed_tests)
    return self._failed_tests

  @property
  def flaked_tests(self):
    if self.__finalized:
      return copy.deepcopy(self._flaked_tests)
    return self._flaked_tests

  @property
  def passed_tests(self):
    if self.__finalized:
      return copy.deepcopy(self._passed_tests)
    return self._passed_tests

  @property
  def perf_links(self):
    if self.__finalized:
      return copy.deepcopy(self._perf_links)
    return self._perf_links

  @property
  def return_code(self):
    return self._return_code

  @property
  def success(self):
    return self._success

  def __init__(self, command):
    # NOTE(review): collections.Iterable is Python 2 / pre-3.10 spelling;
    # on modern Python 3 this lives in collections.abc — confirm the
    # interpreter this runs under before changing.
    if not isinstance(command, collections.Iterable):
      raise ValueError('Expected an iterable of command arguments.', command)
    if not command:
      raise ValueError('Expected a non-empty command.', command)
    self._command = tuple(command)
    self._crashed = False
    self._crashed_test = None
    self._failed_tests = collections.OrderedDict()
    self._flaked_tests = collections.OrderedDict()
    self._passed_tests = []
    self._perf_links = collections.OrderedDict()
    self._return_code = None
    self._success = None
    # Name-mangled flag flipped by finalize(); gates the deepcopy behavior
    # of the collection properties above.
    self.__finalized = False

  def finalize(self, return_code, success):
    """Records the outcome and infers whether the app crashed.

    Args:
      return_code: The return code of the command.
      success: Whether the run was considered a successful GTest execution.
    """
    self._return_code = return_code
    self._success = success

    # If the test was not considered to be a GTest success, but had no
    # failing tests, conclude that it must have crashed.
    if not self._success and not self._failed_tests and not self._flaked_tests:
      self._crashed = True

    # At most one test can crash the entire app in a given parsing.
    for test, log_lines in self._failed_tests.iteritems():
      # A test with no output would have crashed. No output is replaced
      # by the GTestLogParser by a sentence indicating non-completion.
      if 'Did not complete.' in log_lines:
        self._crashed = True
        self._crashed_test = test

    # A test marked as flaky may also have crashed the app.
    for test, log_lines in self._flaked_tests.iteritems():
      if 'Did not complete.' in log_lines:
        self._crashed = True
        self._crashed_test = test

    self.__finalized = True
class GTestLogParser(object):
  """This helper class processes GTest test output line by line.

  Feed log lines to ProcessLine(); query results via PassedTests(),
  FailedTests(), FailureDescription(), etc.
  """

  def __init__(self):
    # State tracking for log parsing
    self.completed = False
    self._current_test = ''
    self._failure_description = []
    self._parsing_failures = False

    # Line number currently being processed.
    self._line_number = 0

    # List of parsing errors, as human-readable strings.
    self._internal_error_lines = []

    # Tests are stored here as 'test.name': (status, [description]).
    # The status should be one of ('started', 'OK', 'failed', 'timeout',
    # 'warning'). Warning indicates that a test did not pass when run in
    # parallel with other tests but passed when run alone. The description is
    # a list of lines detailing the test's error, as reported in the log.
    self._test_status = {}

    # This may be either text or a number. It will be used in the phrase
    # '%s disabled' or '%s flaky' on the waterfall display.
    self._disabled_tests = 0
    self._flaky_tests = 0

    # Regular expressions for parsing GTest logs. Test names look like
    # "x.y", with 0 or more "w/" prefixes and 0 or more "/z" suffixes.
    # e.g.:
    #   SomeName/SomeTestCase.SomeTest/1
    #   SomeName/SomeTestCase/1.SomeTest
    #   SomeName/SomeTestCase/1.SomeTest/SomeModifier
    test_name_regexp = r'((\w+/)*\w+\.\w+(/\w+)*)'

    self._master_name_re = re.compile(r'\[Running for master: "([^"]*)"')
    self.master_name = ''

    self._test_name = re.compile(test_name_regexp)
    self._test_start = re.compile(r'\[\s+RUN\s+\] ' + test_name_regexp)
    self._test_ok = re.compile(r'\[\s+OK\s+\] ' + test_name_regexp)
    self._test_fail = re.compile(r'\[\s+FAILED\s+\] ' + test_name_regexp)
    self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
    self._run_test_cases_line = re.compile(
        r'\[\s*\d+\/\d+\]\s+[0-9\.]+s ' + test_name_regexp + ' .+')
    self._test_timeout = re.compile(
        r'Test timeout \([0-9]+ ms\) exceeded for ' + test_name_regexp)
    self._disabled = re.compile(r'\s*YOU HAVE (\d+) DISABLED TEST')
    self._flaky = re.compile(r'\s*YOU HAVE (\d+) FLAKY TEST')

    self._retry_message = re.compile('RETRYING FAILED TESTS:')
    self.retrying_failed = False

    # Maps internal status strings to the gtest JSON labels defined at
    # module level; used by TriesForTest().
    self.TEST_STATUS_MAP = {
        'OK': TEST_SUCCESS_LABEL,
        'failed': TEST_FAILURE_LABEL,
        'timeout': TEST_TIMEOUT_LABEL,
        'warning': TEST_WARNING_LABEL
    }

  def GetCurrentTest(self):
    # Name of the test currently running, or '' between tests.
    return self._current_test

  def _StatusOfTest(self, test):
    """Returns the status code for the given test, or 'not known'."""
    test_status = self._test_status.get(test, ('not known', []))
    return test_status[0]

  def _TestsByStatus(self, status, include_fails, include_flaky):
    """Returns list of tests with the given status.

    Args:
      include_fails: If False, tests containing 'FAILS_' anywhere in their
          names will be excluded from the list.
      include_flaky: If False, tests containing 'FLAKY_' anywhere in their
          names will be excluded from the list.
    """
    test_list = [x[0] for x in self._test_status.items()
                 if self._StatusOfTest(x[0]) == status]

    if not include_fails:
      test_list = [x for x in test_list if x.find('FAILS_') == -1]
    if not include_flaky:
      test_list = [x for x in test_list if x.find('FLAKY_') == -1]

    return test_list

  def _RecordError(self, line, reason):
    """Record a log line that produced a parsing error.

    Args:
      line: text of the line at which the error occurred
      reason: a string describing the error
    """
    self._internal_error_lines.append('%s: %s [%s]' %
                                      (self._line_number, line.strip(), reason))

  def RunningTests(self):
    """Returns list of tests that appear to be currently running."""
    return self._TestsByStatus('started', True, True)

  def ParsingErrors(self):
    """Returns a list of lines that have caused parsing errors."""
    return self._internal_error_lines

  def ClearParsingErrors(self):
    """Clears the currently stored parsing errors."""
    self._internal_error_lines = ['Cleared.']

  def PassedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that passed."""
    return self._TestsByStatus('OK', include_fails, include_flaky)

  def FailedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that failed, timed out, or didn't finish
    (crashed).

    This list will be incorrect until the complete log has been processed,
    because it will show currently running tests as having failed.

    Args:
      include_fails: If true, all failing tests with FAILS_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
      include_flaky: If true, all failing tests with FLAKY_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
    """
    return (self._TestsByStatus('failed', include_fails, include_flaky) +
            self._TestsByStatus('timeout', True, True) +
            self._TestsByStatus('warning', include_fails, include_flaky) +
            self.RunningTests())

  def TriesForTest(self, test):
    """Returns a list containing the state for all tries of the given test.
    This parser doesn't support retries so a single result is returned."""
    return [self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
                                     TEST_UNKNOWN_LABEL)]

  def DisabledTests(self):
    """Returns the name of the disabled test (if there is only 1) or the number
    of disabled tests.
    """
    return self._disabled_tests

  def FlakyTests(self):
    """Returns the name of the flaky test (if there is only 1) or the number
    of flaky tests.
    """
    return self._flaky_tests

  def FailureDescription(self, test):
    """Returns a list containing the failure description for the given test.

    If the test didn't fail or timeout, returns [].
    """
    test_status = self._test_status.get(test, ('', []))
    return ['%s: ' % test] + test_status[1]

  def CompletedWithoutFailure(self):
    """Returns True if all tests completed and no tests failed unexpectedly."""
    return self.completed and not self.FailedTests()

  def ProcessLine(self, line):
    """This is called once with each line of the test log."""
    # Track line number for error messages.
    self._line_number += 1

    # Some tests (net_unittests in particular) run subprocesses which can write
    # stuff to shared stdout buffer. Sometimes such output appears between new
    # line and gtest directives ('[ RUN ]', etc) which breaks the parser.
    # Code below tries to detect such cases and recognize a mixed line as two
    # separate lines.

    # List of regexps that parses expects to find at the start of a line but
    # which can be somewhere in the middle.
    gtest_regexps = [
        self._test_start,
        self._test_ok,
        self._test_fail,
        self._test_passed,
    ]

    for regexp in gtest_regexps:
      match = regexp.search(line)
      if match:
        break

    if not match or match.start() == 0:
      self._ProcessLine(line)
    else:
      # Split the mixed line at the gtest directive and parse both halves.
      self._ProcessLine(line[:match.start()])
      self._ProcessLine(line[match.start():])

  def _ProcessLine(self, line):
    """Parses the line and changes the state of parsed tests accordingly.

    Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.
    """
    # Note: When sharding, the number of disabled and flaky tests will be read
    # multiple times, so this will only show the most recent values (but they
    # should all be the same anyway).

    # Is it a line listing the master name?
    if not self.master_name:
      results = self._master_name_re.match(line)
      if results:
        self.master_name = results.group(1)

    results = self._run_test_cases_line.match(line)
    if results:
      # A run_test_cases.py output.
      if self._current_test:
        if self._test_status[self._current_test][0] == 'started':
          self._test_status[self._current_test] = (
              'timeout', self._failure_description)
      self._current_test = ''
      self._failure_description = []
      return

    # Is it a line declaring all tests passed?
    results = self._test_passed.match(line)
    if results:
      self.completed = True
      self._current_test = ''
      return

    # Is it a line reporting disabled tests?
    results = self._disabled.match(line)
    if results:
      try:
        disabled = int(results.group(1))
      except ValueError:
        disabled = 0
      if disabled > 0 and isinstance(self._disabled_tests, int):
        self._disabled_tests = disabled
      else:
        # If we can't parse the line, at least give a heads-up. This is a
        # safety net for a case that shouldn't happen but isn't a fatal error.
        self._disabled_tests = 'some'
      return

    # Is it a line reporting flaky tests?
    results = self._flaky.match(line)
    if results:
      try:
        flaky = int(results.group(1))
      except ValueError:
        flaky = 0
      if flaky > 0 and isinstance(self._flaky_tests, int):
        self._flaky_tests = flaky
      else:
        # If we can't parse the line, at least give a heads-up. This is a
        # safety net for a case that shouldn't happen but isn't a fatal error.
        self._flaky_tests = 'some'
      return

    # Is it the start of a test?
    results = self._test_start.match(line)
    if results:
      if self._current_test:
        # The previous test never reported a result; treat it as a timeout.
        if self._test_status[self._current_test][0] == 'started':
          self._test_status[self._current_test] = (
              'timeout', self._failure_description)
      test_name = results.group(1)
      # 'Did not complete.' is the sentinel replaced by real output later;
      # GTestResult.finalize relies on it to detect crashes.
      self._test_status[test_name] = ('started', ['Did not complete.'])
      self._current_test = test_name
      if self.retrying_failed:
        self._failure_description = self._test_status[test_name][1]
        self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
      else:
        self._failure_description = []
      return

    # Is it a test success line?
    results = self._test_ok.match(line)
    if results:
      test_name = results.group(1)
      status = self._StatusOfTest(test_name)
      if status != 'started':
        self._RecordError(line, 'success while in status %s' % status)
      if self.retrying_failed:
        # Passing on retry means flaky behavior; record as a warning.
        self._test_status[test_name] = ('warning', self._failure_description)
      else:
        self._test_status[test_name] = ('OK', [])
      self._failure_description = []
      self._current_test = ''
      return

    # Is it a test failure line?
    results = self._test_fail.match(line)
    if results:
      test_name = results.group(1)
      status = self._StatusOfTest(test_name)
      if status not in ('started', 'failed', 'timeout'):
        self._RecordError(line, 'failure while in status %s' % status)
      # Don't overwrite the failure description when a failing test is listed a
      # second time in the summary, or if it was already recorded as timing
      # out.
      if status not in ('failed', 'timeout'):
        self._test_status[test_name] = ('failed', self._failure_description)
      self._failure_description = []
      self._current_test = ''
      return

    # Is it a test timeout line?
    results = self._test_timeout.search(line)
    if results:
      test_name = results.group(1)
      status = self._StatusOfTest(test_name)
      if status not in ('started', 'failed'):
        self._RecordError(line, 'timeout while in status %s' % status)
      self._test_status[test_name] = (
          'timeout', self._failure_description + ['Killed (timed out).'])
      self._failure_description = []
      self._current_test = ''
      return

    # Is it the start of the retry tests?
    results = self._retry_message.match(line)
    if results:
      self.retrying_failed = True
      return

    # Random line: if we're in a test, collect it for the failure description.
    # Tests may run simultaneously, so this might be off, but it's worth a try.
    # This also won't work if a test times out before it begins running.
    if self._current_test:
      self._failure_description.append(line)

    # Parse the "Failing tests:" list at the end of the output, and add any
    # additional failed tests to the list. For example, this includes tests
    # that crash after the OK line.
    if self._parsing_failures:
      results = self._test_name.match(line)
      if results:
        test_name = results.group(1)
        status = self._StatusOfTest(test_name)
        if status in ('not known', 'OK'):
          self._test_status[test_name] = (
              'failed', ['Unknown error, see stdio log.'])
      else:
        self._parsing_failures = False
    elif line.startswith('Failing tests:'):
      self._parsing_failures = True

152
ios/build/bots/scripts/run.py Executable file
View file

@ -0,0 +1,152 @@
#!/usr/bin/python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run a test.
Sample usage:
./run.py \
-a src/xcodebuild/Release-iphoneos/base_unittests.app \
-o /tmp/out \
-p iPhone 5s \
-v 9.3
Installs base_unittests.app in an iPhone 5s simulator running iOS 9.3,
runs it, and captures all test data in /tmp/out.
"""
import argparse
import json
import os
import sys
import traceback
import test_runner
def main(args, test_args):
  """Instantiates the appropriate test runner, runs it, and records a summary.

  Args:
    args: Parsed argparse namespace (see the __main__ block).
    test_args: Extra arguments to pass through to the test itself.

  Returns:
    0 if the tests ran and passed, 1 if they ran and failed, 2 if the
    runner could not be launched at all.
  """
  summary = {}
  tr = None

  if not os.path.exists(args.out_dir):
    os.makedirs(args.out_dir)

  try:
    # Simulator mode is selected when all three of iossim/platform/version
    # are given (enforced in the __main__ block); otherwise run on device.
    if args.iossim and args.platform and args.version:
      tr = test_runner.SimulatorTestRunner(
          args.app,
          args.iossim,
          args.platform,
          args.version,
          args.xcode_version,
          args.out_dir,
          env_vars=args.env_var,
          test_args=test_args,
          xctest=args.xctest,
      )
    else:
      tr = test_runner.DeviceTestRunner(
          args.app,
          args.xcode_version,
          args.out_dir,
          env_vars=args.env_var,
          test_args=test_args,
          xctest=args.xctest,
      )

    return 0 if tr.launch() else 1
  except test_runner.TestRunnerError as e:
    sys.stderr.write(traceback.format_exc())
    summary['step_text'] = '%s%s' % (
        e.__class__.__name__, ': %s' % e.args[0] if e.args else '')

    # test_runner.Launch returns 0 on success, 1 on failure, so return 2
    # on exception to distinguish between a test failure, and a failure
    # to launch the test at all.
    return 2
  finally:
    if tr:
      summary['logs'] = tr.logs

    # NOTE(review): summary.json is written whether or not a runner was
    # created — confirm the original indentation placed this outside `if tr:`.
    with open(os.path.join(args.out_dir, 'summary.json'), 'w') as f:
      json.dump(summary, f)
if __name__ == '__main__':
  # Command-line entry point; see the module docstring for sample usage.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-a',
      '--app',
      help='Compiled .app to run.',
      metavar='app',
      required=True,
  )
  parser.add_argument(
      '-e',
      '--env-var',
      action='append',
      help='Environment variable to pass to the test itself.',
      metavar='ENV=val',
  )
  parser.add_argument(
      '-i',
      '--iossim',
      help='Compiled iossim to run the app on.',
      metavar='iossim',
  )
  parser.add_argument(
      '-j',
      '--args-json',
      default='{}',
      help='Specify "env_var": [...] and "test_args": [...] using a JSON dict.',
      metavar='{}',
  )
  parser.add_argument(
      '-o',
      '--out-dir',
      help='Directory to store all test data in.',
      metavar='dir',
      required=True,
  )
  parser.add_argument(
      '-p',
      '--platform',
      help='Platform to simulate.',
      metavar='sim',
  )
  parser.add_argument(
      '-v',
      '--version',
      help='Version of iOS the simulator should run.',
      metavar='ver',
  )
  parser.add_argument(
      '-x',
      '--xcode-version',
      help='Version of Xcode to use.',
      metavar='ver',
      required=True,
  )
  parser.add_argument(
      '--xctest',
      action='store_true',
      help='Whether or not the given app should be run as an XCTest.',
  )
  args, test_args = parser.parse_known_args()

  if args.iossim or args.platform or args.version:
    # If any of --iossim, --platform, or --version
    # are specified then they must all be specified.
    if not (args.iossim and args.platform and args.version):
      parser.error(
          'must specify all or none of -i/--iossim, -p/--platform, -v/--version')

  # Values from --args-json are merged into (and may override) the
  # corresponding command-line flags.
  args_json = json.loads(args.args_json)
  args.env_var = args.env_var or []
  args.env_var.extend(args_json.get('env_var', []))
  args.xctest = args_json.get('xctest', args.xctest)
  test_args.extend(args_json.get('test_args', []))

  sys.exit(main(args, test_args))

View file

@ -0,0 +1,688 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test runners for iOS."""
import argparse
import collections
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import time
import find_xcode
import gtest_utils
import xctest_utils
# Wrapper Xcode project used to run .xctest bundles; presumably checked in
# next to this script — TODO confirm the TestProject directory exists here.
XCTEST_PROJECT = os.path.abspath(os.path.join(
    os.path.dirname(__file__),
    'TestProject',
    'TestProject.xcodeproj',
))

XCTEST_SCHEME = 'TestProject'
class Error(Exception):
  """Base class for all errors raised by this module."""
class TestRunnerError(Error):
  """Base class for TestRunner-related errors."""
class AppLaunchError(TestRunnerError):
  """The app failed to launch."""
class AppNotFoundError(TestRunnerError):
  """The requested app was not found."""

  def __init__(self, app_path):
    message = 'App does not exist: %s' % app_path
    super(AppNotFoundError, self).__init__(message)
class DeviceDetectionError(TestRunnerError):
  """Unexpected number of devices detected."""

  def __init__(self, udids):
    message = 'Expected one device, found %s:\n%s' % (
        len(udids), '\n'.join(udids))
    super(DeviceDetectionError, self).__init__(message)
class PlugInsNotFoundError(TestRunnerError):
  """The PlugIns directory was not found."""

  def __init__(self, plugins_dir):
    message = 'PlugIns directory does not exist: %s' % plugins_dir
    super(PlugInsNotFoundError, self).__init__(message)
class SimulatorNotFoundError(TestRunnerError):
  """The given simulator binary was not found."""

  def __init__(self, iossim_path):
    message = 'Simulator does not exist: %s' % iossim_path
    super(SimulatorNotFoundError, self).__init__(message)
class XcodeVersionNotFoundError(TestRunnerError):
  """The requested version of Xcode was not found."""

  def __init__(self, xcode_version):
    # Fix: the original passed the format string and the version as two
    # separate Exception args ('...: %s', xcode_version), so the message
    # was never interpolated. Use %-formatting like AppNotFoundError does.
    super(XcodeVersionNotFoundError, self).__init__(
        'Xcode version not found: %s' % xcode_version)
class XCTestPlugInNotFoundError(TestRunnerError):
  """The .xctest PlugIn was not found."""

  def __init__(self, xctest_path):
    # Fix: the original passed the format string and the path as two
    # separate Exception args ('...: %s', xctest_path), so the message
    # was never interpolated. Use %-formatting like AppNotFoundError does.
    super(XCTestPlugInNotFoundError, self).__init__(
        'XCTest not found: %s' % xctest_path)
def get_kif_test_filter(tests, invert=False):
  """Returns the KIF test filter matching the given test cases.

  Args:
    tests: List of test cases to filter.
    invert: Whether to invert the filter or not. Inverted, the filter will
      match everything except the given test cases.

  Returns:
    A string which can be supplied to GKIF_SCENARIO_FILTER.
  """
  # Tests are matched by a pipe-separated list of names with the "KIF."
  # prefix omitted:
  #   NAME:a|b|c matches KIF.a, KIF.b, KIF.c.
  #   -NAME:a|b|c matches everything except KIF.a, KIF.b, KIF.c.
  names = [test.split('KIF.', 1)[-1] for test in tests]
  prefix = '-' if invert else ''
  return '%sNAME:%s' % (prefix, '|'.join(names))
def get_gtest_filter(tests, invert=False):
  """Returns the GTest filter matching the given test cases.

  Args:
    tests: List of test cases to filter.
    invert: Whether to invert the filter or not. Inverted, the filter will
      match everything except the given test cases.

  Returns:
    A string which can be supplied to --gtest_filter.
  """
  # A colon-separated list of test cases:
  #   a:b:c matches a, b, c.
  #   -a:b:c matches everything except a, b, c.
  joined = ':'.join(tests)
  if invert:
    return '-' + joined
  return joined
class TestRunner(object):
  """Base class containing common functionality."""

  def __init__(
      self,
      app_path,
      xcode_version,
      out_dir,
      env_vars=None,
      test_args=None,
      xctest=False,
  ):
    """Initializes a new instance of this class.

    Args:
      app_path: Path to the compiled .app to run.
      xcode_version: Version of Xcode to use when running the test.
      out_dir: Directory to emit test data into.
      env_vars: List of environment variables to pass to the test itself.
      test_args: List of strings to pass as arguments to the test when
        launching.
      xctest: Whether or not this is an XCTest.

    Raises:
      AppNotFoundError: If the given app does not exist.
      PlugInsNotFoundError: If the PlugIns directory does not exist for XCTests.
      XcodeVersionNotFoundError: If the given Xcode version does not exist.
      XCTestPlugInNotFoundError: If the .xctest PlugIn does not exist.
    """
    app_path = os.path.abspath(app_path)
    if not os.path.exists(app_path):
      raise AppNotFoundError(app_path)

    # Side effect: find_xcode may switch the active Xcode installation.
    if not find_xcode.find_xcode(xcode_version)['found']:
      raise XcodeVersionNotFoundError(xcode_version)

    if not os.path.exists(out_dir):
      os.makedirs(out_dir)

    self.app_name = os.path.splitext(os.path.split(app_path)[-1])[0]
    self.app_path = app_path
    # Bundle identifier read from the app's Info.plist via PlistBuddy.
    self.cfbundleid = subprocess.check_output([
        '/usr/libexec/PlistBuddy',
        '-c', 'Print:CFBundleIdentifier',
        os.path.join(app_path, 'Info.plist'),
    ]).rstrip()
    self.env_vars = env_vars or []
    self.logs = collections.OrderedDict()
    self.out_dir = out_dir
    self.test_args = test_args or []
    self.xcode_version = xcode_version
    self.xctest_path = ''

    if xctest:
      plugins_dir = os.path.join(self.app_path, 'PlugIns')
      if not os.path.exists(plugins_dir):
        raise PlugInsNotFoundError(plugins_dir)
      # Note: if several .xctest bundles exist, the last one listed wins.
      for plugin in os.listdir(plugins_dir):
        if plugin.endswith('.xctest'):
          self.xctest_path = os.path.join(plugins_dir, plugin)
      if not os.path.exists(self.xctest_path):
        raise XCTestPlugInNotFoundError(self.xctest_path)

  def get_launch_command(self, test_filter=None, invert=False):
    """Returns the command that can be used to launch the test app.

    Args:
      test_filter: List of test cases to filter.
      invert: Whether to invert the filter or not. Inverted, the filter will
        match everything except the given test cases.

    Returns:
      A list of strings forming the command to launch the test.
    """
    raise NotImplementedError

  def get_launch_env(self):
    """Returns a dict of environment variables to use to launch the test app.

    Returns:
      A dict of environment variables.
    """
    return os.environ.copy()

  def set_up(self):
    """Performs setup actions which must occur prior to every test launch."""
    raise NotImplementedError

  def tear_down(self):
    """Performs cleanup actions which must occur after every test launch."""
    raise NotImplementedError

  def screenshot_desktop(self):
    """Saves a screenshot of the desktop in the output directory."""
    subprocess.check_call([
        'screencapture',
        os.path.join(self.out_dir, 'desktop_%s.png' % time.time()),
    ])

  def _run(self, cmd):
    """Runs the specified command, parsing GTest output.

    Args:
      cmd: List of strings forming the command to run.

    Returns:
      GTestResult instance.
    """
    print ' '.join(cmd)
    print

    result = gtest_utils.GTestResult(cmd)
    if self.xctest_path:
      parser = xctest_utils.XCTestLogParser()
    else:
      parser = gtest_utils.GTestLogParser()

    # Stream the subprocess output line by line: each line is fed to the
    # parser and echoed to stdout so the bot log shows live progress.
    proc = subprocess.Popen(
        cmd,
        env=self.get_launch_env(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )

    while True:
      line = proc.stdout.readline()
      if not line:
        break
      line = line.rstrip()
      parser.ProcessLine(line)
      print line
      sys.stdout.flush()

    proc.wait()
    sys.stdout.flush()

    for test in parser.FailedTests(include_flaky=True):
      # Test cases are named as <test group>.<test case>. If the test case
      # is prefixed with "FLAKY_", it should be reported as flaked not failed.
      if '.' in test and test.split('.', 1)[1].startswith('FLAKY_'):
        result.flaked_tests[test] = parser.FailureDescription(test)
      else:
        result.failed_tests[test] = parser.FailureDescription(test)

    result.passed_tests.extend(parser.PassedTests(include_flaky=True))

    print '%s returned %s' % (cmd[0], proc.returncode)
    print

    # iossim can return 5 if it exits noncleanly even if all tests passed.
    # Therefore we cannot rely on process exit code to determine success.
    result.finalize(proc.returncode, parser.CompletedWithoutFailure())
    return result

  def launch(self):
    """Launches the test app.

    Returns:
      True if all tests passed, False otherwise.
    """
    self.set_up()
    cmd = self.get_launch_command()

    try:
      result = self._run(cmd)

      if result.crashed and not result.crashed_test:
        # If the app crashed but not during any particular test case, assume
        # it crashed on startup. Try one more time.
        print 'Crashed on startup, retrying...'
        print
        result = self._run(cmd)

      if result.crashed and not result.crashed_test:
        raise AppLaunchError

      passed = result.passed_tests
      failed = result.failed_tests
      flaked = result.flaked_tests

      try:
        # XCTests cannot currently be resumed at the next test case.
        while not self.xctest_path and result.crashed and result.crashed_test:
          # If the app crashes during a specific test case, then resume at the
          # next test case. This is achieved by filtering out every test case
          # which has already run.
          print 'Crashed during %s, resuming...' % result.crashed_test
          print
          result = self._run(self.get_launch_command(
              test_filter=passed + failed.keys() + flaked.keys(), invert=True,
          ))
          passed.extend(result.passed_tests)
          failed.update(result.failed_tests)
          flaked.update(result.flaked_tests)
      except OSError as e:
        # The resume filter is passed on the command line; with very many
        # already-run tests it can exceed the OS argument-length limit.
        if e.errno == errno.E2BIG:
          print 'Too many test cases to resume.'
          print
        else:
          raise

      self.logs['passed tests'] = passed
      for test, log_lines in failed.iteritems():
        self.logs[test] = log_lines
      for test, log_lines in flaked.iteritems():
        self.logs[test] = log_lines

      return not failed
    finally:
      self.tear_down()
class SimulatorTestRunner(TestRunner):
  """Class for running tests on iossim."""
  def __init__(
      self,
      app_path,
      iossim_path,
      platform,
      version,
      xcode_version,
      out_dir,
      env_vars=None,
      test_args=None,
      xctest=False,
  ):
    """Initializes a new instance of this class.
    Args:
      app_path: Path to the compiled .app or .ipa to run.
      iossim_path: Path to the compiled iossim binary to use.
      platform: Name of the platform to simulate. Supported values can be found
        by running "iossim -l". e.g. "iPhone 5s", "iPad Retina".
      version: Version of iOS the platform should be running. Supported values
        can be found by running "iossim -l". e.g. "9.3", "8.2", "7.1".
      xcode_version: Version of Xcode to use when running the test.
      out_dir: Directory to emit test data into.
      env_vars: List of environment variables to pass to the test itself.
      test_args: List of strings to pass as arguments to the test when
        launching.
      xctest: Whether or not this is an XCTest.
    Raises:
      AppNotFoundError: If the given app does not exist.
      PlugInsNotFoundError: If the PlugIns directory does not exist for XCTests.
      SimulatorNotFoundError: If the given iossim binary does not exist.
      XcodeVersionNotFoundError: If the given Xcode version does not exist.
      XCTestPlugInNotFoundError: If the .xctest PlugIn does not exist.
    """
    super(SimulatorTestRunner, self).__init__(
        app_path,
        xcode_version,
        out_dir,
        env_vars=env_vars,
        test_args=test_args,
        xctest=xctest,
    )
    iossim_path = os.path.abspath(iossim_path)
    if not os.path.exists(iossim_path):
      raise SimulatorNotFoundError(iossim_path)
    # Simulator home directory; populated by set_up() and used by
    # extract_test_data() and tear_down().
    self.homedir = ''
    self.iossim_path = iossim_path
    self.platform = platform
    # Launch timestamp (YYYY-MM-DD-HHMMSS); set by set_up() and compared
    # against crash report file names to select only this run's reports.
    self.start_time = None
    self.version = version
  @staticmethod
  def kill_simulators():
    """Kills all running simulators."""
    try:
      subprocess.check_call([
          'pkill',
          '-9',
          '-x',
          # The simulator's name varies by Xcode version.
          'iPhone Simulator', # Xcode 5
          'iOS Simulator', # Xcode 6
          'Simulator', # Xcode 7+
          'simctl', # https://crbug.com/637429
      ])
      # If a signal was sent, wait for the simulators to actually be killed.
      time.sleep(5)
    except subprocess.CalledProcessError as e:
      if e.returncode != 1:
        # Ignore a 1 exit code (which means there were no simulators to kill).
        raise
  def wipe_simulator(self):
    """Wipes the simulator."""
    # iossim -w erases the target simulator's content and settings.
    subprocess.check_call([
        self.iossim_path,
        '-d', self.platform,
        '-s', self.version,
        '-w',
    ])
  def get_home_directory(self):
    """Returns the simulator's home directory."""
    # iossim -p prints the simulator's home directory path.
    return subprocess.check_output([
        self.iossim_path,
        '-d', self.platform,
        '-p',
        '-s', self.version,
    ]).rstrip()
  def set_up(self):
    """Performs setup actions which must occur prior to every test launch."""
    self.kill_simulators()
    self.wipe_simulator()
    self.homedir = self.get_home_directory()
    # Crash reports have a timestamp in their file name, formatted as
    # YYYY-MM-DD-HHMMSS. Save the current time in the same format so
    # we can compare and fetch crash reports from this run later on.
    self.start_time = time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
  def extract_test_data(self):
    """Extracts data emitted by the test."""
    # Find the Documents directory of the test app. The app directory names
    # don't correspond with any known information, so we have to examine them
    # all until we find one with a matching CFBundleIdentifier.
    apps_dir = os.path.join(
        self.homedir, 'Containers', 'Data', 'Application')
    if os.path.exists(apps_dir):
      for appid_dir in os.listdir(apps_dir):
        docs_dir = os.path.join(apps_dir, appid_dir, 'Documents')
        metadata_plist = os.path.join(
            apps_dir,
            appid_dir,
            '.com.apple.mobile_container_manager.metadata.plist',
        )
        if os.path.exists(docs_dir) and os.path.exists(metadata_plist):
          # Read the container's owning bundle ID out of its metadata plist.
          cfbundleid = subprocess.check_output([
              '/usr/libexec/PlistBuddy',
              '-c', 'Print:MCMMetadataIdentifier',
              metadata_plist,
          ]).rstrip()
          if cfbundleid == self.cfbundleid:
            shutil.copytree(docs_dir, os.path.join(self.out_dir, 'Documents'))
            return
  def retrieve_crash_reports(self):
    """Retrieves crash reports produced by the test."""
    # A crash report's naming scheme is [app]_[timestamp]_[hostname].crash.
    # e.g. net_unittests_2014-05-13-15-0900_vm1-a1.crash.
    crash_reports_dir = os.path.expanduser(os.path.join(
        '~', 'Library', 'Logs', 'DiagnosticReports'))
    if not os.path.exists(crash_reports_dir):
      return
    for crash_report in os.listdir(crash_reports_dir):
      report_name, ext = os.path.splitext(crash_report)
      if report_name.startswith(self.app_name) and ext == '.crash':
        report_time = report_name[len(self.app_name) + 1:].split('_')[0]
        # The timestamp format in a crash report is big-endian and therefore
        # a straight string comparison works.
        if report_time > self.start_time:
          with open(os.path.join(crash_reports_dir, crash_report)) as f:
            self.logs['crash report (%s)' % report_time] = (
                f.read().splitlines())
  def tear_down(self):
    """Performs cleanup actions which must occur after every test launch."""
    self.extract_test_data()
    self.retrieve_crash_reports()
    self.screenshot_desktop()
    self.kill_simulators()
    self.wipe_simulator()
    if os.path.exists(self.homedir):
      shutil.rmtree(self.homedir, ignore_errors=True)
      self.homedir = ''
  def get_launch_command(self, test_filter=None, invert=False):
    """Returns the command that can be used to launch the test app.
    Args:
      test_filter: List of test cases to filter.
      invert: Whether to invert the filter or not. Inverted, the filter will
        match everything except the given test cases.
    Returns:
      A list of strings forming the command to launch the test.
    """
    cmd = [
        self.iossim_path,
        '-d', self.platform,
        '-s', self.version,
    ]
    if test_filter:
      kif_filter = get_kif_test_filter(test_filter, invert=invert)
      gtest_filter = get_gtest_filter(test_filter, invert=invert)
      # KIF reads its filter from the environment (-e); gtest reads its
      # filter from the app's command line (-c).
      cmd.extend(['-e', 'GKIF_SCENARIO_FILTER=%s' % kif_filter])
      cmd.extend(['-c', '--gtest_filter=%s' % gtest_filter])
    for env_var in self.env_vars:
      cmd.extend(['-e', env_var])
    for test_arg in self.test_args:
      cmd.extend(['-c', test_arg])
    cmd.append(self.app_path)
    if self.xctest_path:
      cmd.append(self.xctest_path)
    return cmd
  def get_launch_env(self):
    """Returns a dict of environment variables to use to launch the test app.
    Returns:
      A dict of environment variables.
    """
    env = super(SimulatorTestRunner, self).get_launch_env()
    if self.xctest_path:
      # Keep XCTest output unbuffered so the log parser sees it promptly.
      env['NSUnbufferedIO'] = 'YES'
    return env
class DeviceTestRunner(TestRunner):
  """Class for running tests on devices."""
  def __init__(
      self,
      app_path,
      xcode_version,
      out_dir,
      env_vars=None,
      test_args=None,
      xctest=False,
  ):
    """Initializes a new instance of this class.
    Args:
      app_path: Path to the compiled .app to run.
      xcode_version: Version of Xcode to use when running the test.
      out_dir: Directory to emit test data into.
      env_vars: List of environment variables to pass to the test itself.
      test_args: List of strings to pass as arguments to the test when
        launching.
      xctest: Whether or not this is an XCTest.
    Raises:
      AppNotFoundError: If the given app does not exist.
      DeviceDetectionError: If zero or multiple devices are detected.
      PlugInsNotFoundError: If the PlugIns directory does not exist for XCTests.
      XcodeVersionNotFoundError: If the given Xcode version does not exist.
      XCTestPlugInNotFoundError: If the .xctest PlugIn does not exist.
    """
    super(DeviceTestRunner, self).__init__(
        app_path,
        xcode_version,
        out_dir,
        env_vars=env_vars,
        test_args=test_args,
        xctest=xctest,
    )
    # Exactly one device must be attached; its UDID identifies it to the
    # libimobiledevice tools invoked below.
    self.udid = subprocess.check_output(['idevice_id', '--list']).rstrip()
    if len(self.udid.splitlines()) != 1:
      raise DeviceDetectionError(self.udid)
  def uninstall_apps(self):
    """Uninstalls all apps found on the device."""
    # NOTE(review): 'idevicefs ls @' appears to enumerate installed app
    # bundles, each then removed via ideviceinstaller -- confirm against the
    # idevicefs documentation.
    for app in subprocess.check_output(
        ['idevicefs', '--udid', self.udid, 'ls', '@']).splitlines():
      subprocess.check_call(
          ['ideviceinstaller', '--udid', self.udid, '--uninstall', app])
  def install_app(self):
    """Installs the app."""
    subprocess.check_call(
        ['ideviceinstaller', '--udid', self.udid, '--install', self.app_path])
  def set_up(self):
    """Performs setup actions which must occur prior to every test launch."""
    # Start from a clean device: remove existing apps, then install ours.
    self.uninstall_apps()
    self.install_app()
  def extract_test_data(self):
    """Extracts data emitted by the test."""
    # Pull the app's Documents directory off the device into out_dir.
    subprocess.check_call([
        'idevicefs',
        '--udid', self.udid,
        'pull',
        '@%s/Documents' % self.cfbundleid,
        os.path.join(self.out_dir, 'Documents'),
    ])
  def retrieve_crash_reports(self):
    """Retrieves crash reports produced by the test."""
    logs_dir = os.path.join(self.out_dir, 'Logs')
    os.mkdir(logs_dir)
    subprocess.check_call([
        'idevicecrashreport',
        '--extract',
        '--udid', self.udid,
        logs_dir,
    ])
  def tear_down(self):
    """Performs cleanup actions which must occur after every test launch."""
    self.extract_test_data()
    self.retrieve_crash_reports()
    self.screenshot_desktop()
    self.uninstall_apps()
  def get_launch_command(self, test_filter=None, invert=False):
    """Returns the command that can be used to launch the test app.
    Args:
      test_filter: List of test cases to filter.
      invert: Whether to invert the filter or not. Inverted, the filter will
        match everything except the given test cases.
    Returns:
      A list of strings forming the command to launch the test.
    """
    if self.xctest_path:
      # XCTests run via xcodebuild against the already-built products.
      return [
          'xcodebuild',
          'test-without-building',
          'BUILT_PRODUCTS_DIR=%s' % os.path.dirname(self.app_path),
          '-destination', 'id=%s' % self.udid,
          '-project', XCTEST_PROJECT,
          '-scheme', XCTEST_SCHEME,
      ]
    cmd = [
        'idevice-app-runner',
        '--udid', self.udid,
        '--start', self.cfbundleid,
    ]
    args = []
    if test_filter:
      kif_filter = get_kif_test_filter(test_filter, invert=invert)
      gtest_filter = get_gtest_filter(test_filter, invert=invert)
      # KIF reads its filter from the environment (-D); gtest reads its
      # filter from the app's argument list, appended after '--args' below.
      cmd.extend(['-D', 'GKIF_SCENARIO_FILTER=%s' % kif_filter])
      args.append('--gtest-filter=%s' % gtest_filter)
    for env_var in self.env_vars:
      cmd.extend(['-D', env_var])
    if args or self.test_args:
      cmd.append('--args')
      cmd.extend(self.test_args)
      cmd.extend(args)
    return cmd
  def get_launch_env(self):
    """Returns a dict of environment variables to use to launch the test app.
    Returns:
      A dict of environment variables.
    """
    env = super(DeviceTestRunner, self).get_launch_env()
    if self.xctest_path:
      # Keep XCTest output unbuffered so the log parser sees it promptly.
      env['NSUnbufferedIO'] = 'YES'
      # e.g. ios_web_shell_egtests
      env['APP_TARGET_NAME'] = os.path.splitext(
          os.path.basename(self.app_path))[0]
      # e.g. ios_web_shell_egtests_module
      env['TEST_TARGET_NAME'] = env['APP_TARGET_NAME'] + '_module'
    return env

View file

@ -0,0 +1,289 @@
#!/usr/bin/python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for test_runner.py."""
import collections
import json
import os
import sys
import unittest
import test_runner
class TestCase(unittest.TestCase):
  """Test case which supports installing mocks. Uninstalls on tear down."""
  def __init__(self, *args, **kwargs):
    """Initializes a new instance of this class."""
    super(TestCase, self).__init__(*args, **kwargs)
    # Maps a mocked object to a dict mapping each mocked member's name to
    # the value it held before the first mock was installed.
    self._mocks = collections.OrderedDict()
  def mock(self, obj, member, mock):
    """Installs mock in place of the named member of the given obj.
    Args:
      obj: Any object.
      member: String naming the attribute of the object to mock.
      mock: The mock to install.
    """
    saved = self._mocks.setdefault(obj, collections.OrderedDict())
    # Only the first mock of a member records the original value, so that
    # mocking the same member repeatedly still restores the true original.
    saved.setdefault(member, getattr(obj, member))
    setattr(obj, member, mock)
  def tearDown(self, *args, **kwargs):
    """Uninstalls mocks."""
    super(TestCase, self).tearDown(*args, **kwargs)
    for obj, members in self._mocks.iteritems():
      for member, original_value in members.iteritems():
        setattr(obj, member, original_value)
class GetKIFTestFilterTest(TestCase):
  """Tests for test_runner.get_kif_test_filter."""
  def test_correct(self):
    """Ensures correctness of filter."""
    self.assertEqual(
        test_runner.get_kif_test_filter(['KIF.test1', 'KIF.test2']),
        'NAME:test1|test2')
  def test_correct_inverted(self):
    """Ensures correctness of inverted filter."""
    self.assertEqual(
        test_runner.get_kif_test_filter(
            ['KIF.test1', 'KIF.test2'], invert=True),
        '-NAME:test1|test2')
class GetGTestFilterTest(TestCase):
  """Tests for test_runner.get_gtest_filter."""
  def test_correct(self):
    """Ensures correctness of filter."""
    self.assertEqual(
        test_runner.get_gtest_filter(['test.1', 'test.2']),
        'test.1:test.2')
  def test_correct_inverted(self):
    """Ensures correctness of inverted filter."""
    self.assertEqual(
        test_runner.get_gtest_filter(['test.1', 'test.2'], invert=True),
        '-test.1:test.2')
class SimulatorTestRunnerTest(TestCase):
  """Tests for test_runner.SimulatorTestRunner."""
  def test_app_not_found(self):
    """Ensures AppNotFoundError is raised."""
    def exists(path):
      # Pretend everything exists except the app itself.
      if path == 'fake-app':
        return False
      return True
    def find_xcode(version):
      return {'found': True}
    def check_output(command):
      return 'fake-bundle-id'
    self.mock(test_runner.os.path, 'exists', exists)
    self.mock(test_runner.find_xcode, 'find_xcode', find_xcode)
    self.mock(test_runner.subprocess, 'check_output', check_output)
    self.assertRaises(
        test_runner.AppNotFoundError,
        test_runner.SimulatorTestRunner,
        'fake-app',
        'fake-iossim',
        'platform',
        'os',
        'xcode-version',
        'out-dir',
    )
  def test_iossim_not_found(self):
    """Ensures SimulatorNotFoundError is raised."""
    def exists(path):
      # Pretend everything exists except the iossim binary.
      if path == 'fake-iossim':
        return False
      return True
    def find_xcode(version):
      return {'found': True}
    def check_output(command):
      return 'fake-bundle-id'
    self.mock(test_runner.os.path, 'exists', exists)
    self.mock(test_runner.find_xcode, 'find_xcode', find_xcode)
    self.mock(test_runner.subprocess, 'check_output', check_output)
    self.assertRaises(
        test_runner.SimulatorNotFoundError,
        test_runner.SimulatorTestRunner,
        'fake-app',
        'fake-iossim',
        'platform',
        'os',
        'xcode-version',
        'out-dir',
    )
  def test_init(self):
    """Ensures instance is created."""
    def exists(path):
      return True
    def find_xcode(version):
      return {'found': True}
    def check_output(command):
      return 'fake-bundle-id'
    self.mock(test_runner.os.path, 'exists', exists)
    self.mock(test_runner.find_xcode, 'find_xcode', find_xcode)
    self.mock(test_runner.subprocess, 'check_output', check_output)
    tr = test_runner.SimulatorTestRunner(
        'fake-app',
        'fake-iossim',
        'platform',
        'os',
        'xcode-version',
        'out-dir',
    )
    self.failUnless(tr)
  def test_startup_crash(self):
    """Ensures test is relaunched once on startup crash."""
    def exists(path):
      return True
    def find_xcode(version):
      return {'found': True}
    def check_output(command):
      return 'fake-bundle-id'
    def set_up(self):
      return
    # Installed in place of TestRunner._run; declared @staticmethod so the
    # call self._run(cmd) passes only the command. Always reports a crash
    # with no crashed test, simulating a repeated startup crash.
    @staticmethod
    def _run(command):
      return collections.namedtuple('result', ['crashed', 'crashed_test'])(
          crashed=True, crashed_test=None)
    def tear_down(self):
      return
    self.mock(test_runner.os.path, 'exists', exists)
    self.mock(test_runner.find_xcode, 'find_xcode', find_xcode)
    self.mock(test_runner.subprocess, 'check_output', check_output)
    self.mock(test_runner.SimulatorTestRunner, 'set_up', set_up)
    self.mock(test_runner.TestRunner, '_run', _run)
    self.mock(test_runner.SimulatorTestRunner, 'tear_down', tear_down)
    tr = test_runner.SimulatorTestRunner(
        'fake-app',
        'fake-iossim',
        'platform',
        'os',
        'xcode-version',
        'out-dir',
    )
    self.assertRaises(test_runner.AppLaunchError, tr.launch)
  def test_relaunch(self):
    """Ensures test is relaunched on test crash until tests complete."""
    def exists(path):
      return True
    def find_xcode(version):
      return {'found': True}
    def check_output(command):
      return 'fake-bundle-id'
    def set_up(self):
      return
    # Installed in place of TestRunner._run (see test_startup_crash). Crashes
    # on the unfiltered first run; succeeds once a test filter (-e) appears.
    @staticmethod
    def _run(command):
      result = collections.namedtuple(
          'result', [
              'crashed',
              'crashed_test',
              'failed_tests',
              'flaked_tests',
              'passed_tests',
          ],
      )
      if '-e' not in command:
        # First run, has no test filter supplied. Mock a crash.
        return result(
            crashed=True,
            crashed_test='c',
            failed_tests={'b': ['b-out'], 'c': ['Did not complete.']},
            flaked_tests={'d': ['d-out']},
            passed_tests=['a'],
        )
      else:
        return result(
            crashed=False,
            crashed_test=None,
            failed_tests={},
            flaked_tests={},
            passed_tests=[],
        )
    def tear_down(self):
      return
    self.mock(test_runner.os.path, 'exists', exists)
    self.mock(test_runner.find_xcode, 'find_xcode', find_xcode)
    self.mock(test_runner.subprocess, 'check_output', check_output)
    self.mock(test_runner.SimulatorTestRunner, 'set_up', set_up)
    self.mock(test_runner.TestRunner, '_run', _run)
    self.mock(test_runner.SimulatorTestRunner, 'tear_down', tear_down)
    tr = test_runner.SimulatorTestRunner(
        'fake-app',
        'fake-iossim',
        'platform',
        'os',
        'xcode-version',
        'out-dir',
    )
    tr.launch()
    self.failUnless(tr.logs)
# Allow running these unittests directly from the command line.
if __name__ == '__main__':
  unittest.main()

View file

@ -0,0 +1,266 @@
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
import tempfile
# These labels should match the ones output by gtest's JSON.
# They are the per-try status values returned by TriesForTest() below.
TEST_UNKNOWN_LABEL = 'UNKNOWN'
TEST_SUCCESS_LABEL = 'SUCCESS'
TEST_FAILURE_LABEL = 'FAILURE'
TEST_CRASH_LABEL = 'CRASH'
TEST_TIMEOUT_LABEL = 'TIMEOUT'
TEST_WARNING_LABEL = 'WARNING'
class XCTestLogParser(object):
  """This helper class processes XCTest test output.

  Feed the log to ProcessLine() one line at a time; query the accumulated
  results via PassedTests(), FailedTests(), FailureDescription(), etc.
  """
  def __init__(self):
    # State tracking for log parsing
    self.completed = False
    self._current_test = ''
    self._failure_description = []
    self._current_report_hash = ''
    self._current_report = []
    self._parsing_failures = False
    # Line number currently being processed.
    self._line_number = 0
    # List of parsing errors, as human-readable strings.
    self._internal_error_lines = []
    # Tests are stored here as 'test.name': (status, [description]).
    # The status should be one of ('started', 'OK', 'failed', 'timeout',
    # 'warning'). Warning indicates that a test did not pass when run in
    # parallel with other tests but passed when run alone. The description is
    # a list of lines detailing the test's error, as reported in the log.
    self._test_status = {}
    # This may be either text or a number. It will be used in the phrase
    # '%s disabled' or '%s flaky' on the waterfall display.
    self._disabled_tests = 0
    self._flaky_tests = 0
    # Matches '-[SuiteName testName]', capturing the suite and test names.
    test_name_regexp = r'\-\[(\w+)\s(\w+)\]'
    self._test_name = re.compile(test_name_regexp)
    self._test_start = re.compile(
        r'Test Case \'' + test_name_regexp + '\' started\.')
    # NOTE(review): in the two patterns below, the trailing "\)?." makes the
    # closing parenthesis optional and then matches any character; "\)\." was
    # probably intended. Harmless for matching real XCTest output, so kept.
    self._test_ok = re.compile(
        r'Test Case \'' + test_name_regexp +
        '\' passed\s+\(\d+\.\d+\s+seconds\)?.')
    self._test_fail = re.compile(
        r'Test Case \'' + test_name_regexp +
        '\' failed\s+\(\d+\.\d+\s+seconds\)?.')
    self._test_passed = re.compile(r'\*\*\s+TEST\s+EXECUTE\s+SUCCEEDED\s+\*\*')
    self._retry_message = re.compile('RETRYING FAILED TESTS:')
    self.retrying_failed = False
    # Maps internal status strings to the gtest-style labels defined above.
    self.TEST_STATUS_MAP = {
        'OK': TEST_SUCCESS_LABEL,
        'failed': TEST_FAILURE_LABEL,
        'timeout': TEST_TIMEOUT_LABEL,
        'warning': TEST_WARNING_LABEL
    }
  def GetCurrentTest(self):
    """Returns the name of the test currently being parsed."""
    return self._current_test
  def _StatusOfTest(self, test):
    """Returns the status code for the given test, or 'not known'."""
    test_status = self._test_status.get(test, ('not known', []))
    return test_status[0]
  def _TestsByStatus(self, status, include_fails, include_flaky):
    """Returns list of tests with the given status.
    Args:
      status: One of the internal status strings, e.g. 'OK', 'failed'.
      include_fails: If False, tests containing 'FAILS_' anywhere in their
          names will be excluded from the list.
      include_flaky: If False, tests containing 'FLAKY_' anywhere in their
          names will be excluded from the list.
    """
    test_list = [x[0] for x in self._test_status.items()
                 if self._StatusOfTest(x[0]) == status]
    if not include_fails:
      test_list = [x for x in test_list if x.find('FAILS_') == -1]
    if not include_flaky:
      test_list = [x for x in test_list if x.find('FLAKY_') == -1]
    return test_list
  def _RecordError(self, line, reason):
    """Record a log line that produced a parsing error.
    Args:
      line: text of the line at which the error occurred
      reason: a string describing the error
    """
    self._internal_error_lines.append('%s: %s [%s]' %
                                      (self._line_number, line.strip(), reason))
  def RunningTests(self):
    """Returns list of tests that appear to be currently running."""
    return self._TestsByStatus('started', True, True)
  def ParsingErrors(self):
    """Returns a list of lines that have caused parsing errors."""
    return self._internal_error_lines
  def ClearParsingErrors(self):
    """Clears the currently stored parsing errors."""
    self._internal_error_lines = ['Cleared.']
  def PassedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that passed."""
    return self._TestsByStatus('OK', include_fails, include_flaky)
  def FailedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that failed, timed out, or didn't finish
    (crashed).
    This list will be incorrect until the complete log has been processed,
    because it will show currently running tests as having failed.
    Args:
      include_fails: If true, all failing tests with FAILS_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
      include_flaky: If true, all failing tests with FLAKY_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
    """
    return (self._TestsByStatus('failed', include_fails, include_flaky) +
            self._TestsByStatus('timeout', True, True) +
            self._TestsByStatus('warning', include_fails, include_flaky) +
            self.RunningTests())
  def TriesForTest(self, test):
    """Returns a list containing the state for all tries of the given test.
    This parser doesn't support retries so a single result is returned."""
    return [self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
                                     TEST_UNKNOWN_LABEL)]
  def FailureDescription(self, test):
    """Returns a list containing the failure description for the given test.
    If the test didn't fail or timeout, returns [].
    """
    test_status = self._test_status.get(test, ('', []))
    return ['%s: ' % test] + test_status[1]
  def CompletedWithoutFailure(self):
    """Returns True if all tests completed and no tests failed unexpectedly."""
    return self.completed
  def ProcessLine(self, line):
    """This is called once with each line of the test log."""
    # Track line number for error messages.
    self._line_number += 1
    # Some tests (net_unittests in particular) run subprocesses which can write
    # stuff to shared stdout buffer. Sometimes such output appears between new
    # line and gtest directives ('[ RUN ]', etc) which breaks the parser.
    # Code below tries to detect such cases and recognize a mixed line as two
    # separate lines.
    # List of regexps that parses expects to find at the start of a line but
    # which can be somewhere in the middle.
    gtest_regexps = [
        self._test_start,
        self._test_ok,
        self._test_fail,
        self._test_passed,
    ]
    for regexp in gtest_regexps:
      match = regexp.search(line)
      if match:
        break
    if not match or match.start() == 0:
      self._ProcessLine(line)
    else:
      # Split the mixed line at the directive and parse both halves.
      self._ProcessLine(line[:match.start()])
      self._ProcessLine(line[match.start():])
  def _ProcessLine(self, line):
    """Parses the line and changes the state of parsed tests accordingly.
    Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.
    """
    # Is it a line declaring all tests passed?
    results = self._test_passed.match(line)
    if results:
      self.completed = True
      self._current_test = ''
      return
    # Is it the start of a test?
    results = self._test_start.match(line)
    if results:
      if self._current_test:
        # The previous test never reported a result; treat it as a timeout.
        if self._test_status[self._current_test][0] == 'started':
          self._test_status[self._current_test] = (
              'timeout', self._failure_description)
      test_name = '%s.%s' % (results.group(1), results.group(2))
      self._test_status[test_name] = ('started', ['Did not complete.'])
      self._current_test = test_name
      if self.retrying_failed:
        # Append the retry's output to the test's existing description.
        self._failure_description = self._test_status[test_name][1]
        self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
      else:
        self._failure_description = []
      return
    # Is it a test success line?
    results = self._test_ok.match(line)
    if results:
      test_name = '%s.%s' % (results.group(1), results.group(2))
      status = self._StatusOfTest(test_name)
      if status != 'started':
        self._RecordError(line, 'success while in status %s' % status)
      if self.retrying_failed:
        # Passed only on retry: record as a warning rather than a clean pass.
        self._test_status[test_name] = ('warning', self._failure_description)
      else:
        self._test_status[test_name] = ('OK', [])
      self._failure_description = []
      self._current_test = ''
      return
    # Is it a test failure line?
    results = self._test_fail.match(line)
    if results:
      test_name = '%s.%s' % (results.group(1), results.group(2))
      status = self._StatusOfTest(test_name)
      if status not in ('started', 'failed', 'timeout'):
        self._RecordError(line, 'failure while in status %s' % status)
      # Don't overwrite the failure description when a failing test is listed a
      # second time in the summary, or if it was already recorded as timing
      # out.
      if status not in ('failed', 'timeout'):
        self._test_status[test_name] = ('failed', self._failure_description)
      self._failure_description = []
      self._current_test = ''
      return
    # Is it the start of the retry tests?
    results = self._retry_message.match(line)
    if results:
      self.retrying_failed = True
      return
    # Random line: if we're in a test, collect it for the failure description.
    # Tests may run simultaneously, so this might be off, but it's worth a try.
    # This also won't work if a test times out before it begins running.
    if self._current_test:
      self._failure_description.append(line)