diff --git a/.vpython3 b/.vpython3
index 3d2a8e40c2..99b1a0d8e9 100644
--- a/.vpython3
+++ b/.vpython3
@@ -33,10 +33,16 @@ wheel: <
 # Used by tools_webrtc/perf/webrtc_dashboard_upload.py.
 wheel: <
-  name: "infra/python/wheels/httplib2-py2_py3"
-  version: "version:0.10.3"
+  name: "infra/python/wheels/httplib2-py3"
+  version: "version:0.19.1"
 >
+wheel: <
+  name: "infra/python/wheels/pyparsing-py2_py3"
+  version: "version:2.4.7"
+>
+
+
 # Used by:
 # build/toolchain/win
 wheel: <
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 0591c1bab9..acc3110e7b 100755
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env vpython3
+
 # Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 #
 # Use of this source code is governed by a BSD-style license
@@ -13,6 +15,9 @@ import sys
 from collections import defaultdict
 from contextlib import contextmanager

+# Makes git cl presubmit run PRESUBMIT.py in py3 mode.
+USE_PYTHON3 = True
+
 # Files and directories that are *skipped* by cpplint in the presubmit script.
 CPPLINT_EXCEPTIONS = [
     'api/video_codecs/video_decoder.h',
@@ -31,6 +36,9 @@ CPPLINT_EXCEPTIONS = [
     'modules/video_capture',
     'p2p/base/pseudo_tcp.cc',
     'p2p/base/pseudo_tcp.h',
+    'PRESUBMIT.py',
+    'presubmit_test_mocks.py',
+    'presubmit_test.py',
     'rtc_base',
     'sdk/android/src/jni',
     'sdk/objc',
@@ -115,42 +123,42 @@ FILE_PATH_RE = re.compile(r'"(?P<file_path>(\w|\/)+)(?P<extension>\.\w+)"')


 def FindSrcDirPath(starting_dir):
-    """Returns the abs path to the src/ dir of the project."""
-    src_dir = starting_dir
-    while os.path.basename(src_dir) != 'src':
-        src_dir = os.path.normpath(os.path.join(src_dir, os.pardir))
-    return src_dir
+  """Returns the abs path to the src/ dir of the project."""
+  src_dir = starting_dir
+  while os.path.basename(src_dir) != 'src':
+    src_dir = os.path.normpath(os.path.join(src_dir, os.pardir))
+  return src_dir


 @contextmanager
 def _AddToPath(*paths):
-    original_sys_path = sys.path
-    sys.path.extend(paths)
-    try:
-        yield
-    finally:
-        # Restore sys.path to what it was before.
-        sys.path = original_sys_path
+  original_sys_path = sys.path
+  sys.path.extend(paths)
+  try:
+    yield
+  finally:
+    # Restore sys.path to what it was before.
+    sys.path = original_sys_path


 def VerifyNativeApiHeadersListIsValid(input_api, output_api):
-    """Ensures the list of native API header directories is up to date."""
-    non_existing_paths = []
-    native_api_full_paths = [
-        input_api.os_path.join(input_api.PresubmitLocalPath(),
-                               *path.split('/')) for path in API_DIRS
+  """Ensures the list of native API header directories is up to date."""
+  non_existing_paths = []
+  native_api_full_paths = [
+      input_api.os_path.join(input_api.PresubmitLocalPath(), *path.split('/'))
+      for path in API_DIRS
+  ]
+  for path in native_api_full_paths:
+    if not os.path.isdir(path):
+      non_existing_paths.append(path)
+  if non_existing_paths:
+    return [
+        output_api.PresubmitError(
+            'Directories to native API headers have changed which has made '
+            'the list in PRESUBMIT.py outdated.\nPlease update it to the '
+            'current location of our native APIs.', non_existing_paths)
     ]
-    for path in native_api_full_paths:
-        if not os.path.isdir(path):
-            non_existing_paths.append(path)
-    if non_existing_paths:
-        return [
-            output_api.PresubmitError(
-                'Directories to native API headers have changed which has made '
-                'the list in PRESUBMIT.py outdated.\nPlease update it to the '
-                'current location of our native APIs.', non_existing_paths)
-        ]
-    return []
+  return []


 API_CHANGE_MSG = """
@@ -174,610 +182,592 @@ Related files:
 """


 def CheckNativeApiHeaderChanges(input_api, output_api):
-    """Checks to remind proper changing of native APIs."""
-    files = []
-    source_file_filter = lambda x: input_api.FilterSourceFile(
-        x, files_to_check=[r'.+\.(gn|gni|h)$'])
-    for f in input_api.AffectedSourceFiles(source_file_filter):
-        for path in API_DIRS:
-            dn = os.path.dirname(f.LocalPath())
-            if path == 'api':
-                # Special case: Subdirectories included.
-                if dn == 'api' or dn.startswith('api/'):
-                    files.append(f.LocalPath())
-            else:
-                # Normal case: Subdirectories not included.
-                if dn == path:
-                    files.append(f.LocalPath())
+  """Checks to remind proper changing of native APIs."""
+  files = []
+  source_file_filter = lambda x: input_api.FilterSourceFile(
+      x, files_to_check=[r'.+\.(gn|gni|h)$'])
+  for f in input_api.AffectedSourceFiles(source_file_filter):
+    for path in API_DIRS:
+      dn = os.path.dirname(f.LocalPath())
+      if path == 'api':
+        # Special case: Subdirectories included.
+        if dn == 'api' or dn.startswith('api/'):
+          files.append(f.LocalPath())
+      else:
+        # Normal case: Subdirectories not included.
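+        # Illustrative only (hypothetical entry): if path were 'media',
+        # 'media/engine.h' would match (dn == 'media'), but
+        # 'media/base/codec.h' would not (dn == 'media/base').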
+        if dn == path:
+          files.append(f.LocalPath())

-    if files:
-        return [output_api.PresubmitNotifyResult(API_CHANGE_MSG, files)]
-    return []
+  if files:
+    return [output_api.PresubmitNotifyResult(API_CHANGE_MSG, files)]
+  return []


 def CheckNoIOStreamInHeaders(input_api, output_api, source_file_filter):
-    """Checks to make sure no .h files include <iostream>."""
-    files = []
-    pattern = input_api.re.compile(r'^#include\s*<iostream>',
-                                   input_api.re.MULTILINE)
-    file_filter = lambda x: (input_api.FilterSourceFile(x) and
-                             source_file_filter(x))
-    for f in input_api.AffectedSourceFiles(file_filter):
-        if not f.LocalPath().endswith('.h'):
-            continue
-        contents = input_api.ReadFile(f)
-        if pattern.search(contents):
-            files.append(f)
+  """Checks to make sure no .h files include <iostream>."""
+  files = []
+  pattern = input_api.re.compile(r'^#include\s*<iostream>',
+                                 input_api.re.MULTILINE)
+  file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter(
+      x))
+  for f in input_api.AffectedSourceFiles(file_filter):
+    if not f.LocalPath().endswith('.h'):
+      continue
+    contents = input_api.ReadFile(f)
+    if pattern.search(contents):
+      files.append(f)

-    if len(files):
-        return [
-            output_api.PresubmitError(
-                'Do not #include <iostream> in header files, since it inserts '
-                'static initialization into every file including the header. '
-                'Instead, #include <ostream>. See http://crbug.com/94794',
-                files)
-        ]
-    return []
+  if len(files) > 0:
+    return [
+        output_api.PresubmitError(
+            'Do not #include <iostream> in header files, since it inserts '
+            'static initialization into every file including the header. '
+            'Instead, #include <ostream>. See http://crbug.com/94794', files)
+    ]
+  return []


 def CheckNoPragmaOnce(input_api, output_api, source_file_filter):
-    """Make sure that banned functions are not used."""
-    files = []
-    pattern = input_api.re.compile(r'^#pragma\s+once', input_api.re.MULTILINE)
-    file_filter = lambda x: (input_api.FilterSourceFile(x) and
-                             source_file_filter(x))
-    for f in input_api.AffectedSourceFiles(file_filter):
-        if not f.LocalPath().endswith('.h'):
-            continue
-        contents = input_api.ReadFile(f)
-        if pattern.search(contents):
-            files.append(f)
+  """Make sure that #pragma once is not used in header files."""
+  files = []
+  pattern = input_api.re.compile(r'^#pragma\s+once', input_api.re.MULTILINE)
+  file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter(
+      x))
+  for f in input_api.AffectedSourceFiles(file_filter):
+    if not f.LocalPath().endswith('.h'):
+      continue
+    contents = input_api.ReadFile(f)
+    if pattern.search(contents):
+      files.append(f)
+
+  if files:
+    return [
+        output_api.PresubmitError(
+            'Do not use #pragma once in header files.\n'
+            'See http://www.chromium.org/developers/coding-style'
+            '#TOC-File-headers', files)
+    ]
+  return []

-    if files:
-        return [
-            output_api.PresubmitError(
-                'Do not use #pragma once in header files.\n'
-                'See http://www.chromium.org/developers/coding-style'
-                '#TOC-File-headers',
-                files)
-        ]
-    return []

 def CheckNoFRIEND_TEST(# pylint: disable=invalid-name
         input_api,
         output_api,
         source_file_filter):
-    """Make sure that gtest's FRIEND_TEST() macro is not used, the
+  """Make sure that gtest's FRIEND_TEST() macro is not used, the
   FRIEND_TEST_ALL_PREFIXES() macro from testsupport/gtest_prod_util.h should
   be used instead since that allows for FLAKY_, FAILS_ and DISABLED_
   prefixes."""
-    problems = []
+  problems = []

-    file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
-                             source_file_filter(f))
-    for f in input_api.AffectedFiles(file_filter=file_filter):
-        for line_num, line in f.ChangedContents():
-            if 'FRIEND_TEST(' in line:
-                problems.append('    %s:%d' % (f.LocalPath(), line_num))
+  file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
+                           source_file_filter(f))
+  for f in input_api.AffectedFiles(file_filter=file_filter):
+    for line_num, line in f.ChangedContents():
+      if 'FRIEND_TEST(' in line:
+        problems.append('    %s:%d' % (f.LocalPath(), line_num))

-    if not problems:
-        return []
-    return [
-        output_api.PresubmitPromptWarning(
-            'WebRTC\'s code should not use gtest\'s FRIEND_TEST() macro. '
-            'Include testsupport/gtest_prod_util.h and use '
-            'FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))
-    ]
+  if not problems:
+    return []
+  return [
+      output_api.PresubmitPromptWarning(
+          'WebRTC\'s code should not use gtest\'s FRIEND_TEST() macro. '
+          'Include testsupport/gtest_prod_util.h and use '
+          'FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))
+  ]


 def IsLintDisabled(disabled_paths, file_path):
-    """ Checks if a file is disabled for lint check."""
-    for path in disabled_paths:
-        if file_path == path or os.path.dirname(file_path).startswith(path):
-            return True
-    return False
+  """ Checks if a file is disabled for lint check."""
+  for path in disabled_paths:
+    if file_path == path or os.path.dirname(file_path).startswith(path):
+      return True
+  return False


 def CheckApprovedFilesLintClean(input_api, output_api,
                                 source_file_filter=None):
-    """Checks that all new or non-exempt .cc and .h files pass cpplint.py.
+  """Checks that all new or non-exempt .cc and .h files pass cpplint.py.

   This check is based on CheckChangeLintsClean in
   depot_tools/presubmit_canned_checks.py but has fewer filters and only checks
   added files."""
-    result = []
+  result = []

-    # Initialize cpplint.
-    import cpplint
-    # Access to a protected member _XX of a client class
-    # pylint: disable=W0212
-    cpplint._cpplint_state.ResetErrorCounts()
+  # Initialize cpplint.
+  import cpplint
+  # Access to a protected member _XX of a client class
+  # pylint: disable=W0212
+  cpplint._cpplint_state.ResetErrorCounts()

-    lint_filters = cpplint._Filters()
-    lint_filters.extend(DISABLED_LINT_FILTERS)
-    cpplint._SetFilters(','.join(lint_filters))
+  lint_filters = cpplint._Filters()
+  lint_filters.extend(DISABLED_LINT_FILTERS)
+  cpplint._SetFilters(','.join(lint_filters))

-    # Create a platform independent exempt list for cpplint.
-    disabled_paths = [
-        input_api.os_path.join(*path.split('/')) for path in CPPLINT_EXCEPTIONS
-    ]
+  # Create a platform independent exempt list for cpplint.
+  disabled_paths = [
+      input_api.os_path.join(*path.split('/')) for path in CPPLINT_EXCEPTIONS
+  ]

-    # Use the strictest verbosity level for cpplint.py (level 1) which is the
-    # default when running cpplint.py from command line. To make it possible to
-    # work with not-yet-converted code, we're only applying it to new (or
-    # moved/renamed) files and files not listed in CPPLINT_EXCEPTIONS.
+  # Use the strictest verbosity level for cpplint.py (level 1) which is the
+  # default when running cpplint.py from command line. To make it possible to
+  # work with not-yet-converted code, we're only applying it to new (or
+  # moved/renamed) files and files not listed in CPPLINT_EXCEPTIONS.
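+  # (Level 1 is roughly what a manual run of depot_tools' cpplint.py with
+  # --verbose=1 would report.)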
+  verbosity_level = 1
+  files = []
+  for f in input_api.AffectedSourceFiles(source_file_filter):
+    # Note that moved/renamed files also count as added.
+    if f.Action() == 'A' or not IsLintDisabled(disabled_paths, f.LocalPath()):
+      files.append(f.AbsoluteLocalPath())

-    verbosity_level = 1
-    files = []
-    for f in input_api.AffectedSourceFiles(source_file_filter):
-        # Note that moved/renamed files also count as added.
-        if f.Action() == 'A' or not IsLintDisabled(disabled_paths,
-                                                   f.LocalPath()):
-            files.append(f.AbsoluteLocalPath())

-    for file_name in files:
-        cpplint.ProcessFile(file_name, verbosity_level)
+  for file_name in files:
+    cpplint.ProcessFile(file_name, verbosity_level)

-    if cpplint._cpplint_state.error_count > 0:
-        if input_api.is_committing:
-            res_type = output_api.PresubmitError
-        else:
-            res_type = output_api.PresubmitPromptWarning
-        result = [res_type('Changelist failed cpplint.py check.')]
+  if cpplint._cpplint_state.error_count > 0:
+    if input_api.is_committing:
+      res_type = output_api.PresubmitError
+    else:
+      res_type = output_api.PresubmitPromptWarning
+    result = [res_type('Changelist failed cpplint.py check.')]

-    return result
+  return result


 def CheckNoSourcesAbove(input_api, gn_files, output_api):
-    # Disallow referencing source files with paths above the GN file location.
-    source_pattern = input_api.re.compile(r' +sources \+?= \[(.*?)\]',
-                                          re.MULTILINE | re.DOTALL)
-    file_pattern = input_api.re.compile(r'"((\.\./.*?)|(//.*?))"')
-    violating_gn_files = set()
-    violating_source_entries = []
-    for gn_file in gn_files:
-        contents = input_api.ReadFile(gn_file)
-        for source_block_match in source_pattern.finditer(contents):
-            # Find all source list entries starting with ../ in the source block
-            # (exclude overrides entries).
-            for file_list_match in file_pattern.finditer(
-                    source_block_match.group(1)):
-                source_file = file_list_match.group(1)
-                if 'overrides/' not in source_file:
-                    violating_source_entries.append(source_file)
-                    violating_gn_files.add(gn_file)
-    if violating_gn_files:
-        return [
-            output_api.PresubmitError(
-                'Referencing source files above the directory of the GN file '
-                'is not allowed. Please introduce new GN targets in the proper '
-                'location instead.\n'
-                'Invalid source entries:\n'
-                '%s\n'
-                'Violating GN files:' % '\n'.join(violating_source_entries),
-                items=violating_gn_files)
-        ]
-    return []
+  # Disallow referencing source files with paths above the GN file location.
+  source_pattern = input_api.re.compile(r' +sources \+?= \[(.*?)\]',
+                                        re.MULTILINE | re.DOTALL)
+  file_pattern = input_api.re.compile(r'"((\.\./.*?)|(//.*?))"')
+  violating_gn_files = set()
+  violating_source_entries = []
+  for gn_file in gn_files:
+    contents = input_api.ReadFile(gn_file)
+    for source_block_match in source_pattern.finditer(contents):
+      # Find all source list entries starting with ../ in the source block
+      # (exclude overrides entries).
+      for file_list_match in file_pattern.finditer(source_block_match.group(1)):
+        source_file = file_list_match.group(1)
+        if 'overrides/' not in source_file:
+          violating_source_entries.append(source_file)
+          violating_gn_files.add(gn_file)
+  if violating_gn_files:
+    return [
+        output_api.PresubmitError(
+            'Referencing source files above the directory of the GN file '
+            'is not allowed. Please introduce new GN targets in the proper '
+            'location instead.\n'
+            'Invalid source entries:\n'
+            '%s\n'
+            'Violating GN files:' % '\n'.join(violating_source_entries),
+            items=violating_gn_files)
+    ]
+  return []


 def CheckAbseilDependencies(input_api, gn_files, output_api):
-    """Checks that Abseil dependencies are declared in `absl_deps`."""
-    absl_re = re.compile(r'third_party/abseil-cpp', re.MULTILINE | re.DOTALL)
-    target_types_to_check = [
-        'rtc_library',
-        'rtc_source_set',
-        'rtc_static_library',
-        'webrtc_fuzzer_test',
-    ]
-    error_msg = ('Abseil dependencies in target "%s" (file: %s) '
-                 'should be moved to the "absl_deps" parameter.')
-    errors = []
+  """Checks that Abseil dependencies are declared in `absl_deps`."""
+  absl_re = re.compile(r'third_party/abseil-cpp', re.MULTILINE | re.DOTALL)
+  target_types_to_check = [
+      'rtc_library',
+      'rtc_source_set',
+      'rtc_static_library',
+      'webrtc_fuzzer_test',
+  ]
+  error_msg = ('Abseil dependencies in target "%s" (file: %s) '
+               'should be moved to the "absl_deps" parameter.')
+  errors = []

-    for gn_file in gn_files:
-        gn_file_content = input_api.ReadFile(gn_file)
-        for target_match in TARGET_RE.finditer(gn_file_content):
-            target_type = target_match.group('target_type')
-            target_name = target_match.group('target_name')
-            target_contents = target_match.group('target_contents')
-            if target_type in target_types_to_check:
-                for deps_match in DEPS_RE.finditer(target_contents):
-                    deps = deps_match.group('deps').splitlines()
-                    for dep in deps:
-                        if re.search(absl_re, dep):
-                            errors.append(
-                                output_api.PresubmitError(
-                                    error_msg %
-                                    (target_name, gn_file.LocalPath())))
-                            break  # no need to warn more than once per target
-    return errors
+  # pylint: disable=too-many-nested-blocks
+  for gn_file in gn_files:
+    gn_file_content = input_api.ReadFile(gn_file)
+    for target_match in TARGET_RE.finditer(gn_file_content):
+      target_type = target_match.group('target_type')
+      target_name = target_match.group('target_name')
+      target_contents = target_match.group('target_contents')
+      if target_type in target_types_to_check:
+        for deps_match in DEPS_RE.finditer(target_contents):
+          deps = deps_match.group('deps').splitlines()
+          for dep in deps:
+            if re.search(absl_re, dep):
+              errors.append(
+                  output_api.PresubmitError(error_msg %
+                                            (target_name, gn_file.LocalPath())))
+              break  # no need to warn more than once per target
+  return errors


 def CheckNoMixingSources(input_api, gn_files, output_api):
-    """Disallow mixing C, C++ and Obj-C/Obj-C++ in the same target.
+  """Disallow mixing C, C++ and Obj-C/Obj-C++ in the same target.

   See bugs.webrtc.org/7743 for more context.
   """

-    def _MoreThanOneSourceUsed(*sources_lists):
-        sources_used = 0
-        for source_list in sources_lists:
-            if len(source_list):
-                sources_used += 1
-        return sources_used > 1
+  def _MoreThanOneSourceUsed(*sources_lists):
+    sources_used = 0
+    for source_list in sources_lists:
+      if len(source_list) > 0:
+        sources_used += 1
+    return sources_used > 1

-    errors = defaultdict(lambda: [])
-    for gn_file in gn_files:
-        gn_file_content = input_api.ReadFile(gn_file)
-        for target_match in TARGET_RE.finditer(gn_file_content):
-            # list_of_sources is a list of tuples of the form
-            # (c_files, cc_files, objc_files) that keeps track of all the
-            # sources defined in a target. A GN target can have more that
-            # on definition of sources (since it supports if/else statements).
-            # E.g.:
-            # rtc_static_library("foo") {
-            #   if (is_win) {
-            #     sources = [ "foo.cc" ]
-            #   } else {
-            #     sources = [ "foo.mm" ]
-            #   }
-            # }
-            # This is allowed and the presubmit check should support this case.
-            list_of_sources = []
-            c_files = []
-            cc_files = []
-            objc_files = []
-            target_name = target_match.group('target_name')
-            target_contents = target_match.group('target_contents')
-            for sources_match in SOURCES_RE.finditer(target_contents):
-                if '+=' not in sources_match.group(0):
-                    if c_files or cc_files or objc_files:
-                        list_of_sources.append((c_files, cc_files, objc_files))
-                    c_files = []
-                    cc_files = []
-                    objc_files = []
-                for file_match in FILE_PATH_RE.finditer(
-                        sources_match.group(1)):
-                    file_path = file_match.group('file_path')
-                    extension = file_match.group('extension')
-                    if extension == '.c':
-                        c_files.append(file_path + extension)
-                    if extension == '.cc':
-                        cc_files.append(file_path + extension)
-                    if extension in ['.m', '.mm']:
-                        objc_files.append(file_path + extension)
+      # list_of_sources is a list of tuples of the form
+      # (c_files, cc_files, objc_files) that keeps track of all the
+      # sources defined in a target. A GN target can have more than
+      # one definition of sources (since it supports if/else statements).
+      # E.g.:
+      # rtc_static_library("foo") {
+      #   if (is_win) {
+      #     sources = [ "foo.cc" ]
+      #   } else {
+      #     sources = [ "foo.mm" ]
+      #   }
+      # }
+      # This is allowed and the presubmit check should support this case.
+      list_of_sources = []
+      c_files = []
+      cc_files = []
+      objc_files = []
+      target_name = target_match.group('target_name')
+      target_contents = target_match.group('target_contents')
+      for sources_match in SOURCES_RE.finditer(target_contents):
+        if '+=' not in sources_match.group(0):
+          if c_files or cc_files or objc_files:
+            list_of_sources.append((c_files, cc_files, objc_files))
-            for c_files_list, cc_files_list, objc_files_list in list_of_sources:
-                if _MoreThanOneSourceUsed(c_files_list, cc_files_list,
-                                          objc_files_list):
-                    all_sources = sorted(c_files_list + cc_files_list +
-                                         objc_files_list)
-                    errors[gn_file.LocalPath()].append(
-                        (target_name, all_sources))
-    if errors:
-        return [
-            output_api.PresubmitError(
-                'GN targets cannot mix .c, .cc and .m (or .mm) source files.\n'
-                'Please create a separate target for each collection of '
-                'sources.\n'
-                'Mixed sources: \n'
-                '%s\n'
-                'Violating GN files:\n%s\n' %
-                (json.dumps(errors, indent=2), '\n'.join(errors.keys())))
-        ]
-    return []
+          c_files = []
+          cc_files = []
+          objc_files = []
+        for file_match in FILE_PATH_RE.finditer(sources_match.group(1)):
+          file_path = file_match.group('file_path')
+          extension = file_match.group('extension')
+          if extension == '.c':
+            c_files.append(file_path + extension)
+          if extension == '.cc':
+            cc_files.append(file_path + extension)
+          if extension in ['.m', '.mm']:
+            objc_files.append(file_path + extension)
+        list_of_sources.append((c_files, cc_files, objc_files))
+      for c_files_list, cc_files_list, objc_files_list in list_of_sources:
+        if _MoreThanOneSourceUsed(c_files_list, cc_files_list, objc_files_list):
+          all_sources = sorted(c_files_list + cc_files_list + objc_files_list)
+          errors[gn_file.LocalPath()].append((target_name, all_sources))
+  if errors:
+    return [
+        output_api.PresubmitError(
+            'GN targets cannot mix .c, .cc and .m (or .mm) source files.\n'
+            'Please create a separate target for each collection of '
+            'sources.\n'
+            'Mixed sources: \n'
+            '%s\n'
+            'Violating GN files:\n%s\n' %
+            (json.dumps(errors, indent=2), '\n'.join(list(errors.keys()))))
+    ]
+  return []


 def CheckNoPackageBoundaryViolations(input_api, gn_files, output_api):
-    cwd = input_api.PresubmitLocalPath()
-    with _AddToPath(
-            input_api.os_path.join(cwd, 'tools_webrtc',
-                                   'presubmit_checks_lib')):
-        from check_package_boundaries import CheckPackageBoundaries
-    build_files = [
-        os.path.join(cwd, gn_file.LocalPath()) for gn_file in gn_files
+  cwd = input_api.PresubmitLocalPath()
+  with _AddToPath(
+      input_api.os_path.join(cwd, 'tools_webrtc', 'presubmit_checks_lib')):
+    from check_package_boundaries import CheckPackageBoundaries
+  build_files = [os.path.join(cwd, gn_file.LocalPath()) for gn_file in gn_files]
+  errors = CheckPackageBoundaries(cwd, build_files)[:5]
+  if errors:
+    return [
+        output_api.PresubmitError(
+            'There are package boundary violations in the following GN '
+            'files:',
+            long_text='\n\n'.join(str(err) for err in errors))
     ]
-    errors = CheckPackageBoundaries(cwd, build_files)[:5]
-    if errors:
-        return [
-            output_api.PresubmitError(
-                'There are package boundary violations in the following GN '
-                'files:', long_text='\n\n'.join(str(err) for err in errors))
-        ]
-    return []
+  return []


 def _ReportFileAndLine(filename, line_num):
-    """Default error formatter for _FindNewViolationsOfRule."""
-    return '%s (line %s)' % (filename, line_num)
+  """Default error formatter for _FindNewViolationsOfRule."""
+  return '%s (line %s)' % (filename, line_num)


 def CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, output_api,
                                            error_formatter=_ReportFileAndLine):
-    """Ensure warning suppression flags are not added wihtout a reason."""
-    msg = ('Usage of //build/config/clang:extra_warnings is discouraged '
-           'in WebRTC.\n'
-           'If you are not adding this code (e.g. you are just moving '
-           'existing code) or you want to add an exception,\n'
-           'you can add a comment on the line that causes the problem:\n\n'
-           '"-Wno-odr" # no-presubmit-check TODO(bugs.webrtc.org/BUG_ID)\n'
-           '\n'
-           'Affected files:\n')
-    errors = []  # 2-element tuples with (file, line number)
-    clang_warn_re = input_api.re.compile(
-        r'//build/config/clang:extra_warnings')
-    no_presubmit_re = input_api.re.compile(
-        r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)')
-    for f in gn_files:
-        for line_num, line in f.ChangedContents():
-            if clang_warn_re.search(line) and not no_presubmit_re.search(line):
-                errors.append(error_formatter(f.LocalPath(), line_num))
-    if errors:
-        return [output_api.PresubmitError(msg, errors)]
-    return []
+  """Ensure warning suppression flags are not added without a reason."""
+  msg = ('Usage of //build/config/clang:extra_warnings is discouraged '
+         'in WebRTC.\n'
+         'If you are not adding this code (e.g. you are just moving '
+         'existing code) or you want to add an exception,\n'
+         'you can add a comment on the line that causes the problem:\n\n'
+         '"-Wno-odr" # no-presubmit-check TODO(bugs.webrtc.org/BUG_ID)\n'
+         '\n'
+         'Affected files:\n')
+  errors = []  # 2-element tuples with (file, line number)
+  clang_warn_re = input_api.re.compile(r'//build/config/clang:extra_warnings')
+  # pylint: disable-next=fixme
+  no_presubmit_re = input_api.re.compile(
+      r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)')
+  for f in gn_files:
+    for line_num, line in f.ChangedContents():
+      if clang_warn_re.search(line) and not no_presubmit_re.search(line):
+        errors.append(error_formatter(f.LocalPath(), line_num))
+  if errors:
+    return [output_api.PresubmitError(msg, errors)]
+  return []


 def CheckNoTestCaseUsageIsAdded(input_api, output_api, source_file_filter,
                                 error_formatter=_ReportFileAndLine):
-    error_msg = ('Usage of legacy GoogleTest API detected!\nPlease use the '
-                 'new API: https://github.com/google/googletest/blob/master/'
-                 'googletest/docs/primer.md#beware-of-the-nomenclature.\n'
-                 'Affected files:\n')
-    errors = []  # 2-element tuples with (file, line number)
-    test_case_re = input_api.re.compile(r'TEST_CASE')
-    file_filter = lambda f: (source_file_filter(f) and f.LocalPath().endswith(
-        '.cc'))
-    for f in input_api.AffectedSourceFiles(file_filter):
-        for line_num, line in f.ChangedContents():
-            if test_case_re.search(line):
-                errors.append(error_formatter(f.LocalPath(), line_num))
-    if errors:
-        return [output_api.PresubmitError(error_msg, errors)]
-    return []
+  error_msg = ('Usage of legacy GoogleTest API detected!\nPlease use the '
+               'new API: https://github.com/google/googletest/blob/master/'
+               'googletest/docs/primer.md#beware-of-the-nomenclature.\n'
+               'Affected files:\n')
+  errors = []  # 2-element tuples with (file, line number)
+  test_case_re = input_api.re.compile(r'TEST_CASE')
+  file_filter = lambda f: (source_file_filter(f) and f.LocalPath().endswith(
+      '.cc'))
+  for f in input_api.AffectedSourceFiles(file_filter):
+    for line_num, line in f.ChangedContents():
+      if test_case_re.search(line):
+        errors.append(error_formatter(f.LocalPath(), line_num))
+  if errors:
+    return [output_api.PresubmitError(error_msg, errors)]
+  return []


 def CheckNoStreamUsageIsAdded(input_api, output_api, source_file_filter,
                               error_formatter=_ReportFileAndLine):
-    """Make sure that no more dependencies on stringstream are added."""
-    error_msg = (
-        'Usage of <istream>, <ostream> and <sstream> in WebRTC is '
-        'deprecated.\n'
-        'This includes the following types:\n'
-        'std::istringstream, std::ostringstream, std::wistringstream, '
-        'std::wostringstream,\n'
-        'std::wstringstream, std::ostream, std::wostream, std::istream,'
-        'std::wistream,\n'
-        'std::iostream, std::wiostream.\n'
-        'If you are not adding this code (e.g. you are just moving '
-        'existing code),\n'
-        'you can add a comment on the line that causes the problem:\n\n'
-        '#include <sstream>  // no-presubmit-check TODO(webrtc:8982)\n'
-        'std::ostream& F() {  // no-presubmit-check TODO(webrtc:8982)\n'
-        '\n'
-        'If you are adding new code, consider using '
-        'rtc::SimpleStringBuilder\n'
-        '(in rtc_base/strings/string_builder.h).\n'
-        'Affected files:\n')
-    errors = []  # 2-element tuples with (file, line number)
-    include_re = input_api.re.compile(r'#include <(i|o|s)stream>')
-    usage_re = input_api.re.compile(
-        r'std::(w|i|o|io|wi|wo|wio)(string)*stream')
-    no_presubmit_re = input_api.re.compile(
-        r'// no-presubmit-check TODO\(webrtc:8982\)')
-    file_filter = lambda x: (input_api.FilterSourceFile(x) and
-                             source_file_filter(x))

-    def _IsException(file_path):
-        is_test = any(
-            file_path.endswith(x) for x in
-            ['_test.cc', '_tests.cc', '_unittest.cc', '_unittests.cc'])
-        return (file_path.startswith('examples')
-                or file_path.startswith('test') or is_test)
+  """Make sure that no more dependencies on stringstream are added."""
+  error_msg = ('Usage of <istream>, <ostream> and <sstream> in WebRTC is '
+               'deprecated.\n'
+               'This includes the following types:\n'
+               'std::istringstream, std::ostringstream, std::wistringstream, '
+               'std::wostringstream,\n'
+               'std::wstringstream, std::ostream, std::wostream, std::istream,'
+               'std::wistream,\n'
+               'std::iostream, std::wiostream.\n'
+               'If you are not adding this code (e.g. you are just moving '
+               'existing code),\n'
+               'you can add a comment on the line that causes the problem:\n\n'
+               '#include <sstream>  // no-presubmit-check TODO(webrtc:8982)\n'
+               'std::ostream& F() {  // no-presubmit-check TODO(webrtc:8982)\n'
+               '\n'
+               'If you are adding new code, consider using '
+               'rtc::SimpleStringBuilder\n'
+               '(in rtc_base/strings/string_builder.h).\n'
+               'Affected files:\n')
+  errors = []  # 2-element tuples with (file, line number)
+  include_re = input_api.re.compile(r'#include <(i|o|s)stream>')
+  usage_re = input_api.re.compile(r'std::(w|i|o|io|wi|wo|wio)(string)*stream')
+  no_presubmit_re = input_api.re.compile(
+      r'// no-presubmit-check TODO\(webrtc:8982\)')
+  file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter(
+      x))

+  def _IsException(file_path):
+    is_test = any(
+        file_path.endswith(x)
+        for x in ['_test.cc', '_tests.cc', '_unittest.cc', '_unittests.cc'])
+    return (file_path.startswith('examples') or file_path.startswith('test')
+            or is_test)

+  for f in input_api.AffectedSourceFiles(file_filter):
+    # Usage of stringstream is allowed under examples/ and in tests.
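+    # PRESUBMIT.py itself is skipped too: error_msg above quotes
+    # '#include <sstream>' and 'std::ostream', which would otherwise
+    # trip this very check.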
+    if f.LocalPath() == 'PRESUBMIT.py' or _IsException(f.LocalPath()):
+      continue
+    for line_num, line in f.ChangedContents():
+      if ((include_re.search(line) or usage_re.search(line))
+          and not no_presubmit_re.search(line)):
+        errors.append(error_formatter(f.LocalPath(), line_num))
+  if errors:
+    return [output_api.PresubmitError(error_msg, errors)]
+  return []


 def CheckPublicDepsIsNotUsed(gn_files, input_api, output_api):
-    """Checks that public_deps is not used without a good reason."""
-    result = []
-    no_presubmit_check_re = input_api.re.compile(
-        r'# no-presubmit-check TODO\(webrtc:\d+\)')
-    error_msg = ('public_deps is not recommended in WebRTC BUILD.gn files '
-                 'because it doesn\'t map well to downstream build systems.\n'
-                 'Used in: %s (line %d).\n'
-                 'If you are not adding this code (e.g. you are just moving '
-                 'existing code) or you have a good reason, you can add this '
-                 'comment (verbatim) on the line that causes the problem:\n\n'
-                 'public_deps = [  # no-presubmit-check TODO(webrtc:8603)\n')
-    for affected_file in gn_files:
-        for (line_number, affected_line) in affected_file.ChangedContents():
-            if 'public_deps' in affected_line:
-                surpressed = no_presubmit_check_re.search(affected_line)
-                if not surpressed:
-                    result.append(
-                        output_api.PresubmitError(
-                            error_msg %
-                            (affected_file.LocalPath(), line_number)))
-    return result
+  """Checks that public_deps is not used without a good reason."""
+  result = []
+  no_presubmit_check_re = input_api.re.compile(
+      r'# no-presubmit-check TODO\(webrtc:\d+\)')
+  error_msg = ('public_deps is not recommended in WebRTC BUILD.gn files '
+               'because it doesn\'t map well to downstream build systems.\n'
+               'Used in: %s (line %d).\n'
+               'If you are not adding this code (e.g. you are just moving '
+               'existing code) or you have a good reason, you can add this '
+               'comment (verbatim) on the line that causes the problem:\n\n'
+               'public_deps = [  # no-presubmit-check TODO(webrtc:8603)\n')
+  for affected_file in gn_files:
+    for (line_number, affected_line) in affected_file.ChangedContents():
+      if 'public_deps' in affected_line:
+        surpressed = no_presubmit_check_re.search(affected_line)
+        if not surpressed:
+          result.append(
+              output_api.PresubmitError(
+                  error_msg % (affected_file.LocalPath(), line_number)))
+  return result


 def CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api):
-    result = []
-    error_msg = (
-        'check_includes overrides are not allowed since it can cause '
-        'incorrect dependencies to form. It effectively means that your '
-        'module can include any .h file without depending on its '
-        'corresponding target. There are some exceptional cases when '
-        'this is allowed: if so, get approval from a .gn owner in the '
-        'root OWNERS file.\n'
-        'Used in: %s (line %d).')
-    no_presubmit_re = input_api.re.compile(
-        r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)')
-    for affected_file in gn_files:
-        for (line_number, affected_line) in affected_file.ChangedContents():
-            if ('check_includes' in affected_line
-                    and not no_presubmit_re.search(affected_line)):
-                result.append(
-                    output_api.PresubmitError(
-                        error_msg % (affected_file.LocalPath(), line_number)))
-    return result
+  result = []
+  error_msg = ('check_includes overrides are not allowed since it can cause '
+               'incorrect dependencies to form. It effectively means that your '
+               'module can include any .h file without depending on its '
+               'corresponding target. There are some exceptional cases when '
+               'this is allowed: if so, get approval from a .gn owner in the '
+               'root OWNERS file.\n'
+               'Used in: %s (line %d).')
+  # pylint: disable-next=fixme
+  no_presubmit_re = input_api.re.compile(
+      r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)')
+  for affected_file in gn_files:
+    for (line_number, affected_line) in affected_file.ChangedContents():
+      if ('check_includes' in affected_line
+          and not no_presubmit_re.search(affected_line)):
+        result.append(
+            output_api.PresubmitError(error_msg %
+                                      (affected_file.LocalPath(), line_number)))
+  return result


 def CheckGnChanges(input_api, output_api):
-    file_filter = lambda x: (input_api.FilterSourceFile(
-        x,
-        files_to_check=(r'.+\.(gn|gni)$', ),
-        files_to_skip=(r'.*/presubmit_checks_lib/testdata/.*', )))
+  file_filter = lambda x: (input_api.FilterSourceFile(
+      x,
+      files_to_check=(r'.+\.(gn|gni)$', ),
+      files_to_skip=(r'.*/presubmit_checks_lib/testdata/.*', )))

-    gn_files = []
-    for f in input_api.AffectedSourceFiles(file_filter):
-        gn_files.append(f)
+  gn_files = []
+  for f in input_api.AffectedSourceFiles(file_filter):
+    gn_files.append(f)

-    result = []
-    if gn_files:
-        result.extend(CheckNoSourcesAbove(input_api, gn_files, output_api))
-        result.extend(CheckNoMixingSources(input_api, gn_files, output_api))
-        result.extend(CheckAbseilDependencies(input_api, gn_files, output_api))
-        result.extend(
-            CheckNoPackageBoundaryViolations(input_api, gn_files, output_api))
-        result.extend(CheckPublicDepsIsNotUsed(gn_files, input_api,
-                                               output_api))
-        result.extend(
-            CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api))
-        result.extend(
-            CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api,
-                                                   output_api))
-    return result
+  result = []
+  if gn_files:
+    result.extend(CheckNoSourcesAbove(input_api, gn_files, output_api))
+    result.extend(CheckNoMixingSources(input_api, gn_files, output_api))
+    result.extend(CheckAbseilDependencies(input_api, gn_files, output_api))
+    result.extend(
+        CheckNoPackageBoundaryViolations(input_api, gn_files, output_api))
+    result.extend(CheckPublicDepsIsNotUsed(gn_files, input_api, output_api))
+    result.extend(CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api))
+    result.extend(
+        CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, output_api))
+  return result


 def CheckGnGen(input_api, output_api):
-    """Runs `gn gen --check` with default args to detect mismatches between
+  """Runs `gn gen --check` with default args to detect mismatches between
   #includes and dependencies in the BUILD.gn files, as well as general build
   errors.
   """
-    with _AddToPath(
-            input_api.os_path.join(input_api.PresubmitLocalPath(),
-                                   'tools_webrtc', 'presubmit_checks_lib')):
-        from build_helpers import RunGnCheck
-    errors = RunGnCheck(FindSrcDirPath(input_api.PresubmitLocalPath()))[:5]
-    if errors:
-        return [
-            output_api.PresubmitPromptWarning(
-                'Some #includes do not match the build dependency graph. '
-                'Please run:\n'
-                '  gn gen --check <out_dir>',
-                long_text='\n\n'.join(errors))
-        ]
-    return []
+  with _AddToPath(
+      input_api.os_path.join(input_api.PresubmitLocalPath(), 'tools_webrtc',
+                             'presubmit_checks_lib')):
+    from build_helpers import RunGnCheck
+  errors = RunGnCheck(FindSrcDirPath(input_api.PresubmitLocalPath()))[:5]
+  if errors:
+    return [
+        output_api.PresubmitPromptWarning(
+            'Some #includes do not match the build dependency graph. '
+            'Please run:\n'
+            '  gn gen --check <out_dir>',
+            long_text='\n\n'.join(errors))
+    ]
+  return []


 def CheckUnwantedDependencies(input_api, output_api, source_file_filter):
-    """Runs checkdeps on #include statements added in this
+  """Runs checkdeps on #include statements added in this
   change. Breaking - rules is an error, breaking ! rules is a warning.
   """
-    # Copied from Chromium's src/PRESUBMIT.py.
+  # Copied from Chromium's src/PRESUBMIT.py.

-    # We need to wait until we have an input_api object and use this
-    # roundabout construct to import checkdeps because this file is
-    # eval-ed and thus doesn't have __file__.
-    src_path = FindSrcDirPath(input_api.PresubmitLocalPath())
-    checkdeps_path = input_api.os_path.join(src_path, 'buildtools',
-                                            'checkdeps')
-    if not os.path.exists(checkdeps_path):
-        return [
-            output_api.PresubmitError(
-                'Cannot find checkdeps at %s\nHave you run "gclient sync" to '
-                'download all the DEPS entries?' % checkdeps_path)
-        ]
-    with _AddToPath(checkdeps_path):
-        import checkdeps
-        from cpp_checker import CppChecker
-        from rules import Rule
+  # We need to wait until we have an input_api object and use this
+  # roundabout construct to import checkdeps because this file is
+  # eval-ed and thus doesn't have __file__.
+  src_path = FindSrcDirPath(input_api.PresubmitLocalPath())
+  checkdeps_path = input_api.os_path.join(src_path, 'buildtools', 'checkdeps')
+  if not os.path.exists(checkdeps_path):
+    return [
+        output_api.PresubmitError(
+            'Cannot find checkdeps at %s\nHave you run "gclient sync" to '
+            'download all the DEPS entries?' % checkdeps_path)
+    ]
+  with _AddToPath(checkdeps_path):
+    import checkdeps
+    from cpp_checker import CppChecker
+    from rules import Rule

-    added_includes = []
-    for f in input_api.AffectedFiles(file_filter=source_file_filter):
-        if not CppChecker.IsCppFile(f.LocalPath()):
-            continue
+  added_includes = []
+  for f in input_api.AffectedFiles(file_filter=source_file_filter):
+    if not CppChecker.IsCppFile(f.LocalPath()):
+      continue

-        changed_lines = [line for _, line in f.ChangedContents()]
-        added_includes.append([f.LocalPath(), changed_lines])
+    changed_lines = [line for _, line in f.ChangedContents()]
+    added_includes.append([f.LocalPath(), changed_lines])

-    deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
+  deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())

-    error_descriptions = []
-    warning_descriptions = []
-    for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
-            added_includes):
-        description_with_path = '%s\n    %s' % (path, rule_description)
-        if rule_type == Rule.DISALLOW:
-            error_descriptions.append(description_with_path)
-        else:
-            warning_descriptions.append(description_with_path)
+  error_descriptions = []
+  warning_descriptions = []
+  for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
+      added_includes):
+    description_with_path = '%s\n    %s' % (path, rule_description)
+    if rule_type == Rule.DISALLOW:
+      error_descriptions.append(description_with_path)
+    else:
+      warning_descriptions.append(description_with_path)

-    results = []
-    if error_descriptions:
-        results.append(
-            output_api.PresubmitError(
-                'You added one or more #includes that violate checkdeps rules.'
-                '\nCheck that the DEPS files in these locations contain valid '
-                'rules.\nSee '
-                'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ '
-                'for more details about checkdeps.', error_descriptions))
-    if warning_descriptions:
-        results.append(
-            output_api.PresubmitPromptOrNotify(
-                'You added one or more #includes of files that are temporarily'
-                '\nallowed but being removed. Can you avoid introducing the\n'
-                '#include? See relevant DEPS file(s) for details and contacts.'
-                '\nSee '
-                'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ '
-                'for more details about checkdeps.', warning_descriptions))
-    return results
+  results = []
+  if error_descriptions:
+    results.append(
+        output_api.PresubmitError(
+            'You added one or more #includes that violate checkdeps rules.'
+            '\nCheck that the DEPS files in these locations contain valid '
+            'rules.\nSee '
+            'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ '
+            'for more details about checkdeps.', error_descriptions))
+  if warning_descriptions:
+    results.append(
+        output_api.PresubmitPromptOrNotify(
+            'You added one or more #includes of files that are temporarily'
+            '\nallowed but being removed. Can you avoid introducing the\n'
+            '#include? See relevant DEPS file(s) for details and contacts.'
+            '\nSee '
+            'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ '
+            'for more details about checkdeps.', warning_descriptions))
+  return results


 def CheckCommitMessageBugEntry(input_api, output_api):
-    """Check that bug entries are well-formed in commit message."""
-    bogus_bug_msg = (
-        'Bogus Bug entry: %s. Please specify the issue tracker prefix and the '
-        'issue number, separated by a colon, e.g. webrtc:123 or chromium:12345.'
-    )
-    results = []
-    for bug in input_api.change.BugsFromDescription():
-        bug = bug.strip()
-        if bug.lower() == 'none':
-            continue
-        if 'b/' not in bug and ':' not in bug:
-            try:
-                if int(bug) > 100000:
-                    # Rough indicator for current chromium bugs.
-                    prefix_guess = 'chromium'
-                else:
-                    prefix_guess = 'webrtc'
-                results.append(
-                    'Bug entry requires issue tracker prefix, e.g. %s:%s' %
-                    (prefix_guess, bug))
-            except ValueError:
-                results.append(bogus_bug_msg % bug)
-        elif not (re.match(r'\w+:\d+', bug) or re.match(r'b/\d+', bug)):
-            results.append(bogus_bug_msg % bug)
-    return [output_api.PresubmitError(r) for r in results]
+  """Check that bug entries are well-formed in commit message."""
+  bogus_bug_msg = (
+      'Bogus Bug entry: %s. Please specify the issue tracker prefix and the '
+      'issue number, separated by a colon, e.g. webrtc:123 or chromium:12345.')
+  results = []
+  for bug in input_api.change.BugsFromDescription():
+    bug = bug.strip()
+    if bug.lower() == 'none':
+      continue
+    if 'b/' not in bug and ':' not in bug:
+      try:
+        if int(bug) > 100000:
+          # Rough indicator for current chromium bugs.
+          prefix_guess = 'chromium'
+        else:
+          prefix_guess = 'webrtc'
+        results.append('Bug entry requires issue tracker prefix, e.g. %s:%s' %
+                       (prefix_guess, bug))
+      except ValueError:
+        results.append(bogus_bug_msg % bug)
+    elif not (re.match(r'\w+:\d+', bug) or re.match(r'b/\d+', bug)):
+      results.append(bogus_bug_msg % bug)
+  return [output_api.PresubmitError(r) for r in results]


 def CheckChangeHasBugField(input_api, output_api):
-    """Requires that the changelist is associated with a bug.
+  """Requires that the changelist is associated with a bug.
   This check is stricter than the one in depot_tools/presubmit_canned_checks.py
   since it fails the presubmit if the bug field is missing or doesn't contain
@@ -786,273 +776,272 @@ def CheckChangeHasBugField(input_api, output_api):
   This supports both 'BUG=' and 'Bug:' since we are in the process of migrating
   to Gerrit and it encourages the usage of 'Bug:'.
   """
-    if input_api.change.BugsFromDescription():
-        return []
-    else:
-        return [
-            output_api.PresubmitError(
-                'The "Bug: [bug number]" footer is mandatory. Please create a '
-                'bug and reference it using either of:\n'
-                ' * https://bugs.webrtc.org - reference it using Bug: '
-                'webrtc:XXXX\n'
-                ' * https://crbug.com - reference it using Bug: chromium:XXXXXX'
-            )
-        ]
+  if input_api.change.BugsFromDescription():
+    return []
+  return [
+      output_api.PresubmitError(
+          'The "Bug: [bug number]" footer is mandatory. Please create a '
+          'bug and reference it using either of:\n'
+          ' * https://bugs.webrtc.org - reference it using Bug: '
+          'webrtc:XXXX\n'
+          ' * https://crbug.com - reference it using Bug: chromium:XXXXXX')
+  ]


 def CheckJSONParseErrors(input_api, output_api, source_file_filter):
-    """Check that JSON files do not contain syntax errors."""
+  """Check that JSON files do not contain syntax errors."""

-    def FilterFile(affected_file):
-        return (input_api.os_path.splitext(
-            affected_file.LocalPath())[1] == '.json'
-                and source_file_filter(affected_file))
+  def FilterFile(affected_file):
+    return (input_api.os_path.splitext(affected_file.LocalPath())[1] == '.json'
+            and source_file_filter(affected_file))

-    def GetJSONParseError(input_api, filename):
-        try:
-            contents = input_api.ReadFile(filename)
-            input_api.json.loads(contents)
-        except ValueError as e:
-            return e
-        return None
+  def GetJSONParseError(input_api, filename):
+    try:
+      contents = input_api.ReadFile(filename)
+      input_api.json.loads(contents)
+    except ValueError as e:
+      return e
+    return None

-    results = []
-    for affected_file in input_api.AffectedFiles(file_filter=FilterFile,
-                                                 include_deletes=False):
-        parse_error = GetJSONParseError(input_api,
-                                        affected_file.AbsoluteLocalPath())
-        if parse_error:
-            results.append(
-                output_api.PresubmitError(
-                    '%s could not be parsed: %s' %
-                    (affected_file.LocalPath(), parse_error)))
-    return results
+  results = []
+  for affected_file in input_api.AffectedFiles(file_filter=FilterFile,
+                                               include_deletes=False):
+    parse_error = GetJSONParseError(input_api,
+                                    affected_file.AbsoluteLocalPath())
+    if parse_error:
+      results.append(
+          output_api.PresubmitError('%s could not be parsed: %s' %
+                                    (affected_file.LocalPath(), parse_error)))
+  return results


 def RunPythonTests(input_api, output_api):
-    def Join(*args):
-        return input_api.os_path.join(input_api.PresubmitLocalPath(), *args)
+  def Join(*args):
+    return input_api.os_path.join(input_api.PresubmitLocalPath(), *args)

-    test_directories = [
-        input_api.PresubmitLocalPath(),
-        Join('rtc_tools', 'py_event_log_analyzer'),
-        Join('audio', 'test', 'unittests'),
-    ] + [
-        root for root, _, files in os.walk(Join('tools_webrtc')) if any(
-            f.endswith('_test.py') for f in files)
-    ]
+  test_directories = [
+      input_api.PresubmitLocalPath(),
+      Join('rtc_tools', 'py_event_log_analyzer'),
+      Join('audio', 'test', 'unittests'),
+  ] + [
+      root for root, _, files in os.walk(Join('tools_webrtc')) if any(
+          f.endswith('_test.py') for f in files)
+  ]

-    tests = []
-    for directory in test_directories:
-        tests.extend(
-            input_api.canned_checks.GetUnitTestsInDirectory(
-                input_api,
-                output_api,
-                directory,
-                files_to_check=[r'.+_test\.py$']))
-    return input_api.RunTests(tests, parallel=True)
+  tests = []
+  for directory in test_directories:
+    tests.extend(
+        input_api.canned_checks.GetUnitTestsInDirectory(
+            input_api,
+            output_api,
+            directory,
+            files_to_check=[r'.+_test\.py$'],
+            run_on_python2=False))
+  return input_api.RunTests(tests, parallel=True)


 def CheckUsageOfGoogleProtobufNamespace(input_api, output_api,
                                         source_file_filter):
-    """Checks that the namespace google::protobuf has not been used."""
-    files = []
-    pattern = input_api.re.compile(r'google::protobuf')
-    proto_utils_path = os.path.join('rtc_base', 'protobuf_utils.h')
-    file_filter = lambda x: (input_api.FilterSourceFile(x) and
-                             source_file_filter(x))
-    for f in input_api.AffectedSourceFiles(file_filter):
-        if f.LocalPath() in [proto_utils_path, 'PRESUBMIT.py']:
-            continue
-        contents = input_api.ReadFile(f)
-        if pattern.search(contents):
-            files.append(f)
+  """Checks that the namespace google::protobuf has not been used."""
+  files = []
+  pattern = input_api.re.compile(r'google::protobuf')
+  proto_utils_path = os.path.join('rtc_base', 'protobuf_utils.h')
+  file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter(
+      x))
+  for f in input_api.AffectedSourceFiles(file_filter):
+    if f.LocalPath() in [proto_utils_path, 'PRESUBMIT.py']:
+      continue
+    contents = input_api.ReadFile(f)
+    if pattern.search(contents):
+      files.append(f)

-    if files:
-        return [
-            output_api.PresubmitError(
-                'Please avoid to use namespace `google::protobuf` directly.\n'
-                'Add a using directive in `%s` and include that header instead.'
-                % proto_utils_path, files)
-        ]
-    return []
+  if files:
+    return [
+        output_api.PresubmitError(
+            'Please avoid using the namespace `google::protobuf` directly.\n'
+            'Add a using directive in `%s` and include that header instead.' %
+            proto_utils_path, files)
+    ]
+  return []


 def _LicenseHeader(input_api):
-    """Returns the license header regexp."""
-    # Accept any year number from 2003 to the current year
-    current_year = int(input_api.time.strftime('%Y'))
-    allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1)))
-    years_re = '(' + '|'.join(allowed_years) + ')'
-    license_header = (
-        r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. '
-        r'All [Rr]ights [Rr]eserved\.\n'
-        r'.*?\n'
-        r'.*? Use of this source code is governed by a BSD-style license\n'
-        r'.*? that can be found in the LICENSE file in the root of the source\n'
-        r'.*? tree\. An additional intellectual property rights grant can be '
-        r'found\n'
-        r'.*? in the file PATENTS\. All contributing project authors may\n'
-        r'.*? be found in the AUTHORS file in the root of the source tree\.\n'
-    ) % {
-        'year': years_re,
-    }
-    return license_header
+  """Returns the license header regexp."""
+  # Accept any year number from 2003 to the current year
+  current_year = int(input_api.time.strftime('%Y'))
+  allowed_years = (str(s) for s in reversed(range(2003, current_year + 1)))
+  years_re = '(' + '|'.join(allowed_years) + ')'
+  license_header = (
+      r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. '
+      r'All [Rr]ights [Rr]eserved\.\n'
+      r'.*?\n'
+      r'.*? Use of this source code is governed by a BSD-style license\n'
+      r'.*? that can be found in the LICENSE file in the root of the source\n'
+      r'.*? tree\. An additional intellectual property rights grant can be '
+      r'found\n'
+      r'.*? in the file PATENTS\. All contributing project authors may\n'
+      r'.*? be found in the AUTHORS file in the root of the source tree\.\n'
+  ) % {
+      'year': years_re,
+  }
+  return license_header


 def CommonChecks(input_api, output_api):
-    """Checks common to both upload and commit."""
-    results = []
-    # Filter out files that are in objc or ios dirs from being cpplint-ed since
-    # they do not follow C++ lint rules.
-    exception_list = input_api.DEFAULT_FILES_TO_SKIP + (
-        r".*\bobjc[\\\/].*",
-        r".*objc\.[hcm]+$",
-    )
-    source_file_filter = lambda x: input_api.FilterSourceFile(
-        x, None, exception_list)
-    results.extend(
-        CheckApprovedFilesLintClean(input_api, output_api, source_file_filter))
-    results.extend(
-        input_api.canned_checks.CheckLicense(input_api, output_api,
-                                             _LicenseHeader(input_api)))
+  """Checks common to both upload and commit."""
+  results = []
+  # Filter out files that are in objc or ios dirs from being cpplint-ed since
+  # they do not follow C++ lint rules.
+  exception_list = input_api.DEFAULT_FILES_TO_SKIP + (
+      r".*\bobjc[\\\/].*",
+      r".*objc\.[hcm]+$",
+  )
+  source_file_filter = lambda x: input_api.FilterSourceFile(
+      x, None, exception_list)
+  results.extend(
+      CheckApprovedFilesLintClean(input_api, output_api, source_file_filter))
+  results.extend(
+      input_api.canned_checks.CheckLicense(input_api, output_api,
+                                           _LicenseHeader(input_api)))

-    # TODO(bugs.webrtc.org/12114): Delete this filter and run pylint on
-    # all python files. This is a temporary solution.
-    python_file_filter = lambda f: (f.LocalPath().endswith('.py') and
-                                    source_file_filter(f))
-    python_changed_files = [f.LocalPath() for f in input_api.AffectedFiles(
-        include_deletes=False, file_filter=python_file_filter)]
+  # TODO(bugs.webrtc.org/12114): Delete this filter and run pylint on
+  # all python files. This is a temporary solution.
+  python_file_filter = lambda f: (f.LocalPath().endswith('.py') and
+                                  source_file_filter(f))
+  python_changed_files = [
+      f.LocalPath()
+      for f in input_api.AffectedFiles(include_deletes=False,
+                                       file_filter=python_file_filter)
+  ]

-    results.extend(
-        input_api.canned_checks.RunPylint(
-            input_api,
-            output_api,
-            files_to_check=python_changed_files,
-            files_to_skip=(
-                r'^base[\\\/].*\.py$',
-                r'^build[\\\/].*\.py$',
-                r'^buildtools[\\\/].*\.py$',
-                r'^infra[\\\/].*\.py$',
-                r'^ios[\\\/].*\.py$',
-                r'^out.*[\\\/].*\.py$',
-                r'^testing[\\\/].*\.py$',
-                r'^third_party[\\\/].*\.py$',
-                r'^tools[\\\/].*\.py$',
-                # TODO(phoglund): should arguably be checked.
-                r'^tools_webrtc[\\\/]mb[\\\/].*\.py$',
-                r'^xcodebuild.*[\\\/].*\.py$',
-            ),
-            pylintrc='pylintrc'))
+  results.extend(
+      input_api.canned_checks.RunPylint(
+          input_api,
+          output_api,
+          files_to_check=python_changed_files,
+          files_to_skip=(
+              r'^base[\\\/].*\.py$',
+              r'^build[\\\/].*\.py$',
+              r'^buildtools[\\\/].*\.py$',
+              r'^infra[\\\/].*\.py$',
+              r'^ios[\\\/].*\.py$',
+              r'^out.*[\\\/].*\.py$',
+              r'^testing[\\\/].*\.py$',
+              r'^third_party[\\\/].*\.py$',
+              r'^tools[\\\/].*\.py$',
+              # TODO(bugs.webrtc.org/13605): should arguably be checked.
+              r'^tools_webrtc[\\\/]mb[\\\/].*\.py$',
+              r'^xcodebuild.*[\\\/].*\.py$',
+          ),
+          pylintrc='pylintrc',
+          version='2.7'))

-    # TODO(nisse): talk/ is no more, so make below checks simpler?
-    # WebRTC can't use the presubmit_canned_checks.PanProjectChecks function
-    # since we need to have different license checks
-    # in talk/ and webrtc/directories.
-    # Instead, hand-picked checks are included below.
+  # TODO(bugs.webrtc.org/13606): talk/ is no more, so make below checks simpler?
+ # WebRTC can't use the presubmit_canned_checks.PanProjectChecks function + # since we need to have different license checks + # in talk/ and webrtc/directories. + # Instead, hand-picked checks are included below. - # .m and .mm files are ObjC files. For simplicity we will consider - # .h files in ObjC subdirectories ObjC headers. - objc_filter_list = (r'.+\.m$', r'.+\.mm$', r'.+objc\/.+\.h$') - # Skip long-lines check for DEPS and GN files. - build_file_filter_list = (r'.+\.gn$', r'.+\.gni$', 'DEPS') - # Also we will skip most checks for third_party directory. - third_party_filter_list = (r'^third_party[\\\/].+', ) - eighty_char_sources = lambda x: input_api.FilterSourceFile( - x, - files_to_skip=build_file_filter_list + objc_filter_list + - third_party_filter_list) - hundred_char_sources = lambda x: input_api.FilterSourceFile( - x, files_to_check=objc_filter_list) - non_third_party_sources = lambda x: input_api.FilterSourceFile( - x, files_to_skip=third_party_filter_list) + # .m and .mm files are ObjC files. For simplicity we will consider + # .h files in ObjC subdirectories ObjC headers. + objc_filter_list = (r'.+\.m$', r'.+\.mm$', r'.+objc\/.+\.h$') + # Skip long-lines check for DEPS and GN files. + build_file_filter_list = (r'.+\.gn$', r'.+\.gni$', 'DEPS') + # Also we will skip most checks for third_party directory. + third_party_filter_list = (r'^third_party[\\\/].+', ) + eighty_char_sources = lambda x: input_api.FilterSourceFile( + x, + files_to_skip=build_file_filter_list + objc_filter_list + + third_party_filter_list) + hundred_char_sources = lambda x: input_api.FilterSourceFile( + x, files_to_check=objc_filter_list) + non_third_party_sources = lambda x: input_api.FilterSourceFile( + x, files_to_skip=third_party_filter_list) - results.extend( - input_api.canned_checks.CheckLongLines( - input_api, - output_api, - maxlen=80, - source_file_filter=eighty_char_sources)) - results.extend( - input_api.canned_checks.CheckLongLines( - input_api, - output_api, - maxlen=100, - source_file_filter=hundred_char_sources)) - results.extend( - input_api.canned_checks.CheckChangeHasNoTabs( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend( - input_api.canned_checks.CheckChangeHasNoStrayWhitespace( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend( - input_api.canned_checks.CheckAuthorizedAuthor( - input_api, - output_api, - bot_allowlist=[ - 'chromium-webrtc-autoroll@webrtc-ci.iam.gserviceaccount.com', - 'webrtc-version-updater@webrtc-ci.iam.gserviceaccount.com', - ])) - results.extend( - input_api.canned_checks.CheckChangeTodoHasOwner( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend( - input_api.canned_checks.CheckPatchFormatted(input_api, output_api)) - results.extend(CheckNativeApiHeaderChanges(input_api, output_api)) - results.extend( - CheckNoIOStreamInHeaders(input_api, - output_api, - source_file_filter=non_third_party_sources)) - results.extend( - CheckNoPragmaOnce(input_api, - output_api, - source_file_filter=non_third_party_sources)) - results.extend( - CheckNoFRIEND_TEST(input_api, + results.extend( + input_api.canned_checks.CheckLongLines( + input_api, + output_api, + maxlen=80, + source_file_filter=eighty_char_sources)) + results.extend( + input_api.canned_checks.CheckLongLines( + input_api, + output_api, + maxlen=100, + source_file_filter=hundred_char_sources)) + results.extend( + input_api.canned_checks.CheckChangeHasNoTabs( + input_api, output_api, 
+  results.extend(
+      input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
+          input_api, output_api, source_file_filter=non_third_party_sources))
+  results.extend(
+      input_api.canned_checks.CheckAuthorizedAuthor(
+          input_api,
+          output_api,
+          bot_allowlist=[
+              'chromium-webrtc-autoroll@webrtc-ci.iam.gserviceaccount.com',
+              'webrtc-version-updater@webrtc-ci.iam.gserviceaccount.com',
+          ]))
+  results.extend(
+      input_api.canned_checks.CheckChangeTodoHasOwner(
+          input_api, output_api, source_file_filter=non_third_party_sources))
+  results.extend(
+      input_api.canned_checks.CheckPatchFormatted(input_api, output_api))
+  results.extend(CheckNativeApiHeaderChanges(input_api, output_api))
+  results.extend(
+      CheckNoIOStreamInHeaders(input_api,
+                               output_api,
+                               source_file_filter=non_third_party_sources))
+  results.extend(
+      CheckNoPragmaOnce(input_api,
+                        output_api,
+                        source_file_filter=non_third_party_sources))
+  results.extend(
+      CheckNoFRIEND_TEST(input_api,
+                         output_api,
+                         source_file_filter=non_third_party_sources))
+  results.extend(CheckGnChanges(input_api, output_api))
+  results.extend(
+      CheckUnwantedDependencies(input_api,
+                                output_api,
+                                source_file_filter=non_third_party_sources))
+  results.extend(
+      CheckJSONParseErrors(input_api, output_api,
+                           source_file_filter=non_third_party_sources))
-    results.extend(CheckGnChanges(input_api, output_api))
-    results.extend(
-        CheckUnwantedDependencies(input_api,
-                                  output_api,
-                                  source_file_filter=non_third_party_sources))
-    results.extend(
-        CheckJSONParseErrors(input_api,
-                             output_api,
-                             source_file_filter=non_third_party_sources))
-    results.extend(RunPythonTests(input_api, output_api))
-    results.extend(
-        CheckUsageOfGoogleProtobufNamespace(
-            input_api, output_api, source_file_filter=non_third_party_sources))
-    results.extend(
-        CheckOrphanHeaders(input_api,
-                           output_api,
-                           source_file_filter=non_third_party_sources))
-    results.extend(
-        CheckNewlineAtTheEndOfProtoFiles(
-            input_api, output_api, source_file_filter=non_third_party_sources))
-    results.extend(
-        CheckNoStreamUsageIsAdded(input_api, output_api,
+  results.extend(RunPythonTests(input_api, output_api))
+  results.extend(
+      CheckUsageOfGoogleProtobufNamespace(
+          input_api, output_api, source_file_filter=non_third_party_sources))
+  results.extend(
+      CheckOrphanHeaders(input_api,
+                         output_api,
+                         source_file_filter=non_third_party_sources))
+  results.extend(
+      CheckNewlineAtTheEndOfProtoFiles(
+          input_api, output_api, source_file_filter=non_third_party_sources))
+  results.extend(
+      CheckNoStreamUsageIsAdded(input_api, output_api,
+                                non_third_party_sources))
+  results.extend(
+      CheckNoTestCaseUsageIsAdded(input_api, output_api,
+                                  non_third_party_sources))
-    results.extend(
-        CheckNoTestCaseUsageIsAdded(input_api, output_api,
-                                    non_third_party_sources))
-    results.extend(CheckAddedDepsHaveTargetApprovals(input_api, output_api))
-    results.extend(CheckApiDepsFileIsUpToDate(input_api, output_api))
-    results.extend(
-        CheckAbslMemoryInclude(input_api, output_api, non_third_party_sources))
-    results.extend(
-        CheckAssertUsage(input_api, output_api, non_third_party_sources))
-    results.extend(
-        CheckBannedAbslMakeUnique(input_api, output_api,
-                                  non_third_party_sources))
-    results.extend(
-        CheckObjcApiSymbols(input_api, output_api, non_third_party_sources))
-    return results
+  results.extend(CheckAddedDepsHaveTargetApprovals(input_api, output_api))
+  results.extend(CheckApiDepsFileIsUpToDate(input_api, output_api))
+  results.extend(
+      CheckAbslMemoryInclude(input_api, output_api, non_third_party_sources))
+  results.extend(
+      CheckAssertUsage(input_api, output_api, non_third_party_sources))
+  results.extend(
+      CheckBannedAbslMakeUnique(input_api, output_api, non_third_party_sources))
+  results.extend(
+      CheckObjcApiSymbols(input_api, output_api, non_third_party_sources))
+  return results
 
 
 def CheckApiDepsFileIsUpToDate(input_api, output_api):
-    """Check that 'include_rules' in api/DEPS is up to date.
+  """Check that 'include_rules' in api/DEPS is up to date.
 
   The file api/DEPS must be kept up to date in order to avoid including
   internal headers from WebRTC's api/ headers.
@@ -1061,388 +1050,379 @@ def CheckApiDepsFileIsUpToDate(input_api, output_api):
   rule for each root level directory. More focused allow rules can be
   added to 'specific_include_rules'.
   """
-    results = []
-    api_deps = os.path.join(input_api.PresubmitLocalPath(), 'api', 'DEPS')
-    with open(api_deps) as f:
-        deps_content = _ParseDeps(f.read())
+  results = []
+  api_deps = os.path.join(input_api.PresubmitLocalPath(), 'api', 'DEPS')
+  with open(api_deps) as f:
+    deps_content = _ParseDeps(f.read())
 
-    include_rules = deps_content.get('include_rules', [])
-    dirs_to_skip = set(['api', 'docs'])
+  include_rules = deps_content.get('include_rules', [])
+  dirs_to_skip = set(['api', 'docs'])
 
-    # Only check top level directories affected by the current CL.
-    dirs_to_check = set()
-    for f in input_api.AffectedFiles():
-        path_tokens = [t for t in f.LocalPath().split(os.sep) if t]
-        if len(path_tokens) > 1:
-            if (path_tokens[0] not in dirs_to_skip and os.path.isdir(
-                    os.path.join(input_api.PresubmitLocalPath(),
-                                 path_tokens[0]))):
-                dirs_to_check.add(path_tokens[0])
+  # Only check top level directories affected by the current CL.
+  dirs_to_check = set()
+  for f in input_api.AffectedFiles():
+    path_tokens = [t for t in f.LocalPath().split(os.sep) if t]
+    if len(path_tokens) > 1:
+      if (path_tokens[0] not in dirs_to_skip and os.path.isdir(
+          os.path.join(input_api.PresubmitLocalPath(), path_tokens[0]))):
+        dirs_to_check.add(path_tokens[0])
 
-    missing_include_rules = set()
-    for p in dirs_to_check:
-        rule = '-%s' % p
-        if rule not in include_rules:
-            missing_include_rules.add(rule)
+  missing_include_rules = set()
+  for p in dirs_to_check:
+    rule = '-%s' % p
+    if rule not in include_rules:
+      missing_include_rules.add(rule)
 
-    if missing_include_rules:
-        error_msg = [
-            'include_rules = [\n',
-            '  ...\n',
-        ]
+  if missing_include_rules:
+    error_msg = [
+        'include_rules = [\n',
+        '  ...\n',
+    ]
 
-        for r in sorted(missing_include_rules):
-            error_msg.append('  "%s",\n' % str(r))
+    for r in sorted(missing_include_rules):
+      error_msg.append('  "%s",\n' % str(r))
 
-        error_msg.append('  ...\n')
-        error_msg.append(']\n')
+    error_msg.append('  ...\n')
+    error_msg.append(']\n')
 
-    results.append(
-        output_api.PresubmitError(
-            'New root level directory detected! WebRTC api/ headers should '
-            'not #include headers from \n'
-            'the new directory, so please update "include_rules" in file\n'
-            '"%s". Example:\n%s\n' % (api_deps, ''.join(error_msg))))
+    results.append(
+        output_api.PresubmitError(
+            'New root level directory detected! WebRTC api/ headers should '
+            'not #include headers from \n'
+            'the new directory, so please update "include_rules" in file\n'
+            '"%s". Example:\n%s\n' % (api_deps, ''.join(error_msg))))
 
-    return results
+  return results
 
 
 def CheckBannedAbslMakeUnique(input_api, output_api, source_file_filter):
-    file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
-                             source_file_filter(f))
+  file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
+                           source_file_filter(f))
 
-    files = []
-    for f in input_api.AffectedFiles(include_deletes=False,
-                                     file_filter=file_filter):
-        for _, line in f.ChangedContents():
-            if 'absl::make_unique' in line:
-                files.append(f)
-                break
+  files = []
+  for f in input_api.AffectedFiles(include_deletes=False,
+                                   file_filter=file_filter):
+    for _, line in f.ChangedContents():
+      if 'absl::make_unique' in line:
+        files.append(f)
+        break
 
-    if len(files):
-        return [
-            output_api.PresubmitError(
-                'Please use std::make_unique instead of absl::make_unique.\n'
-                'Affected files:', files)
-        ]
-    return []
+  if files:
+    return [
+        output_api.PresubmitError(
+            'Please use std::make_unique instead of absl::make_unique.\n'
+            'Affected files:', files)
+    ]
+  return []
 
 
 def CheckObjcApiSymbols(input_api, output_api, source_file_filter):
-    rtc_objc_export = re.compile(r'RTC_OBJC_EXPORT(.|\n){26}',
-                                 re.MULTILINE | re.DOTALL)
-    file_filter = lambda f: (f.LocalPath().endswith(('.h')) and
-                             source_file_filter(f))
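+  # Capture RTC_OBJC_EXPORT plus the next 26 characters: a window wide enough
+  # to see whether the exported declaration is wrapped in RTC_OBJC_TYPE().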
+  rtc_objc_export = re.compile(r'RTC_OBJC_EXPORT(.|\n){26}',
+                               re.MULTILINE | re.DOTALL)
+  file_filter = lambda f: (f.LocalPath().endswith(('.h')) and
+                           source_file_filter(f))
 
-    files = []
-    file_filter = lambda x: (input_api.FilterSourceFile(x) and
-                             source_file_filter(x))
-    for f in input_api.AffectedSourceFiles(file_filter):
-        if not f.LocalPath().endswith('.h') or not 'sdk/objc' in f.LocalPath():
-            continue
-        if f.LocalPath().endswith('sdk/objc/base/RTCMacros.h'):
-            continue
-        contents = input_api.ReadFile(f)
-        for match in rtc_objc_export.finditer(contents):
-            export_block = match.group(0)
-            if 'RTC_OBJC_TYPE' not in export_block:
-                files.append(f.LocalPath())
+  files = []
+  file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter(
+      x))
+  for f in input_api.AffectedSourceFiles(file_filter):
+    if not f.LocalPath().endswith('.h') or not 'sdk/objc' in f.LocalPath():
+      continue
+    if f.LocalPath().endswith('sdk/objc/base/RTCMacros.h'):
+      continue
+    contents = input_api.ReadFile(f)
+    for match in rtc_objc_export.finditer(contents):
+      export_block = match.group(0)
+      if 'RTC_OBJC_TYPE' not in export_block:
+        files.append(f.LocalPath())
 
-    if len(files):
-        return [
-            output_api.PresubmitError(
-                'RTC_OBJC_EXPORT types must be wrapped into an RTC_OBJC_TYPE() '
-                + 'macro.\n\n' + 'For example:\n' +
-                'RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE(RtcFoo)\n\n' +
-                'RTC_OBJC_EXPORT @interface RTC_OBJC_TYPE(RtcFoo)\n\n' +
-                'Please fix the following files:', files)
-        ]
-    return []
+  if len(files) > 0:
+    return [
+        output_api.PresubmitError(
+            'RTC_OBJC_EXPORT types must be wrapped into an RTC_OBJC_TYPE() ' +
+            'macro.\n\n' + 'For example:\n' +
+            'RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE(RtcFoo)\n\n' +
+            'RTC_OBJC_EXPORT @interface RTC_OBJC_TYPE(RtcFoo)\n\n' +
+            'Please fix the following files:', files)
+    ]
+  return []
 
 
 def CheckAssertUsage(input_api, output_api, source_file_filter):
-    pattern = input_api.re.compile(r'\bassert\(')
-    file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h', '.m', '.mm'))
-                             and source_file_filter(f))
+  pattern = input_api.re.compile(r'\bassert\(')
+  file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h', '.m', '.mm'))
+                           and source_file_filter(f))
 
-    files = []
-    for f in input_api.AffectedFiles(include_deletes=False,
-                                     file_filter=file_filter):
-        for _, line in f.ChangedContents():
-            if pattern.search(line):
-                files.append(f.LocalPath())
-                break
+  files = []
+  for f in input_api.AffectedFiles(include_deletes=False,
+                                   file_filter=file_filter):
+    for _, line in f.ChangedContents():
+      if pattern.search(line):
+        files.append(f.LocalPath())
+        break
 
-    if len(files):
-        return [
-            output_api.PresubmitError(
-                'Usage of assert() has been detected in the following files, '
-                'please use RTC_DCHECK() instead.\n Files:', files)
-        ]
-    return []
+  if len(files) > 0:
+    return [
+        output_api.PresubmitError(
+            'Usage of assert() has been detected in the following files, '
+            'please use RTC_DCHECK() instead.\n Files:', files)
+    ]
+  return []
 
 
 def CheckAbslMemoryInclude(input_api, output_api, source_file_filter):
-    pattern = input_api.re.compile(r'^#include\s*"absl/memory/memory.h"',
-                                   input_api.re.MULTILINE)
-    file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
-                             source_file_filter(f))
+  pattern = input_api.re.compile(r'^#include\s*"absl/memory/memory.h"',
+                                 input_api.re.MULTILINE)
+  file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
+                           source_file_filter(f))
 
-    files = []
-    for f in input_api.AffectedFiles(include_deletes=False,
-                                     file_filter=file_filter):
-        contents = input_api.ReadFile(f)
-        if pattern.search(contents):
-            continue
-        for _, line in f.ChangedContents():
-            if 'absl::WrapUnique' in line:
-                files.append(f)
-                break
+  files = []
+  for f in input_api.AffectedFiles(include_deletes=False,
+                                   file_filter=file_filter):
+    contents = input_api.ReadFile(f)
+    if pattern.search(contents):
+      continue
+    for _, line in f.ChangedContents():
+      if 'absl::WrapUnique' in line:
+        files.append(f)
+        break
 
-    if len(files):
-        return [
-            output_api.PresubmitError(
-                'Please include "absl/memory/memory.h" header for '
-                'absl::WrapUnique.\nThis header may or may not be included '
-                'transitively depending on the C++ standard version.', files)
-        ]
-    return []
+  if len(files) > 0:
+    return [
+        output_api.PresubmitError(
+            'Please include "absl/memory/memory.h" header for '
+            'absl::WrapUnique.\nThis header may or may not be included '
+            'transitively depending on the C++ standard version.', files)
+    ]
+  return []
 
 
 def CheckChangeOnUpload(input_api, output_api):
-    results = []
-    results.extend(CommonChecks(input_api, output_api))
-    results.extend(CheckGnGen(input_api, output_api))
-    results.extend(
-        input_api.canned_checks.CheckGNFormatted(input_api, output_api))
-    return results
+  results = []
+  results.extend(CommonChecks(input_api, output_api))
+  results.extend(CheckGnGen(input_api, output_api))
+  results.extend(input_api.canned_checks.CheckGNFormatted(
+      input_api, output_api))
+  return results
 
 
 def CheckChangeOnCommit(input_api, output_api):
-    results = []
-    results.extend(CommonChecks(input_api, output_api))
-    results.extend(VerifyNativeApiHeadersListIsValid(input_api, output_api))
-    results.extend(input_api.canned_checks.CheckOwners(input_api, output_api))
-    results.extend(
-        input_api.canned_checks.CheckChangeWasUploaded(input_api, output_api))
-    results.extend(
-        input_api.canned_checks.CheckChangeHasDescription(
-            input_api, output_api))
-    results.extend(CheckChangeHasBugField(input_api, output_api))
-    results.extend(CheckCommitMessageBugEntry(input_api, output_api))
-    results.extend(
-        input_api.canned_checks.CheckTreeIsOpen(
-            input_api,
-            output_api,
-            json_url='http://webrtc-status.appspot.com/current?format=json'))
-    return results
+  results = []
+  results.extend(CommonChecks(input_api, output_api))
+  results.extend(VerifyNativeApiHeadersListIsValid(input_api, output_api))
+  results.extend(input_api.canned_checks.CheckOwners(input_api, output_api))
+  results.extend(
+      input_api.canned_checks.CheckChangeWasUploaded(input_api, output_api))
+  results.extend(
+      input_api.canned_checks.CheckChangeHasDescription(input_api, output_api))
+  results.extend(CheckChangeHasBugField(input_api, output_api))
+  results.extend(CheckCommitMessageBugEntry(input_api, output_api))
+  results.extend(
+      input_api.canned_checks.CheckTreeIsOpen(
+          input_api,
+          output_api,
+          json_url='http://webrtc-status.appspot.com/current?format=json'))
+  return results
 
 
 def CheckOrphanHeaders(input_api, output_api, source_file_filter):
-    # We need to wait until we have an input_api object and use this
-    # roundabout construct to import presubmit_checks_lib because this file is
-    # eval-ed and thus doesn't have __file__.
-    error_msg = """{} should be listed in {}."""
-    results = []
-    exempt_paths = [
-        os.path.join('tools_webrtc', 'ios', 'SDK'),
-    ]
-    with _AddToPath(
-            input_api.os_path.join(input_api.PresubmitLocalPath(),
-                                   'tools_webrtc', 'presubmit_checks_lib')):
-        from check_orphan_headers import GetBuildGnPathFromFilePath
-        from check_orphan_headers import IsHeaderInBuildGn
+  # We need to wait until we have an input_api object and use this
+  # roundabout construct to import presubmit_checks_lib because this file is
+  # eval-ed and thus doesn't have __file__.
+  error_msg = """{} should be listed in {}."""
+  results = []
+  exempt_paths = [
+      os.path.join('tools_webrtc', 'ios', 'SDK'),
+  ]
+  with _AddToPath(
+      input_api.os_path.join(input_api.PresubmitLocalPath(), 'tools_webrtc',
+                             'presubmit_checks_lib')):
+    from check_orphan_headers import GetBuildGnPathFromFilePath
+    from check_orphan_headers import IsHeaderInBuildGn
 
-    file_filter = lambda x: input_api.FilterSourceFile(
-        x, files_to_skip=exempt_paths) and source_file_filter(x)
-    for f in input_api.AffectedSourceFiles(file_filter):
-        if f.LocalPath().endswith('.h'):
-            file_path = os.path.abspath(f.LocalPath())
-            root_dir = os.getcwd()
-            gn_file_path = GetBuildGnPathFromFilePath(file_path,
-                                                      os.path.exists, root_dir)
-            in_build_gn = IsHeaderInBuildGn(file_path, gn_file_path)
-            if not in_build_gn:
-                results.append(
-                    output_api.PresubmitError(
-                        error_msg.format(f.LocalPath(),
-                                         os.path.relpath(gn_file_path))))
-    return results
+  file_filter = lambda x: input_api.FilterSourceFile(
+      x, files_to_skip=exempt_paths) and source_file_filter(x)
+  for f in input_api.AffectedSourceFiles(file_filter):
+    if f.LocalPath().endswith('.h'):
+      file_path = os.path.abspath(f.LocalPath())
+      root_dir = os.getcwd()
+      gn_file_path = GetBuildGnPathFromFilePath(file_path, os.path.exists,
+                                                root_dir)
+      in_build_gn = IsHeaderInBuildGn(file_path, gn_file_path)
+      if not in_build_gn:
+        results.append(
+            output_api.PresubmitError(
+                error_msg.format(f.LocalPath(), os.path.relpath(gn_file_path))))
+  return results
 
 
 def CheckNewlineAtTheEndOfProtoFiles(input_api, output_api,
                                      source_file_filter):
-    """Checks that all .proto files are terminated with a newline."""
-    error_msg = 'File {} must end with exactly one newline.'
-    results = []
-    file_filter = lambda x: input_api.FilterSourceFile(
-        x, files_to_check=(r'.+\.proto$', )) and source_file_filter(x)
-    for f in input_api.AffectedSourceFiles(file_filter):
-        file_path = f.LocalPath()
-        with open(file_path) as f:
-            lines = f.readlines()
-            if len(lines) > 0 and not lines[-1].endswith('\n'):
-                results.append(
-                    output_api.PresubmitError(error_msg.format(file_path)))
-    return results
+  """Checks that all .proto files are terminated with a newline."""
+  error_msg = 'File {} must end with exactly one newline.'
+  results = []
+  file_filter = lambda x: input_api.FilterSourceFile(
+      x, files_to_check=(r'.+\.proto$', )) and source_file_filter(x)
+  for f in input_api.AffectedSourceFiles(file_filter):
+    file_path = f.LocalPath()
+    with open(file_path) as f:
+      lines = f.readlines()
+      if len(lines) > 0 and not lines[-1].endswith('\n'):
+        results.append(output_api.PresubmitError(error_msg.format(file_path)))
+  return results
 
 
 def _ExtractAddRulesFromParsedDeps(parsed_deps):
-    """Extract the rules that add dependencies from a parsed DEPS file.
+  """Extract the rules that add dependencies from a parsed DEPS file.
 
   Args:
     parsed_deps: the locals dictionary from evaluating the DEPS file."""
-    add_rules = set()
+  add_rules = set()
+  add_rules.update([
+      rule[1:] for rule in parsed_deps.get('include_rules', [])
+      if rule.startswith('+') or rule.startswith('!')
+  ])
+  for _, rules in parsed_deps.get('specific_include_rules', {}).items():
     add_rules.update([
-        rule[1:] for rule in parsed_deps.get('include_rules', [])
+        rule[1:] for rule in rules
         if rule.startswith('+') or rule.startswith('!')
     ])
-    for _, rules in parsed_deps.get('specific_include_rules', {}).iteritems():
-        add_rules.update([
-            rule[1:] for rule in rules
-            if rule.startswith('+') or rule.startswith('!')
-        ])
-    return add_rules
+  return add_rules
 
 
 def _ParseDeps(contents):
-    """Simple helper for parsing DEPS files."""
+  """Simple helper for parsing DEPS files."""
 
-    # Stubs for handling special syntax in the root DEPS file.
-    class VarImpl(object):
-        def __init__(self, local_scope):
-            self._local_scope = local_scope
+  # Stubs for handling special syntax in the root DEPS file.
+  class VarImpl:
+    def __init__(self, local_scope):
+      self._local_scope = local_scope
 
-        def Lookup(self, var_name):
-            """Implements the Var syntax."""
-            try:
-                return self._local_scope['vars'][var_name]
-            except KeyError:
-                raise Exception('Var is not defined: %s' % var_name)
+    def Lookup(self, var_name):
+      """Implements the Var syntax."""
+      try:
+        return self._local_scope['vars'][var_name]
+      except KeyError as var_not_defined:
+        raise Exception('Var is not defined: %s' %
+                        var_name) from var_not_defined
 
-    local_scope = {}
-    global_scope = {
-        'Var': VarImpl(local_scope).Lookup,
-    }
-    exec contents in global_scope, local_scope
-    return local_scope
+  local_scope = {}
+  global_scope = {
+      'Var': VarImpl(local_scope).Lookup,
+  }
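+  # DEPS files are Python syntax; evaluating the contents leaves the file's
+  # variables (include_rules and friends) in local_scope for the caller.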
+  exec(contents, global_scope, local_scope)
+  return local_scope
 
 
 def _CalculateAddedDeps(os_path, old_contents, new_contents):
-    """Helper method for _CheckAddedDepsHaveTargetApprovals. Returns
+  """Helper method for _CheckAddedDepsHaveTargetApprovals. Returns
 
   a set of DEPS entries that we should look up.
 
   For a directory (rather than a specific filename) we fake a path to
   a specific filename by adding /DEPS. This is chosen as a file that will
   seldom or never be subject to per-file include_rules.
   """
-    # We ignore deps entries on auto-generated directories.
-    auto_generated_dirs = ['grit', 'jni']
+  # We ignore deps entries on auto-generated directories.
+  auto_generated_dirs = ['grit', 'jni']
 
-    old_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(old_contents))
-    new_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(new_contents))
+  old_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(old_contents))
+  new_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(new_contents))
 
-    added_deps = new_deps.difference(old_deps)
+  added_deps = new_deps.difference(old_deps)
 
-    results = set()
-    for added_dep in added_deps:
-        if added_dep.split('/')[0] in auto_generated_dirs:
-            continue
-        # Assume that a rule that ends in .h is a rule for a specific file.
-        if added_dep.endswith('.h'):
-            results.add(added_dep)
-        else:
-            results.add(os_path.join(added_dep, 'DEPS'))
-    return results
+  results = set()
+  for added_dep in added_deps:
+    if added_dep.split('/')[0] in auto_generated_dirs:
+      continue
+    # Assume that a rule that ends in .h is a rule for a specific file.
+    if added_dep.endswith('.h'):
+      results.add(added_dep)
+    else:
+      results.add(os_path.join(added_dep, 'DEPS'))
+  return results
 
 
 def CheckAddedDepsHaveTargetApprovals(input_api, output_api):
-    """When a dependency prefixed with + is added to a DEPS file, we
+  """When a dependency prefixed with + is added to a DEPS file, we
 
   want to make sure that the change is reviewed by an OWNER of the
  target file or directory, to avoid layering violations from being
   introduced. This check verifies that this happens.
   """
-    virtual_depended_on_files = set()
+  virtual_depended_on_files = set()
 
-    file_filter = lambda f: not input_api.re.match(
-        r"^third_party[\\\/](WebKit|blink)[\\\/].*", f.LocalPath())
-    for f in input_api.AffectedFiles(include_deletes=False,
-                                     file_filter=file_filter):
-        filename = input_api.os_path.basename(f.LocalPath())
-        if filename == 'DEPS':
-            virtual_depended_on_files.update(
-                _CalculateAddedDeps(input_api.os_path,
-                                    '\n'.join(f.OldContents()),
-                                    '\n'.join(f.NewContents())))
-
-    if not virtual_depended_on_files:
-        return []
-
-    if input_api.is_committing:
-        if input_api.tbr:
-            return [
-                output_api.PresubmitNotifyResult(
-                    '--tbr was specified, skipping OWNERS check for DEPS '
-                    'additions'
-                )
-            ]
-        if input_api.dry_run:
-            return [
-                output_api.PresubmitNotifyResult(
-                    'This is a dry run, skipping OWNERS check for DEPS '
-                    'additions'
-                )
-            ]
-        if not input_api.change.issue:
-            return [
-                output_api.PresubmitError(
-                    "DEPS approval by OWNERS check failed: this change has "
-                    "no change number, so we can't check it for approvals.")
-            ]
-        output = output_api.PresubmitError
-    else:
-        output = output_api.PresubmitNotifyResult
-
-    owner_email, reviewers = (
-        input_api.canned_checks.GetCodereviewOwnerAndReviewers(
-            input_api,
-            None,
-            approval_needed=input_api.is_committing))
-
-    owner_email = owner_email or input_api.change.author_email
-
-    approval_status = input_api.owners_client.GetFilesApprovalStatus(
-        virtual_depended_on_files, reviewers.union([owner_email]), [])
-    missing_files = [
-        f for f in virtual_depended_on_files
-        if approval_status[f] != input_api.owners_client.APPROVED]
-
-    # We strip the /DEPS part that was added by
-    # _FilesToCheckForIncomingDeps to fake a path to a file in a
-    # directory.
-    def StripDeps(path):
-        start_deps = path.rfind('/DEPS')
-        if start_deps != -1:
-            return path[:start_deps]
-        else:
-            return path
-
-    unapproved_dependencies = [
-        "'+%s'," % StripDeps(path) for path in missing_files
-    ]
-
-    if unapproved_dependencies:
-        output_list = [
-            output(
-                'You need LGTM from owners of depends-on paths in DEPS that '
-                ' were modified in this CL:\n    %s' %
-                '\n    '.join(sorted(unapproved_dependencies)))
-        ]
-        suggested_owners = input_api.owners_client.SuggestOwners(
-            missing_files, exclude=[owner_email])
-        output_list.append(
-            output('Suggested missing target path OWNERS:\n    %s' %
-                   '\n    '.join(suggested_owners or [])))
-        return output_list
+  file_filter = lambda f: not input_api.re.match(
+      r"^third_party[\\\/](WebKit|blink)[\\\/].*", f.LocalPath())
+  for f in input_api.AffectedFiles(include_deletes=False,
+                                   file_filter=file_filter):
+    filename = input_api.os_path.basename(f.LocalPath())
+    if filename == 'DEPS':
+      virtual_depended_on_files.update(
+          _CalculateAddedDeps(input_api.os_path, '\n'.join(f.OldContents()),
+                              '\n'.join(f.NewContents())))
 
+  if not virtual_depended_on_files:
     return []
+
+  if input_api.is_committing:
+    if input_api.tbr:
+      return [
+          output_api.PresubmitNotifyResult(
+              '--tbr was specified, skipping OWNERS check for DEPS '
+              'additions')
+      ]
+    if input_api.dry_run:
+      return [
+          output_api.PresubmitNotifyResult(
+              'This is a dry run, skipping OWNERS check for DEPS '
+              'additions')
+      ]
+    if not input_api.change.issue:
+      return [
+          output_api.PresubmitError(
+              "DEPS approval by OWNERS check failed: this change has "
+              "no change number, so we can't check it for approvals.")
+      ]
+    output = output_api.PresubmitError
+  else:
+    output = output_api.PresubmitNotifyResult
+
+  owner_email, reviewers = (
+      input_api.canned_checks.GetCodereviewOwnerAndReviewers(
+          input_api, None, approval_needed=input_api.is_committing))
+
+  owner_email = owner_email or input_api.change.author_email
+
+  approval_status = input_api.owners_client.GetFilesApprovalStatus(
+      virtual_depended_on_files, reviewers.union([owner_email]), [])
+  missing_files = [
+      f for f in virtual_depended_on_files
+      if approval_status[f] != input_api.owners_client.APPROVED
+  ]
+
+  # We strip the /DEPS part that was added by
+  # _FilesToCheckForIncomingDeps to fake a path to a file in a
+  # directory.
+  def StripDeps(path):
+    start_deps = path.rfind('/DEPS')
+    if start_deps != -1:
+      return path[:start_deps]
+    return path
+
+  unapproved_dependencies = [
+      "'+%s'," % StripDeps(path) for path in missing_files
+  ]
+
+  if unapproved_dependencies:
+    output_list = [
+        output('You need LGTM from owners of depends-on paths in DEPS that '
+               ' were modified in this CL:\n    %s' %
+               '\n    '.join(sorted(unapproved_dependencies)))
+    ]
+    suggested_owners = input_api.owners_client.SuggestOwners(
+        missing_files, exclude=[owner_email])
+    output_list.append(
+        output('Suggested missing target path OWNERS:\n    %s' %
+               '\n    '.join(suggested_owners or [])))
+    return output_list
+
+  return []
diff --git a/presubmit_test.py b/presubmit_test.py
index e7879f99f7..b6951bf92f 100755
--- a/presubmit_test.py
+++ b/presubmit_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env vpython3
 
 # Copyright 2017 The WebRTC project authors. All Rights Reserved.
 #
@@ -8,6 +8,7 @@
 # in the file PATENTS. All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
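+# Gives Python 2 the same absolute-import semantics as Python 3 while the
+# tests are runnable under both interpreters.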
+from __future__ import absolute_import
 import os
 import shutil
 import tempfile
@@ -20,145 +21,145 @@ from presubmit_test_mocks import MockInputApi, MockOutputApi, MockFile, MockChan
 
 
 class CheckBugEntryFieldTest(unittest.TestCase):
-    def testCommitMessageBugEntryWithNoError(self):
-        mock_input_api = MockInputApi()
-        mock_output_api = MockOutputApi()
-        mock_input_api.change = MockChange([], ['webrtc:1234'])
-        errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
-                                                      mock_output_api)
-        self.assertEqual(0, len(errors))
+  def testCommitMessageBugEntryWithNoError(self):
+    mock_input_api = MockInputApi()
+    mock_output_api = MockOutputApi()
+    mock_input_api.change = MockChange([], ['webrtc:1234'])
+    errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
+                                                  mock_output_api)
+    self.assertEqual(0, len(errors))
 
-    def testCommitMessageBugEntryReturnError(self):
-        mock_input_api = MockInputApi()
-        mock_output_api = MockOutputApi()
-        mock_input_api.change = MockChange([], ['webrtc:1234', 'webrtc=4321'])
-        errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
-                                                      mock_output_api)
-        self.assertEqual(1, len(errors))
-        self.assertEqual(('Bogus Bug entry: webrtc=4321. Please specify'
-                          ' the issue tracker prefix and the issue number,'
-                          ' separated by a colon, e.g. webrtc:123 or'
-                          ' chromium:12345.'), str(errors[0]))
+  def testCommitMessageBugEntryReturnError(self):
+    mock_input_api = MockInputApi()
+    mock_output_api = MockOutputApi()
+    mock_input_api.change = MockChange([], ['webrtc:1234', 'webrtc=4321'])
+    errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
+                                                  mock_output_api)
+    self.assertEqual(1, len(errors))
+    self.assertEqual(('Bogus Bug entry: webrtc=4321. Please specify'
+                      ' the issue tracker prefix and the issue number,'
+                      ' separated by a colon, e.g. webrtc:123 or'
+                      ' chromium:12345.'), str(errors[0]))
 
-    def testCommitMessageBugEntryWithoutPrefix(self):
-        mock_input_api = MockInputApi()
-        mock_output_api = MockOutputApi()
-        mock_input_api.change = MockChange([], ['1234'])
-        errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
-                                                      mock_output_api)
-        self.assertEqual(1, len(errors))
-        self.assertEqual(('Bug entry requires issue tracker prefix, '
-                          'e.g. webrtc:1234'), str(errors[0]))
+  def testCommitMessageBugEntryWithoutPrefix(self):
+    mock_input_api = MockInputApi()
+    mock_output_api = MockOutputApi()
+    mock_input_api.change = MockChange([], ['1234'])
+    errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
+                                                  mock_output_api)
+    self.assertEqual(1, len(errors))
+    self.assertEqual(('Bug entry requires issue tracker prefix, '
+                      'e.g. webrtc:1234'), str(errors[0]))
 
-    def testCommitMessageBugEntryIsNone(self):
-        mock_input_api = MockInputApi()
-        mock_output_api = MockOutputApi()
-        mock_input_api.change = MockChange([], ['None'])
-        errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
-                                                      mock_output_api)
-        self.assertEqual(0, len(errors))
+  def testCommitMessageBugEntryIsNone(self):
+    mock_input_api = MockInputApi()
+    mock_output_api = MockOutputApi()
+    mock_input_api.change = MockChange([], ['None'])
+    errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
+                                                  mock_output_api)
+    self.assertEqual(0, len(errors))
 
-    def testCommitMessageBugEntrySupportInternalBugReference(self):
-        mock_input_api = MockInputApi()
-        mock_output_api = MockOutputApi()
-        mock_input_api.change.BUG = 'b/12345'
-        errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
-                                                      mock_output_api)
-        self.assertEqual(0, len(errors))
-        mock_input_api.change.BUG = 'b/12345, webrtc:1234'
-        errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
-                                                      mock_output_api)
-        self.assertEqual(0, len(errors))
+  def testCommitMessageBugEntrySupportInternalBugReference(self):
+    mock_input_api = MockInputApi()
+    mock_output_api = MockOutputApi()
+    mock_input_api.change.BUG = 'b/12345'
+    errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
+                                                  mock_output_api)
+    self.assertEqual(0, len(errors))
+    mock_input_api.change.BUG = 'b/12345, webrtc:1234'
+    errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api,
+                                                  mock_output_api)
+    self.assertEqual(0, len(errors))
 
 
 class CheckNewlineAtTheEndOfProtoFilesTest(unittest.TestCase):
-    def setUp(self):
-        self.tmp_dir = tempfile.mkdtemp()
-        self.proto_file_path = os.path.join(self.tmp_dir, 'foo.proto')
-        self.input_api = MockInputApi()
-        self.output_api = MockOutputApi()
+  def setUp(self):
+    self.tmp_dir = tempfile.mkdtemp()
+    self.proto_file_path = os.path.join(self.tmp_dir, 'foo.proto')
+    self.input_api = MockInputApi()
+    self.output_api = MockOutputApi()
 
-    def tearDown(self):
-        shutil.rmtree(self.tmp_dir, ignore_errors=True)
+  def tearDown(self):
+    shutil.rmtree(self.tmp_dir, ignore_errors=True)
 
-    def testErrorIfProtoFileDoesNotEndWithNewline(self):
-        self._GenerateProtoWithoutNewlineAtTheEnd()
-        self.input_api.files = [MockFile(self.proto_file_path)]
-        errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(
-            self.input_api, self.output_api, lambda x: True)
-        self.assertEqual(1, len(errors))
-        self.assertEqual(
-            'File %s must end with exactly one newline.' %
-            self.proto_file_path, str(errors[0]))
+  def testErrorIfProtoFileDoesNotEndWithNewline(self):
+    self._GenerateProtoWithoutNewlineAtTheEnd()
+    self.input_api.files = [MockFile(self.proto_file_path)]
+    errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(
+        self.input_api, self.output_api, lambda x: True)
+    self.assertEqual(1, len(errors))
+    self.assertEqual(
+        'File %s must end with exactly one newline.' % self.proto_file_path,
+        str(errors[0]))
 
-    def testNoErrorIfProtoFileEndsWithNewline(self):
-        self._GenerateProtoWithNewlineAtTheEnd()
-        self.input_api.files = [MockFile(self.proto_file_path)]
-        errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(
-            self.input_api, self.output_api, lambda x: True)
-        self.assertEqual(0, len(errors))
+  def testNoErrorIfProtoFileEndsWithNewline(self):
+    self._GenerateProtoWithNewlineAtTheEnd()
+    self.input_api.files = [MockFile(self.proto_file_path)]
+    errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(
+        self.input_api, self.output_api, lambda x: True)
+    self.assertEqual(0, len(errors))
 
-    def _GenerateProtoWithNewlineAtTheEnd(self):
-        with open(self.proto_file_path, 'w') as f:
-            f.write(
-                textwrap.dedent("""
+  def _GenerateProtoWithNewlineAtTheEnd(self):
+    with open(self.proto_file_path, 'w') as f:
+      f.write(
+          textwrap.dedent("""
         syntax = "proto2";
        option optimize_for = LITE_RUNTIME;
        package webrtc.audioproc;
      """))
 
-    def _GenerateProtoWithoutNewlineAtTheEnd(self):
-        with open(self.proto_file_path, 'w') as f:
-            f.write(
-                textwrap.dedent("""
+  def _GenerateProtoWithoutNewlineAtTheEnd(self):
+    with open(self.proto_file_path, 'w') as f:
+      f.write(
+          textwrap.dedent("""
        syntax = "proto2";
        option optimize_for = LITE_RUNTIME;
        package webrtc.audioproc;"""))
 
 
 class CheckNoMixingSourcesTest(unittest.TestCase):
-    def setUp(self):
-        self.tmp_dir = tempfile.mkdtemp()
-        self.file_path = os.path.join(self.tmp_dir, 'BUILD.gn')
-        self.input_api = MockInputApi()
-        self.output_api = MockOutputApi()
+  def setUp(self):
+    self.tmp_dir = tempfile.mkdtemp()
+    self.file_path = os.path.join(self.tmp_dir, 'BUILD.gn')
+    self.input_api = MockInputApi()
+    self.output_api = MockOutputApi()
 
-    def tearDown(self):
-        shutil.rmtree(self.tmp_dir, ignore_errors=True)
+  def tearDown(self):
+    shutil.rmtree(self.tmp_dir, ignore_errors=True)
 
-    def testErrorIfCAndCppAreMixed(self):
-        self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.cc', 'bar.h'])
+  def testErrorIfCAndCppAreMixed(self):
+    self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.cc', 'bar.h'])
 
-    def testErrorIfCAndObjCAreMixed(self):
-        self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.m', 'bar.h'])
+  def testErrorIfCAndObjCAreMixed(self):
+    self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.m', 'bar.h'])
 
-    def testErrorIfCAndObjCppAreMixed(self):
-        self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.mm', 'bar.h'])
+  def testErrorIfCAndObjCppAreMixed(self):
+    self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.mm', 'bar.h'])
 
-    def testErrorIfCppAndObjCAreMixed(self):
-        self._AssertNumberOfErrorsWithSources(1, ['foo.cc', 'bar.m', 'bar.h'])
+  def testErrorIfCppAndObjCAreMixed(self):
+    self._AssertNumberOfErrorsWithSources(1, ['foo.cc', 'bar.m', 'bar.h'])
 
-    def testErrorIfCppAndObjCppAreMixed(self):
-        self._AssertNumberOfErrorsWithSources(1, ['foo.cc', 'bar.mm', 'bar.h'])
+  def testErrorIfCppAndObjCppAreMixed(self):
+    self._AssertNumberOfErrorsWithSources(1, ['foo.cc', 'bar.mm', 'bar.h'])
 
-    def testNoErrorIfOnlyC(self):
-        self._AssertNumberOfErrorsWithSources(0, ['foo.c', 'bar.c', 'bar.h'])
+  def testNoErrorIfOnlyC(self):
+    self._AssertNumberOfErrorsWithSources(0, ['foo.c', 'bar.c', 'bar.h'])
 
-    def testNoErrorIfOnlyCpp(self):
-        self._AssertNumberOfErrorsWithSources(0, ['foo.cc', 'bar.cc', 'bar.h'])
+  def testNoErrorIfOnlyCpp(self):
+    self._AssertNumberOfErrorsWithSources(0, ['foo.cc', 'bar.cc', 'bar.h'])
 
-    def testNoErrorIfOnlyObjC(self):
-        self._AssertNumberOfErrorsWithSources(0, ['foo.m', 'bar.m', 'bar.h'])
+  def testNoErrorIfOnlyObjC(self):
+    self._AssertNumberOfErrorsWithSources(0, ['foo.m', 'bar.m', 'bar.h'])
 
-    def testNoErrorIfOnlyObjCpp(self):
-        self._AssertNumberOfErrorsWithSources(0, ['foo.mm', 'bar.mm', 'bar.h'])
+  def testNoErrorIfOnlyObjCpp(self):
+    self._AssertNumberOfErrorsWithSources(0, ['foo.mm', 'bar.mm', 'bar.h'])
 
-    def testNoErrorIfObjCAndObjCppAreMixed(self):
-        self._AssertNumberOfErrorsWithSources(0, ['foo.m', 'bar.mm', 'bar.h'])
+  def testNoErrorIfObjCAndObjCppAreMixed(self):
+    self._AssertNumberOfErrorsWithSources(0, ['foo.m', 'bar.mm', 'bar.h'])
 
-    def testNoErrorIfSourcesAreInExclusiveIfBranches(self):
-        self._GenerateBuildFile(
-            textwrap.dedent("""
+  def testNoErrorIfSourcesAreInExclusiveIfBranches(self):
+    self._GenerateBuildFile(
+        textwrap.dedent("""
       rtc_library("bar_foo") {
        if (is_win) {
          sources = [
@@ -184,15 +185,15 @@ class CheckNoMixingSourcesTest(unittest.TestCase):
        }
      }
      """))
-        self.input_api.files = [MockFile(self.file_path)]
-        errors = PRESUBMIT.CheckNoMixingSources(self.input_api,
-                                                [MockFile(self.file_path)],
-                                                self.output_api)
-        self.assertEqual(0, len(errors))
+    self.input_api.files = [MockFile(self.file_path)]
+    errors = PRESUBMIT.CheckNoMixingSources(self.input_api,
+                                            [MockFile(self.file_path)],
+                                            self.output_api)
+    self.assertEqual(0, len(errors))
 
-    def testErrorIfSourcesAreNotInExclusiveIfBranches(self):
-        self._GenerateBuildFile(
-            textwrap.dedent("""
+  def testErrorIfSourcesAreNotInExclusiveIfBranches(self):
+    self._GenerateBuildFile(
+        textwrap.dedent("""
      rtc_library("bar_foo") {
        if (is_win) {
          sources = [
@@ -224,23 +225,22 @@ class CheckNoMixingSourcesTest(unittest.TestCase):
        }
      }
      """))
-        self.input_api.files = [MockFile(self.file_path)]
-        errors = PRESUBMIT.CheckNoMixingSources(self.input_api,
-                                                [MockFile(self.file_path)],
-                                                self.output_api)
-        self.assertEqual(1, len(errors))
-        self.assertTrue('bar.cc' in str(errors[0]))
-        self.assertTrue('bar.mm' in str(errors[0]))
-        self.assertTrue('foo.cc' in str(errors[0]))
-        self.assertTrue('foo.mm' in str(errors[0]))
-        self.assertTrue('bar.m' in str(errors[0]))
-        self.assertTrue('bar.c' in str(errors[0]))
+    self.input_api.files = [MockFile(self.file_path)]
+    errors = PRESUBMIT.CheckNoMixingSources(self.input_api,
+                                            [MockFile(self.file_path)],
+                                            self.output_api)
+    self.assertEqual(1, len(errors))
+    self.assertTrue('bar.cc' in str(errors[0]))
+    self.assertTrue('bar.mm' in str(errors[0]))
+    self.assertTrue('foo.cc' in str(errors[0]))
+    self.assertTrue('foo.mm' in str(errors[0]))
+    self.assertTrue('bar.m' in str(errors[0]))
+    self.assertTrue('bar.c' in str(errors[0]))
 
-    def _AssertNumberOfErrorsWithSources(self, number_of_errors, sources):
-        assert len(
-            sources) == 3, 'This function accepts a list of 3 source files'
-        self._GenerateBuildFile(
-            textwrap.dedent("""
+  def _AssertNumberOfErrorsWithSources(self, number_of_errors, sources):
+    assert len(sources) == 3, 'This function accepts a list of 3 source files'
+    self._GenerateBuildFile(
+        textwrap.dedent("""
      rtc_static_library("bar_foo") {
        sources = [
          "%s",
@@ -256,84 +256,76 @@ class CheckNoMixingSourcesTest(unittest.TestCase):
        ],
      }
      """ % (tuple(sources) * 2)))
-        self.input_api.files = [MockFile(self.file_path)]
-        errors = PRESUBMIT.CheckNoMixingSources(self.input_api,
-                                                [MockFile(self.file_path)],
-                                                self.output_api)
-        self.assertEqual(number_of_errors, len(errors))
-        if number_of_errors == 1:
-            for source in sources:
-                if not source.endswith('.h'):
-                    self.assertTrue(source in str(errors[0]))
+    self.input_api.files = [MockFile(self.file_path)]
+    errors = PRESUBMIT.CheckNoMixingSources(self.input_api,
+                                            [MockFile(self.file_path)],
+                                            self.output_api)
+    self.assertEqual(number_of_errors, len(errors))
+    if number_of_errors == 1:
+      for source in sources:
+        if not source.endswith('.h'):
+          self.assertTrue(source in str(errors[0]))
 
-    def _GenerateBuildFile(self, content):
-        with open(self.file_path, 'w') as f:
-            f.write(content)
+  def _GenerateBuildFile(self, content):
+    with open(self.file_path, 'w') as f:
+      f.write(content)
 
 
 class CheckAssertUsageTest(unittest.TestCase):
-    def setUp(self):
-        self.input_api = MockInputApi()
-        self.output_api = MockOutputApi()
-        self._content_with_assert = [
-            'void Foo() {',
-            '  assert(true);',
-            '}'
-        ]
-        self._content_without_assert = [
-            'void Foo() {',
-            '  RTC_CHECK(true);',
-            '}'
-        ]
+  def setUp(self):
+    self.input_api = MockInputApi()
+    self.output_api = MockOutputApi()
+    self._content_with_assert = ['void Foo() {', '  assert(true);', '}']
+    self._content_without_assert = ['void Foo() {', '  RTC_CHECK(true);', '}']
 
-    def testDetectsAssertInCcFile(self):
-        self.input_api.files = [
-            MockFile('with_assert.cc', self._content_with_assert),
-            MockFile('without_assert.cc', self._content_without_assert),
-        ]
-        errors = PRESUBMIT.CheckAssertUsage(
-            self.input_api, self.output_api, lambda x: True)
-        self.assertEqual(1, len(errors))
-        self.assertEqual('with_assert.cc', errors[0].items[0])
+  def testDetectsAssertInCcFile(self):
+    self.input_api.files = [
+        MockFile('with_assert.cc', self._content_with_assert),
+        MockFile('without_assert.cc', self._content_without_assert),
+    ]
+    errors = PRESUBMIT.CheckAssertUsage(self.input_api,
+                                        self.output_api, lambda x: True)
+    self.assertEqual(1, len(errors))
+    self.assertEqual('with_assert.cc', errors[0].items[0])
 
-    def testDetectsAssertInHeaderFile(self):
-        self.input_api.files = [
-            MockFile('with_assert.h', self._content_with_assert),
-            MockFile('without_assert.h', self._content_without_assert),
-        ]
-        errors = PRESUBMIT.CheckAssertUsage(
-            self.input_api, self.output_api, lambda x: True)
-        self.assertEqual(1, len(errors))
-        self.assertEqual('with_assert.h', errors[0].items[0])
+  def testDetectsAssertInHeaderFile(self):
+    self.input_api.files = [
+        MockFile('with_assert.h', self._content_with_assert),
+        MockFile('without_assert.h', self._content_without_assert),
+    ]
+    errors = PRESUBMIT.CheckAssertUsage(self.input_api,
+                                        self.output_api, lambda x: True)
+    self.assertEqual(1, len(errors))
+    self.assertEqual('with_assert.h', errors[0].items[0])
 
-    def testDetectsAssertInObjCFile(self):
-        self.input_api.files = [
-            MockFile('with_assert.m', self._content_with_assert),
-            MockFile('without_assert.m', self._content_without_assert),
-        ]
-        errors = PRESUBMIT.CheckAssertUsage(
-            self.input_api, self.output_api, lambda x: True)
-        self.assertEqual(1, len(errors))
-        self.assertEqual('with_assert.m', errors[0].items[0])
+  def testDetectsAssertInObjCFile(self):
+    self.input_api.files = [
+        MockFile('with_assert.m', self._content_with_assert),
+        MockFile('without_assert.m', self._content_without_assert),
+    ]
+    errors = PRESUBMIT.CheckAssertUsage(self.input_api,
+                                        self.output_api, lambda x: True)
+    self.assertEqual(1, len(errors))
+    self.assertEqual('with_assert.m', errors[0].items[0])
 
-    def testDetectsAssertInObjCppFile(self):
-        self.input_api.files = [
-            MockFile('with_assert.mm', self._content_with_assert),
-            MockFile('without_assert.mm', self._content_without_assert),
-        ]
-        errors = PRESUBMIT.CheckAssertUsage(
-            self.input_api, self.output_api, lambda x: True)
-        self.assertEqual(1, len(errors))
-        self.assertEqual('with_assert.mm', errors[0].items[0])
+  def testDetectsAssertInObjCppFile(self):
+    self.input_api.files = [
+        MockFile('with_assert.mm', self._content_with_assert),
+        MockFile('without_assert.mm', self._content_without_assert),
+    ]
+    errors = PRESUBMIT.CheckAssertUsage(self.input_api,
+                                        self.output_api, lambda x: True)
+    self.assertEqual(1, len(errors))
+    self.assertEqual('with_assert.mm', errors[0].items[0])
 
-    def testDoesntDetectAssertInOtherFiles(self):
-        self.input_api.files = [
-            MockFile('with_assert.cpp', self._content_with_assert),
-        ]
-        errors = PRESUBMIT.CheckAssertUsage(
-            self.input_api, self.output_api, lambda x: True)
-        self.assertEqual(0, len(errors))
+  def testDoesntDetectAssertInOtherFiles(self):
+    self.input_api.files = [
+        MockFile('with_assert.cpp', self._content_with_assert),
+    ]
+    errors = PRESUBMIT.CheckAssertUsage(self.input_api,
+                                        self.output_api, lambda x: True)
+    self.assertEqual(0, len(errors))
 
 
 if __name__ == '__main__':
-    unittest.main()
+  unittest.main()
diff --git a/presubmit_test_mocks.py b/presubmit_test_mocks.py
index 4ed7947530..015f6e7e15 100644
--- a/presubmit_test_mocks.py
+++ b/presubmit_test_mocks.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env vpython3
+
 # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 #
 # Use of this source code is governed by a BSD-style license
@@ -9,135 +11,131 @@
 # This file is inspired by [1].
 # [1] - https://cs.chromium.org/chromium/src/PRESUBMIT_test_mocks.py
 
+from __future__ import absolute_import
 import os.path
 import re
 
 
-class MockInputApi(object):
-    """Mock class for the InputApi class.
+class MockInputApi:
+  """Mock class for the InputApi class.
 
   This class can be used for unittests for presubmit by initializing
   the files attribute as the list of changed files.
   """
 
-    def __init__(self):
-        self.change = MockChange([], [])
-        self.files = []
-        self.presubmit_local_path = os.path.dirname(__file__)
-        self.re = re  # pylint: disable=invalid-name
+  def __init__(self):
+    self.change = MockChange([], [])
+    self.files = []
+    self.presubmit_local_path = os.path.dirname(__file__)
+    self.re = re  # pylint: disable=invalid-name
 
-    def AffectedSourceFiles(self, file_filter=None):
-        return self.AffectedFiles(file_filter=file_filter)
+  def AffectedSourceFiles(self, file_filter=None):
+    return self.AffectedFiles(file_filter=file_filter)
 
-    def AffectedFiles(self, file_filter=None, include_deletes=False):
-        for f in self.files:
-            if file_filter and not file_filter(f):
-                continue
-            if not include_deletes and f.Action() == 'D':
-                continue
-            yield f
+  def AffectedFiles(self, file_filter=None, include_deletes=False):
+    for f in self.files:
+      if file_filter and not file_filter(f):
+        continue
+      if not include_deletes and f.Action() == 'D':
+        continue
+      yield f
 
-    @classmethod
-    def FilterSourceFile(cls,
-                         affected_file,
-                         files_to_check=(),
-                         files_to_skip=()):
-        # pylint: disable=unused-argument
-        return True
+  @classmethod
+  def FilterSourceFile(cls, affected_file, files_to_check=(), files_to_skip=()):
+    # pylint: disable=unused-argument
+    return True
 
-    def PresubmitLocalPath(self):
-        return self.presubmit_local_path
+  def PresubmitLocalPath(self):
+    return self.presubmit_local_path
 
-    def ReadFile(self, affected_file, mode='rU'):
-        filename = affected_file.AbsoluteLocalPath()
-        for f in self.files:
-            if f.LocalPath() == filename:
-                with open(filename, mode) as f:
-                    return f.read()
-        # Otherwise, file is not in our mock API.
-        raise IOError, "No such file or directory: '%s'" % filename
+  def ReadFile(self, affected_file, mode='r'):
+    filename = affected_file.AbsoluteLocalPath()
+    for f in self.files:
+      if f.LocalPath() == filename:
+        with open(filename, mode) as f:
+          return f.read()
+    # Otherwise, file is not in our mock API.
+    raise IOError("No such file or directory: '%s'" % filename)
 
 
-class MockOutputApi(object):
-    """Mock class for the OutputApi class.
+class MockOutputApi:
+  """Mock class for the OutputApi class.
 
   An instance of this class can be passed to presubmit unittests for
   outputting various types of results.
   """
 
-    class PresubmitResult(object):
-        def __init__(self, message, items=None, long_text=''):
-            self.message = message
-            self.items = items
-            self.long_text = long_text
+  class PresubmitResult:
+    def __init__(self, message, items=None, long_text=''):
+      self.message = message
+      self.items = items
+      self.long_text = long_text
 
-        def __repr__(self):
-            return self.message
+    def __repr__(self):
+      return self.message
 
-    class PresubmitError(PresubmitResult):
-        def __init__(self, message, items=None, long_text=''):
-            MockOutputApi.PresubmitResult.__init__(self, message, items,
-                                                   long_text)
-            self.type = 'error'
+  class PresubmitError(PresubmitResult):
+    def __init__(self, message, items=None, long_text=''):
+      MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
+      self.type = 'error'
 
 
-class MockChange(object):
-    """Mock class for Change class.
+class MockChange:
+  """Mock class for Change class.
 
   This class can be used in presubmit unittests to mock the query of the
   current change.
   """
 
-    def __init__(self, changed_files, bugs_from_description, tags=None):
-        self._changed_files = changed_files
-        self._bugs_from_description = bugs_from_description
-        self.tags = dict() if not tags else tags
+  def __init__(self, changed_files, bugs_from_description, tags=None):
+    self._changed_files = changed_files
+    self._bugs_from_description = bugs_from_description
+    self.tags = dict() if not tags else tags
 
-    def BugsFromDescription(self):
-        return self._bugs_from_description
+  def BugsFromDescription(self):
+    return self._bugs_from_description
 
-    def __getattr__(self, attr):
-        """Return tags directly as attributes on the object."""
-        if not re.match(r"^[A-Z_]*$", attr):
-            raise AttributeError(self, attr)
-        return self.tags.get(attr)
+  def __getattr__(self, attr):
+    """Return tags directly as attributes on the object."""
+    if not re.match(r"^[A-Z_]*$", attr):
+      raise AttributeError(self, attr)
+    return self.tags.get(attr)
 
 
-class MockFile(object):
-    """Mock class for the File class.
+class MockFile:
+  """Mock class for the File class.
 
   This class can be used to form the mock list of changed files in
   MockInputApi for presubmit unittests.
""" - def __init__(self, - local_path, - new_contents=None, - old_contents=None, - action='A'): - if new_contents is None: - new_contents = ["Data"] - self._local_path = local_path - self._new_contents = new_contents - self._changed_contents = [(i + 1, l) - for i, l in enumerate(new_contents)] - self._action = action - self._old_contents = old_contents + def __init__(self, + local_path, + new_contents=None, + old_contents=None, + action='A'): + if new_contents is None: + new_contents = ["Data"] + self._local_path = local_path + self._new_contents = new_contents + self._changed_contents = [(i + 1, l) for i, l in enumerate(new_contents)] + self._action = action + self._old_contents = old_contents - def Action(self): - return self._action + def Action(self): + return self._action - def ChangedContents(self): - return self._changed_contents + def ChangedContents(self): + return self._changed_contents - def NewContents(self): - return self._new_contents + def NewContents(self): + return self._new_contents - def LocalPath(self): - return self._local_path + def LocalPath(self): + return self._local_path - def AbsoluteLocalPath(self): - return self._local_path + def AbsoluteLocalPath(self): + return self._local_path - def OldContents(self): - return self._old_contents + def OldContents(self): + return self._old_contents diff --git a/pylintrc b/pylintrc index e353d9eb66..852445a1ce 100644 --- a/pylintrc +++ b/pylintrc @@ -28,6 +28,7 @@ disable= exec-used, fixme, import-error, + import-outside-toplevel, missing-docstring, no-init, no-member, diff --git a/tools_webrtc/PRESUBMIT.py b/tools_webrtc/PRESUBMIT.py index 27f8bb10d2..57d142e9d7 100644 --- a/tools_webrtc/PRESUBMIT.py +++ b/tools_webrtc/PRESUBMIT.py @@ -1,3 +1,5 @@ +#!/usr/bin/env vpython3 + # Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -6,45 +8,48 @@ # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. +# Runs PRESUBMIT.py in py3 mode by git cl presubmit. +USE_PYTHON3 = True + def _LicenseHeader(input_api): - """Returns the license header regexp.""" - # Accept any year number from 2003 to the current year - current_year = int(input_api.time.strftime('%Y')) - allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1))) - years_re = '(' + '|'.join(allowed_years) + ')' - license_header = ( - r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' - r'All [Rr]ights [Rr]eserved\.\n' - r'.*?\n' - r'.*? Use of this source code is governed by a BSD-style license\n' - r'.*? that can be found in the LICENSE file in the root of the source\n' - r'.*? tree\. An additional intellectual property rights grant can be ' - r'found\n' - r'.*? in the file PATENTS\. All contributing project authors may\n' - r'.*? be found in the AUTHORS file in the root of the source tree\.\n' - ) % { - 'year': years_re, - } - return license_header + """Returns the license header regexp.""" + # Accept any year number from 2003 to the current year + current_year = int(input_api.time.strftime('%Y')) + allowed_years = (str(s) for s in reversed(range(2003, current_year + 1))) + years_re = '(' + '|'.join(allowed_years) + ')' + license_header = ( + r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' + r'All [Rr]ights [Rr]eserved\.\n' + r'.*?\n' + r'.*? Use of this source code is governed by a BSD-style license\n' + r'.*? 
+      r'.*? tree\. An additional intellectual property rights grant can be '
+      r'found\n'
+      r'.*? in the file PATENTS\. All contributing project authors may\n'
+      r'.*? be found in the AUTHORS file in the root of the source tree\.\n'
+  ) % {
+      'year': years_re,
+  }
+  return license_header
 
 
 def _CommonChecks(input_api, output_api):
-    """Checks common to both upload and commit."""
-    results = []
-    results.extend(
-        input_api.canned_checks.CheckLicense(input_api, output_api,
-                                             _LicenseHeader(input_api)))
-    return results
+  """Checks common to both upload and commit."""
+  results = []
+  results.extend(
+      input_api.canned_checks.CheckLicense(input_api, output_api,
+                                           _LicenseHeader(input_api)))
+  return results
 
 
 def CheckChangeOnUpload(input_api, output_api):
-    results = []
-    results.extend(_CommonChecks(input_api, output_api))
-    return results
+  results = []
+  results.extend(_CommonChecks(input_api, output_api))
+  return results
 
 
 def CheckChangeOnCommit(input_api, output_api):
-    results = []
-    results.extend(_CommonChecks(input_api, output_api))
-    return results
+  results = []
+  results.extend(_CommonChecks(input_api, output_api))
+  return results
diff --git a/tools_webrtc/android/build_aar.py b/tools_webrtc/android/build_aar.py
index 9fc4bb0f39..fb5b67ae22 100755
--- a/tools_webrtc/android/build_aar.py
+++ b/tools_webrtc/android/build_aar.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env vpython3
 
 # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 #
@@ -51,172 +51,167 @@ import find_depot_tools
 
 
 def _ParseArgs():
-    parser = argparse.ArgumentParser(description='libwebrtc.aar generator.')
-    parser.add_argument(
-        '--build-dir',
-        type=os.path.abspath,
-        help='Build dir. By default will create and use temporary dir.')
-    parser.add_argument('--output',
-                        default='libwebrtc.aar',
-                        type=os.path.abspath,
-                        help='Output file of the script.')
-    parser.add_argument(
-        '--arch',
-        default=DEFAULT_ARCHS,
-        nargs='*',
-        help='Architectures to build. Defaults to %(default)s.')
-    parser.add_argument('--use-goma',
-                        action='store_true',
-                        default=False,
-                        help='Use goma.')
-    parser.add_argument('--verbose',
-                        action='store_true',
-                        default=False,
-                        help='Debug logging.')
-    parser.add_argument(
-        '--extra-gn-args',
-        default=[],
-        nargs='*',
-        help="""Additional GN arguments to be used during Ninja generation.
+  parser = argparse.ArgumentParser(description='libwebrtc.aar generator.')
+  parser.add_argument(
+      '--build-dir',
+      type=os.path.abspath,
+      help='Build dir. By default will create and use temporary dir.')
+  parser.add_argument('--output',
+                      default='libwebrtc.aar',
+                      type=os.path.abspath,
+                      help='Output file of the script.')
+  parser.add_argument('--arch',
+                      default=DEFAULT_ARCHS,
+                      nargs='*',
+                      help='Architectures to build. Defaults to %(default)s.')
+  parser.add_argument('--use-goma',
+                      action='store_true',
+                      default=False,
+                      help='Use goma.')
+  parser.add_argument('--verbose',
+                      action='store_true',
+                      default=False,
+                      help='Debug logging.')
+  parser.add_argument(
+      '--extra-gn-args',
+      default=[],
+      nargs='*',
+      help="""Additional GN arguments to be used during Ninja generation.
       These are passed to gn inside `--args` switch and applied
       after any other arguments and will override any values defined by
      the script. Example of building debug aar file:
      build_aar.py --extra-gn-args='is_debug=true'""")
-    parser.add_argument(
-        '--extra-ninja-switches',
-        default=[],
-        nargs='*',
-        help="""Additional Ninja switches to be used during compilation.
+  parser.add_argument(
+      '--extra-ninja-switches',
+      default=[],
+      nargs='*',
+      help="""Additional Ninja switches to be used during compilation.
      These are applied after any other Ninja switches.
      Example of enabling verbose Ninja output:
      build_aar.py --extra-ninja-switches='-v'""")
-    parser.add_argument(
-        '--extra-gn-switches',
-        default=[],
-        nargs='*',
-        help="""Additional GN switches to be used during compilation.
+  parser.add_argument(
+      '--extra-gn-switches',
+      default=[],
+      nargs='*',
+      help="""Additional GN switches to be used during compilation.
      These are applied after any other GN switches.
      Example of enabling verbose GN output:
      build_aar.py --extra-gn-switches='-v'""")
-    return parser.parse_args()
+  return parser.parse_args()
 
 
 def _RunGN(args):
-    cmd = [
-        sys.executable,
-        os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py')
-    ]
-    cmd.extend(args)
-    logging.debug('Running: %r', cmd)
-    subprocess.check_call(cmd)
+  cmd = [
+      sys.executable,
+      os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py')
+  ]
+  cmd.extend(args)
+  logging.debug('Running: %r', cmd)
+  subprocess.check_call(cmd)
 
 
 def _RunNinja(output_directory, args):
-    cmd = [
-        os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'), '-C',
-        output_directory
-    ]
-    cmd.extend(args)
-    logging.debug('Running: %r', cmd)
-    subprocess.check_call(cmd)
+  cmd = [
+      os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'), '-C',
+      output_directory
+  ]
+  cmd.extend(args)
+  logging.debug('Running: %r', cmd)
+  subprocess.check_call(cmd)
 
 
 def _EncodeForGN(value):
-    """Encodes value as a GN literal."""
-    if isinstance(value, str):
-        return '"' + value + '"'
-    elif isinstance(value, bool):
-        return repr(value).lower()
-    else:
-        return repr(value)
+  """Encodes value as a GN literal."""
+  if isinstance(value, str):
+    return '"' + value + '"'
+  if isinstance(value, bool):
+    return repr(value).lower()
+  return repr(value)
 
 
 def _GetOutputDirectory(build_dir, arch):
-    """Returns the GN output directory for the target architecture."""
-    return os.path.join(build_dir, arch)
+  """Returns the GN output directory for the target architecture."""
+  return os.path.join(build_dir, arch)
 
 
 def _GetTargetCpu(arch):
-    """Returns target_cpu for the GN build with the given architecture."""
-    if arch in ['armeabi', 'armeabi-v7a']:
-        return 'arm'
-    elif arch == 'arm64-v8a':
-        return 'arm64'
-    elif arch == 'x86':
-        return 'x86'
-    elif arch == 'x86_64':
-        return 'x64'
-    else:
-        raise Exception('Unknown arch: ' + arch)
+  """Returns target_cpu for the GN build with the given architecture."""
+  if arch in ['armeabi', 'armeabi-v7a']:
+    return 'arm'
+  if arch == 'arm64-v8a':
+    return 'arm64'
+  if arch == 'x86':
+    return 'x86'
+  if arch == 'x86_64':
+    return 'x64'
+  raise Exception('Unknown arch: ' + arch)
 
 
 def _GetArmVersion(arch):
-    """Returns arm_version for the GN build with the given architecture."""
-    if arch == 'armeabi':
-        return 6
-    elif arch == 'armeabi-v7a':
-        return 7
-    elif arch in ['arm64-v8a', 'x86', 'x86_64']:
-        return None
-    else:
-        raise Exception('Unknown arch: ' + arch)
+  """Returns arm_version for the GN build with the given architecture."""
+  if arch == 'armeabi':
+    return 6
+  if arch == 'armeabi-v7a':
+    return 7
+  if arch in ['arm64-v8a', 'x86', 'x86_64']:
+    return None
+  raise Exception('Unknown arch: ' + arch)
 
 
 def Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches,
Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches, extra_ninja_switches): - """Generates target architecture using GN and builds it using ninja.""" - logging.info('Building: %s', arch) - output_directory = _GetOutputDirectory(build_dir, arch) - gn_args = { - 'target_os': 'android', - 'is_debug': False, - 'is_component_build': False, - 'rtc_include_tests': False, - 'target_cpu': _GetTargetCpu(arch), - 'use_goma': use_goma - } - arm_version = _GetArmVersion(arch) - if arm_version: - gn_args['arm_version'] = arm_version - gn_args_str = '--args=' + ' '.join( - [k + '=' + _EncodeForGN(v) - for k, v in gn_args.items()] + extra_gn_args) + """Generates target architecture using GN and builds it using ninja.""" + logging.info('Building: %s', arch) + output_directory = _GetOutputDirectory(build_dir, arch) + gn_args = { + 'target_os': 'android', + 'is_debug': False, + 'is_component_build': False, + 'rtc_include_tests': False, + 'target_cpu': _GetTargetCpu(arch), + 'use_goma': use_goma + } + arm_version = _GetArmVersion(arch) + if arm_version: + gn_args['arm_version'] = arm_version + gn_args_str = '--args=' + ' '.join( + [k + '=' + _EncodeForGN(v) for k, v in gn_args.items()] + extra_gn_args) - gn_args_list = ['gen', output_directory, gn_args_str] - gn_args_list.extend(extra_gn_switches) - _RunGN(gn_args_list) + gn_args_list = ['gen', output_directory, gn_args_str] + gn_args_list.extend(extra_gn_switches) + _RunGN(gn_args_list) - ninja_args = TARGETS[:] - if use_goma: - ninja_args.extend(['-j', '200']) - ninja_args.extend(extra_ninja_switches) - _RunNinja(output_directory, ninja_args) + ninja_args = TARGETS[:] + if use_goma: + ninja_args.extend(['-j', '200']) + ninja_args.extend(extra_ninja_switches) + _RunNinja(output_directory, ninja_args) def CollectCommon(aar_file, build_dir, arch): - """Collects architecture independent files into the .aar-archive.""" - logging.info('Collecting common files.') - output_directory = _GetOutputDirectory(build_dir, arch) - aar_file.write(MANIFEST_FILE, 'AndroidManifest.xml') - aar_file.write(os.path.join(output_directory, JAR_FILE), 'classes.jar') + """Collects architecture independent files into the .aar-archive.""" + logging.info('Collecting common files.') + output_directory = _GetOutputDirectory(build_dir, arch) + aar_file.write(MANIFEST_FILE, 'AndroidManifest.xml') + aar_file.write(os.path.join(output_directory, JAR_FILE), 'classes.jar') def Collect(aar_file, build_dir, arch): - """Collects architecture specific files into the .aar-archive.""" - logging.info('Collecting: %s', arch) - output_directory = _GetOutputDirectory(build_dir, arch) + """Collects architecture specific files into the .aar-archive.""" + logging.info('Collecting: %s', arch) + output_directory = _GetOutputDirectory(build_dir, arch) - abi_dir = os.path.join('jni', arch) - for so_file in NEEDED_SO_FILES: - aar_file.write(os.path.join(output_directory, so_file), - os.path.join(abi_dir, so_file)) + abi_dir = os.path.join('jni', arch) + for so_file in NEEDED_SO_FILES: + aar_file.write(os.path.join(output_directory, so_file), + os.path.join(abi_dir, so_file)) def GenerateLicenses(output_dir, build_dir, archs): - builder = LicenseBuilder( - [_GetOutputDirectory(build_dir, arch) for arch in archs], TARGETS) - builder.GenerateLicenseText(output_dir) + builder = LicenseBuilder( + [_GetOutputDirectory(build_dir, arch) for arch in archs], TARGETS) + builder.GenerateLicenseText(output_dir) def BuildAar(archs, @@ -226,35 +221,35 @@ def BuildAar(archs, ext_build_dir=None, 
extra_gn_switches=None, extra_ninja_switches=None): - extra_gn_args = extra_gn_args or [] - extra_gn_switches = extra_gn_switches or [] - extra_ninja_switches = extra_ninja_switches or [] - build_dir = ext_build_dir if ext_build_dir else tempfile.mkdtemp() + extra_gn_args = extra_gn_args or [] + extra_gn_switches = extra_gn_switches or [] + extra_ninja_switches = extra_ninja_switches or [] + build_dir = ext_build_dir if ext_build_dir else tempfile.mkdtemp() + for arch in archs: + Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches, + extra_ninja_switches) + + with zipfile.ZipFile(output_file, 'w') as aar_file: + # Architecture doesn't matter here, arbitrarily using the first one. + CollectCommon(aar_file, build_dir, archs[0]) for arch in archs: - Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches, - extra_ninja_switches) + Collect(aar_file, build_dir, arch) - with zipfile.ZipFile(output_file, 'w') as aar_file: - # Architecture doesn't matter here, arbitrarily using the first one. - CollectCommon(aar_file, build_dir, archs[0]) - for arch in archs: - Collect(aar_file, build_dir, arch) + license_dir = os.path.dirname(os.path.realpath(output_file)) + GenerateLicenses(license_dir, build_dir, archs) - license_dir = os.path.dirname(os.path.realpath(output_file)) - GenerateLicenses(license_dir, build_dir, archs) - - if not ext_build_dir: - shutil.rmtree(build_dir, True) + if not ext_build_dir: + shutil.rmtree(build_dir, True) def main(): - args = _ParseArgs() - logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) + args = _ParseArgs() + logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - BuildAar(args.arch, args.output, args.use_goma, args.extra_gn_args, - args.build_dir, args.extra_gn_switches, args.extra_ninja_switches) + BuildAar(args.arch, args.output, args.use_goma, args.extra_gn_args, + args.build_dir, args.extra_gn_switches, args.extra_ninja_switches) if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/android/test_aar.py b/tools_webrtc/android/test_aar.py index cb8ad121a2..7eb281aa9a 100755 --- a/tools_webrtc/android/test_aar.py +++ b/tools_webrtc/android/test_aar.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env vpython3 # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # @@ -7,8 +7,7 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. -"""Script for building and testing WebRTC AAR. -""" +"""Script for building and testing WebRTC AAR.""" import argparse import logging @@ -36,110 +35,109 @@ AAR_PROJECT_DIR = os.path.join(CHECKOUT_ROOT, 'examples/aarproject') def _ParseArgs(): - parser = argparse.ArgumentParser(description='Releases WebRTC on Bintray.') - parser.add_argument('--use-goma', - action='store_true', - default=False, - help='Use goma.') - parser.add_argument('--skip-tests', - action='store_true', - default=False, - help='Skips running the tests.') - parser.add_argument( - '--build-dir', - default=None, - help='Temporary directory to store the build files. 
If not specified, ' - 'a new directory will be created.') - parser.add_argument('--verbose', - action='store_true', - default=False, - help='Debug logging.') - return parser.parse_args() + parser = argparse.ArgumentParser(description='Releases WebRTC on Bintray.') + parser.add_argument('--use-goma', + action='store_true', + default=False, + help='Use goma.') + parser.add_argument('--skip-tests', + action='store_true', + default=False, + help='Skips running the tests.') + parser.add_argument( + '--build-dir', + default=None, + help='Temporary directory to store the build files. If not specified, ' + 'a new directory will be created.') + parser.add_argument('--verbose', + action='store_true', + default=False, + help='Debug logging.') + return parser.parse_args() def _GetCommitHash(): - commit_hash = subprocess.check_output( + commit_hash = subprocess.check_output( ['git', 'rev-parse', 'HEAD'], cwd=CHECKOUT_ROOT).decode('UTF-8').strip() - return commit_hash + return commit_hash def _GetCommitPos(): - commit_message = subprocess.check_output( - ['git', 'rev-list', '--format=%B', '--max-count=1', 'HEAD'], - cwd=CHECKOUT_ROOT).decode('UTF-8') - commit_pos_match = re.search(COMMIT_POSITION_REGEX, commit_message, - re.MULTILINE) - if not commit_pos_match: - raise Exception('Commit position not found in the commit message: %s' % - commit_message) - return commit_pos_match.group(1) + commit_message = subprocess.check_output( + ['git', 'rev-list', '--format=%B', '--max-count=1', 'HEAD'], + cwd=CHECKOUT_ROOT).decode('UTF-8') + commit_pos_match = re.search(COMMIT_POSITION_REGEX, commit_message, + re.MULTILINE) + if not commit_pos_match: + raise Exception('Commit position not found in the commit message: %s' % + commit_message) + return commit_pos_match.group(1) def _TestAAR(build_dir): - """Runs AppRTCMobile tests using the AAR. Returns true if the tests pass.""" - logging.info('Testing library.') + """Runs AppRTCMobile tests using the AAR. Returns true if the tests pass.""" + logging.info('Testing library.') - # Uninstall any existing version of AppRTCMobile. - logging.info( - 'Uninstalling previous AppRTCMobile versions. It is okay for ' - 'these commands to fail if AppRTCMobile is not installed.') - subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc']) - subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc.test']) + # Uninstall any existing version of AppRTCMobile. + logging.info('Uninstalling previous AppRTCMobile versions. It is okay for ' + 'these commands to fail if AppRTCMobile is not installed.') + subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc']) + subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc.test']) - # Run tests. - try: - # First clean the project. - subprocess.check_call([GRADLEW_BIN, 'clean'], cwd=AAR_PROJECT_DIR) - # Then run the tests. - subprocess.check_call([ - GRADLEW_BIN, - 'connectedDebugAndroidTest', - '-PaarDir=' + os.path.abspath(build_dir)], - cwd=AAR_PROJECT_DIR) - except subprocess.CalledProcessError: - logging.exception('Test failure.') - return False # Clean or tests failed + # Run tests. + try: + # First clean the project. + subprocess.check_call([GRADLEW_BIN, 'clean'], cwd=AAR_PROJECT_DIR) + # Then run the tests. 
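+ # (Hedged aside: Gradle's -P switch defines a project property, so the
+ # examples/aarproject build is assumed to read 'aarDir' to locate the
+ # AAR built above; the effective invocation is roughly
+ # ./gradlew connectedDebugAndroidTest -PaarDir=/abs/path/to/build_dir)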
+ subprocess.check_call([ + GRADLEW_BIN, 'connectedDebugAndroidTest', + '-PaarDir=' + os.path.abspath(build_dir) + ], + cwd=AAR_PROJECT_DIR) + except subprocess.CalledProcessError: + logging.exception('Test failure.') + return False # Clean or tests failed - return True # Tests pass + return True # Tests pass def BuildAndTestAar(use_goma, skip_tests, build_dir): - version = '1.0.' + _GetCommitPos() - commit = _GetCommitHash() - logging.info( - 'Building and Testing AAR version %s with hash %s', version, commit) + version = '1.0.' + _GetCommitPos() + commit = _GetCommitHash() + logging.info('Building and Testing AAR version %s with hash %s', version, + commit) - # If build directory is not specified, create a temporary directory. - use_tmp_dir = not build_dir + # If build directory is not specified, create a temporary directory. + use_tmp_dir = not build_dir + if use_tmp_dir: + build_dir = tempfile.mkdtemp() + + try: + base_name = ARTIFACT_ID + '-' + version + aar_file = os.path.join(build_dir, base_name + '.aar') + + logging.info('Building at %s', build_dir) + BuildAar(ARCHS, + aar_file, + use_goma=use_goma, + ext_build_dir=os.path.join(build_dir, 'aar-build')) + + tests_pass = skip_tests or _TestAAR(build_dir) + if not tests_pass: + raise Exception('Test failure.') + + logging.info('Test success.') + + finally: if use_tmp_dir: - build_dir = tempfile.mkdtemp() - - try: - base_name = ARTIFACT_ID + '-' + version - aar_file = os.path.join(build_dir, base_name + '.aar') - - logging.info('Building at %s', build_dir) - BuildAar(ARCHS, - aar_file, - use_goma=use_goma, - ext_build_dir=os.path.join(build_dir, 'aar-build')) - - tests_pass = skip_tests or _TestAAR(build_dir) - if not tests_pass: - raise Exception('Test failure.') - - logging.info('Test success.') - - finally: - if use_tmp_dir: - shutil.rmtree(build_dir, True) + shutil.rmtree(build_dir, True) def main(): - args = _ParseArgs() - logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - BuildAndTestAar(args.use_goma, args.skip_tests, args.build_dir) + args = _ParseArgs() + logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) + BuildAndTestAar(args.use_goma, args.skip_tests, args.build_dir) if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/apple/copy_framework_header.py b/tools_webrtc/apple/copy_framework_header.py index d194650934..3574a67d2a 100755 --- a/tools_webrtc/apple/copy_framework_header.py +++ b/tools_webrtc/apple/copy_framework_header.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
# # Use of this source code is governed by a BSD-style license @@ -12,36 +13,34 @@ import re import sys -def replace_double_quote(line): - re_rtc_import = re.compile( - r'(\s*)#import\s+"(\S+/|)(\w+\+|)RTC(\w+)\.h"(.*)', re.DOTALL) - match = re_rtc_import.match(line) - if not match: - return line +def _ReplaceDoubleQuote(line): + re_rtc_import = re.compile(r'(\s*)#import\s+"(\S+/|)(\w+\+|)RTC(\w+)\.h"(.*)', + re.DOTALL) + match = re_rtc_import.match(line) + if not match: + return line - return '%s#import <WebRTC/%sRTC%s.h>%s' % (match.group(1), match.group(3), - match.group(4), match.group(5)) + return '%s#import <WebRTC/%sRTC%s.h>%s' % (match.group(1), match.group(3), + match.group(4), match.group(5)) -def process(input_file, output_file): - with open(input_file, 'rb') as fb, open(output_file, 'wb') as fw: - for line in fb.read().decode('UTF-8').splitlines(): - fw.write(replace_double_quote(line).encode('UTF-8')) - fw.write(b"\n") +def Process(input_file, output_file): + with open(input_file, 'rb') as fb, open(output_file, 'wb') as fw: + for line in fb.read().decode('UTF-8').splitlines(): + fw.write(_ReplaceDoubleQuote(line).encode('UTF-8')) + fw.write(b"\n") def main(): - parser = argparse.ArgumentParser( - description= - "Copy headers of framework and replace double-quoted includes to" + - " angle-bracketed respectively.") - parser.add_argument('--input', - help='Input header files to copy.', - type=str) - parser.add_argument('--output', help='Output file.', type=str) - parsed_args = parser.parse_args() - return process(parsed_args.input, parsed_args.output) + parser = argparse.ArgumentParser( + description= + "Copy headers of framework and replace double-quoted includes to" + + " angle-bracketed respectively.") + parser.add_argument('--input', help='Input header files to copy.', type=str) + parser.add_argument('--output', help='Output file.', type=str) + parsed_args = parser.parse_args() + return Process(parsed_args.input, parsed_args.output) if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/apple/copy_framework_header_test.py b/tools_webrtc/apple/copy_framework_header_test.py index 24bab3eb0b..7b8aceac6d 100644 --- a/tools_webrtc/apple/copy_framework_header_test.py +++ b/tools_webrtc/apple/copy_framework_header_test.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -8,28 +9,27 @@ # be found in the AUTHORS file in the root of the source tree.
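To make the expectations below easier to read, here is a minimal sketch of the match the rewrite relies on, using the same pattern as _ReplaceDoubleQuote above (groups shown for the simplest case):

import re

RE_RTC_IMPORT = re.compile(r'(\s*)#import\s+"(\S+/|)(\w+\+|)RTC(\w+)\.h"(.*)',
                           re.DOTALL)
# For '#import "RTCMacros.h"' the captured groups are
# ('', '', '', 'Macros', ''), which _ReplaceDoubleQuote formats into
# '#import <WebRTC/RTCMacros.h>'.
print(RE_RTC_IMPORT.match('#import "RTCMacros.h"').groups())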
import unittest -from copy_framework_header import replace_double_quote +from copy_framework_header import _ReplaceDoubleQuote class TestCopyFramework(unittest.TestCase): - def testReplaceDoubleQuote(self): - self.assertEqual(replace_double_quote("""#import "RTCMacros.h\""""), - """#import <WebRTC/RTCMacros.h>""") - self.assertEqual(replace_double_quote("""#import "RTCMacros.h\"\n"""), - """#import <WebRTC/RTCMacros.h>\n""") - self.assertEqual( - replace_double_quote("""#import "UIDevice+RTCDevice.h\"\n"""), - """#import <WebRTC/UIDevice+RTCDevice.h>\n""") - self.assertEqual( - replace_double_quote("#import \"components/video_codec/" + - "RTCVideoDecoderFactoryH264.h\"\n"), - """#import <WebRTC/RTCVideoDecoderFactoryH264.h>\n""") - self.assertEqual( - replace_double_quote( - """@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *\n""" - ), - """@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *\n""") + def testReplaceDoubleQuote(self): + self.assertEqual(_ReplaceDoubleQuote("""#import "RTCMacros.h\""""), + """#import <WebRTC/RTCMacros.h>""") + self.assertEqual(_ReplaceDoubleQuote("""#import "RTCMacros.h\"\n"""), + """#import <WebRTC/RTCMacros.h>\n""") + self.assertEqual( + _ReplaceDoubleQuote("""#import "UIDevice+RTCDevice.h\"\n"""), + """#import <WebRTC/UIDevice+RTCDevice.h>\n""") + self.assertEqual( + _ReplaceDoubleQuote("#import \"components/video_codec/" + + "RTCVideoDecoderFactoryH264.h\"\n"), + """#import <WebRTC/RTCVideoDecoderFactoryH264.h>\n""") + self.assertEqual( + _ReplaceDoubleQuote( + """@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *\n"""), + """@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *\n""") if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/autoroller/roll_deps.py b/tools_webrtc/autoroller/roll_deps.py index ac2a8651d5..0aa2320a63 100755 --- a/tools_webrtc/autoroller/roll_deps.py +++ b/tools_webrtc/autoroller/roll_deps.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -8,7 +9,6 @@ # be found in the AUTHORS file in the root of the source tree. """Script to automatically roll dependencies in the WebRTC DEPS file.""" -from __future__ import absolute_import import argparse import base64 @@ -18,18 +18,15 @@ import os import re import subprocess import sys - -import six.moves.urllib.request -import six.moves.urllib.error -import six.moves.urllib.parse +import urllib.request def FindSrcDirPath(): - """Returns the abs path to the src/ dir of the project.""" - src_dir = os.path.dirname(os.path.abspath(__file__)) - while os.path.basename(src_dir) != 'src': - src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) - return src_dir + """Returns the abs path to the src/ dir of the project.""" + src_dir = os.path.dirname(os.path.abspath(__file__)) + while os.path.basename(src_dir) != 'src': + src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) + return src_dir # Skip these dependencies (list without solution name prefix).
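With the six shim gone, the script talks to the standard library directly; a minimal sketch of the py3 pattern that ReadUrlContent() further down relies on (helper name illustrative):

import urllib.request

def _ReadLines(url):
  # urlopen() returns a file-like response object; readlines() yields
  # the body as a list of bytes lines.
  with urllib.request.urlopen(url) as conn:
    return conn.readlines()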
@@ -99,42 +96,42 @@ ChromiumRevisionUpdate = collections.namedtuple('ChromiumRevisionUpdate', class RollError(Exception): - pass + pass def StrExpansion(): - return lambda str_value: str_value + return lambda str_value: str_value def VarLookup(local_scope): - return lambda var_name: local_scope['vars'][var_name] + return lambda var_name: local_scope['vars'][var_name] def ParseDepsDict(deps_content): - local_scope = {} - global_scope = { - 'Str': StrExpansion(), - 'Var': VarLookup(local_scope), - 'deps_os': {}, - } - exec(deps_content, global_scope, local_scope) - return local_scope + local_scope = {} + global_scope = { + 'Str': StrExpansion(), + 'Var': VarLookup(local_scope), + 'deps_os': {}, + } + exec(deps_content, global_scope, local_scope) + return local_scope def ParseLocalDepsFile(filename): - with open(filename, 'rb') as f: - deps_content = f.read() - return ParseDepsDict(deps_content) + with open(filename, 'rb') as f: + deps_content = f.read() + return ParseDepsDict(deps_content) def ParseCommitPosition(commit_message): - for line in reversed(commit_message.splitlines()): - m = COMMIT_POSITION_RE.match(line.strip()) - if m: - return int(m.group(1)) - logging.error('Failed to parse commit position id from:\n%s\n', - commit_message) - sys.exit(-1) + for line in reversed(commit_message.splitlines()): + m = COMMIT_POSITION_RE.match(line.strip()) + if m: + return int(m.group(1)) + logging.error('Failed to parse commit position id from:\n%s\n', + commit_message) + sys.exit(-1) def _RunCommand(command, @@ -142,69 +139,68 @@ def _RunCommand(command, ignore_exit_code=False, extra_env=None, input_data=None): - """Runs a command and returns the output from that command. + """Runs a command and returns the output from that command. If the command fails (exit code != 0), the function will exit the process. Returns: A tuple containing the stdout and stderr outputs as strings. """ - working_dir = working_dir or CHECKOUT_SRC_DIR - logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir) - env = os.environ.copy() - if extra_env: - assert all(isinstance(value, str) for value in extra_env.values()) - logging.debug('extra env: %s', extra_env) - env.update(extra_env) - p = subprocess.Popen(command, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env, - cwd=working_dir, - universal_newlines=True) - std_output, err_output = p.communicate(input_data) - p.stdout.close() - p.stderr.close() - if not ignore_exit_code and p.returncode != 0: - logging.error('Command failed: %s\n' - 'stdout:\n%s\n' - 'stderr:\n%s\n', ' '.join(command), std_output, - err_output) - sys.exit(p.returncode) - return std_output, err_output + working_dir = working_dir or CHECKOUT_SRC_DIR + logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir) + env = os.environ.copy() + if extra_env: + assert all(isinstance(value, str) for value in list(extra_env.values())) + logging.debug('extra env: %s', extra_env) + env.update(extra_env) + p = subprocess.Popen(command, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env, + cwd=working_dir, + universal_newlines=True) + std_output, err_output = p.communicate(input_data) + p.stdout.close() + p.stderr.close() + if not ignore_exit_code and p.returncode != 0: + logging.error('Command failed: %s\n' + 'stdout:\n%s\n' + 'stderr:\n%s\n', ' '.join(command), std_output, err_output) + sys.exit(p.returncode) + return std_output, err_output def _GetBranches(): - """Returns a tuple of active,branches. 
+ """Returns a tuple of active,branches. The 'active' is the name of the currently active branch and 'branches' is a list of all branches. """ - lines = _RunCommand(['git', 'branch'])[0].split('\n') - branches = [] - active = '' - for line in lines: - if '*' in line: - # The assumption is that the first char will always be the '*'. - active = line[1:].strip() - branches.append(active) - else: - branch = line.strip() - if branch: - branches.append(branch) - return active, branches + lines = _RunCommand(['git', 'branch'])[0].split('\n') + branches = [] + active = '' + for line in lines: + if '*' in line: + # The assumption is that the first char will always be the '*'. + active = line[1:].strip() + branches.append(active) + else: + branch = line.strip() + if branch: + branches.append(branch) + return active, branches def _ReadGitilesContent(url): - # Download and decode BASE64 content until - # https://code.google.com/p/gitiles/issues/detail?id=7 is fixed. - base64_content = ReadUrlContent(url + '?format=TEXT') - return base64.b64decode(base64_content[0]) + # Download and decode BASE64 content until + # https://code.google.com/p/gitiles/issues/detail?id=7 is fixed. + base64_content = ReadUrlContent(url + '?format=TEXT') + return base64.b64decode(base64_content[0]) def ReadRemoteCrFile(path_below_src, revision): - """Reads a remote Chromium file of a specific revision. + """Reads a remote Chromium file of a specific revision. Args: path_below_src: A path to the target file relative to src dir. @@ -212,35 +208,35 @@ def ReadRemoteCrFile(path_below_src, revision): Returns: A string with file content. """ - return _ReadGitilesContent(CHROMIUM_FILE_TEMPLATE % - (revision, path_below_src)) + return _ReadGitilesContent(CHROMIUM_FILE_TEMPLATE % + (revision, path_below_src)) def ReadRemoteCrCommit(revision): - """Reads a remote Chromium commit message. Returns a string.""" - return _ReadGitilesContent(CHROMIUM_COMMIT_TEMPLATE % revision) + """Reads a remote Chromium commit message. Returns a string.""" + return _ReadGitilesContent(CHROMIUM_COMMIT_TEMPLATE % revision) def ReadUrlContent(url): - """Connect to a remote host and read the contents. + """Connect to a remote host and read the contents. Args: url: URL to connect to. Returns: A list of lines. """ - conn = six.moves.urllib.request.urlopen(url) - try: - return conn.readlines() - except IOError as e: - logging.exception('Error connecting to %s. Error: %s', url, e) - raise - finally: - conn.close() + conn = urllib.request.urlopen(url) + try: + return conn.readlines() + except IOError as e: + logging.exception('Error connecting to %s. Error: %s', url, e) + raise + finally: + conn.close() def GetMatchingDepsEntries(depsentry_dict, dir_path): - """Gets all deps entries matching the provided path. + """Gets all deps entries matching the provided path. This list may contain more than one DepsEntry object. Example: dir_path='src/testing' would give results containing both @@ -252,78 +248,77 @@ def GetMatchingDepsEntries(depsentry_dict, dir_path): Returns: A list of DepsEntry objects. 
""" - result = [] - for path, depsentry in depsentry_dict.items(): - if path == dir_path: - result.append(depsentry) - else: - parts = path.split('/') - if all(part == parts[i] - for i, part in enumerate(dir_path.split('/'))): - result.append(depsentry) - return result + result = [] + for path, depsentry in list(depsentry_dict.items()): + if path == dir_path: + result.append(depsentry) + else: + parts = path.split('/') + if all(part == parts[i] for i, part in enumerate(dir_path.split('/'))): + result.append(depsentry) + return result def BuildDepsentryDict(deps_dict): - """Builds a dict of paths to DepsEntry objects from a raw deps dict.""" - result = {} + """Builds a dict of paths to DepsEntry objects from a raw deps dict.""" + result = {} - def AddDepsEntries(deps_subdict): - for path, dep in deps_subdict.items(): - if path in result: - continue - if not isinstance(dep, dict): - dep = {'url': dep} - if dep.get('dep_type') == 'cipd': - result[path] = CipdDepsEntry(path, dep['packages']) - else: - if '@' not in dep['url']: - continue - url, revision = dep['url'].split('@') - result[path] = DepsEntry(path, url, revision) + def AddDepsEntries(deps_subdict): + for path, dep in list(deps_subdict.items()): + if path in result: + continue + if not isinstance(dep, dict): + dep = {'url': dep} + if dep.get('dep_type') == 'cipd': + result[path] = CipdDepsEntry(path, dep['packages']) + else: + if '@' not in dep['url']: + continue + url, revision = dep['url'].split('@') + result[path] = DepsEntry(path, url, revision) - AddDepsEntries(deps_dict['deps']) - for deps_os in ['win', 'mac', 'unix', 'android', 'ios', 'unix']: - AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {})) - return result + AddDepsEntries(deps_dict['deps']) + for deps_os in ['win', 'mac', 'unix', 'android', 'ios', 'unix']: + AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {})) + return result def _FindChangedCipdPackages(path, old_pkgs, new_pkgs): - old_pkgs_names = {p['package'] for p in old_pkgs} - new_pkgs_names = {p['package'] for p in new_pkgs} - pkgs_equal = (old_pkgs_names == new_pkgs_names) - added_pkgs = [p for p in new_pkgs_names if p not in old_pkgs_names] - removed_pkgs = [p for p in old_pkgs_names if p not in new_pkgs_names] + old_pkgs_names = {p['package'] for p in old_pkgs} + new_pkgs_names = {p['package'] for p in new_pkgs} + pkgs_equal = (old_pkgs_names == new_pkgs_names) + added_pkgs = [p for p in new_pkgs_names if p not in old_pkgs_names] + removed_pkgs = [p for p in old_pkgs_names if p not in new_pkgs_names] - assert pkgs_equal, ('Old: %s\n New: %s.\nYou need to do a manual roll ' - 'and remove/add entries in DEPS so the old and new ' - 'list match.\nMost likely, you should add \"%s\" and ' - 'remove \"%s\"' % - (old_pkgs, new_pkgs, added_pkgs, removed_pkgs)) + assert pkgs_equal, ('Old: %s\n New: %s.\nYou need to do a manual roll ' + 'and remove/add entries in DEPS so the old and new ' + 'list match.\nMost likely, you should add \"%s\" and ' + 'remove \"%s\"' % + (old_pkgs, new_pkgs, added_pkgs, removed_pkgs)) - for old_pkg in old_pkgs: - for new_pkg in new_pkgs: - old_version = old_pkg['version'] - new_version = new_pkg['version'] - if (old_pkg['package'] == new_pkg['package'] and - old_version != new_version): - logging.debug('Roll dependency %s to %s', path, new_version) - yield ChangedCipdPackage(path, old_pkg['package'], old_version, - new_version) + for old_pkg in old_pkgs: + for new_pkg in new_pkgs: + old_version = old_pkg['version'] + new_version = new_pkg['version'] + if 
(old_pkg['package'] == new_pkg['package'] + and old_version != new_version): + logging.debug('Roll dependency %s to %s', path, new_version) + yield ChangedCipdPackage(path, old_pkg['package'], old_version, + new_version) def _FindNewDeps(old, new): - """ Gather dependencies only in `new` and return corresponding paths. """ - old_entries = set(BuildDepsentryDict(old)) - new_entries = set(BuildDepsentryDict(new)) - return [ - path for path in new_entries - old_entries - if path not in DONT_AUTOROLL_THESE - ] + """ Gather dependencies only in `new` and return corresponding paths. """ + old_entries = set(BuildDepsentryDict(old)) + new_entries = set(BuildDepsentryDict(new)) + return [ + path for path in new_entries - old_entries + if path not in DONT_AUTOROLL_THESE + ] def FindAddedDeps(webrtc_deps, new_cr_deps): - """ + """ Calculate new deps entries of interest. Ideally, that would mean: only appearing in chromium DEPS @@ -344,18 +339,18 @@ def FindAddedDeps(webrtc_deps, new_cr_deps): A list of paths added dependencies sitting in `ANDROID_DEPS_PATH`. A list of paths for other added dependencies. """ - all_added_deps = _FindNewDeps(webrtc_deps, new_cr_deps) - generated_android_deps = [ - path for path in all_added_deps if path.startswith(ANDROID_DEPS_PATH) - ] - other_deps = [ - path for path in all_added_deps if path not in generated_android_deps - ] - return generated_android_deps, other_deps + all_added_deps = _FindNewDeps(webrtc_deps, new_cr_deps) + generated_android_deps = [ + path for path in all_added_deps if path.startswith(ANDROID_DEPS_PATH) + ] + other_deps = [ + path for path in all_added_deps if path not in generated_android_deps + ] + return generated_android_deps, other_deps def FindRemovedDeps(webrtc_deps, new_cr_deps): - """ + """ Calculate obsolete deps entries. Ideally, that would mean: no more appearing in chromium DEPS @@ -378,20 +373,19 @@ def FindRemovedDeps(webrtc_deps, new_cr_deps): A list of paths of dependencies removed from `ANDROID_DEPS_PATH`. A list of paths of unexpected disappearing dependencies. """ - all_removed_deps = _FindNewDeps(new_cr_deps, webrtc_deps) - generated_android_deps = sorted([ - path for path in all_removed_deps if path.startswith(ANDROID_DEPS_PATH) - ]) - # Webrtc-only dependencies are handled in CalculateChangedDeps. - other_deps = sorted([ - path for path in all_removed_deps - if path not in generated_android_deps and path not in WEBRTC_ONLY_DEPS - ]) - return generated_android_deps, other_deps + all_removed_deps = _FindNewDeps(new_cr_deps, webrtc_deps) + generated_android_deps = sorted( + [path for path in all_removed_deps if path.startswith(ANDROID_DEPS_PATH)]) + # Webrtc-only dependencies are handled in CalculateChangedDeps. + other_deps = sorted([ + path for path in all_removed_deps + if path not in generated_android_deps and path not in WEBRTC_ONLY_DEPS + ]) + return generated_android_deps, other_deps def CalculateChangedDeps(webrtc_deps, new_cr_deps): - """ + """ Calculate changed deps entries based on entries defined in the WebRTC DEPS file: - If a shared dependency with the Chromium DEPS file: roll it to the same @@ -405,65 +399,64 @@ def CalculateChangedDeps(webrtc_deps, new_cr_deps): Returns: A list of ChangedDep objects representing the changed deps. 
""" - result = [] - webrtc_entries = BuildDepsentryDict(webrtc_deps) - new_cr_entries = BuildDepsentryDict(new_cr_deps) - for path, webrtc_deps_entry in webrtc_entries.items(): - if path in DONT_AUTOROLL_THESE: - continue - cr_deps_entry = new_cr_entries.get(path) - if cr_deps_entry: - assert type(cr_deps_entry) is type(webrtc_deps_entry) + result = [] + webrtc_entries = BuildDepsentryDict(webrtc_deps) + new_cr_entries = BuildDepsentryDict(new_cr_deps) + for path, webrtc_deps_entry in list(webrtc_entries.items()): + if path in DONT_AUTOROLL_THESE: + continue + cr_deps_entry = new_cr_entries.get(path) + if cr_deps_entry: + assert type(cr_deps_entry) is type(webrtc_deps_entry) - if isinstance(cr_deps_entry, CipdDepsEntry): - result.extend( - _FindChangedCipdPackages(path, webrtc_deps_entry.packages, - cr_deps_entry.packages)) - continue + if isinstance(cr_deps_entry, CipdDepsEntry): + result.extend( + _FindChangedCipdPackages(path, webrtc_deps_entry.packages, + cr_deps_entry.packages)) + continue - # Use the revision from Chromium's DEPS file. - new_rev = cr_deps_entry.revision - assert webrtc_deps_entry.url == cr_deps_entry.url, ( - 'WebRTC DEPS entry %s has a different URL %s than Chromium %s.' - % (path, webrtc_deps_entry.url, cr_deps_entry.url)) - else: - if isinstance(webrtc_deps_entry, DepsEntry): - # Use the HEAD of the deps repo. - stdout, _ = _RunCommand( - ['git', 'ls-remote', webrtc_deps_entry.url, 'HEAD']) - new_rev = stdout.strip().split('\t')[0] - else: - # The dependency has been removed from chromium. - # This is handled by FindRemovedDeps. - continue + # Use the revision from Chromium's DEPS file. + new_rev = cr_deps_entry.revision + assert webrtc_deps_entry.url == cr_deps_entry.url, ( + 'WebRTC DEPS entry %s has a different URL %s than Chromium %s.' % + (path, webrtc_deps_entry.url, cr_deps_entry.url)) + else: + if isinstance(webrtc_deps_entry, DepsEntry): + # Use the HEAD of the deps repo. + stdout, _ = _RunCommand( + ['git', 'ls-remote', webrtc_deps_entry.url, 'HEAD']) + new_rev = stdout.strip().split('\t')[0] + else: + # The dependency has been removed from chromium. + # This is handled by FindRemovedDeps. + continue - # Check if an update is necessary. - if webrtc_deps_entry.revision != new_rev: - logging.debug('Roll dependency %s to %s', path, new_rev) - result.append( - ChangedDep(path, webrtc_deps_entry.url, - webrtc_deps_entry.revision, new_rev)) - return sorted(result) + # Check if an update is necessary. 
+ if webrtc_deps_entry.revision != new_rev: + logging.debug('Roll dependency %s to %s', path, new_rev) + result.append( + ChangedDep(path, webrtc_deps_entry.url, webrtc_deps_entry.revision, + new_rev)) + return sorted(result) def CalculateChangedClang(new_cr_rev): - def GetClangRev(lines): - for line in lines: - match = CLANG_REVISION_RE.match(line) - if match: - return match.group(1) - raise RollError('Could not parse Clang revision!') + def GetClangRev(lines): + for line in lines: + match = CLANG_REVISION_RE.match(line) + if match: + return match.group(1) + raise RollError('Could not parse Clang revision!') - with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f: - current_lines = f.readlines() - current_rev = GetClangRev(current_lines) + with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f: + current_lines = f.readlines() + current_rev = GetClangRev(current_lines) - new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH, - new_cr_rev).splitlines() - new_rev = GetClangRev(new_clang_update_py) - return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, - new_rev) + new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH, + new_cr_rev).splitlines() + new_rev = GetClangRev(new_clang_update_py) + return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, new_rev) def GenerateCommitMessage( @@ -475,174 +468,171 @@ def GenerateCommitMessage( removed_deps_paths=None, clang_change=None, ): - current_cr_rev = rev_update.current_chromium_rev[0:10] - new_cr_rev = rev_update.new_chromium_rev[0:10] - rev_interval = '%s..%s' % (current_cr_rev, new_cr_rev) - git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos) + current_cr_rev = rev_update.current_chromium_rev[0:10] + new_cr_rev = rev_update.new_chromium_rev[0:10] + rev_interval = '%s..%s' % (current_cr_rev, new_cr_rev) + git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos) - commit_msg = [ - 'Roll chromium_revision %s (%s)\n' % - (rev_interval, git_number_interval), - 'Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval), - 'Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE % rev_interval) - ] + commit_msg = [ + 'Roll chromium_revision %s (%s)\n' % (rev_interval, git_number_interval), + 'Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval), + 'Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE % rev_interval) + ] - def Section(adjective, deps): - noun = 'dependency' if len(deps) == 1 else 'dependencies' - commit_msg.append('%s %s' % (adjective, noun)) + def Section(adjective, deps): + noun = 'dependency' if len(deps) == 1 else 'dependencies' + commit_msg.append('%s %s' % (adjective, noun)) - if changed_deps_list: - Section('Changed', changed_deps_list) + if changed_deps_list: + Section('Changed', changed_deps_list) - for c in changed_deps_list: - if isinstance(c, ChangedCipdPackage): - commit_msg.append('* %s: %s..%s' % - (c.path, c.current_version, c.new_version)) - else: - commit_msg.append( - '* %s: %s/+log/%s..%s' % - (c.path, c.url, c.current_rev[0:10], c.new_rev[0:10])) + for c in changed_deps_list: + if isinstance(c, ChangedCipdPackage): + commit_msg.append('* %s: %s..%s' % + (c.path, c.current_version, c.new_version)) + else: + commit_msg.append('* %s: %s/+log/%s..%s' % + (c.path, c.url, c.current_rev[0:10], c.new_rev[0:10])) - if added_deps_paths: - Section('Added', added_deps_paths) - commit_msg.extend('* %s' % p for p in added_deps_paths) + if added_deps_paths: + Section('Added', added_deps_paths) + commit_msg.extend('* %s' % p for p in added_deps_paths) - if 
removed_deps_paths: - Section('Removed', removed_deps_paths) - commit_msg.extend('* %s' % p for p in removed_deps_paths) + if removed_deps_paths: + Section('Removed', removed_deps_paths) + commit_msg.extend('* %s' % p for p in removed_deps_paths) - if any([changed_deps_list, added_deps_paths, removed_deps_paths]): - change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, 'DEPS') - commit_msg.append('DEPS diff: %s\n' % change_url) - else: - commit_msg.append('No dependencies changed.') + if any([changed_deps_list, added_deps_paths, removed_deps_paths]): + change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, 'DEPS') + commit_msg.append('DEPS diff: %s\n' % change_url) + else: + commit_msg.append('No dependencies changed.') - if clang_change and clang_change.current_rev != clang_change.new_rev: - commit_msg.append('Clang version changed %s:%s' % - (clang_change.current_rev, clang_change.new_rev)) - change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, - CLANG_UPDATE_SCRIPT_URL_PATH) - commit_msg.append('Details: %s\n' % change_url) - else: - commit_msg.append('No update to Clang.\n') + if clang_change and clang_change.current_rev != clang_change.new_rev: + commit_msg.append('Clang version changed %s:%s' % + (clang_change.current_rev, clang_change.new_rev)) + change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, + CLANG_UPDATE_SCRIPT_URL_PATH) + commit_msg.append('Details: %s\n' % change_url) + else: + commit_msg.append('No update to Clang.\n') - commit_msg.append('BUG=None') - return '\n'.join(commit_msg) + commit_msg.append('BUG=None') + return '\n'.join(commit_msg) def UpdateDepsFile(deps_filename, rev_update, changed_deps, new_cr_content): - """Update the DEPS file with the new revision.""" + """Update the DEPS file with the new revision.""" - with open(deps_filename, 'r') as deps_file: - deps_content = deps_file.read() + with open(deps_filename, 'r') as deps_file: + deps_content = deps_file.read() - # Update the chromium_revision variable. - deps_content = deps_content.replace(rev_update.current_chromium_rev, - rev_update.new_chromium_rev) + # Update the chromium_revision variable. + deps_content = deps_content.replace(rev_update.current_chromium_rev, + rev_update.new_chromium_rev) - # Add and remove dependencies. For now: only generated android deps. - # Since gclient cannot add or remove deps, we rely on the fact that - # these android deps are located in one place we can copy/paste. - deps_re = re.compile(ANDROID_DEPS_START + '.*' + ANDROID_DEPS_END, - re.DOTALL) - new_deps = deps_re.search(new_cr_content) - old_deps = deps_re.search(deps_content) - if not new_deps or not old_deps: - faulty = 'Chromium' if not new_deps else 'WebRTC' - raise RollError('Was expecting to find "%s" and "%s"\n' - 'in %s DEPS' % - (ANDROID_DEPS_START, ANDROID_DEPS_END, faulty)) - deps_content = deps_re.sub(new_deps.group(0), deps_content) + # Add and remove dependencies. For now: only generated android deps. + # Since gclient cannot add or remove deps, we rely on the fact that + # these android deps are located in one place we can copy/paste.
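+ # (Illustrative only, marker text assumed: the generated block in DEPS
+ # is fenced by comment lines along the lines of
+ # # === ANDROID_DEPS Generated Code Start ===
+ # # === ANDROID_DEPS Generated Code End ===
+ # so the regex below can swap the whole block in a single sub().)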
+ deps_re = re.compile(ANDROID_DEPS_START + '.*' + ANDROID_DEPS_END, re.DOTALL) + new_deps = deps_re.search(new_cr_content) + old_deps = deps_re.search(deps_content) + if not new_deps or not old_deps: + faulty = 'Chromium' if not new_deps else 'WebRTC' + raise RollError('Was expecting to find "%s" and "%s"\n' + 'in %s DEPS' % + (ANDROID_DEPS_START, ANDROID_DEPS_END, faulty)) + deps_content = deps_re.sub(new_deps.group(0), deps_content) - with open(deps_filename, 'w') as deps_file: - deps_file.write(deps_content) + with open(deps_filename, 'w') as deps_file: + deps_file.write(deps_content) - # Update each individual DEPS entry. - for dep in changed_deps: - local_dep_dir = os.path.join(CHECKOUT_ROOT_DIR, dep.path) - if not os.path.isdir(local_dep_dir): - raise RollError( - 'Cannot find local directory %s. Either run\n' - 'gclient sync --deps=all\n' - 'or make sure the .gclient file for your solution contains all ' - 'platforms in the target_os list, i.e.\n' - 'target_os = ["android", "unix", "mac", "ios", "win"];\n' - 'Then run "gclient sync" again.' % local_dep_dir) - if isinstance(dep, ChangedCipdPackage): - package = dep.package.format() # Eliminate double curly brackets - update = '%s:%s@%s' % (dep.path, package, dep.new_version) - else: - update = '%s@%s' % (dep.path, dep.new_rev) - _RunCommand(['gclient', 'setdep', '--revision', update], - working_dir=CHECKOUT_SRC_DIR) + # Update each individual DEPS entry. + for dep in changed_deps: + local_dep_dir = os.path.join(CHECKOUT_ROOT_DIR, dep.path) + if not os.path.isdir(local_dep_dir): + raise RollError( + 'Cannot find local directory %s. Either run\n' + 'gclient sync --deps=all\n' + 'or make sure the .gclient file for your solution contains all ' + 'platforms in the target_os list, i.e.\n' + 'target_os = ["android", "unix", "mac", "ios", "win"];\n' + 'Then run "gclient sync" again.' 
% local_dep_dir) + if isinstance(dep, ChangedCipdPackage): + package = dep.package.format() # Eliminate double curly brackets + update = '%s:%s@%s' % (dep.path, package, dep.new_version) + else: + update = '%s@%s' % (dep.path, dep.new_rev) + _RunCommand(['gclient', 'setdep', '--revision', update], + working_dir=CHECKOUT_SRC_DIR) def _IsTreeClean(): - stdout, _ = _RunCommand(['git', 'status', '--porcelain']) - if len(stdout) == 0: - return True + stdout, _ = _RunCommand(['git', 'status', '--porcelain']) + if len(stdout) == 0: + return True - logging.error('Dirty/unversioned files:\n%s', stdout) - return False + logging.error('Dirty/unversioned files:\n%s', stdout) + return False def _EnsureUpdatedMainBranch(dry_run): - current_branch = _RunCommand(['git', 'rev-parse', '--abbrev-ref', - 'HEAD'])[0].splitlines()[0] - if current_branch != 'main': - logging.error('Please checkout the main branch and re-run this script.') - if not dry_run: - sys.exit(-1) + current_branch = _RunCommand(['git', 'rev-parse', '--abbrev-ref', + 'HEAD'])[0].splitlines()[0] + if current_branch != 'main': + logging.error('Please checkout the main branch and re-run this script.') + if not dry_run: + sys.exit(-1) - logging.info('Updating main branch...') - _RunCommand(['git', 'pull']) + logging.info('Updating main branch...') + _RunCommand(['git', 'pull']) def _CreateRollBranch(dry_run): - logging.info('Creating roll branch: %s', ROLL_BRANCH_NAME) - if not dry_run: - _RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME]) + logging.info('Creating roll branch: %s', ROLL_BRANCH_NAME) + if not dry_run: + _RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME]) def _RemovePreviousRollBranch(dry_run): - active_branch, branches = _GetBranches() - if active_branch == ROLL_BRANCH_NAME: - active_branch = 'main' - if ROLL_BRANCH_NAME in branches: - logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME) - if not dry_run: - _RunCommand(['git', 'checkout', active_branch]) - _RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME]) + active_branch, branches = _GetBranches() + if active_branch == ROLL_BRANCH_NAME: + active_branch = 'main' + if ROLL_BRANCH_NAME in branches: + logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME) + if not dry_run: + _RunCommand(['git', 'checkout', active_branch]) + _RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME]) def _LocalCommit(commit_msg, dry_run): - logging.info('Committing changes locally.') - if not dry_run: - _RunCommand(['git', 'add', '--update', '.']) - _RunCommand(['git', 'commit', '-m', commit_msg]) + logging.info('Committing changes locally.') + if not dry_run: + _RunCommand(['git', 'add', '--update', '.']) + _RunCommand(['git', 'commit', '-m', commit_msg]) def ChooseCQMode(skip_cq, cq_over, current_commit_pos, new_commit_pos): - if skip_cq: - return 0 - if (new_commit_pos - current_commit_pos) < cq_over: - return 1 - return 2 + if skip_cq: + return 0 + if (new_commit_pos - current_commit_pos) < cq_over: + return 1 + return 2 def _GetCcRecipients(changed_deps_list): - """Returns a list of emails to notify based on the changed deps list. + """Returns a list of emails to notify based on the changed deps list. 
""" - cc_recipients = [] - for c in changed_deps_list: - if 'libvpx' in c.path or 'libaom' in c.path: - cc_recipients.append('marpan@webrtc.org') - cc_recipients.append('jianj@chromium.org') - return cc_recipients + cc_recipients = [] + for c in changed_deps_list: + if 'libvpx' in c.path or 'libaom' in c.path: + cc_recipients.append('marpan@webrtc.org') + cc_recipients.append('jianj@chromium.org') + return cc_recipients def _UploadCL(commit_queue_mode, add_cc=None): - """Upload the committed changes as a changelist to Gerrit. + """Upload the committed changes as a changelist to Gerrit. commit_queue_mode: - 2: Submit to commit queue. @@ -651,140 +641,139 @@ def _UploadCL(commit_queue_mode, add_cc=None): add_cc: A list of email addresses to add as CC recipients. """ - cc_recipients = [NOTIFY_EMAIL] - if add_cc: - cc_recipients.extend(add_cc) - cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks'] - if commit_queue_mode >= 2: - logging.info('Sending the CL to the CQ...') - cmd.extend(['-o', 'label=Bot-Commit+1']) - cmd.extend(['-o', 'label=Commit-Queue+2']) - cmd.extend(['--send-mail', '--cc', ','.join(cc_recipients)]) - elif commit_queue_mode >= 1: - logging.info('Starting CQ dry run...') - cmd.extend(['-o', 'label=Commit-Queue+1']) - extra_env = { - 'EDITOR': 'true', - 'SKIP_GCE_AUTH_FOR_GIT': '1', - } - stdout, stderr = _RunCommand(cmd, extra_env=extra_env) - logging.debug('Output from "git cl upload":\nstdout:\n%s\n\nstderr:\n%s', - stdout, stderr) + cc_recipients = [NOTIFY_EMAIL] + if add_cc: + cc_recipients.extend(add_cc) + cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks'] + if commit_queue_mode >= 2: + logging.info('Sending the CL to the CQ...') + cmd.extend(['-o', 'label=Bot-Commit+1']) + cmd.extend(['-o', 'label=Commit-Queue+2']) + cmd.extend(['--send-mail', '--cc', ','.join(cc_recipients)]) + elif commit_queue_mode >= 1: + logging.info('Starting CQ dry run...') + cmd.extend(['-o', 'label=Commit-Queue+1']) + extra_env = { + 'EDITOR': 'true', + 'SKIP_GCE_AUTH_FOR_GIT': '1', + } + stdout, stderr = _RunCommand(cmd, extra_env=extra_env) + logging.debug('Output from "git cl upload":\nstdout:\n%s\n\nstderr:\n%s', + stdout, stderr) def GetRollRevisionRanges(opts, webrtc_deps): - current_cr_rev = webrtc_deps['vars']['chromium_revision'] - new_cr_rev = opts.revision - if not new_cr_rev: - stdout, _ = _RunCommand(['git', 'ls-remote', CHROMIUM_SRC_URL, 'HEAD']) - head_rev = stdout.strip().split('\t')[0] - logging.info('No revision specified. Using HEAD: %s', head_rev) - new_cr_rev = head_rev + current_cr_rev = webrtc_deps['vars']['chromium_revision'] + new_cr_rev = opts.revision + if not new_cr_rev: + stdout, _ = _RunCommand(['git', 'ls-remote', CHROMIUM_SRC_URL, 'HEAD']) + head_rev = stdout.strip().split('\t')[0] + logging.info('No revision specified. Using HEAD: %s', head_rev) + new_cr_rev = head_rev - return ChromiumRevisionUpdate(current_cr_rev, new_cr_rev) + return ChromiumRevisionUpdate(current_cr_rev, new_cr_rev) def main(): - p = argparse.ArgumentParser() - p.add_argument('--clean', + p = argparse.ArgumentParser() + p.add_argument('--clean', + action='store_true', + default=False, + help='Removes any previous local roll branch.') + p.add_argument('-r', + '--revision', + help=('Chromium Git revision to roll to. 
Defaults to the ' + 'Chromium HEAD revision if omitted.')) + p.add_argument('--dry-run', + action='store_true', + default=False, + help=('Calculate changes and modify DEPS, but don\'t create ' + 'any local branch, commit, upload CL or send any ' + 'tryjobs.')) + p.add_argument('-i', + '--ignore-unclean-workdir', + action='store_true', + default=False, + help=('Ignore if the current branch is not main or if there ' + 'are uncommitted changes (default: %(default)s).')) + grp = p.add_mutually_exclusive_group() + grp.add_argument('--skip-cq', action='store_true', default=False, - help='Removes any previous local roll branch.') - p.add_argument('-r', - '--revision', - help=('Chromium Git revision to roll to. Defaults to the ' - 'Chromium HEAD revision if omitted.')) - p.add_argument('--dry-run', - action='store_true', - default=False, - help=('Calculate changes and modify DEPS, but don\'t create ' - 'any local branch, commit, upload CL or send any ' - 'tryjobs.')) - p.add_argument('-i', - '--ignore-unclean-workdir', - action='store_true', - default=False, - help=('Ignore if the current branch is not main or if there ' - 'are uncommitted changes (default: %(default)s).')) - grp = p.add_mutually_exclusive_group() - grp.add_argument( - '--skip-cq', - action='store_true', - default=False, - help='Skip sending the CL to the CQ (default: %(default)s)') - grp.add_argument('--cq-over', - type=int, - default=1, - help=('Commit queue dry run if the revision difference ' - 'is below this number (default: %(default)s)')) - p.add_argument('-v', - '--verbose', - action='store_true', - default=False, - help='Be extra verbose in printing of log messages.') - opts = p.parse_args() + help='Skip sending the CL to the CQ (default: %(default)s)') + grp.add_argument('--cq-over', + type=int, + default=1, + help=('Commit queue dry run if the revision difference ' + 'is below this number (default: %(default)s)')) + p.add_argument('-v', + '--verbose', + action='store_true', + default=False, + help='Be extra verbose in printing of log messages.') + opts = p.parse_args() - if opts.verbose: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig(level=logging.INFO) + if opts.verbose: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) - if not opts.ignore_unclean_workdir and not _IsTreeClean(): - logging.error('Please clean your local checkout first.') - return 1 + if not opts.ignore_unclean_workdir and not _IsTreeClean(): + logging.error('Please clean your local checkout first.') + return 1 - if opts.clean: - _RemovePreviousRollBranch(opts.dry_run) + if opts.clean: + _RemovePreviousRollBranch(opts.dry_run) - if not opts.ignore_unclean_workdir: - _EnsureUpdatedMainBranch(opts.dry_run) + if not opts.ignore_unclean_workdir: + _EnsureUpdatedMainBranch(opts.dry_run) - deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS') - webrtc_deps = ParseLocalDepsFile(deps_filename) + deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS') + webrtc_deps = ParseLocalDepsFile(deps_filename) - rev_update = GetRollRevisionRanges(opts, webrtc_deps) + rev_update = GetRollRevisionRanges(opts, webrtc_deps) - current_commit_pos = ParseCommitPosition( - ReadRemoteCrCommit(rev_update.current_chromium_rev)) - new_commit_pos = ParseCommitPosition( - ReadRemoteCrCommit(rev_update.new_chromium_rev)) + current_commit_pos = ParseCommitPosition( + ReadRemoteCrCommit(rev_update.current_chromium_rev)) + new_commit_pos = ParseCommitPosition( + ReadRemoteCrCommit(rev_update.new_chromium_rev)) - 
new_cr_content = ReadRemoteCrFile('DEPS', rev_update.new_chromium_rev) - new_cr_deps = ParseDepsDict(new_cr_content) - changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) - # Discard other deps, assumed to be chromium-only dependencies. - new_generated_android_deps, _ = FindAddedDeps(webrtc_deps, new_cr_deps) - removed_generated_android_deps, other_deps = FindRemovedDeps( - webrtc_deps, new_cr_deps) - if other_deps: - raise RollError('WebRTC DEPS entries are missing from Chromium: %s.\n' - 'Remove them or add them to either ' - 'WEBRTC_ONLY_DEPS or DONT_AUTOROLL_THESE.' % other_deps) - clang_change = CalculateChangedClang(rev_update.new_chromium_rev) - commit_msg = GenerateCommitMessage( - rev_update, - current_commit_pos, - new_commit_pos, - changed_deps, - added_deps_paths=new_generated_android_deps, - removed_deps_paths=removed_generated_android_deps, - clang_change=clang_change) - logging.debug('Commit message:\n%s', commit_msg) + new_cr_content = ReadRemoteCrFile('DEPS', rev_update.new_chromium_rev) + new_cr_deps = ParseDepsDict(new_cr_content) + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + # Discard other deps, assumed to be chromium-only dependencies. + new_generated_android_deps, _ = FindAddedDeps(webrtc_deps, new_cr_deps) + removed_generated_android_deps, other_deps = FindRemovedDeps( + webrtc_deps, new_cr_deps) + if other_deps: + raise RollError('WebRTC DEPS entries are missing from Chromium: %s.\n' + 'Remove them or add them to either ' + 'WEBRTC_ONLY_DEPS or DONT_AUTOROLL_THESE.' % other_deps) + clang_change = CalculateChangedClang(rev_update.new_chromium_rev) + commit_msg = GenerateCommitMessage( + rev_update, + current_commit_pos, + new_commit_pos, + changed_deps, + added_deps_paths=new_generated_android_deps, + removed_deps_paths=removed_generated_android_deps, + clang_change=clang_change) + logging.debug('Commit message:\n%s', commit_msg) - _CreateRollBranch(opts.dry_run) + _CreateRollBranch(opts.dry_run) + if not opts.dry_run: + UpdateDepsFile(deps_filename, rev_update, changed_deps, new_cr_content) + if _IsTreeClean(): + logging.info("No DEPS changes detected, skipping CL creation.") + else: + _LocalCommit(commit_msg, opts.dry_run) + commit_queue_mode = ChooseCQMode(opts.skip_cq, opts.cq_over, + current_commit_pos, new_commit_pos) + logging.info('Uploading CL...') if not opts.dry_run: - UpdateDepsFile(deps_filename, rev_update, changed_deps, new_cr_content) - if _IsTreeClean(): - logging.info("No DEPS changes detected, skipping CL creation.") - else: - _LocalCommit(commit_msg, opts.dry_run) - commit_queue_mode = ChooseCQMode(opts.skip_cq, opts.cq_over, - current_commit_pos, new_commit_pos) - logging.info('Uploading CL...') - if not opts.dry_run: - _UploadCL(commit_queue_mode, _GetCcRecipients(changed_deps)) - return 0 + _UploadCL(commit_queue_mode, _GetCcRecipients(changed_deps)) + return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/autoroller/unittests/roll_deps_test.py b/tools_webrtc/autoroller/unittests/roll_deps_test.py index 93baebff5d..688b6651b8 100755 --- a/tools_webrtc/autoroller/unittests/roll_deps_test.py +++ b/tools_webrtc/autoroller/unittests/roll_deps_test.py @@ -1,4 +1,5 @@ -#!/usr/bin/env vpython +#!/usr/bin/env vpython3 + # Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -7,7 +8,6 @@ # in the file PATENTS. 
All contributing project authors may # be found in the AUTHORS file in the root of the source tree. -from __future__ import absolute_import import glob import os @@ -15,16 +15,11 @@ import shutil import sys import tempfile import unittest +import mock SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) PARENT_DIR = os.path.join(SCRIPT_DIR, os.pardir) sys.path.append(PARENT_DIR) -# Workaround for the presubmit, plan only to run in py3 now. -# TODO(webrtc:13418) Remove when py2 presubmit is gone. -if sys.version_info >= (3, 3): - from unittest import mock -else: - import mock import roll_deps from roll_deps import CalculateChangedDeps, FindAddedDeps, \ @@ -54,293 +49,284 @@ NO_CHROMIUM_REVISION_UPDATE = ChromiumRevisionUpdate('cafe', 'cafe') class TestError(Exception): - pass + pass -class FakeCmd(object): - def __init__(self): - self.expectations = [] +class FakeCmd: + def __init__(self): + self.expectations = [] - def AddExpectation(self, *args, **kwargs): - returns = kwargs.pop('_returns', None) - ignores = kwargs.pop('_ignores', []) - self.expectations.append((args, kwargs, returns, ignores)) + def AddExpectation(self, *args, **kwargs): + returns = kwargs.pop('_returns', None) + ignores = kwargs.pop('_ignores', []) + self.expectations.append((args, kwargs, returns, ignores)) - def __call__(self, *args, **kwargs): - if not self.expectations: - raise TestError('Got unexpected\n%s\n%s' % (args, kwargs)) - exp_args, exp_kwargs, exp_returns, ignores = self.expectations.pop(0) - for item in ignores: - kwargs.pop(item, None) - if args != exp_args or kwargs != exp_kwargs: - message = 'Expected:\n args: %s\n kwargs: %s\n' % (exp_args, - exp_kwargs) - message += 'Got:\n args: %s\n kwargs: %s\n' % (args, kwargs) - raise TestError(message) - return exp_returns + def __call__(self, *args, **kwargs): + if not self.expectations: + raise TestError('Got unexpected\n%s\n%s' % (args, kwargs)) + exp_args, exp_kwargs, exp_returns, ignores = self.expectations.pop(0) + for item in ignores: + kwargs.pop(item, None) + if args != exp_args or kwargs != exp_kwargs: + message = 'Expected:\n args: %s\n kwargs: %s\n' % (exp_args, exp_kwargs) + message += 'Got:\n args: %s\n kwargs: %s\n' % (args, kwargs) + raise TestError(message) + return exp_returns -class NullCmd(object): - """No-op mock when calls mustn't be checked. """ +class NullCmd: + """No-op mock when calls mustn't be checked. """ - def __call__(self, *args, **kwargs): - # Empty stdout and stderr. - return None, None + def __call__(self, *args, **kwargs): + # Empty stdout and stderr. 
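+ # Returning a (None, None) pair mirrors _RunCommand's (stdout, stderr)
+ # tuple, so callers patched with NullCmd can still unpack two values.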
+ return None, None class TestRollChromiumRevision(unittest.TestCase): - def setUp(self): - self._output_dir = tempfile.mkdtemp() - test_data_dir = os.path.join(SCRIPT_DIR, 'testdata', 'roll_deps') - for test_file in glob.glob(os.path.join(test_data_dir, '*')): - shutil.copy(test_file, self._output_dir) - join = lambda f: os.path.join(self._output_dir, f) - self._webrtc_depsfile = join('DEPS') - self._new_cr_depsfile = join('DEPS.chromium.new') - self._webrtc_depsfile_android = join('DEPS.with_android_deps') - self._new_cr_depsfile_android = join('DEPS.chromium.with_android_deps') - self.fake = FakeCmd() + def setUp(self): + self._output_dir = tempfile.mkdtemp() + test_data_dir = os.path.join(SCRIPT_DIR, 'testdata', 'roll_deps') + for test_file in glob.glob(os.path.join(test_data_dir, '*')): + shutil.copy(test_file, self._output_dir) + join = lambda f: os.path.join(self._output_dir, f) + self._webrtc_depsfile = join('DEPS') + self._new_cr_depsfile = join('DEPS.chromium.new') + self._webrtc_depsfile_android = join('DEPS.with_android_deps') + self._new_cr_depsfile_android = join('DEPS.chromium.with_android_deps') + self.fake = FakeCmd() - def tearDown(self): - shutil.rmtree(self._output_dir, ignore_errors=True) - self.assertEqual(self.fake.expectations, []) + def tearDown(self): + shutil.rmtree(self._output_dir, ignore_errors=True) + self.assertEqual(self.fake.expectations, []) - def testVarLookup(self): - local_scope = {'foo': 'wrong', 'vars': {'foo': 'bar'}} - lookup = roll_deps.VarLookup(local_scope) - self.assertEqual(lookup('foo'), 'bar') + def testVarLookup(self): + local_scope = {'foo': 'wrong', 'vars': {'foo': 'bar'}} + lookup = roll_deps.VarLookup(local_scope) + self.assertEqual(lookup('foo'), 'bar') - def testUpdateDepsFile(self): - new_rev = 'aaaaabbbbbcccccdddddeeeeefffff0000011111' - current_rev = TEST_DATA_VARS['chromium_revision'] + def testUpdateDepsFile(self): + new_rev = 'aaaaabbbbbcccccdddddeeeeefffff0000011111' + current_rev = TEST_DATA_VARS['chromium_revision'] - with open(self._new_cr_depsfile_android) as deps_file: - new_cr_contents = deps_file.read() + with open(self._new_cr_depsfile_android) as deps_file: + new_cr_contents = deps_file.read() - UpdateDepsFile(self._webrtc_depsfile, - ChromiumRevisionUpdate(current_rev, new_rev), [], - new_cr_contents) - with open(self._webrtc_depsfile) as deps_file: - deps_contents = deps_file.read() - self.assertTrue( - new_rev in deps_contents, - 'Failed to find %s in\n%s' % (new_rev, deps_contents)) + UpdateDepsFile(self._webrtc_depsfile, + ChromiumRevisionUpdate(current_rev, new_rev), [], + new_cr_contents) + with open(self._webrtc_depsfile) as deps_file: + deps_contents = deps_file.read() + self.assertTrue(new_rev in deps_contents, + 'Failed to find %s in\n%s' % (new_rev, deps_contents)) - def _UpdateDepsSetup(self): - with open(self._webrtc_depsfile_android) as deps_file: - webrtc_contents = deps_file.read() - with open(self._new_cr_depsfile_android) as deps_file: - new_cr_contents = deps_file.read() - webrtc_deps = ParseDepsDict(webrtc_contents) - new_cr_deps = ParseDepsDict(new_cr_contents) + def _UpdateDepsSetup(self): + with open(self._webrtc_depsfile_android) as deps_file: + webrtc_contents = deps_file.read() + with open(self._new_cr_depsfile_android) as deps_file: + new_cr_contents = deps_file.read() + webrtc_deps = ParseDepsDict(webrtc_contents) + new_cr_deps = ParseDepsDict(new_cr_contents) - changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) - with mock.patch('roll_deps._RunCommand', NullCmd()): - 
UpdateDepsFile(self._webrtc_depsfile_android, - NO_CHROMIUM_REVISION_UPDATE, changed_deps, - new_cr_contents) + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + with mock.patch('roll_deps._RunCommand', NullCmd()): + UpdateDepsFile(self._webrtc_depsfile_android, NO_CHROMIUM_REVISION_UPDATE, + changed_deps, new_cr_contents) - with open(self._webrtc_depsfile_android) as deps_file: - updated_contents = deps_file.read() + with open(self._webrtc_depsfile_android) as deps_file: + updated_contents = deps_file.read() - return webrtc_contents, updated_contents + return webrtc_contents, updated_contents - def testUpdateAndroidGeneratedDeps(self): - _, updated_contents = self._UpdateDepsSetup() + def testUpdateAndroidGeneratedDeps(self): + _, updated_contents = self._UpdateDepsSetup() - changed = 'third_party/android_deps/libs/android_arch_core_common' - changed_version = '1.0.0-cr0' - self.assertTrue(changed in updated_contents) - self.assertTrue(changed_version in updated_contents) + changed = 'third_party/android_deps/libs/android_arch_core_common' + changed_version = '1.0.0-cr0' + self.assertTrue(changed in updated_contents) + self.assertTrue(changed_version in updated_contents) - def testAddAndroidGeneratedDeps(self): - webrtc_contents, updated_contents = self._UpdateDepsSetup() + def testAddAndroidGeneratedDeps(self): + webrtc_contents, updated_contents = self._UpdateDepsSetup() - added = 'third_party/android_deps/libs/android_arch_lifecycle_common' - self.assertFalse(added in webrtc_contents) - self.assertTrue(added in updated_contents) + added = 'third_party/android_deps/libs/android_arch_lifecycle_common' + self.assertFalse(added in webrtc_contents) + self.assertTrue(added in updated_contents) - def testRemoveAndroidGeneratedDeps(self): - webrtc_contents, updated_contents = self._UpdateDepsSetup() + def testRemoveAndroidGeneratedDeps(self): + webrtc_contents, updated_contents = self._UpdateDepsSetup() - removed = 'third_party/android_deps/libs/android_arch_lifecycle_runtime' - self.assertTrue(removed in webrtc_contents) - self.assertFalse(removed in updated_contents) + removed = 'third_party/android_deps/libs/android_arch_lifecycle_runtime' + self.assertTrue(removed in webrtc_contents) + self.assertFalse(removed in updated_contents) - def testParseDepsDict(self): - with open(self._webrtc_depsfile) as deps_file: - deps_contents = deps_file.read() - local_scope = ParseDepsDict(deps_contents) - vars_dict = local_scope['vars'] + def testParseDepsDict(self): + with open(self._webrtc_depsfile) as deps_file: + deps_contents = deps_file.read() + local_scope = ParseDepsDict(deps_contents) + vars_dict = local_scope['vars'] - def AssertVar(variable_name): - self.assertEqual(vars_dict[variable_name], - TEST_DATA_VARS[variable_name]) + def AssertVar(variable_name): + self.assertEqual(vars_dict[variable_name], TEST_DATA_VARS[variable_name]) - AssertVar('chromium_git') - AssertVar('chromium_revision') - self.assertEqual(len(local_scope['deps']), 3) - self.assertEqual(len(local_scope['deps_os']), 1) + AssertVar('chromium_git') + AssertVar('chromium_revision') + self.assertEqual(len(local_scope['deps']), 3) + self.assertEqual(len(local_scope['deps_os']), 1) - def testGetMatchingDepsEntriesReturnsPathInSimpleCase(self): - entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing/gtest') - self.assertEqual(len(entries), 1) - self.assertEqual(entries[0], DEPS_ENTRIES['src/testing/gtest']) + def testGetMatchingDepsEntriesReturnsPathInSimpleCase(self): + entries = 
GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing/gtest') + self.assertEqual(len(entries), 1) + self.assertEqual(entries[0], DEPS_ENTRIES['src/testing/gtest']) - def testGetMatchingDepsEntriesHandlesSimilarStartingPaths(self): - entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing') - self.assertEqual(len(entries), 2) + def testGetMatchingDepsEntriesHandlesSimilarStartingPaths(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing') + self.assertEqual(len(entries), 2) - def testGetMatchingDepsEntriesHandlesTwoPathsWithIdenticalFirstParts(self): - entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/build') - self.assertEqual(len(entries), 1) + def testGetMatchingDepsEntriesHandlesTwoPathsWithIdenticalFirstParts(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/build') + self.assertEqual(len(entries), 1) - def testCalculateChangedDeps(self): - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile) - with mock.patch('roll_deps._RunCommand', self.fake): - _SetupGitLsRemoteCall( - self.fake, - 'https://chromium.googlesource.com/chromium/src/build', - BUILD_NEW_REV) - changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + def testCalculateChangedDeps(self): + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile) + with mock.patch('roll_deps._RunCommand', self.fake): + _SetupGitLsRemoteCall( + self.fake, 'https://chromium.googlesource.com/chromium/src/build', + BUILD_NEW_REV) + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) - self.assertEqual(len(changed_deps), 3) - self.assertEqual(changed_deps[0].path, 'src/build') - self.assertEqual(changed_deps[0].current_rev, BUILD_OLD_REV) - self.assertEqual(changed_deps[0].new_rev, BUILD_NEW_REV) + self.assertEqual(len(changed_deps), 3) + self.assertEqual(changed_deps[0].path, 'src/build') + self.assertEqual(changed_deps[0].current_rev, BUILD_OLD_REV) + self.assertEqual(changed_deps[0].new_rev, BUILD_NEW_REV) - self.assertEqual(changed_deps[1].path, 'src/buildtools/linux64') - self.assertEqual(changed_deps[1].package, 'gn/gn/linux-amd64') - self.assertEqual(changed_deps[1].current_version, - 'git_revision:69ec4fca1fa69ddadae13f9e6b7507efa0675263') - self.assertEqual(changed_deps[1].new_version, - 'git_revision:new-revision') + self.assertEqual(changed_deps[1].path, 'src/buildtools/linux64') + self.assertEqual(changed_deps[1].package, 'gn/gn/linux-amd64') + self.assertEqual(changed_deps[1].current_version, + 'git_revision:69ec4fca1fa69ddadae13f9e6b7507efa0675263') + self.assertEqual(changed_deps[1].new_version, 'git_revision:new-revision') - self.assertEqual(changed_deps[2].path, 'src/third_party/depot_tools') - self.assertEqual(changed_deps[2].current_rev, DEPOTTOOLS_OLD_REV) - self.assertEqual(changed_deps[2].new_rev, DEPOTTOOLS_NEW_REV) + self.assertEqual(changed_deps[2].path, 'src/third_party/depot_tools') + self.assertEqual(changed_deps[2].current_rev, DEPOTTOOLS_OLD_REV) + self.assertEqual(changed_deps[2].new_rev, DEPOTTOOLS_NEW_REV) - def testWithDistinctDeps(self): - """Check CalculateChangedDeps works when deps are added/removed.""" - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) - self.assertEqual(len(changed_deps), 1) - self.assertEqual( - changed_deps[0].path, - 'src/third_party/android_deps/libs/android_arch_core_common') - 
self.assertEqual( - changed_deps[0].package, - 'chromium/third_party/android_deps/libs/android_arch_core_common') - self.assertEqual(changed_deps[0].current_version, 'version:0.9.0') - self.assertEqual(changed_deps[0].new_version, 'version:1.0.0-cr0') + def testWithDistinctDeps(self): + """Check CalculateChangedDeps works when deps are added/removed.""" + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + self.assertEqual(len(changed_deps), 1) + self.assertEqual( + changed_deps[0].path, + 'src/third_party/android_deps/libs/android_arch_core_common') + self.assertEqual( + changed_deps[0].package, + 'chromium/third_party/android_deps/libs/android_arch_core_common') + self.assertEqual(changed_deps[0].current_version, 'version:0.9.0') + self.assertEqual(changed_deps[0].new_version, 'version:1.0.0-cr0') - def testFindAddedDeps(self): - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - added_android_paths, other_paths = FindAddedDeps( - webrtc_deps, new_cr_deps) - self.assertEqual(added_android_paths, [ - 'src/third_party/android_deps/libs/android_arch_lifecycle_common' - ]) - self.assertEqual(other_paths, []) + def testFindAddedDeps(self): + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + added_android_paths, other_paths = FindAddedDeps(webrtc_deps, new_cr_deps) + self.assertEqual( + added_android_paths, + ['src/third_party/android_deps/libs/android_arch_lifecycle_common']) + self.assertEqual(other_paths, []) - def testFindRemovedDeps(self): - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - removed_android_paths, other_paths = FindRemovedDeps( - webrtc_deps, new_cr_deps) - self.assertEqual(removed_android_paths, [ - 'src/third_party/android_deps/libs/android_arch_lifecycle_runtime' - ]) - self.assertEqual(other_paths, []) + def testFindRemovedDeps(self): + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + removed_android_paths, other_paths = FindRemovedDeps( + webrtc_deps, new_cr_deps) + self.assertEqual( + removed_android_paths, + ['src/third_party/android_deps/libs/android_arch_lifecycle_runtime']) + self.assertEqual(other_paths, []) - def testMissingDepsIsDetected(self): - """Check error is reported when deps cannot be automatically removed.""" - # The situation at test is the following: - # * A WebRTC DEPS entry is missing from Chromium. - # * The dependency isn't an android_deps (those are supported). - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - _, other_paths = FindRemovedDeps(webrtc_deps, new_cr_deps) - self.assertEqual( - other_paths, - ['src/buildtools/linux64', 'src/third_party/depot_tools']) + def testMissingDepsIsDetected(self): + """Check error is reported when deps cannot be automatically removed.""" + # The situation at test is the following: + # * A WebRTC DEPS entry is missing from Chromium. + # * The dependency isn't an android_deps (those are supported). 
+    webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile)
+    new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android)
+    _, other_paths = FindRemovedDeps(webrtc_deps, new_cr_deps)
+    self.assertEqual(other_paths,
+                     ['src/buildtools/linux64', 'src/third_party/depot_tools'])
-    def testExpectedDepsIsNotReportedMissing(self):
-        """Some deps mustn't be seen as missing, even if absent from Chromium."""
-        webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile)
-        new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android)
-        removed_android_paths, other_paths = FindRemovedDeps(
-            webrtc_deps, new_cr_deps)
-        self.assertTrue('src/build' not in removed_android_paths)
-        self.assertTrue('src/build' not in other_paths)
+  def testExpectedDepsIsNotReportedMissing(self):
+    """Some deps mustn't be seen as missing, even if absent from Chromium."""
+    webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile)
+    new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android)
+    removed_android_paths, other_paths = FindRemovedDeps(
+        webrtc_deps, new_cr_deps)
+    self.assertTrue('src/build' not in removed_android_paths)
+    self.assertTrue('src/build' not in other_paths)
-    def _CommitMessageSetup(self):
-        webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android)
-        new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android)
+  def _CommitMessageSetup(self):
+    webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android)
+    new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android)
-        changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps)
-        added_paths, _ = FindAddedDeps(webrtc_deps, new_cr_deps)
-        removed_paths, _ = FindRemovedDeps(webrtc_deps, new_cr_deps)
+    changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps)
+    added_paths, _ = FindAddedDeps(webrtc_deps, new_cr_deps)
+    removed_paths, _ = FindRemovedDeps(webrtc_deps, new_cr_deps)
-        current_commit_pos = 'cafe'
-        new_commit_pos = 'f00d'
+    current_commit_pos = 'cafe'
+    new_commit_pos = 'f00d'
-        commit_msg = GenerateCommitMessage(NO_CHROMIUM_REVISION_UPDATE,
-                                           current_commit_pos,
-                                           new_commit_pos, changed_deps,
-                                           added_paths, removed_paths)
+    commit_msg = GenerateCommitMessage(NO_CHROMIUM_REVISION_UPDATE,
+                                       current_commit_pos, new_commit_pos,
+                                       changed_deps, added_paths, removed_paths)
-        return [l.strip() for l in commit_msg.split('\n')]
+    return [l.strip() for l in commit_msg.split('\n')]
-    def testChangedDepsInCommitMessage(self):
-        commit_lines = self._CommitMessageSetup()
+  def testChangedDepsInCommitMessage(self):
+    commit_lines = self._CommitMessageSetup()
-        changed = '* src/third_party/android_deps/libs/' \
-                  'android_arch_core_common: version:0.9.0..version:1.0.0-cr0'
-        self.assertTrue(changed in commit_lines)
-        # Check it is in adequate section.
-        changed_line = commit_lines.index(changed)
-        self.assertTrue('Changed' in commit_lines[changed_line - 1])
+    changed = '* src/third_party/android_deps/libs/' \
+              'android_arch_core_common: version:0.9.0..version:1.0.0-cr0'
+    self.assertTrue(changed in commit_lines)
+    # Check it is in adequate section.
+    changed_line = commit_lines.index(changed)
+    self.assertTrue('Changed' in commit_lines[changed_line - 1])
-    def testAddedDepsInCommitMessage(self):
-        commit_lines = self._CommitMessageSetup()
+  def testAddedDepsInCommitMessage(self):
+    commit_lines = self._CommitMessageSetup()
-        added = '* src/third_party/android_deps/libs/' \
-                'android_arch_lifecycle_common'
-        self.assertTrue(added in commit_lines)
-        # Check it is in adequate section.
-        added_line = commit_lines.index(added)
-        self.assertTrue('Added' in commit_lines[added_line - 1])
+    added = '* src/third_party/android_deps/libs/' \
+            'android_arch_lifecycle_common'
+    self.assertTrue(added in commit_lines)
+    # Check it is in adequate section.
+    added_line = commit_lines.index(added)
+    self.assertTrue('Added' in commit_lines[added_line - 1])
-    def testRemovedDepsInCommitMessage(self):
-        commit_lines = self._CommitMessageSetup()
+  def testRemovedDepsInCommitMessage(self):
+    commit_lines = self._CommitMessageSetup()
-        removed = '* src/third_party/android_deps/libs/' \
-                  'android_arch_lifecycle_runtime'
-        self.assertTrue(removed in commit_lines)
-        # Check it is in adequate section.
-        removed_line = commit_lines.index(removed)
-        self.assertTrue('Removed' in commit_lines[removed_line - 1])
+    removed = '* src/third_party/android_deps/libs/' \
+              'android_arch_lifecycle_runtime'
+    self.assertTrue(removed in commit_lines)
+    # Check it is in adequate section.
+    removed_line = commit_lines.index(removed)
+    self.assertTrue('Removed' in commit_lines[removed_line - 1])
 class TestChooseCQMode(unittest.TestCase):
-    def testSkip(self):
-        self.assertEqual(ChooseCQMode(True, 99, 500000, 500100), 0)
+  def testSkip(self):
+    self.assertEqual(ChooseCQMode(True, 99, 500000, 500100), 0)
-    def testDryRun(self):
-        self.assertEqual(ChooseCQMode(False, 101, 500000, 500100), 1)
+  def testDryRun(self):
+    self.assertEqual(ChooseCQMode(False, 101, 500000, 500100), 1)
-    def testSubmit(self):
-        self.assertEqual(ChooseCQMode(False, 100, 500000, 500100), 2)
+  def testSubmit(self):
+    self.assertEqual(ChooseCQMode(False, 100, 500000, 500100), 2)
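The three expectations above fully pin down the commit-queue contract of roll_deps.ChooseCQMode. A minimal reconstruction from these tests alone (a sketch, not the actual roll_deps implementation; it assumes the return values mean 0 = skip CQ, 1 = CQ dry run, 2 = submit to CQ):

def ChooseCQMode(skip_cq, cq_over, current_commit_pos, new_commit_pos):
  # Inferred from roll_deps_test.py: --skip-cq wins outright, and rolls
  # spanning fewer commits than --cq-over only dry-run the commit queue.
  if skip_cq:
    return 0
  if new_commit_pos - current_commit_pos < cq_over:
    return 1
  return 2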
 def _SetupGitLsRemoteCall(cmd_fake, url, revision):
-    cmd = ['git', 'ls-remote', url, revision]
-    cmd_fake.AddExpectation(cmd, _returns=(revision, None))
+  cmd = ['git', 'ls-remote', url, revision]
+  cmd_fake.AddExpectation(cmd, _returns=(revision, None))
 if __name__ == '__main__':
-    unittest.main()
+  unittest.main()
diff --git a/tools_webrtc/binary_version_check.py b/tools_webrtc/binary_version_check.py
index cc5d96172c..563fe36186 100644
--- a/tools_webrtc/binary_version_check.py
+++ b/tools_webrtc/binary_version_check.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env vpython3
+
 # Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 #
 # Use of this source code is governed by a BSD-style license
@@ -16,19 +18,19 @@ WEBRTC_VERSION_RE = re.compile(
 if __name__ == '__main__':
-    args = sys.argv
-    if len(args) != 2:
-        print('Usage: binary_version_test.py <filename>')
-        exit(1)
-    filename = sys.argv[1]
-    output = subprocess.check_output(['strings', filename])
-    strings_in_binary = output.decode('utf-8').splitlines()
-    for symbol in strings_in_binary:
-        if WEBRTC_VERSION_RE.match(symbol):
-            with open('webrtc_binary_version_check', 'w') as f:
-                f.write(symbol)
-            exit(0)
-    print('WebRTC source timestamp not found in "%s"' % filename)
-    print('Check why "kSourceTimestamp" from call/version.cc is not linked '
-          '(or why it has been optimized away by the compiler/linker)')
-    exit(1)
+  args = sys.argv
+  if len(args) != 2:
+    print('Usage: binary_version_test.py <filename>')
+    sys.exit(1)
+  filename = sys.argv[1]
+  output = subprocess.check_output(['strings', filename])
+  strings_in_binary = output.decode('utf-8').splitlines()
+  for symbol in strings_in_binary:
+    if WEBRTC_VERSION_RE.match(symbol):
+      with open('webrtc_binary_version_check', 'w') as f:
+        f.write(symbol)
+      sys.exit(0)
+  print('WebRTC source timestamp not found in "%s"' % filename)
+  print('Check why "kSourceTimestamp" from call/version.cc is not linked '
+        '(or why it has been optimized away by the compiler/linker)')
+  sys.exit(1)
diff --git a/tools_webrtc/clang_tidy.py b/tools_webrtc/clang_tidy.py
index bce2549aed..b3f738eab3 100755
--- a/tools_webrtc/clang_tidy.py
+++ b/tools_webrtc/clang_tidy.py
@@ -1,4 +1,5 @@
-#!/usr/bin/env python
+#!/usr/bin/env vpython3
+
 # Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 #
 # Use of this source code is governed by a BSD-style license
@@ -20,9 +21,8 @@ import shutil
 import subprocess
 import sys
 import tempfile
-#pylint: disable=relative-import
-from presubmit_checks_lib.build_helpers import GetClangTidyPath, \
-    GetCompilationCommand
+from presubmit_checks_lib.build_helpers import (GetClangTidyPath,
+                                                GetCompilationCommand)
 # We enable all checkers by default for investigation purpose.
 # This includes clang-analyzer-* checks.
@@ -32,66 +32,66 @@ CHECKER_OPTION = '-checks=*'
 def Process(filepath, args):
-    # Build directory is needed to gather compilation flags.
-    # Create a temporary one (instead of reusing an existing one)
-    # to keep the CLI simple and unencumbered.
-    out_dir = tempfile.mkdtemp('clang_tidy')
+  # Build directory is needed to gather compilation flags.
+  # Create a temporary one (instead of reusing an existing one)
+  # to keep the CLI simple and unencumbered.
+  out_dir = tempfile.mkdtemp('clang_tidy')
-    try:
-        gn_args = []  # Use default build.
-        command = GetCompilationCommand(filepath, gn_args, out_dir)
+  try:
+    gn_args = []  # Use default build.
+    command = GetCompilationCommand(filepath, gn_args, out_dir)
-        # Remove warning flags. They aren't needed and they cause trouble
-        # when clang-tidy doesn't match most recent clang.
-        # Same battle for -f (e.g. -fcomplete-member-pointers).
-        command = [
-            arg for arg in command
-            if not (arg.startswith('-W') or arg.startswith('-f'))
-        ]
+    # Remove warning flags. They aren't needed and they cause trouble
+    # when clang-tidy doesn't match most recent clang.
+    # Same battle for -f (e.g. -fcomplete-member-pointers).
+    command = [
+        arg for arg in command
+        if not (arg.startswith('-W') or arg.startswith('-f'))
+    ]
-        # Path from build dir.
-        rel_path = os.path.relpath(os.path.abspath(filepath), out_dir)
+    # Path from build dir.
+ rel_path = os.path.relpath(os.path.abspath(filepath), out_dir) - # Replace clang++ by clang-tidy - command[0:1] = [GetClangTidyPath(), CHECKER_OPTION, rel_path - ] + args + ['--'] # Separator for clang flags. - print "Running: %s" % ' '.join(command) - # Run from build dir so that relative paths are correct. - p = subprocess.Popen(command, - cwd=out_dir, - stdout=sys.stdout, - stderr=sys.stderr) - p.communicate() - return p.returncode - finally: - shutil.rmtree(out_dir, ignore_errors=True) + # Replace clang++ by clang-tidy + command[0:1] = [GetClangTidyPath(), CHECKER_OPTION, rel_path + ] + args + ['--'] # Separator for clang flags. + print("Running: %s" % ' '.join(command)) + # Run from build dir so that relative paths are correct. + p = subprocess.Popen(command, + cwd=out_dir, + stdout=sys.stdout, + stderr=sys.stderr) + p.communicate() + return p.returncode + finally: + shutil.rmtree(out_dir, ignore_errors=True) def ValidateCC(filepath): - """We can only analyze .cc files. Provide explicit message about that.""" - if filepath.endswith('.cc'): - return filepath - msg = ('%s not supported.\n' - 'For now, we can only analyze translation units (.cc files).' % - filepath) - raise argparse.ArgumentTypeError(msg) + """We can only analyze .cc files. Provide explicit message about that.""" + if filepath.endswith('.cc'): + return filepath + msg = ('%s not supported.\n' + 'For now, we can only analyze translation units (.cc files).' % + filepath) + raise argparse.ArgumentTypeError(msg) def Main(): - description = ( - "Run clang-tidy on single cc file.\n" - "Use flags, defines and include paths as in default debug build.\n" - "WARNING, this is a POC version with rough edges.") - parser = argparse.ArgumentParser(description=description) - parser.add_argument('filepath', - help='Specifies the path of the .cc file to analyze.', - type=ValidateCC) - parser.add_argument('args', - nargs=argparse.REMAINDER, - help='Arguments passed to clang-tidy') - parsed_args = parser.parse_args() - return Process(parsed_args.filepath, parsed_args.args) + description = ( + "Run clang-tidy on single cc file.\n" + "Use flags, defines and include paths as in default debug build.\n" + "WARNING, this is a POC version with rough edges.") + parser = argparse.ArgumentParser(description=description) + parser.add_argument('filepath', + help='Specifies the path of the .cc file to analyze.', + type=ValidateCC) + parser.add_argument('args', + nargs=argparse.REMAINDER, + help='Arguments passed to clang-tidy') + parsed_args = parser.parse_args() + return Process(parsed_args.filepath, parsed_args.args) if __name__ == '__main__': - sys.exit(Main()) + sys.exit(Main()) diff --git a/tools_webrtc/coverage/generate_coverage_command.py b/tools_webrtc/coverage/generate_coverage_command.py index 7c701f8b55..650513ef1d 100644 --- a/tools_webrtc/coverage/generate_coverage_command.py +++ b/tools_webrtc/coverage/generate_coverage_command.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. 
# # Use of this source code is governed by a BSD-style license @@ -27,21 +28,20 @@ TESTS = [ def main(): - cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS + - ['-b out/coverage', '-o out/report'] + - ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] + - ['-c \'out/coverage/%s\'' % t for t in TESTS]) + cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS + + ['-b out/coverage', '-o out/report'] + + ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] + + ['-c \'out/coverage/%s\'' % t for t in TESTS]) - def WithXvfb(binary): - return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, binary) + def WithXvfb(binary): + return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, binary) - modules_unittests = 'out/coverage/modules_unittests' - cmd[cmd.index('-c \'%s\'' % - modules_unittests)] = WithXvfb(modules_unittests) + modules_unittests = 'out/coverage/modules_unittests' + cmd[cmd.index('-c \'%s\'' % modules_unittests)] = WithXvfb(modules_unittests) - print ' '.join(cmd) - return 0 + print(' '.join(cmd)) + return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/coverage/generate_ios_coverage_command.py b/tools_webrtc/coverage/generate_ios_coverage_command.py index e51bfa7206..249d8ce2a5 100644 --- a/tools_webrtc/coverage/generate_ios_coverage_command.py +++ b/tools_webrtc/coverage/generate_ios_coverage_command.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -46,7 +47,6 @@ if os.path.exists(binary_path): ========== ENDING OF PATCH ========== """ - import sys DIRECTORY = 'out/coverage' @@ -77,89 +77,89 @@ XC_TESTS = [ def FormatIossimTest(test_name, is_xctest=False): - args = ['%s/%s.app' % (DIRECTORY, test_name)] - if is_xctest: - args += ['%s/%s_module.xctest' % (DIRECTORY, test_name)] + args = ['%s/%s.app' % (DIRECTORY, test_name)] + if is_xctest: + args += ['%s/%s_module.xctest' % (DIRECTORY, test_name)] - return '-c \'%s/iossim %s\'' % (DIRECTORY, ' '.join(args)) + return '-c \'%s/iossim %s\'' % (DIRECTORY, ' '.join(args)) def GetGNArgs(is_simulator): - target_cpu = 'x64' if is_simulator else 'arm64' - return ([] + ['target_os="ios"'] + ['target_cpu="%s"' % target_cpu] + - ['use_clang_coverage=true'] + ['is_component_build=false'] + - ['dcheck_always_on=true']) + target_cpu = 'x64' if is_simulator else 'arm64' + return ([] + ['target_os="ios"'] + ['target_cpu="%s"' % target_cpu] + + ['use_clang_coverage=true'] + ['is_component_build=false'] + + ['dcheck_always_on=true']) def GenerateIOSSimulatorCommand(): - gn_args_string = ' '.join(GetGNArgs(is_simulator=True)) - gn_cmd = ['gn', 'gen', DIRECTORY, '--args=\'%s\'' % gn_args_string] + gn_args_string = ' '.join(GetGNArgs(is_simulator=True)) + gn_cmd = ['gn', 'gen', DIRECTORY, '--args=\'%s\'' % gn_args_string] - coverage_cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + - ["%s.app" % t for t in XC_TESTS + TESTS] + - ['-b %s' % DIRECTORY, '-o out/report'] + - ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] + - [FormatIossimTest(t, is_xctest=True) for t in XC_TESTS] + - [FormatIossimTest(t, is_xctest=False) for t in TESTS]) + coverage_cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + + ["%s.app" % t for t in XC_TESTS + TESTS] + + ['-b %s' % DIRECTORY, '-o out/report'] + + ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] + + [FormatIossimTest(t, is_xctest=True) for t in XC_TESTS] + + [FormatIossimTest(t, 
is_xctest=False) for t in TESTS]) - print 'To get code coverage using iOS sim just run following commands:' - print '' - print ' '.join(gn_cmd) - print '' - print ' '.join(coverage_cmd) - return 0 + print('To get code coverage using iOS sim just run following commands:') + print('') + print(' '.join(gn_cmd)) + print('') + print(' '.join(coverage_cmd)) + return 0 def GenerateIOSDeviceCommand(): - gn_args_string = ' '.join(GetGNArgs(is_simulator=False)) + gn_args_string = ' '.join(GetGNArgs(is_simulator=False)) - coverage_report_cmd = ( - [sys.executable, 'tools/code_coverage/coverage.py'] + - ['%s.app' % t for t in TESTS] + ['-b %s' % DIRECTORY] + - ['-o out/report'] + ['-p %s/merged.profdata' % DIRECTORY] + - ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\'']) + coverage_report_cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + + ['%s.app' % t for t in TESTS] + ['-b %s' % DIRECTORY] + + ['-o out/report'] + + ['-p %s/merged.profdata' % DIRECTORY] + + ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\'']) - print 'Computing code coverage for real iOS device is a little bit tedious.' - print '' - print 'You will need:' - print '' - print '1. Generate xcode project and open it with Xcode 10+:' - print ' gn gen %s --ide=xcode --args=\'%s\'' % (DIRECTORY, gn_args_string) - print ' open %s/all.xcworkspace' % DIRECTORY - print '' - print '2. Execute these Run targets manually with Xcode Run button and ' - print 'manually save generated coverage.profraw file to %s:' % DIRECTORY - print '\n'.join('- %s' % t for t in TESTS) - print '' - print '3. Execute these Test targets manually with Xcode Test button and ' - print 'manually save generated coverage.profraw file to %s:' % DIRECTORY - print '\n'.join('- %s' % t for t in XC_TESTS) - print '' - print '4. Merge *.profraw files to *.profdata using llvm-profdata tool:' - print(' build/mac_files/Xcode.app/Contents/Developer/Toolchains/' + - 'XcodeDefault.xctoolchain/usr/bin/llvm-profdata merge ' + - '-o %s/merged.profdata ' % DIRECTORY + - '-sparse=true %s/*.profraw' % DIRECTORY) - print '' - print '5. Generate coverage report:' - print ' ' + ' '.join(coverage_report_cmd) - return 0 + print('Computing code coverage for real iOS device is a little bit tedious.') + print('') + print('You will need:') + print('') + print('1. Generate xcode project and open it with Xcode 10+:') + print(' gn gen %s --ide=xcode --args=\'%s\'' % (DIRECTORY, gn_args_string)) + print(' open %s/all.xcworkspace' % DIRECTORY) + print('') + print('2. Execute these Run targets manually with Xcode Run button and ') + print('manually save generated coverage.profraw file to %s:' % DIRECTORY) + print('\n'.join('- %s' % t for t in TESTS)) + print('') + print('3. Execute these Test targets manually with Xcode Test button and ') + print('manually save generated coverage.profraw file to %s:' % DIRECTORY) + print('\n'.join('- %s' % t for t in XC_TESTS)) + print('') + print('4. Merge *.profraw files to *.profdata using llvm-profdata tool:') + print((' build/mac_files/Xcode.app/Contents/Developer/Toolchains/' + + 'XcodeDefault.xctoolchain/usr/bin/llvm-profdata merge ' + + '-o %s/merged.profdata ' % DIRECTORY + + '-sparse=true %s/*.profraw' % DIRECTORY)) + print('') + print('5. 
Generate coverage report:')
+  print(' ' + ' '.join(coverage_report_cmd))
+  return 0
-def Main():
-    if len(sys.argv) < 2:
-        print 'Please specify type of coverage:'
-        print ' %s simulator' % sys.argv[0]
-        print ' %s device' % sys.argv[0]
-    elif sys.argv[1] == 'simulator':
-        GenerateIOSSimulatorCommand()
-    elif sys.argv[1] == 'device':
-        GenerateIOSDeviceCommand()
-    else:
-        print 'Unsupported type of coverage'
+def main():
+  if len(sys.argv) < 2:
+    print('Please specify type of coverage:')
+    print(' %s simulator' % sys.argv[0])
+    print(' %s device' % sys.argv[0])
+  elif sys.argv[1] == 'simulator':
+    GenerateIOSSimulatorCommand()
+  elif sys.argv[1] == 'device':
+    GenerateIOSDeviceCommand()
+  else:
+    print('Unsupported type of coverage')
-    return 0
+  return 0
 if __name__ == '__main__':
-    sys.exit(Main())
+  sys.exit(main())
diff --git a/tools_webrtc/cpu/cpu_mon.py b/tools_webrtc/cpu/cpu_mon.py
index d89935aeab..9c25fbd088 100644
--- a/tools_webrtc/cpu/cpu_mon.py
+++ b/tools_webrtc/cpu/cpu_mon.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env vpython3
 #
 # Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 #
@@ -8,76 +8,76 @@
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
-import psutil
 import sys
+import psutil
 import numpy
 from matplotlib import pyplot
-class CpuSnapshot(object):
-    def __init__(self, label):
-        self.label = label
-        self.samples = []
+class CpuSnapshot:
+  def __init__(self, label):
+    self.label = label
+    self.samples = []
-    def Capture(self, sample_count):
-        print('Capturing %d CPU samples for %s...' %
-              ((sample_count - len(self.samples)), self.label))
-        while len(self.samples) < sample_count:
-            self.samples.append(psutil.cpu_percent(1.0, False))
+  def Capture(self, sample_count):
+    print('Capturing %d CPU samples for %s...' %
+          ((sample_count - len(self.samples)), self.label))
+    while len(self.samples) < sample_count:
+      self.samples.append(psutil.cpu_percent(1.0, False))
-    def Text(self):
-        return ('%s: avg=%s, median=%s, min=%s, max=%s' %
-                (self.label, numpy.average(self.samples),
-                 numpy.median(self.samples), numpy.min(
-                     self.samples), numpy.max(self.samples)))
+  def Text(self):
+    return (
+        '%s: avg=%s, median=%s, min=%s, max=%s' %
+        (self.label, numpy.average(self.samples), numpy.median(
+            self.samples), numpy.min(self.samples), numpy.max(self.samples)))
-    def Max(self):
-        return numpy.max(self.samples)
+  def Max(self):
+    return numpy.max(self.samples)
 def GrabCpuSamples(sample_count):
-    print 'Label for snapshot (enter to quit): '
-    label = raw_input().strip()
-    if len(label) == 0:
-        return None
+  print('Label for snapshot (enter to quit): ')
+  label = input().strip()
+  if len(label) == 0:
+    return None
-    snapshot = CpuSnapshot(label)
-    snapshot.Capture(sample_count)
-    return snapshot
+  snapshot = CpuSnapshot(label)
+  snapshot.Capture(sample_count)
+  return snapshot
 def main():
-    print 'How many seconds to capture per snapshot (enter for 60)?'
-    sample_count = raw_input().strip()
-    if len(sample_count) > 0 and int(sample_count) > 0:
-        sample_count = int(sample_count)
-    else:
-        print 'Defaulting to 60 samples.'
-        sample_count = 60
+  print('How many seconds to capture per snapshot (enter for 60)?')
+  sample_count = input().strip()
+  if len(sample_count) > 0 and int(sample_count) > 0:
+    sample_count = int(sample_count)
+  else:
+    print('Defaulting to 60 samples.')
+    sample_count = 60
-    snapshots = []
-    while True:
-        snapshot = GrabCpuSamples(sample_count)
-        if snapshot is None:
-            break
-        snapshots.append(snapshot)
+  snapshots = []
+  while True:
+    snapshot = GrabCpuSamples(sample_count)
+    if snapshot is None:
+      break
+    snapshots.append(snapshot)
-    if len(snapshots) == 0:
-        print 'no samples captured'
-        return -1
+  if len(snapshots) == 0:
+    print('no samples captured')
+    return -1
-    pyplot.title('CPU usage')
+  pyplot.title('CPU usage')
-    for s in snapshots:
-        pyplot.plot(s.samples, label=s.Text(), linewidth=2)
+  for s in snapshots:
+    pyplot.plot(s.samples, label=s.Text(), linewidth=2)
-    pyplot.legend()
+  pyplot.legend()
-    pyplot.show()
-    return 0
+  pyplot.show()
+  return 0
 if __name__ == '__main__':
-    sys.exit(main())
+  sys.exit(main())
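One hunk above deserves a remark: py2 raw_input() maps to plain input() in Python 3, while converters that go through py2 input() semantics (as 2to3 does for a bare py2 input()) emit eval(input()), which executes whatever the operator types at the prompt and raises SyntaxError on an empty reply. The prompts above therefore use bare input(). A minimal sketch of the safe pattern (ReadPrompt is an illustrative helper, not part of this CL):

def ReadPrompt(message, default=''):
  # Python 3: input() already returns a str; wrapping it in eval() would
  # execute the reply as Python code.
  reply = input(message).strip()
  return reply if reply else default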
diff --git a/tools_webrtc/download_tools.py b/tools_webrtc/download_tools.py
index 62602dea59..16d7e5f3f0 100755
--- a/tools_webrtc/download_tools.py
+++ b/tools_webrtc/download_tools.py
@@ -1,4 +1,5 @@
-#!/usr/bin/env python
+#!/usr/bin/env vpython3
+
 # Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 #
 # Use of this source code is governed by a BSD-style license
@@ -27,34 +28,34 @@ import subprocess2
 def main(directories):
-    if not directories:
-        directories = [SCRIPT_DIR]
+  if not directories:
+    directories = [SCRIPT_DIR]
-    for path in directories:
-        cmd = [
-            sys.executable,
-            os.path.join(find_depot_tools.DEPOT_TOOLS_PATH,
-                         'download_from_google_storage.py'),
-            '--directory',
-            '--num_threads=10',
-            '--bucket',
-            'chrome-webrtc-resources',
-            '--auto_platform',
-            '--recursive',
-            path,
-        ]
-        print 'Downloading precompiled tools...'
+  for path in directories:
+    cmd = [
+        sys.executable,
+        os.path.join(find_depot_tools.DEPOT_TOOLS_PATH,
+                     'download_from_google_storage.py'),
+        '--directory',
+        '--num_threads=10',
+        '--bucket',
+        'chrome-webrtc-resources',
+        '--auto_platform',
+        '--recursive',
+        path,
+    ]
+    print('Downloading precompiled tools...')
-        # Perform download similar to how gclient hooks execute.
-        try:
-            gclient_utils.CheckCallAndFilter(cmd,
-                                             cwd=SRC_DIR,
-                                             always_show_header=True)
-        except (gclient_utils.Error, subprocess2.CalledProcessError) as e:
-            print 'Error: %s' % str(e)
-            return 2
-    return 0
+    # Perform download similar to how gclient hooks execute.
+    try:
+      gclient_utils.CheckCallAndFilter(cmd,
+                                       cwd=SRC_DIR,
+                                       always_show_header=True)
+    except (gclient_utils.Error, subprocess2.CalledProcessError) as e:
+      print('Error: %s' % str(e))
+      return 2
+  return 0
 if __name__ == '__main__':
-    sys.exit(main(sys.argv[1:]))
+  sys.exit(main(sys.argv[1:]))
diff --git a/tools_webrtc/ensure_webcam_is_running.py b/tools_webrtc/ensure_webcam_is_running.py
index 0856938549..4428d79bd8 100755
--- a/tools_webrtc/ensure_webcam_is_running.py
+++ b/tools_webrtc/ensure_webcam_is_running.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env vpython3
+
 # Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 #
 # Use of this source code is governed by a BSD-style license
@@ -26,10 +27,10 @@ If any command line arguments are passed to the script, it is executed as a
 command in a subprocess.
 """
-# psutil is not installed on non-Linux machines by default.
-import psutil # pylint: disable=F0401 import subprocess import sys +# psutil is not installed on non-Linux machines by default. +import psutil # pylint: disable=F0401 WEBCAM_WIN = ('schtasks', '/run', '/tn', 'ManyCam') WEBCAM_MAC = ('open', '/Applications/ManyCam/ManyCam.app') @@ -81,7 +82,7 @@ def StartWebCam(): def _ForcePythonInterpreter(cmd): """Returns the fixed command line to call the right python executable.""" out = cmd[:] - if out[0] == 'python': + if out[0] == 'vpython3': out[0] = sys.executable elif out[0].endswith('.py'): out.insert(0, sys.executable) @@ -95,8 +96,7 @@ def Main(argv): if argv: return subprocess.call(_ForcePythonInterpreter(argv)) - else: - return 0 + return 0 if __name__ == '__main__': diff --git a/tools_webrtc/executable_host_build.py b/tools_webrtc/executable_host_build.py index aac4be0b4b..e8c9aeb583 100644 --- a/tools_webrtc/executable_host_build.py +++ b/tools_webrtc/executable_host_build.py @@ -1,4 +1,4 @@ -#!/usr/bin/env/python +#!/usr/bin/env vpython3 # Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. # @@ -24,7 +24,7 @@ following executable in your out folder: You will be able to compile the same executable targeting your host machine by running: - $ python tools_webrtc/executable_host_build.py --executable_name random_exec + $ vpython3 tools_webrtc/executable_host_build.py --executable_name random_exec The generated executable will have the same name as the input executable with suffix '_host'. @@ -62,40 +62,39 @@ import find_depot_tools def _ParseArgs(): - desc = 'Generates a GN executable targeting the host machine.' - parser = argparse.ArgumentParser(description=desc) - parser.add_argument('--executable_name', - required=True, - help='Name of the executable to build') - args = parser.parse_args() - return args + desc = 'Generates a GN executable targeting the host machine.' 
+ parser = argparse.ArgumentParser(description=desc) + parser.add_argument('--executable_name', + required=True, + help='Name of the executable to build') + args = parser.parse_args() + return args @contextmanager def HostBuildDir(): - temp_dir = tempfile.mkdtemp() - try: - yield temp_dir - finally: - shutil.rmtree(temp_dir) + temp_dir = tempfile.mkdtemp() + try: + yield temp_dir + finally: + shutil.rmtree(temp_dir) def _RunCommand(argv, cwd=SRC_DIR, **kwargs): - with open(os.devnull, 'w') as devnull: - subprocess.check_call(argv, cwd=cwd, stdout=devnull, **kwargs) + with open(os.devnull, 'w') as devnull: + subprocess.check_call(argv, cwd=cwd, stdout=devnull, **kwargs) def DepotToolPath(*args): - return os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, *args) + return os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, *args) if __name__ == '__main__': - ARGS = _ParseArgs() - EXECUTABLE_TO_BUILD = ARGS.executable_name - EXECUTABLE_FINAL_NAME = ARGS.executable_name + '_host' - with HostBuildDir() as build_dir: - _RunCommand([sys.executable, DepotToolPath('gn.py'), 'gen', build_dir]) - _RunCommand( - [DepotToolPath('ninja'), '-C', build_dir, EXECUTABLE_TO_BUILD]) - shutil.copy(os.path.join(build_dir, EXECUTABLE_TO_BUILD), - EXECUTABLE_FINAL_NAME) + ARGS = _ParseArgs() + EXECUTABLE_TO_BUILD = ARGS.executable_name + EXECUTABLE_FINAL_NAME = ARGS.executable_name + '_host' + with HostBuildDir() as build_dir: + _RunCommand([sys.executable, DepotToolPath('gn.py'), 'gen', build_dir]) + _RunCommand([DepotToolPath('ninja'), '-C', build_dir, EXECUTABLE_TO_BUILD]) + shutil.copy(os.path.join(build_dir, EXECUTABLE_TO_BUILD), + EXECUTABLE_FINAL_NAME) diff --git a/tools_webrtc/flags_compatibility.py b/tools_webrtc/flags_compatibility.py index bdafefb7a7..e7aeb93faf 100755 --- a/tools_webrtc/flags_compatibility.py +++ b/tools_webrtc/flags_compatibility.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. # @@ -36,10 +36,11 @@ def main(): def _ForcePythonInterpreter(cmd): """Returns the fixed command line to call the right python executable.""" out = cmd[:] - if out[0] == 'python': - out[0] = sys.executable - elif out[0].endswith('.py'): - out.insert(0, sys.executable) + if len(out) > 0: + if out[0] == 'python': + out[0] = sys.executable + elif out[0].endswith('.py'): + out.insert(0, sys.executable) return out diff --git a/tools_webrtc/get_landmines.py b/tools_webrtc/get_landmines.py index b80a360f7c..42dd8b7bfc 100755 --- a/tools_webrtc/get_landmines.py +++ b/tools_webrtc/get_landmines.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python3 +#!/usr/bin/env vpython3 + # Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -11,9 +12,6 @@ This file emits the list of reasons why a particular build needs to be clobbered (or a list of 'landmines'). """ -from __future__ import absolute_import -from __future__ import print_function - import os import sys @@ -26,46 +24,45 @@ host_os = landmine_utils.host_os # pylint: disable=invalid-name def print_landmines(): # pylint: disable=invalid-name - """ + """ ALL LANDMINES ARE EMITTED FROM HERE. """ - # DO NOT add landmines as part of a regular CL. Landmines are a last-effort - # bandaid fix if a CL that got landed has a build dependency bug and all - # bots need to be cleaned up. If you're writing a new CL that causes build - # dependency problems, fix the dependency problems instead of adding a - # landmine. 
- # See the Chromium version in src/build/get_landmines.py for usage examples. - print('Clobber to remove out/{Debug,Release}/args.gn (webrtc:5070)') - if host_os() == 'win': - print('Clobber to resolve some issues with corrupt .pdb files on bots.') - print('Clobber due to corrupt .pdb files (after #14623)') - print( - 'Clobber due to Win 64-bit Debug linking error (crbug.com/668961)') - print('Clobber due to Win Clang Debug linking errors in ' - 'https://codereview.webrtc.org/2786603002') - print('Clobber due to Win Debug linking errors in ' - 'https://codereview.webrtc.org/2832063003/') - print('Clobber win x86 bots (issues with isolated files).') - if host_os() == 'mac': - print('Clobber due to iOS compile errors (crbug.com/694721)') - print('Clobber to unblock https://codereview.webrtc.org/2709573003') - print('Clobber to fix https://codereview.webrtc.org/2709573003 after ' - 'landing') - print('Clobber to fix https://codereview.webrtc.org/2767383005 before' - 'landing (changing rtc_executable -> rtc_test on iOS)') - print('Clobber to fix https://codereview.webrtc.org/2767383005 before' - 'landing (changing rtc_executable -> rtc_test on iOS)') - print('Another landmine for low_bandwidth_audio_test (webrtc:7430)') - print('Clobber to change neteq_rtpplay type to executable') - print('Clobber to remove .xctest files.') - print('Clobber to remove .xctest files (take 2).') - print('Switching rtc_executable to rtc_test') + # DO NOT add landmines as part of a regular CL. Landmines are a last-effort + # bandaid fix if a CL that got landed has a build dependency bug and all + # bots need to be cleaned up. If you're writing a new CL that causes build + # dependency problems, fix the dependency problems instead of adding a + # landmine. + # See the Chromium version in src/build/get_landmines.py for usage examples. 
+ print('Clobber to remove out/{Debug,Release}/args.gn (webrtc:5070)') + if host_os() == 'win': + print('Clobber to resolve some issues with corrupt .pdb files on bots.') + print('Clobber due to corrupt .pdb files (after #14623)') + print('Clobber due to Win 64-bit Debug linking error (crbug.com/668961)') + print('Clobber due to Win Clang Debug linking errors in ' + 'https://codereview.webrtc.org/2786603002') + print('Clobber due to Win Debug linking errors in ' + 'https://codereview.webrtc.org/2832063003/') + print('Clobber win x86 bots (issues with isolated files).') + if host_os() == 'mac': + print('Clobber due to iOS compile errors (crbug.com/694721)') + print('Clobber to unblock https://codereview.webrtc.org/2709573003') + print('Clobber to fix https://codereview.webrtc.org/2709573003 after ' + 'landing') + print('Clobber to fix https://codereview.webrtc.org/2767383005 before' + 'landing (changing rtc_executable -> rtc_test on iOS)') + print('Clobber to fix https://codereview.webrtc.org/2767383005 before' + 'landing (changing rtc_executable -> rtc_test on iOS)') + print('Another landmine for low_bandwidth_audio_test (webrtc:7430)') + print('Clobber to change neteq_rtpplay type to executable') + print('Clobber to remove .xctest files.') + print('Clobber to remove .xctest files (take 2).') + print('Switching rtc_executable to rtc_test') def main(): - print_landmines() - return 0 + print_landmines() + return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/gn_check_autofix.py b/tools_webrtc/gn_check_autofix.py index 282dc4fc0f..f55d125e32 100644 --- a/tools_webrtc/gn_check_autofix.py +++ b/tools_webrtc/gn_check_autofix.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. # @@ -14,9 +14,9 @@ It will run `mb gen` in a temporary directory and it is really useful to check for different configurations. 
 Usage:
-    $ python tools_webrtc/gn_check_autofix.py -m some_master -b some_bot
+  $ vpython3 tools_webrtc/gn_check_autofix.py -m some_master -b some_bot
 or
-    $ python tools_webrtc/gn_check_autofix.py -c some_mb_config
+  $ vpython3 tools_webrtc/gn_check_autofix.py -c some_mb_config
 """
 import os
@@ -38,70 +38,69 @@ TARGET_RE = re.compile(
     r'(?P<indentation_level>\s*)\w*\("(?P<target_name>\w*)"\) {$')
-class TemporaryDirectory(object):
-    def __init__(self):
-        self._closed = False
-        self._name = None
-        self._name = tempfile.mkdtemp()
+class TemporaryDirectory:
+  def __init__(self):
+    self._closed = False
+    self._name = None
+    self._name = tempfile.mkdtemp()
-    def __enter__(self):
-        return self._name
+  def __enter__(self):
+    return self._name
-    def __exit__(self, exc, value, _tb):
-        if self._name and not self._closed:
-            shutil.rmtree(self._name)
-            self._closed = True
+  def __exit__(self, exc, value, _tb):
+    if self._name and not self._closed:
+      shutil.rmtree(self._name)
+      self._closed = True
 def Run(cmd):
-    print 'Running:', ' '.join(cmd)
-    sub = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    return sub.communicate()
+  print('Running:', ' '.join(cmd))
+  sub = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+  return sub.communicate()
 def FixErrors(filename, missing_deps, deleted_sources):
-    with open(filename) as f:
-        lines = f.readlines()
+  with open(filename) as f:
+    lines = f.readlines()
-    fixed_file = ''
-    indentation_level = None
-    for line in lines:
-        match = TARGET_RE.match(line)
-        if match:
-            target = match.group('target_name')
-            if target in missing_deps:
-                indentation_level = match.group('indentation_level')
-        elif indentation_level is not None:
-            match = re.match(indentation_level + '}$', line)
-            if match:
-                line = ('deps = [\n' + ''.join(' "' + dep + '",\n'
-                                               for dep in missing_deps[target])
-                        + ']\n') + line
-                indentation_level = None
-            elif line.strip().startswith('deps'):
-                is_empty_deps = line.strip() == 'deps = []'
-                line = 'deps = [\n' if is_empty_deps else line
-                line += ''.join(' "' + dep + '",\n'
-                                for dep in missing_deps[target])
-                line += ']\n' if is_empty_deps else ''
-                indentation_level = None
+  fixed_file = ''
+  indentation_level = None
+  for line in lines:
+    match = TARGET_RE.match(line)
+    if match:
+      target = match.group('target_name')
+      if target in missing_deps:
+        indentation_level = match.group('indentation_level')
+    elif indentation_level is not None:
+      match = re.match(indentation_level + '}$', line)
+      if match:
+        line = ('deps = [\n' + ''.join(' "' + dep + '",\n'
+                                       for dep in missing_deps[target])
+                + ']\n') + line
+        indentation_level = None
+      elif line.strip().startswith('deps'):
+        is_empty_deps = line.strip() == 'deps = []'
+        line = 'deps = [\n' if is_empty_deps else line
+        line += ''.join(' "' + dep + '",\n' for dep in missing_deps[target])
+        line += ']\n' if is_empty_deps else ''
+        indentation_level = None
-        if line.strip() not in deleted_sources:
-            fixed_file += line
+    if line.strip() not in deleted_sources:
+      fixed_file += line
-    with open(filename, 'w') as f:
-        f.write(fixed_file)
+  with open(filename, 'w') as f:
+    f.write(fixed_file)
-    Run(['gn', 'format', filename])
+  Run(['gn', 'format', filename])
 def FirstNonEmpty(iterable):
-    """Return first item which evaluates to True, or fallback to None."""
-    return next((x for x in iterable if x), None)
+  """Return first item which evaluates to True, or fallback to None."""
+  return next((x for x in iterable if x), None)
 def Rebase(base_path, dependency_path, dependency):
-    """Adapt paths so they work both in stand-alone WebRTC and Chromium tree.
+  """Adapt paths so they work both in stand-alone WebRTC and Chromium tree.
  To cope with varying top-level directory (WebRTC VS Chromium), we use:
   * relative paths for WebRTC modules.
@@ -118,82 +117,81 @@ def Rebase(base_path, dependency_path, dependency):
     Full target path (E.g. '../rtc_base/time:timestamp_extrapolator').
   """
-    root = FirstNonEmpty(dependency_path.split('/'))
-    if root in CHROMIUM_DIRS:
-        # Chromium paths must remain absolute. E.g. //third_party//abseil-cpp...
-        rebased = dependency_path
-    else:
-        base_path = base_path.split(os.path.sep)
-        dependency_path = dependency_path.split(os.path.sep)
+  root = FirstNonEmpty(dependency_path.split('/'))
+  if root in CHROMIUM_DIRS:
+    # Chromium paths must remain absolute. E.g. //third_party//abseil-cpp...
+    rebased = dependency_path
+  else:
+    base_path = base_path.split(os.path.sep)
+    dependency_path = dependency_path.split(os.path.sep)
-        first_difference = None
-        shortest_length = min(len(dependency_path), len(base_path))
-        for i in range(shortest_length):
-            if dependency_path[i] != base_path[i]:
-                first_difference = i
-                break
+    first_difference = None
+    shortest_length = min(len(dependency_path), len(base_path))
+    for i in range(shortest_length):
+      if dependency_path[i] != base_path[i]:
+        first_difference = i
+        break
-        first_difference = first_difference or shortest_length
-        base_path = base_path[first_difference:]
-        dependency_path = dependency_path[first_difference:]
-        rebased = os.path.sep.join((['..'] * len(base_path)) + dependency_path)
-    return rebased + ':' + dependency
+    first_difference = first_difference or shortest_length
+    base_path = base_path[first_difference:]
+    dependency_path = dependency_path[first_difference:]
+    rebased = os.path.sep.join((['..'] * len(base_path)) + dependency_path)
+  return rebased + ':' + dependency
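As a quick illustration of Rebase (hypothetical GN paths, not part of this CL; it assumes a POSIX os.path.sep and that 'third_party' is listed in CHROMIUM_DIRS while 'rtc_base' is not), the docstring's example output falls out of the relative branch, while Chromium-rooted paths pass through unchanged:

# WebRTC-internal dependency: rewritten relative to the depending BUILD.gn.
assert Rebase('//audio', '//rtc_base/time', 'timestamp_extrapolator') == \
    '../rtc_base/time:timestamp_extrapolator'
# Chromium root: kept absolute.
assert Rebase('//audio', '//third_party/abseil-cpp', 'absl') == \
    '//third_party/abseil-cpp:absl'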
 def main():
-    deleted_sources = set()
-    errors_by_file = defaultdict(lambda: defaultdict(set))
+  deleted_sources = set()
+  errors_by_file = defaultdict(lambda: defaultdict(set))
-    with TemporaryDirectory() as tmp_dir:
-        mb_script_path = os.path.join(SCRIPT_DIR, 'mb', 'mb.py')
-        mb_config_file_path = os.path.join(SCRIPT_DIR, 'mb', 'mb_config.pyl')
-        mb_gen_command = ([
-            mb_script_path,
-            'gen',
-            tmp_dir,
-            '--config-file',
-            mb_config_file_path,
-        ] + sys.argv[1:])
+  with TemporaryDirectory() as tmp_dir:
+    mb_script_path = os.path.join(SCRIPT_DIR, 'mb', 'mb.py')
+    mb_config_file_path = os.path.join(SCRIPT_DIR, 'mb', 'mb_config.pyl')
+    mb_gen_command = ([
+        mb_script_path,
+        'gen',
+        tmp_dir,
+        '--config-file',
+        mb_config_file_path,
+    ] + sys.argv[1:])
-        mb_output = Run(mb_gen_command)
-        errors = mb_output[0].split('ERROR')[1:]
+    mb_output = Run(mb_gen_command)
+    errors = mb_output[0].decode('utf-8').split('ERROR')[1:]
-        if mb_output[1]:
-            print mb_output[1]
-            return 1
+    if mb_output[1]:
+      print(mb_output[1].decode('utf-8'))
+      return 1
-        for error in errors:
-            error = error.splitlines()
-            target_msg = 'The target:'
-            if target_msg not in error:
-                target_msg = 'It is not in any dependency of'
-            if target_msg not in error:
-                print '\n'.join(error)
-                continue
-            index = error.index(target_msg) + 1
-            path, target = error[index].strip().split(':')
-            if error[index + 1] in ('is including a file from the target:',
-                                    'The include file is in the target(s):'):
-                dep = error[index + 2].strip()
-                dep_path, dep = dep.split(':')
-                dep = Rebase(path, dep_path, dep)
-                # Replacing /target:target with /target
-                dep = re.sub(r'/(\w+):(\1)$', r'/\1', dep)
-                path = 
os.path.join(path[2:], 'BUILD.gn') - errors_by_file[path][target].add(dep) - elif error[index + 1] == 'has a source file:': - deleted_file = '"' + os.path.basename( - error[index + 2].strip()) + '",' - deleted_sources.add(deleted_file) - else: - print '\n'.join(error) - continue + for error in errors: + error = error.splitlines() + target_msg = 'The target:' + if target_msg not in error: + target_msg = 'It is not in any dependency of' + if target_msg not in error: + print('\n'.join(error)) + continue + index = error.index(target_msg) + 1 + path, target = error[index].strip().split(':') + if error[index + 1] in ('is including a file from the target:', + 'The include file is in the target(s):'): + dep = error[index + 2].strip() + dep_path, dep = dep.split(':') + dep = Rebase(path, dep_path, dep) + # Replacing /target:target with /target + dep = re.sub(r'/(\w+):(\1)$', r'/\1', dep) + path = os.path.join(path[2:], 'BUILD.gn') + errors_by_file[path][target].add(dep) + elif error[index + 1] == 'has a source file:': + deleted_file = '"' + os.path.basename(error[index + 2].strip()) + '",' + deleted_sources.add(deleted_file) + else: + print('\n'.join(error)) + continue - for path, missing_deps in errors_by_file.items(): - FixErrors(path, missing_deps, deleted_sources) + for path, missing_deps in list(errors_by_file.items()): + FixErrors(path, missing_deps, deleted_sources) - return 0 + return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/gtest-parallel-wrapper.py b/tools_webrtc/gtest-parallel-wrapper.py index ae48af34de..3b07da4552 100755 --- a/tools_webrtc/gtest-parallel-wrapper.py +++ b/tools_webrtc/gtest-parallel-wrapper.py @@ -53,7 +53,7 @@ For example: Will be converted into: - python gtest-parallel \ + vpython3 gtest-parallel \ --shard_index 0 \ --shard_count 1 \ --output_dir=SOME_OUTPUT_DIR \ @@ -82,8 +82,8 @@ Args = collections.namedtuple( ['gtest_parallel_args', 'test_env', 'output_dir', 'test_artifacts_dir']) -def _CatFiles(file_list, output_file): - with open(output_file, 'w') as output_file: +def _CatFiles(file_list, output_file_destination): + with open(output_file_destination, 'w') as output_file: for filename in file_list: with open(filename) as input_file: output_file.write(input_file.read()) @@ -100,7 +100,7 @@ def _ParseWorkersOption(workers): return max(result, 1) # Sanitize when using e.g. '0.5x'. -class ReconstructibleArgumentGroup(object): +class ReconstructibleArgumentGroup: """An argument group that can be converted back into a command line. This acts like ArgumentParser.add_argument_group, but names of arguments added @@ -154,7 +154,7 @@ def ParseArgs(argv=None): parser.add_argument('--store-test-artifacts', action='store_true') # No-sandbox is a Chromium-specific flag, ignore it. - # TODO(oprypin): Remove (bugs.webrtc.org/8115) + # TODO(bugs.webrtc.org/8115): Remove workaround when fixed. 
parser.add_argument('--no-sandbox', action='store_true', help=argparse.SUPPRESS) @@ -171,7 +171,7 @@ def ParseArgs(argv=None): } args_to_pass = [] for arg in unrecognized_args: - if any(arg.startswith(k) for k in webrtc_flags_to_change.keys()): + if any(arg.startswith(k) for k in list(webrtc_flags_to_change.keys())): arg_split = arg.split('=') args_to_pass.append(webrtc_flags_to_change[arg_split[0]] + '=' + arg_split[1]) diff --git a/tools_webrtc/gtest_parallel_wrapper_test.py b/tools_webrtc/gtest_parallel_wrapper_test.py index 82cb75bc6a..df31755505 100755 --- a/tools_webrtc/gtest_parallel_wrapper_test.py +++ b/tools_webrtc/gtest_parallel_wrapper_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. # @@ -21,152 +21,147 @@ gtest_parallel_wrapper = __import__('gtest-parallel-wrapper') @contextmanager def TemporaryDirectory(): - tmp_dir = tempfile.mkdtemp() - yield tmp_dir - os.rmdir(tmp_dir) + tmp_dir = tempfile.mkdtemp() + yield tmp_dir + os.rmdir(tmp_dir) class GtestParallelWrapperHelpersTest(unittest.TestCase): - def testGetWorkersAsIs(self): - # pylint: disable=protected-access - self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('12'), 12) + def testGetWorkersAsIs(self): + # pylint: disable=protected-access + self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('12'), 12) - def testGetTwiceWorkers(self): - expected = 2 * multiprocessing.cpu_count() - # pylint: disable=protected-access - self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('2x'), - expected) + def testGetTwiceWorkers(self): + expected = 2 * multiprocessing.cpu_count() + # pylint: disable=protected-access + self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('2x'), expected) - def testGetHalfWorkers(self): - expected = max(multiprocessing.cpu_count() // 2, 1) - # pylint: disable=protected-access - self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('0.5x'), - expected) + def testGetHalfWorkers(self): + expected = max(multiprocessing.cpu_count() // 2, 1) + # pylint: disable=protected-access + self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('0.5x'), + expected) class GtestParallelWrapperTest(unittest.TestCase): - @classmethod - def _Expected(cls, gtest_parallel_args): - return ['--shard_index=0', '--shard_count=1'] + gtest_parallel_args + @classmethod + def _Expected(cls, gtest_parallel_args): + return ['--shard_index=0', '--shard_count=1'] + gtest_parallel_args - def testOverwrite(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--timeout=123', 'exec', '--timeout', '124']) - expected = self._Expected(['--timeout=124', 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) + def testOverwrite(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--timeout=123', 'exec', '--timeout', '124']) + expected = self._Expected(['--timeout=124', 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) - def testMixing(self): - result = gtest_parallel_wrapper.ParseArgs([ - '--timeout=123', '--param1', 'exec', '--param2', '--timeout', '124' - ]) - expected = self._Expected( - ['--timeout=124', 'exec', '--', '--param1', '--param2']) - self.assertEqual(result.gtest_parallel_args, expected) + def testMixing(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--timeout=123', '--param1', 'exec', '--param2', '--timeout', '124']) + expected = self._Expected( + ['--timeout=124', 'exec', '--', '--param1', '--param2']) + self.assertEqual(result.gtest_parallel_args, expected) 
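The worker-count tests above pin down _ParseWorkersOption's contract: a plain integer passes through, while an 'Nx' suffix scales the host CPU count, floored at one worker. A standalone reimplementation inferred from those tests (the shipped function body is elided from this diff):

import multiprocessing

def parse_workers_option(workers):
    if workers.endswith('x'):
        result = int(float(workers[:-1]) * multiprocessing.cpu_count())
    else:
        result = int(workers)
    return max(result, 1)  # Sanitize when using e.g. '0.5x'.

print(parse_workers_option('12'), parse_workers_option('2x'))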
- def testMixingPositional(self): - result = gtest_parallel_wrapper.ParseArgs([ - '--timeout=123', 'exec', '--foo1', 'bar1', '--timeout', '124', - '--foo2', 'bar2' - ]) - expected = self._Expected([ - '--timeout=124', 'exec', '--', '--foo1', 'bar1', '--foo2', 'bar2' - ]) - self.assertEqual(result.gtest_parallel_args, expected) + def testMixingPositional(self): + result = gtest_parallel_wrapper.ParseArgs([ + '--timeout=123', 'exec', '--foo1', 'bar1', '--timeout', '124', '--foo2', + 'bar2' + ]) + expected = self._Expected( + ['--timeout=124', 'exec', '--', '--foo1', 'bar1', '--foo2', 'bar2']) + self.assertEqual(result.gtest_parallel_args, expected) - def testDoubleDash1(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--timeout', '123', 'exec', '--', '--timeout', '124']) - expected = self._Expected( - ['--timeout=123', 'exec', '--', '--timeout', '124']) - self.assertEqual(result.gtest_parallel_args, expected) + def testDoubleDash1(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--timeout', '123', 'exec', '--', '--timeout', '124']) + expected = self._Expected( + ['--timeout=123', 'exec', '--', '--timeout', '124']) + self.assertEqual(result.gtest_parallel_args, expected) - def testDoubleDash2(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--timeout=123', '--', 'exec', '--timeout=124']) - expected = self._Expected( - ['--timeout=123', 'exec', '--', '--timeout=124']) - self.assertEqual(result.gtest_parallel_args, expected) + def testDoubleDash2(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--timeout=123', '--', 'exec', '--timeout=124']) + expected = self._Expected(['--timeout=123', 'exec', '--', '--timeout=124']) + self.assertEqual(result.gtest_parallel_args, expected) - def testArtifacts(self): - with TemporaryDirectory() as tmp_dir: - output_dir = os.path.join(tmp_dir, 'foo') - result = gtest_parallel_wrapper.ParseArgs( - ['exec', '--store-test-artifacts', '--output_dir', output_dir]) - exp_artifacts_dir = os.path.join(output_dir, 'test_artifacts') - exp = self._Expected([ - '--output_dir=' + output_dir, 'exec', '--', - '--test_artifacts_dir=' + exp_artifacts_dir - ]) - self.assertEqual(result.gtest_parallel_args, exp) - self.assertEqual(result.output_dir, output_dir) - self.assertEqual(result.test_artifacts_dir, exp_artifacts_dir) + def testArtifacts(self): + with TemporaryDirectory() as tmp_dir: + output_dir = os.path.join(tmp_dir, 'foo') + result = gtest_parallel_wrapper.ParseArgs( + ['exec', '--store-test-artifacts', '--output_dir', output_dir]) + exp_artifacts_dir = os.path.join(output_dir, 'test_artifacts') + exp = self._Expected([ + '--output_dir=' + output_dir, 'exec', '--', + '--test_artifacts_dir=' + exp_artifacts_dir + ]) + self.assertEqual(result.gtest_parallel_args, exp) + self.assertEqual(result.output_dir, output_dir) + self.assertEqual(result.test_artifacts_dir, exp_artifacts_dir) - def testNoDirsSpecified(self): - result = gtest_parallel_wrapper.ParseArgs(['exec']) - self.assertEqual(result.output_dir, None) - self.assertEqual(result.test_artifacts_dir, None) + def testNoDirsSpecified(self): + result = gtest_parallel_wrapper.ParseArgs(['exec']) + self.assertEqual(result.output_dir, None) + self.assertEqual(result.test_artifacts_dir, None) - def testOutputDirSpecified(self): - result = gtest_parallel_wrapper.ParseArgs( - ['exec', '--output_dir', '/tmp/foo']) - self.assertEqual(result.output_dir, '/tmp/foo') - self.assertEqual(result.test_artifacts_dir, None) + def testOutputDirSpecified(self): + result = gtest_parallel_wrapper.ParseArgs( + 
['exec', '--output_dir', '/tmp/foo']) + self.assertEqual(result.output_dir, '/tmp/foo') + self.assertEqual(result.test_artifacts_dir, None) - def testShortArg(self): - result = gtest_parallel_wrapper.ParseArgs(['-d', '/tmp/foo', 'exec']) - expected = self._Expected(['--output_dir=/tmp/foo', 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) - self.assertEqual(result.output_dir, '/tmp/foo') + def testShortArg(self): + result = gtest_parallel_wrapper.ParseArgs(['-d', '/tmp/foo', 'exec']) + expected = self._Expected(['--output_dir=/tmp/foo', 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) + self.assertEqual(result.output_dir, '/tmp/foo') - def testBoolArg(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--gtest_also_run_disabled_tests', 'exec']) - expected = self._Expected(['--gtest_also_run_disabled_tests', 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) + def testBoolArg(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--gtest_also_run_disabled_tests', 'exec']) + expected = self._Expected(['--gtest_also_run_disabled_tests', 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) - def testNoArgs(self): - result = gtest_parallel_wrapper.ParseArgs(['exec']) - expected = self._Expected(['exec']) - self.assertEqual(result.gtest_parallel_args, expected) + def testNoArgs(self): + result = gtest_parallel_wrapper.ParseArgs(['exec']) + expected = self._Expected(['exec']) + self.assertEqual(result.gtest_parallel_args, expected) - def testDocExample(self): - with TemporaryDirectory() as tmp_dir: - output_dir = os.path.join(tmp_dir, 'foo') - result = gtest_parallel_wrapper.ParseArgs([ - 'some_test', '--some_flag=some_value', '--another_flag', - '--output_dir=' + output_dir, '--store-test-artifacts', - '--isolated-script-test-perf-output=SOME_OTHER_DIR', - '--foo=bar', '--baz' - ]) - expected_artifacts_dir = os.path.join(output_dir, 'test_artifacts') - expected = self._Expected([ - '--output_dir=' + output_dir, 'some_test', '--', - '--test_artifacts_dir=' + expected_artifacts_dir, - '--some_flag=some_value', '--another_flag', - '--isolated_script_test_perf_output=SOME_OTHER_DIR', - '--foo=bar', '--baz' - ]) - self.assertEqual(result.gtest_parallel_args, expected) + def testDocExample(self): + with TemporaryDirectory() as tmp_dir: + output_dir = os.path.join(tmp_dir, 'foo') + result = gtest_parallel_wrapper.ParseArgs([ + 'some_test', '--some_flag=some_value', '--another_flag', + '--output_dir=' + output_dir, '--store-test-artifacts', + '--isolated-script-test-perf-output=SOME_OTHER_DIR', '--foo=bar', + '--baz' + ]) + expected_artifacts_dir = os.path.join(output_dir, 'test_artifacts') + expected = self._Expected([ + '--output_dir=' + output_dir, 'some_test', '--', + '--test_artifacts_dir=' + expected_artifacts_dir, + '--some_flag=some_value', '--another_flag', + '--isolated_script_test_perf_output=SOME_OTHER_DIR', '--foo=bar', + '--baz' + ]) + self.assertEqual(result.gtest_parallel_args, expected) - def testStandardWorkers(self): - """Check integer value is passed as-is.""" - result = gtest_parallel_wrapper.ParseArgs(['--workers', '17', 'exec']) - expected = self._Expected(['--workers=17', 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) + def testStandardWorkers(self): + """Check integer value is passed as-is.""" + result = gtest_parallel_wrapper.ParseArgs(['--workers', '17', 'exec']) + expected = self._Expected(['--workers=17', 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) - def 
testTwoWorkersPerCpuCore(self): - result = gtest_parallel_wrapper.ParseArgs(['--workers', '2x', 'exec']) - workers = 2 * multiprocessing.cpu_count() - expected = self._Expected(['--workers=%s' % workers, 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) + def testTwoWorkersPerCpuCore(self): + result = gtest_parallel_wrapper.ParseArgs(['--workers', '2x', 'exec']) + workers = 2 * multiprocessing.cpu_count() + expected = self._Expected(['--workers=%s' % workers, 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) - def testUseHalfTheCpuCores(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--workers', '0.5x', 'exec']) - workers = max(multiprocessing.cpu_count() // 2, 1) - expected = self._Expected(['--workers=%s' % workers, 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) + def testUseHalfTheCpuCores(self): + result = gtest_parallel_wrapper.ParseArgs(['--workers', '0.5x', 'exec']) + workers = max(multiprocessing.cpu_count() // 2, 1) + expected = self._Expected(['--workers=%s' % workers, 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/ios/build_ios_libs.py b/tools_webrtc/ios/build_ios_libs.py index f0b28262c4..15a912e4ba 100755 --- a/tools_webrtc/ios/build_ios_libs.py +++ b/tools_webrtc/ios/build_ios_libs.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # @@ -51,303 +51,296 @@ from generate_licenses import LicenseBuilder def _ParseArgs(): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('--build_config', - default='release', - choices=['debug', 'release'], - help='The build config. Can be "debug" or "release". ' - 'Defaults to "release".') - parser.add_argument( - '--arch', - nargs='+', - default=DEFAULT_ARCHS, - choices=ENABLED_ARCHS, - help='Architectures to build. Defaults to %(default)s.') - parser.add_argument( - '-c', - '--clean', - action='store_true', - default=False, - help='Removes the previously generated build output, if any.') - parser.add_argument( - '-p', - '--purify', - action='store_true', - default=False, - help='Purifies the previously generated build output by ' - 'removing the temporary results used when (re)building.') - parser.add_argument( - '-o', - '--output-dir', - type=os.path.abspath, - default=SDK_OUTPUT_DIR, - help='Specifies a directory to output the build artifacts to. ' - 'If specified together with -c, deletes the dir.') - parser.add_argument( - '-r', - '--revision', - type=int, - default=0, - help='Specifies a revision number to embed if building the framework.') - parser.add_argument('-e', - '--bitcode', - action='store_true', - default=False, - help='Compile with bitcode.') - parser.add_argument('--verbose', - action='store_true', - default=False, - help='Debug logging.') - parser.add_argument('--use-goma', - action='store_true', - default=False, - help='Use goma to build.') - parser.add_argument( - '--extra-gn-args', - default=[], - nargs='*', - help='Additional GN args to be used during Ninja generation.') + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument('--build_config', + default='release', + choices=['debug', 'release'], + help='The build config. Can be "debug" or "release". ' + 'Defaults to "release".') + parser.add_argument('--arch', + nargs='+', + default=DEFAULT_ARCHS, + choices=ENABLED_ARCHS, + help='Architectures to build. 
Defaults to %(default)s.') + parser.add_argument( + '-c', + '--clean', + action='store_true', + default=False, + help='Removes the previously generated build output, if any.') + parser.add_argument('-p', + '--purify', + action='store_true', + default=False, + help='Purifies the previously generated build output by ' + 'removing the temporary results used when (re)building.') + parser.add_argument( + '-o', + '--output-dir', + type=os.path.abspath, + default=SDK_OUTPUT_DIR, + help='Specifies a directory to output the build artifacts to. ' + 'If specified together with -c, deletes the dir.') + parser.add_argument( + '-r', + '--revision', + type=int, + default=0, + help='Specifies a revision number to embed if building the framework.') + parser.add_argument('-e', + '--bitcode', + action='store_true', + default=False, + help='Compile with bitcode.') + parser.add_argument('--verbose', + action='store_true', + default=False, + help='Debug logging.') + parser.add_argument('--use-goma', + action='store_true', + default=False, + help='Use goma to build.') + parser.add_argument( + '--extra-gn-args', + default=[], + nargs='*', + help='Additional GN args to be used during Ninja generation.') - return parser.parse_args() + return parser.parse_args() def _RunCommand(cmd): - logging.debug('Running: %r', cmd) - subprocess.check_call(cmd, cwd=SRC_DIR) + logging.debug('Running: %r', cmd) + subprocess.check_call(cmd, cwd=SRC_DIR) def _CleanArtifacts(output_dir): - if os.path.isdir(output_dir): - logging.info('Deleting %s', output_dir) - shutil.rmtree(output_dir) + if os.path.isdir(output_dir): + logging.info('Deleting %s', output_dir) + shutil.rmtree(output_dir) def _CleanTemporary(output_dir, architectures): - if os.path.isdir(output_dir): - logging.info('Removing temporary build files.') - for arch in architectures: - arch_lib_path = os.path.join(output_dir, arch) - if os.path.isdir(arch_lib_path): - shutil.rmtree(arch_lib_path) + if os.path.isdir(output_dir): + logging.info('Removing temporary build files.') + for arch in architectures: + arch_lib_path = os.path.join(output_dir, arch) + if os.path.isdir(arch_lib_path): + shutil.rmtree(arch_lib_path) def _ParseArchitecture(architectures): - result = dict() - for arch in architectures: - if ":" in arch: - target_environment, target_cpu = arch.split(":") - else: - logging.warning('The environment for build is not specified.') - logging.warning('It is assumed based on cpu type.') - logging.warning('See crbug.com/1138425 for more details.') - if arch == "x64": - target_environment = "simulator" - else: - target_environment = "device" - target_cpu = arch - archs = result.get(target_environment) - if archs is None: - result[target_environment] = {target_cpu} - else: - archs.add(target_cpu) + result = dict() + for arch in architectures: + if ":" in arch: + target_environment, target_cpu = arch.split(":") + else: + logging.warning('The environment for build is not specified.') + logging.warning('It is assumed based on cpu type.') + logging.warning('See crbug.com/1138425 for more details.') + if arch == "x64": + target_environment = "simulator" + else: + target_environment = "device" + target_cpu = arch + archs = result.get(target_environment) + if archs is None: + result[target_environment] = {target_cpu} + else: + archs.add(target_cpu) - return result + return result def BuildWebRTC(output_dir, target_environment, target_arch, flavor, gn_target_name, ios_deployment_target, libvpx_build_vp9, use_bitcode, use_goma, extra_gn_args): - gn_args = [ - 'target_os="ios"', 
'ios_enable_code_signing=false', - 'is_component_build=false', 'rtc_include_tests=false', - ] + gn_args = [ + 'target_os="ios"', + 'ios_enable_code_signing=false', + 'is_component_build=false', + 'rtc_include_tests=false', + ] - # Add flavor option. - if flavor == 'debug': - gn_args.append('is_debug=true') - elif flavor == 'release': - gn_args.append('is_debug=false') - else: - raise ValueError('Unexpected flavor type: %s' % flavor) + # Add flavor option. + if flavor == 'debug': + gn_args.append('is_debug=true') + elif flavor == 'release': + gn_args.append('is_debug=false') + else: + raise ValueError('Unexpected flavor type: %s' % flavor) - gn_args.append('target_environment="%s"' % target_environment) + gn_args.append('target_environment="%s"' % target_environment) - gn_args.append('target_cpu="%s"' % target_arch) + gn_args.append('target_cpu="%s"' % target_arch) - gn_args.append('ios_deployment_target="%s"' % ios_deployment_target) + gn_args.append('ios_deployment_target="%s"' % ios_deployment_target) - gn_args.append('rtc_libvpx_build_vp9=' + - ('true' if libvpx_build_vp9 else 'false')) + gn_args.append('rtc_libvpx_build_vp9=' + + ('true' if libvpx_build_vp9 else 'false')) - gn_args.append('enable_ios_bitcode=' + - ('true' if use_bitcode else 'false')) - gn_args.append('use_goma=' + ('true' if use_goma else 'false')) - gn_args.append('rtc_enable_objc_symbol_export=true') + gn_args.append('enable_ios_bitcode=' + ('true' if use_bitcode else 'false')) + gn_args.append('use_goma=' + ('true' if use_goma else 'false')) + gn_args.append('rtc_enable_objc_symbol_export=true') - args_string = ' '.join(gn_args + extra_gn_args) - logging.info('Building WebRTC with args: %s', args_string) + args_string = ' '.join(gn_args + extra_gn_args) + logging.info('Building WebRTC with args: %s', args_string) - cmd = [ - sys.executable, - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'), - 'gen', - output_dir, - '--args=' + args_string, - ] - _RunCommand(cmd) - logging.info('Building target: %s', gn_target_name) + cmd = [ + sys.executable, + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'), + 'gen', + output_dir, + '--args=' + args_string, + ] + _RunCommand(cmd) + logging.info('Building target: %s', gn_target_name) - cmd = [ - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'), - '-C', - output_dir, - gn_target_name, - ] - if use_goma: - cmd.extend(['-j', '200']) - _RunCommand(cmd) + cmd = [ + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'), + '-C', + output_dir, + gn_target_name, + ] + if use_goma: + cmd.extend(['-j', '200']) + _RunCommand(cmd) def main(): - args = _ParseArgs() + args = _ParseArgs() - logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) + logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - if args.clean: - _CleanArtifacts(args.output_dir) - return 0 + if args.clean: + _CleanArtifacts(args.output_dir) + return 0 - # architectures is typed as Dict[str, Set[str]], - # where key is for the environment (device or simulator) - # and value is for the cpu type. - architectures = _ParseArchitecture(args.arch) - gn_args = args.extra_gn_args + # architectures is typed as Dict[str, Set[str]], + # where key is for the environment (device or simulator) + # and value is for the cpu type. 
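The test file above also hand-rolls a TemporaryDirectory context manager (mkdtemp, yield, rmdir). Under Python 3 the standard library ships an equivalent that additionally cleans up non-empty directories; a possible simplification, not part of this change:

import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    # The directory and its contents are removed when the block exits.
    print('scratch space:', tmp_dir)

Further down, BuildWebRTC reduces to rendering the accumulated GN flags into a single --args string, running `gn gen`, then ninja on the requested target. Condensed to its essence, with a made-up output directory:

gn_args = ['target_os="ios"', 'target_cpu="arm64"', 'is_debug=false']
# One flat string; GN parses the individual assignments back out.
cmd = ['gn', 'gen', 'out/ios_arm64_sketch', '--args=' + ' '.join(gn_args)]
print(' '.join(cmd))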
+ architectures = _ParseArchitecture(args.arch) + gn_args = args.extra_gn_args - if args.purify: - _CleanTemporary(args.output_dir, architectures.keys()) - return 0 + if args.purify: + _CleanTemporary(args.output_dir, list(architectures.keys())) + return 0 - gn_target_name = 'framework_objc' - if not args.bitcode: - gn_args.append('enable_dsyms=true') - gn_args.append('enable_stripping=true') + gn_target_name = 'framework_objc' + if not args.bitcode: + gn_args.append('enable_dsyms=true') + gn_args.append('enable_stripping=true') - # Build all architectures. - framework_paths = [] - all_lib_paths = [] - for (environment, archs) in architectures.items(): - framework_path = os.path.join(args.output_dir, environment) - framework_paths.append(framework_path) - lib_paths = [] - for arch in archs: - lib_path = os.path.join(framework_path, arch + '_libs') - lib_paths.append(lib_path) - BuildWebRTC(lib_path, environment, arch, args.build_config, - gn_target_name, IOS_DEPLOYMENT_TARGET[environment], - LIBVPX_BUILD_VP9, args.bitcode, args.use_goma, gn_args) - all_lib_paths.extend(lib_paths) - - # Combine the slices. - dylib_path = os.path.join(SDK_FRAMEWORK_NAME, 'WebRTC') - # Dylibs will be combined, all other files are the same across archs. - shutil.rmtree( - os.path.join(framework_path, SDK_FRAMEWORK_NAME), - ignore_errors=True) - shutil.copytree( - os.path.join(lib_paths[0], SDK_FRAMEWORK_NAME), - os.path.join(framework_path, SDK_FRAMEWORK_NAME), - symlinks=True) - logging.info('Merging framework slices for %s.', environment) - dylib_paths = [os.path.join(path, dylib_path) for path in lib_paths] - out_dylib_path = os.path.join(framework_path, dylib_path) - if os.path.islink(out_dylib_path): - out_dylib_path = os.path.join(os.path.dirname(out_dylib_path), - os.readlink(out_dylib_path)) - try: - os.remove(out_dylib_path) - except OSError: - pass - cmd = ['lipo'] + dylib_paths + ['-create', '-output', out_dylib_path] - _RunCommand(cmd) - - # Merge the dSYM slices. - lib_dsym_dir_path = os.path.join(lib_paths[0], SDK_DSYM_NAME) - if os.path.isdir(lib_dsym_dir_path): - shutil.rmtree( - os.path.join(framework_path, SDK_DSYM_NAME), - ignore_errors=True) - shutil.copytree( - lib_dsym_dir_path, os.path.join(framework_path, SDK_DSYM_NAME)) - logging.info('Merging dSYM slices.') - dsym_path = os.path.join(SDK_DSYM_NAME, 'Contents', 'Resources', - 'DWARF', 'WebRTC') - lib_dsym_paths = [ - os.path.join(path, dsym_path) for path in lib_paths - ] - out_dsym_path = os.path.join(framework_path, dsym_path) - try: - os.remove(out_dsym_path) - except OSError: - pass - cmd = ['lipo' - ] + lib_dsym_paths + ['-create', '-output', out_dsym_path] - _RunCommand(cmd) - - # Check for Mac-style WebRTC.framework/Resources/ (for Catalyst)... - resources_dir = os.path.join(framework_path, SDK_FRAMEWORK_NAME, - 'Resources') - if not os.path.exists(resources_dir): - # ...then fall back to iOS-style WebRTC.framework/ - resources_dir = os.path.dirname(resources_dir) - - # Modify the version number. - # Format should be <Branch cut MXX>.<Hotfix #>.<Rev #>. - # e.g. 55.0.14986 means - # branch cut 55, no hotfixes, and revision 14986.
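Each framework merge above hinges on a single lipo call per binary: one thin slice per architecture in, one fat Mach-O out. A minimal sketch with illustrative paths:

dylib_paths = ['device/arm64_libs/WebRTC.framework/WebRTC',
               'device/arm_libs/WebRTC.framework/WebRTC']
out_dylib_path = 'device/WebRTC.framework/WebRTC'
cmd = ['lipo'] + dylib_paths + ['-create', '-output', out_dylib_path]
print(' '.join(cmd))  # handed to _RunCommand in the real script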
- infoplist_path = os.path.join(resources_dir, 'Info.plist') - cmd = [ - 'PlistBuddy', '-c', 'Print :CFBundleShortVersionString', - infoplist_path - ] - major_minor = subprocess.check_output(cmd).decode('utf-8').strip() - version_number = '%s.%s' % (major_minor, args.revision) - logging.info('Substituting revision number: %s', version_number) - cmd = [ - 'PlistBuddy', '-c', 'Set :CFBundleVersion ' + version_number, - infoplist_path - ] - _RunCommand(cmd) - _RunCommand(['plutil', '-convert', 'binary1', infoplist_path]) - - xcframework_dir = os.path.join(args.output_dir, SDK_XCFRAMEWORK_NAME) - if os.path.isdir(xcframework_dir): - shutil.rmtree(xcframework_dir) - - logging.info('Creating xcframework.') - cmd = ['xcodebuild', '-create-xcframework', '-output', xcframework_dir] - - # Apparently, xcodebuild needs absolute paths for input arguments - for framework_path in framework_paths: - cmd += [ - '-framework', - os.path.abspath(os.path.join(framework_path, SDK_FRAMEWORK_NAME)), - ] - dsym_full_path = os.path.join(framework_path, SDK_DSYM_NAME) - if os.path.exists(dsym_full_path): - cmd += ['-debug-symbols', os.path.abspath(dsym_full_path)] + # Build all architectures. + framework_paths = [] + all_lib_paths = [] + for (environment, archs) in list(architectures.items()): + framework_path = os.path.join(args.output_dir, environment) + framework_paths.append(framework_path) + lib_paths = [] + for arch in archs: + lib_path = os.path.join(framework_path, arch + '_libs') + lib_paths.append(lib_path) + BuildWebRTC(lib_path, environment, arch, args.build_config, + gn_target_name, IOS_DEPLOYMENT_TARGET[environment], + LIBVPX_BUILD_VP9, args.bitcode, args.use_goma, gn_args) + all_lib_paths.extend(lib_paths) + # Combine the slices. + dylib_path = os.path.join(SDK_FRAMEWORK_NAME, 'WebRTC') + # Dylibs will be combined, all other files are the same across archs. + shutil.rmtree(os.path.join(framework_path, SDK_FRAMEWORK_NAME), + ignore_errors=True) + shutil.copytree(os.path.join(lib_paths[0], SDK_FRAMEWORK_NAME), + os.path.join(framework_path, SDK_FRAMEWORK_NAME), + symlinks=True) + logging.info('Merging framework slices for %s.', environment) + dylib_paths = [os.path.join(path, dylib_path) for path in lib_paths] + out_dylib_path = os.path.join(framework_path, dylib_path) + if os.path.islink(out_dylib_path): + out_dylib_path = os.path.join(os.path.dirname(out_dylib_path), + os.readlink(out_dylib_path)) + try: + os.remove(out_dylib_path) + except OSError: + pass + cmd = ['lipo'] + dylib_paths + ['-create', '-output', out_dylib_path] _RunCommand(cmd) - # Generate the license file. - logging.info('Generate license file.') - gn_target_full_name = '//sdk:' + gn_target_name - builder = LicenseBuilder(all_lib_paths, [gn_target_full_name]) - builder.GenerateLicenseText( - os.path.join(args.output_dir, SDK_XCFRAMEWORK_NAME)) + # Merge the dSYM slices. 
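The version stamping above reads the branch-cut prefix out of Info.plist and appends the --revision value. The same steps in isolation (macOS-only, and the plist path here is illustrative):

import subprocess

infoplist_path = 'WebRTC.framework/Info.plist'  # illustrative
revision = 14986
cmd = ['PlistBuddy', '-c', 'Print :CFBundleShortVersionString', infoplist_path]
major_minor = subprocess.check_output(cmd).decode('utf-8').strip()  # e.g. '55.0'
subprocess.check_call(['PlistBuddy', '-c',
                       'Set :CFBundleVersion %s.%s' % (major_minor, revision),
                       infoplist_path])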
+ lib_dsym_dir_path = os.path.join(lib_paths[0], SDK_DSYM_NAME) + if os.path.isdir(lib_dsym_dir_path): + shutil.rmtree(os.path.join(framework_path, SDK_DSYM_NAME), + ignore_errors=True) + shutil.copytree(lib_dsym_dir_path, + os.path.join(framework_path, SDK_DSYM_NAME)) + logging.info('Merging dSYM slices.') + dsym_path = os.path.join(SDK_DSYM_NAME, 'Contents', 'Resources', 'DWARF', + 'WebRTC') + lib_dsym_paths = [os.path.join(path, dsym_path) for path in lib_paths] + out_dsym_path = os.path.join(framework_path, dsym_path) + try: + os.remove(out_dsym_path) + except OSError: + pass + cmd = ['lipo'] + lib_dsym_paths + ['-create', '-output', out_dsym_path] + _RunCommand(cmd) + # Check for Mac-style WebRTC.framework/Resources/ (for Catalyst)... + resources_dir = os.path.join(framework_path, SDK_FRAMEWORK_NAME, + 'Resources') + if not os.path.exists(resources_dir): + # ...then fall back to iOS-style WebRTC.framework/ + resources_dir = os.path.dirname(resources_dir) + + # Modify the version number. + # Format should be <Branch cut MXX>.<Hotfix #>.<Rev #>. + # e.g. 55.0.14986 means + # branch cut 55, no hotfixes, and revision 14986. + infoplist_path = os.path.join(resources_dir, 'Info.plist') + cmd = [ + 'PlistBuddy', '-c', 'Print :CFBundleShortVersionString', + infoplist_path + ] + major_minor = subprocess.check_output(cmd).decode('utf-8').strip() + version_number = '%s.%s' % (major_minor, args.revision) + logging.info('Substituting revision number: %s', version_number) + cmd = [ + 'PlistBuddy', '-c', 'Set :CFBundleVersion ' + version_number, + infoplist_path + ] + _RunCommand(cmd) + _RunCommand(['plutil', '-convert', 'binary1', infoplist_path]) + + xcframework_dir = os.path.join(args.output_dir, SDK_XCFRAMEWORK_NAME) + if os.path.isdir(xcframework_dir): + shutil.rmtree(xcframework_dir) + + logging.info('Creating xcframework.') + cmd = ['xcodebuild', '-create-xcframework', '-output', xcframework_dir] + + # Apparently, xcodebuild needs absolute paths for input arguments + for framework_path in framework_paths: + cmd += [ + '-framework', + os.path.abspath(os.path.join(framework_path, SDK_FRAMEWORK_NAME)), + ] + dsym_full_path = os.path.join(framework_path, SDK_DSYM_NAME) + if os.path.exists(dsym_full_path): + cmd += ['-debug-symbols', os.path.abspath(dsym_full_path)] + + _RunCommand(cmd) + + # Generate the license file. + logging.info('Generate license file.') + gn_target_full_name = '//sdk:' + gn_target_name + builder = LicenseBuilder(all_lib_paths, [gn_target_full_name]) + builder.GenerateLicenseText( + os.path.join(args.output_dir, SDK_XCFRAMEWORK_NAME)) + + logging.info('Done.') + return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/ios/generate_modulemap.py b/tools_webrtc/ios/generate_modulemap.py index 4609385c38..1b61b8e3d1 100644 --- a/tools_webrtc/ios/generate_modulemap.py +++ b/tools_webrtc/ios/generate_modulemap.py @@ -1,3 +1,5 @@ +#!/usr/bin/env vpython3 + # Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
# # Use of this source code is governed by a BSD-style license @@ -11,22 +13,22 @@ import sys def GenerateModulemap(): - parser = argparse.ArgumentParser(description='Generate modulemap') - parser.add_argument("-o", "--out", type=str, help="Output file.") - parser.add_argument("-n", "--name", type=str, help="Name of binary.") + parser = argparse.ArgumentParser(description='Generate modulemap') + parser.add_argument("-o", "--out", type=str, help="Output file.") + parser.add_argument("-n", "--name", type=str, help="Name of binary.") - args = parser.parse_args() + args = parser.parse_args() - with open(args.out, "w") as outfile: - module_template = 'framework module %s {\n' \ - ' umbrella header "%s.h"\n' \ - '\n' \ - ' export *\n' \ - ' module * { export * }\n' \ - '}\n' % (args.name, args.name) - outfile.write(module_template) - return 0 + with open(args.out, "w") as outfile: + module_template = 'framework module %s {\n' \ + ' umbrella header "%s.h"\n' \ + '\n' \ + ' export *\n' \ + ' module * { export * }\n' \ + '}\n' % (args.name, args.name) + outfile.write(module_template) + return 0 if __name__ == '__main__': - sys.exit(GenerateModulemap()) + sys.exit(GenerateModulemap()) diff --git a/tools_webrtc/ios/generate_umbrella_header.py b/tools_webrtc/ios/generate_umbrella_header.py index 4c700a1c31..1fd1eed38e 100644 --- a/tools_webrtc/ios/generate_umbrella_header.py +++ b/tools_webrtc/ios/generate_umbrella_header.py @@ -1,3 +1,5 @@ +#!/usr/bin/env vpython3 + # Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -14,20 +16,20 @@ import textwrap def GenerateUmbrellaHeader(): - parser = argparse.ArgumentParser(description='Generate umbrella header') - parser.add_argument("-o", "--out", type=str, help="Output file.") - parser.add_argument("-s", - "--sources", - default=[], - type=str, - nargs='+', - help="Headers to include.") + parser = argparse.ArgumentParser(description='Generate umbrella header') + parser.add_argument("-o", "--out", type=str, help="Output file.") + parser.add_argument("-s", + "--sources", + default=[], + type=str, + nargs='+', + help="Headers to include.") - args = parser.parse_args() + args = parser.parse_args() - with open(args.out, "w") as outfile: - outfile.write( - textwrap.dedent("""\ + with open(args.out, "w") as outfile: + outfile.write( + textwrap.dedent("""\ /* * Copyright %d The WebRTC project authors. All Rights Reserved. * @@ -38,11 +40,11 @@ def GenerateUmbrellaHeader(): * be found in the AUTHORS file in the root of the source tree. */\n\n""" % datetime.datetime.now().year)) - for s in args.sources: - outfile.write("#import <WebRTC/{}>\n".format(os.path.basename(s))) + for s in args.sources: + outfile.write("#import <WebRTC/{}>\n".format(os.path.basename(s))) - return 0 + return 0 if __name__ == '__main__': - sys.exit(GenerateUmbrellaHeader()) + sys.exit(GenerateUmbrellaHeader()) diff --git a/tools_webrtc/ios/merge_ios_libs.py b/tools_webrtc/ios/merge_ios_libs.py index 31ffc1ddd5..111825155e 100755 --- a/tools_webrtc/ios/merge_ios_libs.py +++ b/tools_webrtc/ios/merge_ios_libs.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env vpython3 # Copyright 2016 The WebRTC project authors. All Rights Reserved. # @@ -10,18 +10,19 @@ """Script for merging generated iOS libraries.""" import sys - import argparse import os import re import subprocess +from six.moves import range + # Valid arch subdir names.
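For reference, the module_template in GenerateModulemap above renders as the following module map when the script is invoked with --name WebRTC:

framework module WebRTC {
  umbrella header "WebRTC.h"

  export *
  module * { export * }
}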
VALID_ARCHS = ['arm_libs', 'arm64_libs', 'ia32_libs', 'x64_libs'] def MergeLibs(lib_base_dir): - """Merges generated iOS libraries for different archs. + """Merges generated iOS libraries for different archs. Uses libtool to generate FAT archive files for each generated library. @@ -32,96 +33,94 @@ def MergeLibs(lib_base_dir): Returns: Exit code of libtool. """ - output_dir_name = 'fat_libs' - archs = [arch for arch in os.listdir(lib_base_dir) if arch in VALID_ARCHS] - # For each arch, find (library name, libary path) for arch. We will merge - # all libraries with the same name. - libs = {} - for lib_dir in [os.path.join(lib_base_dir, arch) for arch in VALID_ARCHS]: - if not os.path.exists(lib_dir): - continue - for dirpath, _, filenames in os.walk(lib_dir): - for filename in filenames: - if not filename.endswith('.a'): - continue - entry = libs.get(filename, []) - entry.append(os.path.join(dirpath, filename)) - libs[filename] = entry - orphaned_libs = {} - valid_libs = {} - for library, paths in libs.items(): - if len(paths) < len(archs): - orphaned_libs[library] = paths - else: - valid_libs[library] = paths - for library, paths in orphaned_libs.items(): - components = library[:-2].split('_')[:-1] - found = False - # Find directly matching parent libs by stripping suffix. - while components and not found: - parent_library = '_'.join(components) + '.a' - if parent_library in valid_libs: - valid_libs[parent_library].extend(paths) - found = True - break - components = components[:-1] - # Find next best match by finding parent libs with the same prefix. - if not found: - base_prefix = library[:-2].split('_')[0] - for valid_lib, valid_paths in valid_libs.items(): - if valid_lib[:len(base_prefix)] == base_prefix: - valid_paths.extend(paths) - found = True - break - assert found + output_dir_name = 'fat_libs' + archs = [arch for arch in os.listdir(lib_base_dir) if arch in VALID_ARCHS] + # For each arch, find (library name, libary path) for arch. We will merge + # all libraries with the same name. + libs = {} + for lib_dir in [os.path.join(lib_base_dir, arch) for arch in VALID_ARCHS]: + if not os.path.exists(lib_dir): + continue + for dirpath, _, filenames in os.walk(lib_dir): + for filename in filenames: + if not filename.endswith('.a'): + continue + entry = libs.get(filename, []) + entry.append(os.path.join(dirpath, filename)) + libs[filename] = entry + orphaned_libs = {} + valid_libs = {} + for library, paths in list(libs.items()): + if len(paths) < len(archs): + orphaned_libs[library] = paths + else: + valid_libs[library] = paths + for library, paths in list(orphaned_libs.items()): + components = library[:-2].split('_')[:-1] + found = False + # Find directly matching parent libs by stripping suffix. + while components and not found: + parent_library = '_'.join(components) + '.a' + if parent_library in valid_libs: + valid_libs[parent_library].extend(paths) + found = True + break + components = components[:-1] + # Find next best match by finding parent libs with the same prefix. + if not found: + base_prefix = library[:-2].split('_')[0] + for valid_lib, valid_paths in list(valid_libs.items()): + if valid_lib[:len(base_prefix)] == base_prefix: + valid_paths.extend(paths) + found = True + break + assert found - # Create output directory. - output_dir_path = os.path.join(lib_base_dir, output_dir_name) - if not os.path.exists(output_dir_path): - os.mkdir(output_dir_path) + # Create output directory. 
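MergeLibs' orphan handling above strips '_'-separated suffix components from a library name until it finds a parent archive that exists for every architecture. The same search, traced on made-up data:

valid_libs = {'libfoo.a': ['arm_libs/libfoo.a', 'arm64_libs/libfoo.a']}
library = 'libfoo_extra_arm.a'             # built for only one arch
components = library[:-2].split('_')[:-1]  # ['libfoo', 'extra']
while components:
    parent_library = '_'.join(components) + '.a'
    if parent_library in valid_libs:
        print('folding %s into %s' % (library, parent_library))
        break
    components = components[:-1]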
+ output_dir_path = os.path.join(lib_base_dir, output_dir_name) + if not os.path.exists(output_dir_path): + os.mkdir(output_dir_path) - # Use this so libtool merged binaries are always the same. - env = os.environ.copy() - env['ZERO_AR_DATE'] = '1' + # Use this so libtool merged binaries are always the same. + env = os.environ.copy() + env['ZERO_AR_DATE'] = '1' - # Ignore certain errors. - libtool_re = re.compile(r'^.*libtool:.*file: .* has no symbols$') + # Ignore certain errors. + libtool_re = re.compile(r'^.*libtool:.*file: .* has no symbols$') - # Merge libraries using libtool. - libtool_returncode = 0 - for library, paths in valid_libs.items(): - cmd_list = [ - 'libtool', '-static', '-v', '-o', - os.path.join(output_dir_path, library) - ] + paths - libtoolout = subprocess.Popen(cmd_list, - stderr=subprocess.PIPE, - env=env) - _, err = libtoolout.communicate() - for line in err.splitlines(): - if not libtool_re.match(line): - print >> sys.stderr, line - # Unconditionally touch the output .a file on the command line if present - # and the command succeeded. A bit hacky. - libtool_returncode = libtoolout.returncode - if not libtool_returncode: - for i in range(len(cmd_list) - 1): - if cmd_list[i] == '-o' and cmd_list[i + 1].endswith('.a'): - os.utime(cmd_list[i + 1], None) - break - return libtool_returncode + # Merge libraries using libtool. + libtool_returncode = 0 + for library, paths in list(valid_libs.items()): + cmd_list = [ + 'libtool', '-static', '-v', '-o', + os.path.join(output_dir_path, library) + ] + paths + libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env, + universal_newlines=True) + _, err = libtoolout.communicate() + for line in err.splitlines(): + if not libtool_re.match(line): + print(line, file=sys.stderr) + # Unconditionally touch the output .a file on the command line if present + # and the command succeeded. A bit hacky. + libtool_returncode = libtoolout.returncode + if not libtool_returncode: + for i in range(len(cmd_list) - 1): + if cmd_list[i] == '-o' and cmd_list[i + 1].endswith('.a'): + os.utime(cmd_list[i + 1], None) + break + return libtool_returncode -def Main(): - parser_description = 'Merge WebRTC libraries.' - parser = argparse.ArgumentParser(description=parser_description) - parser.add_argument('lib_base_dir', - help='Directory with built libraries. ', - type=str) - args = parser.parse_args() - lib_base_dir = args.lib_base_dir - MergeLibs(lib_base_dir) +def main(): + parser_description = 'Merge WebRTC libraries.' + parser = argparse.ArgumentParser(description=parser_description) + parser.add_argument('lib_base_dir', - help='Directory with built libraries. ', - type=str) hmm + args = parser.parse_args() + lib_base_dir = args.lib_base_dir + MergeLibs(lib_base_dir) if __name__ == '__main__': - sys.exit(Main()) + sys.exit(main()) diff --git a/tools_webrtc/libs/generate_licenses.py b/tools_webrtc/libs/generate_licenses.py index 86b4cd01f0..1c2db06313 100755 --- a/tools_webrtc/libs/generate_licenses.py +++ b/tools_webrtc/libs/generate_licenses.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env vpython3 # Copyright 2016 The WebRTC project authors. All Rights Reserved. # @@ -13,7 +13,8 @@ Licenses are taken from dependent libraries which are determined by GN desc command `gn desc` on all targets specified via `--target` argument.
One can see all dependencies by invoking this command: -$ gn.py desc --all --format=json <out_directory> <target_name> | python -m json.tool +$ gn.py desc --all --format=json <out_directory> <target_name> | \ + vpython3 -m json.tool (see "deps" subarray) Libraries are mapped to licenses via LIB_TO_LICENSES_DICT dictionary. @@ -21,18 +22,13 @@ Libraries are mapped to licenses via LIB_TO_LICENSES_DICT dictionary. """ import sys - import argparse import json import logging import os import re import subprocess -try: - # python 3.2+ - from html import escape -except ImportError: - from cgi import escape +from html import escape # Third_party library to licences mapping. Keys are names of the libraries # (right after the `third_party/` prefix) @@ -107,11 +103,11 @@ LIB_REGEX_TO_LICENSES_DICT = { def FindSrcDirPath(): - """Returns the abs path to the src/ dir of the project.""" - src_dir = os.path.dirname(os.path.abspath(__file__)) - while os.path.basename(src_dir) != 'src': - src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) - return src_dir + """Returns the abs path to the src/ dir of the project.""" + src_dir = os.path.dirname(os.path.abspath(__file__)) + while os.path.basename(src_dir) != 'src': + src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) + return src_dir SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0])) @@ -124,29 +120,29 @@ THIRD_PARTY_LIB_SIMPLE_NAME_REGEX = r'^.*/third_party/([\w\-+]+).*$' THIRD_PARTY_LIB_REGEX_TEMPLATE = r'^.*/third_party/%s$' -class LicenseBuilder(object): - def __init__(self, - buildfile_dirs, - targets, - lib_to_licenses_dict=None, - lib_regex_to_licenses_dict=None): - if lib_to_licenses_dict is None: - lib_to_licenses_dict = LIB_TO_LICENSES_DICT +class LicenseBuilder: + def __init__(self, + buildfile_dirs, + targets, + lib_to_licenses_dict=None, + lib_regex_to_licenses_dict=None): + if lib_to_licenses_dict is None: + lib_to_licenses_dict = LIB_TO_LICENSES_DICT - if lib_regex_to_licenses_dict is None: - lib_regex_to_licenses_dict = LIB_REGEX_TO_LICENSES_DICT + if lib_regex_to_licenses_dict is None: + lib_regex_to_licenses_dict = LIB_REGEX_TO_LICENSES_DICT - self.buildfile_dirs = buildfile_dirs - self.targets = targets - self.lib_to_licenses_dict = lib_to_licenses_dict - self.lib_regex_to_licenses_dict = lib_regex_to_licenses_dict + self.buildfile_dirs = buildfile_dirs + self.targets = targets + self.lib_to_licenses_dict = lib_to_licenses_dict + self.lib_regex_to_licenses_dict = lib_regex_to_licenses_dict - self.common_licenses_dict = self.lib_to_licenses_dict.copy() - self.common_licenses_dict.update(self.lib_regex_to_licenses_dict) + self.common_licenses_dict = self.lib_to_licenses_dict.copy() + self.common_licenses_dict.update(self.lib_regex_to_licenses_dict) - @staticmethod - def _ParseLibraryName(dep): - """Returns library name after third_party + @staticmethod + def _ParseLibraryName(dep): + """Returns library name after third_party Input one of: //a/b/third_party/libname:c @@ -155,11 +151,11 @@ class LicenseBuilder(object): Outputs libname or None if this is not a third_party dependency.
""" - groups = re.match(THIRD_PARTY_LIB_SIMPLE_NAME_REGEX, dep) - return groups.group(1) if groups else None + groups = re.match(THIRD_PARTY_LIB_SIMPLE_NAME_REGEX, dep) + return groups.group(1) if groups else None - def _ParseLibrary(self, dep): - """Returns library simple or regex name that matches `dep` after third_party + def _ParseLibrary(self, dep): + """Returns library simple or regex name that matches `dep` after third_party This method matches `dep` dependency against simple names in LIB_TO_LICENSES_DICT and regular expression names in @@ -167,109 +163,104 @@ class LicenseBuilder(object): Outputs matched dict key or None if this is not a third_party dependency. """ - libname = LicenseBuilder._ParseLibraryName(dep) + libname = LicenseBuilder._ParseLibraryName(dep) - for lib_regex in self.lib_regex_to_licenses_dict: - if re.match(THIRD_PARTY_LIB_REGEX_TEMPLATE % lib_regex, dep): - return lib_regex + for lib_regex in self.lib_regex_to_licenses_dict: + if re.match(THIRD_PARTY_LIB_REGEX_TEMPLATE % lib_regex, dep): + return lib_regex - return libname + return libname - @staticmethod - def _RunGN(buildfile_dir, target): - cmd = [ - sys.executable, - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'), - 'desc', - '--all', - '--format=json', - os.path.abspath(buildfile_dir), - target, - ] - logging.debug('Running: %r', cmd) - output_json = subprocess.check_output(cmd, cwd=WEBRTC_ROOT).decode('UTF-8') - logging.debug('Output: %s', output_json) - return output_json + @staticmethod + def _RunGN(buildfile_dir, target): + cmd = [ + sys.executable, + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'), + 'desc', + '--all', + '--format=json', + os.path.abspath(buildfile_dir), + target, + ] + logging.debug('Running: %r', cmd) + output_json = subprocess.check_output(cmd, cwd=WEBRTC_ROOT).decode('UTF-8') + logging.debug('Output: %s', output_json) + return output_json - def _GetThirdPartyLibraries(self, buildfile_dir, target): - output = json.loads(LicenseBuilder._RunGN(buildfile_dir, target)) - libraries = set() - for described_target in output.values(): - third_party_libs = (self._ParseLibrary(dep) - for dep in described_target['deps']) - libraries |= set(lib for lib in third_party_libs if lib) - return libraries + def _GetThirdPartyLibraries(self, buildfile_dir, target): + output = json.loads(LicenseBuilder._RunGN(buildfile_dir, target)) + libraries = set() + for described_target in list(output.values()): + third_party_libs = (self._ParseLibrary(dep) + for dep in described_target['deps']) + libraries |= set(lib for lib in third_party_libs if lib) + return libraries - def GenerateLicenseText(self, output_dir): - # Get a list of third_party libs from gn. For fat libraries we must consider - # all architectures, hence the multiple buildfile directories. - third_party_libs = set() - for buildfile in self.buildfile_dirs: - for target in self.targets: - third_party_libs |= self._GetThirdPartyLibraries( - buildfile, target) - assert len(third_party_libs) > 0 + def GenerateLicenseText(self, output_dir): + # Get a list of third_party libs from gn. For fat libraries we must consider + # all architectures, hence the multiple buildfile directories. 
+ third_party_libs = set() + for buildfile in self.buildfile_dirs: + for target in self.targets: + third_party_libs |= self._GetThirdPartyLibraries(buildfile, target) + assert len(third_party_libs) > 0 - missing_licenses = third_party_libs - set( - self.common_licenses_dict.keys()) - if missing_licenses: - error_msg = 'Missing licenses for following third_party targets: %s' % \ - ', '.join(sorted(missing_licenses)) - logging.error(error_msg) - raise Exception(error_msg) + missing_licenses = third_party_libs - set(self.common_licenses_dict.keys()) + if missing_licenses: + error_msg = 'Missing licenses for following third_party targets: %s' % \ + ', '.join(sorted(missing_licenses)) + logging.error(error_msg) + raise Exception(error_msg) - # Put webrtc at the front of the list. - license_libs = sorted(third_party_libs) - license_libs.insert(0, 'webrtc') + # Put webrtc at the front of the list. + license_libs = sorted(third_party_libs) + license_libs.insert(0, 'webrtc') - logging.info('List of licenses: %s', ', '.join(license_libs)) + logging.info('List of licenses: %s', ', '.join(license_libs)) - # Generate markdown. - output_license_file = open(os.path.join(output_dir, 'LICENSE.md'), - 'w+') - for license_lib in license_libs: - if len(self.common_licenses_dict[license_lib]) == 0: - logging.info( - 'Skipping compile time or internal dependency: %s', - license_lib) - continue # Compile time dependency + # Generate markdown. + output_license_file = open(os.path.join(output_dir, 'LICENSE.md'), 'w+') + for license_lib in license_libs: + if len(self.common_licenses_dict[license_lib]) == 0: + logging.info('Skipping compile time or internal dependency: %s', + license_lib) + continue # Compile time dependency - output_license_file.write('# %s\n' % license_lib) - output_license_file.write('```\n') - for path in self.common_licenses_dict[license_lib]: - license_path = os.path.join(WEBRTC_ROOT, path) - with open(license_path, 'r') as license_file: - license_text = escape(license_file.read(), quote=True) - output_license_file.write(license_text) - output_license_file.write('\n') - output_license_file.write('```\n\n') + output_license_file.write('# %s\n' % license_lib) + output_license_file.write('```\n') + for path in self.common_licenses_dict[license_lib]: + license_path = os.path.join(WEBRTC_ROOT, path) + with open(license_path, 'r') as license_file: + license_text = escape(license_file.read(), quote=True) + output_license_file.write(license_text) + output_license_file.write('\n') + output_license_file.write('```\n\n') - output_license_file.close() + output_license_file.close() def main(): - parser = argparse.ArgumentParser(description='Generate WebRTC LICENSE.md') - parser.add_argument('--verbose', - action='store_true', - default=False, - help='Debug logging.') - parser.add_argument('--target', - required=True, - action='append', - default=[], - help='Name of the GN target to generate a license for') - parser.add_argument('output_dir', - help='Directory to output LICENSE.md to.') - parser.add_argument('buildfile_dirs', - nargs='+', - help='Directories containing gn generated ninja files') - args = parser.parse_args() + parser = argparse.ArgumentParser(description='Generate WebRTC LICENSE.md') + parser.add_argument('--verbose', + action='store_true', + default=False, + help='Debug logging.') + parser.add_argument('--target', + required=True, + action='append', + default=[], + help='Name of the GN target to generate a license for') + parser.add_argument('output_dir', help='Directory to output 
LICENSE.md to.') + parser.add_argument('buildfile_dirs', + nargs='+', + help='Directories containing gn generated ninja files') + args = parser.parse_args() - logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) + logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - builder = LicenseBuilder(args.buildfile_dirs, args.target) - builder.GenerateLicenseText(args.output_dir) + builder = LicenseBuilder(args.buildfile_dirs, args.target) + builder.GenerateLicenseText(args.output_dir) if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/libs/generate_licenses_test.py b/tools_webrtc/libs/generate_licenses_test.py index ebef78e132..6dfd8f3e22 100755 --- a/tools_webrtc/libs/generate_licenses_test.py +++ b/tools_webrtc/libs/generate_licenses_test.py @@ -1,5 +1,6 @@ -#!/usr/bin/env vpython -# pylint: disable=relative-import,protected-access,unused-argument +#!/usr/bin/env vpython3 + +# pylint: disable=protected-access,unused-argument # Copyright 2017 The WebRTC project authors. All Rights Reserved. # @@ -10,20 +11,15 @@ # be found in the AUTHORS file in the root of the source tree. import unittest -try: - # python 3.3+ - from unittest.mock import patch -except ImportError: - # From site-package - from mock import patch +from mock import patch from generate_licenses import LicenseBuilder class TestLicenseBuilder(unittest.TestCase): - @staticmethod - def _FakeRunGN(buildfile_dir, target): - return """ + @staticmethod + def _FakeRunGN(buildfile_dir, target): + return """ { "target1": { "deps": [ @@ -36,93 +32,89 @@ class TestLicenseBuilder(unittest.TestCase): } """ - def testParseLibraryName(self): - self.assertEqual( - LicenseBuilder._ParseLibraryName('//a/b/third_party/libname1:c'), - 'libname1') - self.assertEqual( - LicenseBuilder._ParseLibraryName( - '//a/b/third_party/libname2:c(d)'), 'libname2') - self.assertEqual( - LicenseBuilder._ParseLibraryName( - '//a/b/third_party/libname3/c:d(e)'), 'libname3') - self.assertEqual( - LicenseBuilder._ParseLibraryName('//a/b/not_third_party/c'), None) + def testParseLibraryName(self): + self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/third_party/libname1:c'), + 'libname1') + self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/third_party/libname2:c(d)'), + 'libname2') + self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/third_party/libname3/c:d(e)'), + 'libname3') + self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/not_third_party/c'), None) - def testParseLibrarySimpleMatch(self): - builder = LicenseBuilder([], [], {}, {}) - self.assertEqual(builder._ParseLibrary('//a/b/third_party/libname:c'), - 'libname') + def testParseLibrarySimpleMatch(self): + builder = LicenseBuilder([], [], {}, {}) + self.assertEqual(builder._ParseLibrary('//a/b/third_party/libname:c'), + 'libname') - def testParseLibraryRegExNoMatchFallbacksToDefaultLibname(self): - lib_dict = { - 'libname:foo.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], lib_dict, {}) - self.assertEqual( - builder._ParseLibrary('//a/b/third_party/libname:bar_java'), - 'libname') + def testParseLibraryRegExNoMatchFallbacksToDefaultLibname(self): + lib_dict = { + 'libname:foo.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], lib_dict, {}) + self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname:bar_java'), 'libname') - def testParseLibraryRegExMatch(self): - lib_regex_dict = { - 'libname:foo.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], 
{}, lib_regex_dict) - self.assertEqual( - builder._ParseLibrary('//a/b/third_party/libname:foo_bar_java'), - 'libname:foo.*') + def testParseLibraryRegExMatch(self): + lib_regex_dict = { + 'libname:foo.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) + self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname:foo_bar_java'), + 'libname:foo.*') - def testParseLibraryRegExMatchWithSubDirectory(self): - lib_regex_dict = { - 'libname/foo:bar.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEqual( - builder._ParseLibrary('//a/b/third_party/libname/foo:bar_java'), - 'libname/foo:bar.*') + def testParseLibraryRegExMatchWithSubDirectory(self): + lib_regex_dict = { + 'libname/foo:bar.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) + self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname/foo:bar_java'), + 'libname/foo:bar.*') - def testParseLibraryRegExMatchWithStarInside(self): - lib_regex_dict = { - 'libname/foo.*bar.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEqual( - builder._ParseLibrary( - '//a/b/third_party/libname/fooHAHA:bar_java'), - 'libname/foo.*bar.*') + def testParseLibraryRegExMatchWithStarInside(self): + lib_regex_dict = { + 'libname/foo.*bar.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) + self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname/fooHAHA:bar_java'), + 'libname/foo.*bar.*') - @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) - def testGetThirdPartyLibrariesWithoutRegex(self): - builder = LicenseBuilder([], [], {}, {}) - self.assertEqual( - builder._GetThirdPartyLibraries('out/arm', 'target1'), - set(['libname1', 'libname2', 'libname3'])) + @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) + def testGetThirdPartyLibrariesWithoutRegex(self): + builder = LicenseBuilder([], [], {}, {}) + self.assertEqual(builder._GetThirdPartyLibraries('out/arm', 'target1'), + set(['libname1', 'libname2', 'libname3'])) - @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) - def testGetThirdPartyLibrariesWithRegex(self): - lib_regex_dict = { - 'libname2:c.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEqual( - builder._GetThirdPartyLibraries('out/arm', 'target1'), - set(['libname1', 'libname2:c.*', 'libname3'])) + @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) + def testGetThirdPartyLibrariesWithRegex(self): + lib_regex_dict = { + 'libname2:c.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) + self.assertEqual(builder._GetThirdPartyLibraries('out/arm', 'target1'), + set(['libname1', 'libname2:c.*', 'libname3'])) - @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) - def testGenerateLicenseTextFailIfUnknownLibrary(self): - lib_dict = { - 'simple_library': ['path/to/LICENSE'], - } - builder = LicenseBuilder(['dummy_dir'], ['dummy_target'], lib_dict, {}) + @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) + def testGenerateLicenseTextFailIfUnknownLibrary(self): + lib_dict = { + 'simple_library': ['path/to/LICENSE'], + } + builder = LicenseBuilder(['dummy_dir'], ['dummy_target'], lib_dict, {}) - with self.assertRaises(Exception) as context: - builder.GenerateLicenseText('dummy/dir') + with self.assertRaises(Exception) as context: + builder.GenerateLicenseText('dummy/dir') - self.assertEqual( - 
context.exception.args[0], - 'Missing licenses for following third_party targets: ' - 'libname1, libname2, libname3') + self.assertEqual( + context.exception.args[0], + 'Missing licenses for following third_party targets: ' + 'libname1, libname2, libname3') if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/mb/PRESUBMIT.py b/tools_webrtc/mb/PRESUBMIT.py index 67122ce1c8..0374c8619f 100644 --- a/tools_webrtc/mb/PRESUBMIT.py +++ b/tools_webrtc/mb/PRESUBMIT.py @@ -1,3 +1,5 @@ +#!/usr/bin/env vpython3 + # Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -7,6 +9,10 @@ # be found in the AUTHORS file in the root of the source tree. +# Runs PRESUBMIT.py in py3 mode by git cl presubmit. +USE_PYTHON3 = True + + def _CommonChecks(input_api, output_api): results = [] @@ -27,15 +33,16 @@ def _CommonChecks(input_api, output_api): results.extend(input_api.RunTests(pylint_checks)) # Run the MB unittests. - results.extend(input_api.canned_checks.RunUnitTestsInDirectory( - input_api, - output_api, - '.', - [ r'^.+_unittest\.py$'], - skip_shebang_check=True)) + results.extend( + input_api.canned_checks.RunUnitTestsInDirectory(input_api, + output_api, + '.', + [r'^.+_unittest\.py$'], + skip_shebang_check=False, + run_on_python2=False)) # Validate the format of the mb_config.pyl file. - cmd = [input_api.python_executable, 'mb.py', 'validate'] + cmd = [input_api.python3_executable, 'mb.py', 'validate'] kwargs = {'cwd': input_api.PresubmitLocalPath()} results.extend(input_api.RunTests([ input_api.Command(name='mb_validate', diff --git a/tools_webrtc/mb/mb.bat b/tools_webrtc/mb/mb.bat index a82770e714..1252560a08 100755 --- a/tools_webrtc/mb/mb.bat +++ b/tools_webrtc/mb/mb.bat @@ -3,4 +3,4 @@ setlocal :: This is required with cygwin only. PATH=%~dp0;%PATH% set PYTHONDONTWRITEBYTECODE=1 -call python "%~dp0mb.py" %* +call vpython3 "%~dp0mb.py" %* diff --git a/tools_webrtc/mb/mb.py b/tools_webrtc/mb/mb.py index df86f8418e..f281a9afba 100755 --- a/tools_webrtc/mb/mb.py +++ b/tools_webrtc/mb/mb.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -13,8 +14,6 @@ MB is a wrapper script for GN that can be used to generate build files for sets of canned configurations and analyze them. 
""" -from __future__ import print_function - import argparse import ast import errno @@ -28,10 +27,7 @@ import sys import subprocess import tempfile import traceback -try: - from urllib2 import urlopen # for Python2 -except ImportError: - from urllib.request import urlopen # for Python3 +from urllib.request import urlopen SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) SRC_DIR = os.path.dirname(os.path.dirname(SCRIPT_DIR)) @@ -280,7 +276,7 @@ class MetaBuildWrapper(object): def CmdExport(self): self.ReadConfigFile() obj = {} - for builder_group, builders in self.builder_groups.items(): + for builder_group, builders in list(self.builder_groups.items()): obj[builder_group] = {} for builder in builders: config = self.builder_groups[builder_group][builder] @@ -290,7 +286,7 @@ class MetaBuildWrapper(object): if isinstance(config, dict): args = { k: self.FlattenConfig(v)['gn_args'] - for k, v in config.items() + for k, v in list(config.items()) } elif config.startswith('//'): args = config @@ -476,15 +472,15 @@ class MetaBuildWrapper(object): # Build a list of all of the configs referenced by builders. all_configs = {} for builder_group in self.builder_groups: - for config in self.builder_groups[builder_group].values(): + for config in list(self.builder_groups[builder_group].values()): if isinstance(config, dict): - for c in config.values(): + for c in list(config.values()): all_configs[c] = builder_group else: all_configs[config] = builder_group # Check that every referenced args file or config actually exists. - for config, loc in all_configs.items(): + for config, loc in list(all_configs.items()): if config.startswith('//'): if not self.Exists(self.ToAbsPath(config)): errs.append('Unknown args file "%s" referenced from "%s".' % @@ -500,7 +496,7 @@ class MetaBuildWrapper(object): # Figure out the whole list of mixins, and check that every mixin # listed by a config or another mixin actually exists. referenced_mixins = set() - for config, mixins in self.configs.items(): + for config, mixins in list(self.configs.items()): for mixin in mixins: if not mixin in self.mixins: errs.append('Unknown mixin "%s" referenced by config "%s".' % @@ -1172,7 +1168,7 @@ class MetaBuildWrapper(object): self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var]))) if cmd[0] == self.executable: - cmd = ['python'] + cmd[1:] + cmd = ['vpython3'] + cmd[1:] self.Print(*[shell_quoter(arg) for arg in cmd]) def PrintJSON(self, obj): diff --git a/tools_webrtc/mb/mb_unittest.py b/tools_webrtc/mb/mb_unittest.py index 712cd74adc..556868a6b1 100755 --- a/tools_webrtc/mb/mb_unittest.py +++ b/tools_webrtc/mb/mb_unittest.py @@ -1,4 +1,5 @@ -#!/usr/bin/python +#!/usr/bin/env vpython3 + # Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. 
# # Use of this source code is governed by a BSD-style license @@ -11,10 +12,7 @@ import ast import json -try: - from StringIO import StringIO # for Python2 -except ImportError: - from io import StringIO # for Python3 +from io import StringIO # for Python3 import os import re import sys @@ -35,14 +33,14 @@ class FakeMBW(mb.MetaBuildWrapper): self.default_isolate_map = ('c:\\fake_src\\testing\\buildbot\\' 'gn_isolate_map.pyl') self.platform = 'win32' - self.executable = 'c:\\python\\python.exe' + self.executable = 'c:\\python\\vpython3.exe' self.sep = '\\' self.cwd = 'c:\\fake_src\\out\\Default' else: self.src_dir = '/fake_src' self.default_config = '/fake_src/tools_webrtc/mb/mb_config.pyl' self.default_isolate_map = '/fake_src/testing/buildbot/gn_isolate_map.pyl' - self.executable = '/usr/bin/python' + self.executable = '/usr/bin/vpython3' self.platform = 'linux2' self.sep = '/' self.cwd = '/fake_src/out/Default' @@ -197,7 +195,7 @@ class UnitTest(unittest.TestCase): mbw.ToAbsPath('//build/args/bots/fake_group/fake_args_bot.gn'), 'is_debug = false\n') if files: - for path, contents in files.items(): + for path, contents in list(files.items()): mbw.files[path] = contents return mbw @@ -846,8 +844,8 @@ class UnitTest(unittest.TestCase): '/fake_src/out/Default/base_unittests.archive.json': ("{\"base_unittests\":\"fake_hash\"}"), '/fake_src/third_party/depot_tools/cipd_manifest.txt': - ("# vpython\n" - "/some/vpython/pkg git_revision:deadbeef\n"), + ("# vpython3\n" + "/some/vpython3/pkg git_revision:deadbeef\n"), } task_json = json.dumps({'tasks': [{'task_id': '00000'}]}) collect_json = json.dumps({'00000': {'results': {}}}) diff --git a/tools_webrtc/network_emulator/config.py b/tools_webrtc/network_emulator/config.py index c1d3eaf3d1..9a18bdce45 100644 --- a/tools_webrtc/network_emulator/config.py +++ b/tools_webrtc/network_emulator/config.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -9,28 +10,27 @@ """Configuration class for network emulation.""" -class ConnectionConfig(object): - """Configuration containing the characteristics of a network connection.""" +class ConnectionConfig: + """Configuration containing the characteristics of a network connection.""" - def __init__(self, num, name, receive_bw_kbps, send_bw_kbps, delay_ms, - packet_loss_percent, queue_slots): - self.num = num - self.name = name - self.receive_bw_kbps = receive_bw_kbps - self.send_bw_kbps = send_bw_kbps - self.delay_ms = delay_ms - self.packet_loss_percent = packet_loss_percent - self.queue_slots = queue_slots + def __init__(self, num, name, receive_bw_kbps, send_bw_kbps, delay_ms, + packet_loss_percent, queue_slots): + self.num = num + self.name = name + self.receive_bw_kbps = receive_bw_kbps + self.send_bw_kbps = send_bw_kbps + self.delay_ms = delay_ms + self.packet_loss_percent = packet_loss_percent + self.queue_slots = queue_slots - def __str__(self): - """String representing the configuration. + def __str__(self): + """String representing the configuration. 
Returns: A string formatted and padded like this example: 12 Name 375 kbps 375 kbps 10 145 ms 0.1 % """ - left_aligned_name = self.name.ljust(24, ' ') - return '%2s %24s %5s kbps %5s kbps %4s %5s ms %3s %%' % ( - self.num, left_aligned_name, self.receive_bw_kbps, - self.send_bw_kbps, self.queue_slots, self.delay_ms, - self.packet_loss_percent) + left_aligned_name = self.name.ljust(24, ' ') + return '%2s %24s %5s kbps %5s kbps %4s %5s ms %3s %%' % ( + self.num, left_aligned_name, self.receive_bw_kbps, self.send_bw_kbps, + self.queue_slots, self.delay_ms, self.packet_loss_percent) diff --git a/tools_webrtc/network_emulator/emulate.py b/tools_webrtc/network_emulator/emulate.py index 51224c80b1..a35ccd36c8 100755 --- a/tools_webrtc/network_emulator/emulate.py +++ b/tools_webrtc/network_emulator/emulate.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -46,170 +47,163 @@ _DEFAULT_PRESET = _PRESETS_DICT[_DEFAULT_PRESET_ID] class NonStrippingEpilogOptionParser(optparse.OptionParser): - """Custom parser to let us show the epilog without weird line breaking.""" + """Custom parser to let us show the epilog without weird line breaking.""" - def format_epilog(self, formatter): - return self.epilog + def format_epilog(self, formatter): + return self.epilog def _GetExternalIp(): - """Finds out the machine's external IP by connecting to google.com.""" - external_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - external_socket.connect(('google.com', 80)) - return external_socket.getsockname()[0] + """Finds out the machine's external IP by connecting to google.com.""" + external_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + external_socket.connect(('google.com', 80)) + return external_socket.getsockname()[0] def _ParseArgs(): - """Define and parse the command-line arguments.""" - presets_string = '\n'.join(str(p) for p in _PRESETS) - parser = NonStrippingEpilogOptionParser(epilog=( - '\nAvailable presets:\n' - ' Bandwidth (kbps) Packet\n' - 'ID Name Receive Send Queue Delay loss \n' - '-- ---- --------- -------- ----- ------- ------\n' - '%s\n' % presets_string)) - parser.add_option('-p', - '--preset', - type='int', - default=_DEFAULT_PRESET_ID, - help=('ConnectionConfig configuration, specified by ID. ' - 'Default: %default')) - parser.add_option( - '-r', - '--receive-bw', - type='int', - default=_DEFAULT_PRESET.receive_bw_kbps, - help=('Receive bandwidth in kilobit/s. Default: %default')) - parser.add_option('-s', - '--send-bw', - type='int', - default=_DEFAULT_PRESET.send_bw_kbps, - help=('Send bandwidth in kilobit/s. Default: %default')) - parser.add_option('-d', - '--delay', - type='int', - default=_DEFAULT_PRESET.delay_ms, - help=('Delay in ms. Default: %default')) - parser.add_option('-l', - '--packet-loss', - type='float', - default=_DEFAULT_PRESET.packet_loss_percent, - help=('Packet loss in %. Default: %default')) - parser.add_option( - '-q', - '--queue', - type='int', - default=_DEFAULT_PRESET.queue_slots, - help=('Queue size as number of slots. Default: %default')) - parser.add_option( - '--port-range', - default='%s,%s' % _DEFAULT_PORT_RANGE, - help=('Range of ports for constrained network. Specify as ' - 'two comma separated integers. Default: %default')) - parser.add_option( - '--target-ip', - default=None, - help=('The interface IP address to apply the rules for. 
' - 'Default: the external facing interface IP address.')) - parser.add_option('-v', - '--verbose', - action='store_true', - default=False, - help=('Turn on verbose output. Will print all \'ipfw\' ' - 'commands that are executed.')) + """Define and parse the command-line arguments.""" + presets_string = '\n'.join(str(p) for p in _PRESETS) + parser = NonStrippingEpilogOptionParser(epilog=( + '\nAvailable presets:\n' + ' Bandwidth (kbps) Packet\n' + 'ID Name Receive Send Queue Delay loss \n' + '-- ---- --------- -------- ----- ------- ------\n' + '%s\n' % presets_string)) + parser.add_option('-p', + '--preset', + type='int', + default=_DEFAULT_PRESET_ID, + help=('ConnectionConfig configuration, specified by ID. ' + 'Default: %default')) + parser.add_option('-r', + '--receive-bw', + type='int', + default=_DEFAULT_PRESET.receive_bw_kbps, + help=('Receive bandwidth in kilobit/s. Default: %default')) + parser.add_option('-s', + '--send-bw', + type='int', + default=_DEFAULT_PRESET.send_bw_kbps, + help=('Send bandwidth in kilobit/s. Default: %default')) + parser.add_option('-d', + '--delay', + type='int', + default=_DEFAULT_PRESET.delay_ms, + help=('Delay in ms. Default: %default')) + parser.add_option('-l', + '--packet-loss', + type='float', + default=_DEFAULT_PRESET.packet_loss_percent, + help=('Packet loss in %. Default: %default')) + parser.add_option('-q', + '--queue', + type='int', + default=_DEFAULT_PRESET.queue_slots, + help=('Queue size as number of slots. Default: %default')) + parser.add_option('--port-range', + default='%s,%s' % _DEFAULT_PORT_RANGE, + help=('Range of ports for constrained network. Specify as ' + 'two comma separated integers. Default: %default')) + parser.add_option('--target-ip', + default=None, + help=('The interface IP address to apply the rules for. ' + 'Default: the external facing interface IP address.')) + parser.add_option('-v', + '--verbose', + action='store_true', + default=False, + help=('Turn on verbose output. Will print all \'ipfw\' ' + 'commands that are executed.')) - options = parser.parse_args()[0] + options = parser.parse_args()[0] - # Find preset by ID, if specified. - if options.preset and not _PRESETS_DICT.has_key(options.preset): - parser.error('Invalid preset: %s' % options.preset) + # Find preset by ID, if specified. + if options.preset and options.preset not in _PRESETS_DICT: + parser.error('Invalid preset: %s' % options.preset) - # Simple validation of the IP address, if supplied. - if options.target_ip: - try: - socket.inet_aton(options.target_ip) - except socket.error: - parser.error('Invalid IP address specified: %s' % - options.target_ip) - - # Convert port range into the desired tuple format. + # Simple validation of the IP address, if supplied. + if options.target_ip: try: - if isinstance(options.port_range, str): - options.port_range = tuple( - int(port) for port in options.port_range.split(',')) - if len(options.port_range) != 2: - parser.error( - 'Invalid port range specified, please specify two ' - 'integers separated by a comma.') - except ValueError: - parser.error('Invalid port range specified.') + socket.inet_aton(options.target_ip) + except socket.error: + parser.error('Invalid IP address specified: %s' % options.target_ip) - _InitLogging(options.verbose) - return options + # Convert port range into the desired tuple format. 
+ try: + if isinstance(options.port_range, str): + options.port_range = tuple( + int(port) for port in options.port_range.split(',')) + if len(options.port_range) != 2: + parser.error('Invalid port range specified, please specify two ' + 'integers separated by a comma.') + except ValueError: + parser.error('Invalid port range specified.') + + _InitLogging(options.verbose) + return options def _InitLogging(verbose): - """Setup logging.""" - log_level = _DEFAULT_LOG_LEVEL - if verbose: - log_level = logging.DEBUG - logging.basicConfig(level=log_level, format='%(message)s') + """Setup logging.""" + log_level = _DEFAULT_LOG_LEVEL + if verbose: + log_level = logging.DEBUG + logging.basicConfig(level=log_level, format='%(message)s') def main(): - options = _ParseArgs() + options = _ParseArgs() - # Build a configuration object. Override any preset configuration settings if - # a value of a setting was also given as a flag. - connection_config = _PRESETS_DICT[options.preset] - if options.receive_bw is not _DEFAULT_PRESET.receive_bw_kbps: - connection_config.receive_bw_kbps = options.receive_bw - if options.send_bw is not _DEFAULT_PRESET.send_bw_kbps: - connection_config.send_bw_kbps = options.send_bw - if options.delay is not _DEFAULT_PRESET.delay_ms: - connection_config.delay_ms = options.delay - if options.packet_loss is not _DEFAULT_PRESET.packet_loss_percent: - connection_config.packet_loss_percent = options.packet_loss - if options.queue is not _DEFAULT_PRESET.queue_slots: - connection_config.queue_slots = options.queue - emulator = network_emulator.NetworkEmulator(connection_config, - options.port_range) - try: - emulator.CheckPermissions() - except network_emulator.NetworkEmulatorError as e: - logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error) - return -1 + # Build a configuration object. Override any preset configuration settings if + # a value of a setting was also given as a flag. 
+ connection_config = _PRESETS_DICT[options.preset] + if options.receive_bw is not _DEFAULT_PRESET.receive_bw_kbps: + connection_config.receive_bw_kbps = options.receive_bw + if options.send_bw is not _DEFAULT_PRESET.send_bw_kbps: + connection_config.send_bw_kbps = options.send_bw + if options.delay is not _DEFAULT_PRESET.delay_ms: + connection_config.delay_ms = options.delay + if options.packet_loss is not _DEFAULT_PRESET.packet_loss_percent: + connection_config.packet_loss_percent = options.packet_loss + if options.queue is not _DEFAULT_PRESET.queue_slots: + connection_config.queue_slots = options.queue + emulator = network_emulator.NetworkEmulator(connection_config, + options.port_range) + try: + emulator.CheckPermissions() + except network_emulator.NetworkEmulatorError as e: + logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error) + return -1 - if not options.target_ip: - external_ip = _GetExternalIp() - else: - external_ip = options.target_ip + if not options.target_ip: + external_ip = _GetExternalIp() + else: + external_ip = options.target_ip - logging.info('Constraining traffic to/from IP: %s', external_ip) - try: - emulator.Emulate(external_ip) - logging.info( - 'Started network emulation with the following configuration:\n' - ' Receive bandwidth: %s kbps (%s kB/s)\n' - ' Send bandwidth : %s kbps (%s kB/s)\n' - ' Delay : %s ms\n' - ' Packet loss : %s %%\n' - ' Queue slots : %s', connection_config.receive_bw_kbps, - connection_config.receive_bw_kbps / 8, - connection_config.send_bw_kbps, connection_config.send_bw_kbps / 8, - connection_config.delay_ms, connection_config.packet_loss_percent, - connection_config.queue_slots) - logging.info('Affected traffic: IP traffic on ports %s-%s', - options.port_range[0], options.port_range[1]) - raw_input('Press Enter to abort Network Emulation...') - logging.info('Flushing all Dummynet rules...') - network_emulator.Cleanup() - logging.info('Completed Network Emulation.') - return 0 - except network_emulator.NetworkEmulatorError as e: - logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error) - return -2 + logging.info('Constraining traffic to/from IP: %s', external_ip) + try: + emulator.Emulate(external_ip) + logging.info( + 'Started network emulation with the following configuration:\n' + ' Receive bandwidth: %s kbps (%s kB/s)\n' + ' Send bandwidth : %s kbps (%s kB/s)\n' + ' Delay : %s ms\n' + ' Packet loss : %s %%\n' + ' Queue slots : %s', connection_config.receive_bw_kbps, + connection_config.receive_bw_kbps / 8, connection_config.send_bw_kbps, + connection_config.send_bw_kbps / 8, connection_config.delay_ms, + connection_config.packet_loss_percent, connection_config.queue_slots) + logging.info('Affected traffic: IP traffic on ports %s-%s', + options.port_range[0], options.port_range[1]) + input('Press Enter to abort Network Emulation...') + logging.info('Flushing all Dummynet rules...') + network_emulator.Cleanup() + logging.info('Completed Network Emulation.') + return 0 + except network_emulator.NetworkEmulatorError as e: + logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error) + return -2 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/network_emulator/network_emulator.py b/tools_webrtc/network_emulator/network_emulator.py index f77753b0f6..a7776a5f92 100644 --- a/tools_webrtc/network_emulator/network_emulator.py +++ b/tools_webrtc/network_emulator/network_emulator.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2012 The WebRTC project 
authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -16,7 +17,7 @@ import sys class NetworkEmulatorError(BaseException): - """Exception raised for errors in the network emulator. + """Exception raised for errors in the network emulator. Attributes: fail_msg: User defined error message. @@ -26,88 +27,83 @@ class NetworkEmulatorError(BaseException): stderr: Error output of running the command. """ - def __init__(self, - fail_msg, - cmd=None, - returncode=None, - output=None, - error=None): - BaseException.__init__(self, fail_msg) - self.fail_msg = fail_msg - self.cmd = cmd - self.returncode = returncode - self.output = output - self.error = error + def __init__(self, + fail_msg, + cmd=None, + returncode=None, + output=None, + error=None): + BaseException.__init__(self, fail_msg) + self.fail_msg = fail_msg + self.cmd = cmd + self.returncode = returncode + self.output = output + self.error = error -class NetworkEmulator(object): - """A network emulator that can constrain the network using Dummynet.""" +class NetworkEmulator: + """A network emulator that can constrain the network using Dummynet.""" - def __init__(self, connection_config, port_range): - """Constructor. + def __init__(self, connection_config, port_range): + """Constructor. Args: connection_config: A config.ConnectionConfig object containing the characteristics for the connection to be emulation. port_range: Tuple containing two integers defining the port range. """ - self._pipe_counter = 0 - self._rule_counter = 0 - self._port_range = port_range - self._connection_config = connection_config + self._pipe_counter = 0 + self._rule_counter = 0 + self._port_range = port_range + self._connection_config = connection_config - def Emulate(self, target_ip): - """Starts a network emulation by setting up Dummynet rules. + def Emulate(self, target_ip): + """Starts a network emulation by setting up Dummynet rules. Args: target_ip: The IP address of the interface that shall be that have the network constraints applied to it. """ - receive_pipe_id = self._CreateDummynetPipe( - self._connection_config.receive_bw_kbps, - self._connection_config.delay_ms, - self._connection_config.packet_loss_percent, - self._connection_config.queue_slots) - logging.debug('Created receive pipe: %s', receive_pipe_id) - send_pipe_id = self._CreateDummynetPipe( - self._connection_config.send_bw_kbps, - self._connection_config.delay_ms, - self._connection_config.packet_loss_percent, - self._connection_config.queue_slots) - logging.debug('Created send pipe: %s', send_pipe_id) + receive_pipe_id = self._CreateDummynetPipe( + self._connection_config.receive_bw_kbps, + self._connection_config.delay_ms, + self._connection_config.packet_loss_percent, + self._connection_config.queue_slots) + logging.debug('Created receive pipe: %s', receive_pipe_id) + send_pipe_id = self._CreateDummynetPipe( + self._connection_config.send_bw_kbps, self._connection_config.delay_ms, + self._connection_config.packet_loss_percent, + self._connection_config.queue_slots) + logging.debug('Created send pipe: %s', send_pipe_id) - # Adding the rules will start the emulation. - incoming_rule_id = self._CreateDummynetRule(receive_pipe_id, 'any', - target_ip, - self._port_range) - logging.debug('Created incoming rule: %s', incoming_rule_id) - outgoing_rule_id = self._CreateDummynetRule(send_pipe_id, target_ip, - 'any', self._port_range) - logging.debug('Created outgoing rule: %s', outgoing_rule_id) + # Adding the rules will start the emulation. 
+ incoming_rule_id = self._CreateDummynetRule(receive_pipe_id, 'any', + target_ip, self._port_range) + logging.debug('Created incoming rule: %s', incoming_rule_id) + outgoing_rule_id = self._CreateDummynetRule(send_pipe_id, target_ip, 'any', + self._port_range) + logging.debug('Created outgoing rule: %s', outgoing_rule_id) - @staticmethod - def CheckPermissions(): - """Checks if permissions are available to run Dummynet commands. + @staticmethod + def CheckPermissions(): + """Checks if permissions are available to run Dummynet commands. Raises: NetworkEmulatorError: If permissions to run Dummynet commands are not available. """ - try: - if os.getuid() != 0: - raise NetworkEmulatorError( - 'You must run this script with sudo.') - except AttributeError: + try: + if os.getuid() != 0: + raise NetworkEmulatorError('You must run this script with sudo.') + except AttributeError as permission_error: - # AttributeError will be raised on Windows. - if ctypes.windll.shell32.IsUserAnAdmin() == 0: - raise NetworkEmulatorError( - 'You must run this script with administrator' - ' privileges.') + # AttributeError will be raised on Windows. + if ctypes.windll.shell32.IsUserAnAdmin() == 0: + raise NetworkEmulatorError('You must run this script with administrator' + ' privileges.') from permission_error - def _CreateDummynetRule(self, pipe_id, from_address, to_address, - port_range): - """Creates a network emulation rule and returns its ID. + def _CreateDummynetRule(self, pipe_id, from_address, to_address, port_range): + """Creates a network emulation rule and returns its ID. Args: pipe_id: integer ID of the pipe. @@ -121,22 +117,20 @@ class NetworkEmulator(object): The ID of the rule, starting at 100. The rule ID increments with 100 for each rule being added. """ - self._rule_counter += 100 - add_part = [ - 'add', self._rule_counter, 'pipe', pipe_id, 'ip', 'from', - from_address, 'to', to_address - ] - _RunIpfwCommand(add_part + - ['src-port', '%s-%s' % port_range], - 'Failed to add Dummynet src-port rule.') - _RunIpfwCommand(add_part + - ['dst-port', '%s-%s' % port_range], - 'Failed to add Dummynet dst-port rule.') - return self._rule_counter + self._rule_counter += 100 + add_part = [ + 'add', self._rule_counter, 'pipe', pipe_id, 'ip', 'from', from_address, + 'to', to_address + ] + _RunIpfwCommand(add_part + ['src-port', '%s-%s' % port_range], + 'Failed to add Dummynet src-port rule.') + _RunIpfwCommand(add_part + ['dst-port', '%s-%s' % port_range], + 'Failed to add Dummynet dst-port rule.') + return self._rule_counter - def _CreateDummynetPipe(self, bandwidth_kbps, delay_ms, - packet_loss_percent, queue_slots): - """Creates a Dummynet pipe and return its ID. + def _CreateDummynetPipe(self, bandwidth_kbps, delay_ms, packet_loss_percent, + queue_slots): + """Creates a Dummynet pipe and return its ID. Args: bandwidth_kbps: Bandwidth. @@ -146,34 +140,33 @@ class NetworkEmulator(object): Returns: The ID of the pipe, starting at 1. """ - self._pipe_counter += 1 - cmd = [ - 'pipe', self._pipe_counter, 'config', 'bw', - str(bandwidth_kbps / 8) + 'KByte/s', 'delay', - '%sms' % delay_ms, 'plr', (packet_loss_percent / 100.0), 'queue', - queue_slots - ] - error_message = 'Failed to create Dummynet pipe. 
' - if sys.platform.startswith('linux'): - error_message += ( - 'Make sure you have loaded the ipfw_mod.ko module to ' - 'your kernel (sudo insmod /path/to/ipfw_mod.ko).') - _RunIpfwCommand(cmd, error_message) - return self._pipe_counter + self._pipe_counter += 1 + cmd = [ + 'pipe', self._pipe_counter, 'config', 'bw', + str(bandwidth_kbps / 8) + 'KByte/s', 'delay', + '%sms' % delay_ms, 'plr', (packet_loss_percent / 100.0), 'queue', + queue_slots + ] + error_message = 'Failed to create Dummynet pipe. ' + if sys.platform.startswith('linux'): + error_message += ('Make sure you have loaded the ipfw_mod.ko module to ' + 'your kernel (sudo insmod /path/to/ipfw_mod.ko).') + _RunIpfwCommand(cmd, error_message) + return self._pipe_counter def Cleanup(): - """Stops the network emulation by flushing all Dummynet rules. + """Stops the network emulation by flushing all Dummynet rules. Notice that this will flush any rules that may have been created previously before starting the emulation. """ - _RunIpfwCommand(['-f', 'flush'], 'Failed to flush Dummynet rules!') - _RunIpfwCommand(['-f', 'pipe', 'flush'], 'Failed to flush Dummynet pipes!') + _RunIpfwCommand(['-f', 'flush'], 'Failed to flush Dummynet rules!') + _RunIpfwCommand(['-f', 'pipe', 'flush'], 'Failed to flush Dummynet pipes!') def _RunIpfwCommand(command, fail_msg=None): - """Executes a command and prefixes the appropriate command for + """Executes a command and prefixes the appropriate command for Windows or Linux/UNIX. Args: @@ -184,19 +177,19 @@ def _RunIpfwCommand(command, fail_msg=None): NetworkEmulatorError: If command fails a message is set by the fail_msg parameter. """ - if sys.platform == 'win32': - ipfw_command = ['ipfw.exe'] - else: - ipfw_command = ['sudo', '-n', 'ipfw'] + if sys.platform == 'win32': + ipfw_command = ['ipfw.exe'] + else: + ipfw_command = ['sudo', '-n', 'ipfw'] - cmd_list = ipfw_command[:] + [str(x) for x in command] - cmd_string = ' '.join(cmd_list) - logging.debug('Running command: %s', cmd_string) - process = subprocess.Popen(cmd_list, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - output, error = process.communicate() - if process.returncode != 0: - raise NetworkEmulatorError(fail_msg, cmd_string, process.returncode, - output, error) - return output.strip() + cmd_list = ipfw_command[:] + [str(x) for x in command] + cmd_string = ' '.join(cmd_list) + logging.debug('Running command: %s', cmd_string) + process = subprocess.Popen(cmd_list, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + output, error = process.communicate() + if process.returncode != 0: + raise NetworkEmulatorError(fail_msg, cmd_string, process.returncode, output, + error) + return output.strip() diff --git a/tools_webrtc/perf/catapult_uploader.py b/tools_webrtc/perf/catapult_uploader.py index 6818bd18ea..c11515197f 100644 --- a/tools_webrtc/perf/catapult_uploader.py +++ b/tools_webrtc/perf/catapult_uploader.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -8,12 +9,13 @@ # be found in the AUTHORS file in the root of the source tree. 
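The emulate.py and network_emulator.py hunks above rely on three more Python 3 replacements: dict.has_key() gives way to the in operator, raw_input() becomes input(), and the re-raised Windows permission error is chained with raise ... from so the root cause stays in the traceback. (As a side note, the is-not comparisons against the preset defaults in main() depend on CPython's small-int caching and would be safer as !=, though that pattern predates this change.) A short sketch of the three idioms, using an invented preset table rather than the real _PRESETS_DICT:

presets = {1: 'DSL', 2: 'Cable'}  # invented stand-in for _PRESETS_DICT

# dict.has_key() no longer exists in Python 3; membership uses `in`.
for preset_id in (1, 3):
    if preset_id not in presets:
        print('Invalid preset: %s' % preset_id)

# raw_input() is gone; py3's input() returns a plain string, exactly like
# py2's raw_input() did.
# answer = input('Press Enter to abort Network Emulation...')

# `raise ... from` keeps the original exception attached to the new one:
def GetPreset(preset_id):
    try:
        return presets[preset_id]
    except KeyError as lookup_error:
        raise ValueError('Invalid preset: %s' % preset_id) from lookup_error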
import datetime -import httplib2 import json import subprocess import time import zlib +import httplib2 + from tracing.value import histogram from tracing.value import histogram_set from tracing.value.diagnostics import generic_set @@ -21,52 +23,51 @@ from tracing.value.diagnostics import reserved_infos def _GenerateOauthToken(): - args = ['luci-auth', 'token'] - p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if p.wait() == 0: - output = p.stdout.read() - return output.strip() - else: - raise RuntimeError( - 'Error generating authentication token.\nStdout: %s\nStderr:%s' % - (p.stdout.read(), p.stderr.read())) + args = ['luci-auth', 'token'] + p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + if p.wait() == 0: + output = p.stdout.read() + return output.strip() + raise RuntimeError( + 'Error generating authentication token.\nStdout: %s\nStderr:%s' % + (p.stdout.read(), p.stderr.read())) def _CreateHeaders(oauth_token): - return {'Authorization': 'Bearer %s' % oauth_token} + return {'Authorization': 'Bearer %s' % oauth_token} def _SendHistogramSet(url, histograms): - """Make a HTTP POST with the given JSON to the Performance Dashboard. + """Make an HTTP POST with the given JSON to the Performance Dashboard. Args: url: URL of Performance Dashboard instance, e.g. "https://chromeperf.appspot.com". histograms: a histogram set object that contains the data to be sent. """ - headers = _CreateHeaders(_GenerateOauthToken()) + headers = _CreateHeaders(_GenerateOauthToken()) - serialized = json.dumps(_ApplyHacks(histograms.AsDicts()), indent=4) + serialized = json.dumps(_ApplyHacks(histograms.AsDicts()), indent=4) - if url.startswith('http://localhost'): - # The catapult server turns off compression in developer mode. - data = serialized - else: - data = zlib.compress(serialized) + if url.startswith('http://localhost'): + # The catapult server turns off compression in developer mode. + data = serialized + else: + data = zlib.compress(serialized) - print 'Sending %d bytes to %s.' % (len(data), url + '/add_histograms') + print('Sending %d bytes to %s.' % (len(data), url + '/add_histograms')) - http = httplib2.Http() - response, content = http.request(url + '/add_histograms', - method='POST', - body=data, - headers=headers) - return response, content + http = httplib2.Http() + response, content = http.request(url + '/add_histograms', + method='POST', + body=data, + headers=headers) + return response, content def _WaitForUploadConfirmation(url, upload_token, wait_timeout, wait_polling_period): - """Make a HTTP GET requests to the Performance Dashboard untill upload + """Make an HTTP GET request to the Performance Dashboard until the upload status is known or the time is out. Args: @@ -79,42 +80,43 @@ def _WaitForUploadConfirmation(url, upload_token, wait_polling_period: (datetime.timedelta) Performance Dashboard will be polled every wait_polling_period amount of time.
""" - assert wait_polling_period <= wait_timeout + assert wait_polling_period <= wait_timeout - headers = _CreateHeaders(_GenerateOauthToken()) - http = httplib2.Http() + headers = _CreateHeaders(_GenerateOauthToken()) + http = httplib2.Http() - oauth_refreshed = False - response = None - resp_json = None + oauth_refreshed = False + response = None + resp_json = None + current_time = datetime.datetime.now() + end_time = current_time + wait_timeout + next_poll_time = current_time + wait_polling_period + while datetime.datetime.now() < end_time: current_time = datetime.datetime.now() - end_time = current_time + wait_timeout - next_poll_time = current_time + wait_polling_period - while datetime.datetime.now() < end_time: - current_time = datetime.datetime.now() - if next_poll_time > current_time: - time.sleep((next_poll_time - current_time).total_seconds()) - next_poll_time = datetime.datetime.now() + wait_polling_period + if next_poll_time > current_time: + time.sleep((next_poll_time - current_time).total_seconds()) + next_poll_time = datetime.datetime.now() + wait_polling_period - response, content = http.request(url + '/uploads/' + upload_token, - method='GET', headers=headers) + response, content = http.request(url + '/uploads/' + upload_token, + method='GET', + headers=headers) - print 'Upload state polled. Response: %r.' % content + print('Upload state polled. Response: %r.' % content) - if not oauth_refreshed and response.status == 403: - print 'Oauth token refreshed. Continue polling.' - headers = _CreateHeaders(_GenerateOauthToken()) - oauth_refreshed = True - continue + if not oauth_refreshed and response.status == 403: + print('Oauth token refreshed. Continue polling.') + headers = _CreateHeaders(_GenerateOauthToken()) + oauth_refreshed = True + continue - if response.status != 200: - break + if response.status != 200: + break - resp_json = json.loads(content) - if resp_json['state'] == 'COMPLETED' or resp_json['state'] == 'FAILED': - break + resp_json = json.loads(content) + if resp_json['state'] == 'COMPLETED' or resp_json['state'] == 'FAILED': + break - return response, resp_json + return response, resp_json # Because of an issues on the Dashboard side few measurements over a large set @@ -124,7 +126,7 @@ def _WaitForUploadConfirmation(url, upload_token, wait_timeout, def _CheckFullUploadInfo(url, upload_token, min_measurements_amount=50, max_failed_measurements_percent=0.03): - """Make a HTTP GET requests to the Performance Dashboard to get full info + """Make a HTTP GET requests to the Performance Dashboard to get full info about upload (including measurements). Checks if upload is correct despite not having status "COMPLETED". @@ -138,125 +140,123 @@ def _CheckFullUploadInfo(url, upload_token, max_failed_measurements_percent: maximal percent of failured measurements to tolerate. """ - headers = _CreateHeaders(_GenerateOauthToken()) - http = httplib2.Http() + headers = _CreateHeaders(_GenerateOauthToken()) + http = httplib2.Http() - response, content = http.request(url + '/uploads/' + upload_token + - '?additional_info=measurements', - method='GET', headers=headers) - - if response.status != 200: - print 'Failed to reach the dashboard to get full upload info.' - return False - - resp_json = json.loads(content) - print 'Full upload info: %s.' 
% json.dumps(resp_json, indent=4) - - if 'measurements' in resp_json: - measurements_cnt = len(resp_json['measurements']) - not_completed_state_cnt = len([ - m for m in resp_json['measurements'] - if m['state'] != 'COMPLETED' - ]) - - if (measurements_cnt >= min_measurements_amount and - (not_completed_state_cnt / (measurements_cnt * 1.0) <= - max_failed_measurements_percent)): - print('Not all measurements were confirmed to upload. ' - 'Measurements count: %d, failed to upload or timed out: %d' % - (measurements_cnt, not_completed_state_cnt)) - return True + response, content = http.request(url + '/uploads/' + upload_token + + '?additional_info=measurements', + method='GET', + headers=headers) + if response.status != 200: + print('Failed to reach the dashboard to get full upload info.') return False + resp_json = json.loads(content) + print('Full upload info: %s.' % json.dumps(resp_json, indent=4)) + + if 'measurements' in resp_json: + measurements_cnt = len(resp_json['measurements']) + not_completed_state_cnt = len( + [m for m in resp_json['measurements'] if m['state'] != 'COMPLETED']) + + if (measurements_cnt >= min_measurements_amount + and (not_completed_state_cnt / + (measurements_cnt * 1.0) <= max_failed_measurements_percent)): + print(('Not all measurements were confirmed to upload. ' + 'Measurements count: %d, failed to upload or timed out: %d' % + (measurements_cnt, not_completed_state_cnt))) + return True + + return False + # TODO(https://crbug.com/1029452): HACKHACK # Remove once we have doubles in the proto and handle -infinity correctly. def _ApplyHacks(dicts): - def _NoInf(value): - if value == float('inf'): - return histogram.JS_MAX_VALUE - if value == float('-inf'): - return -histogram.JS_MAX_VALUE - return value + def _NoInf(value): + if value == float('inf'): + return histogram.JS_MAX_VALUE + if value == float('-inf'): + return -histogram.JS_MAX_VALUE + return value - for d in dicts: - if 'running' in d: - d['running'] = [_NoInf(value) for value in d['running']] - if 'sampleValues' in d: - d['sampleValues'] = [_NoInf(value) for value in d['sampleValues']] + for d in dicts: + if 'running' in d: + d['running'] = [_NoInf(value) for value in d['running']] + if 'sampleValues' in d: + d['sampleValues'] = [_NoInf(value) for value in d['sampleValues']] - return dicts + return dicts def _LoadHistogramSetFromProto(options): - hs = histogram_set.HistogramSet() - with options.input_results_file as f: - hs.ImportProto(f.read()) + hs = histogram_set.HistogramSet() + with options.input_results_file as f: + hs.ImportProto(f.read()) - return hs + return hs def _AddBuildInfo(histograms, options): - common_diagnostics = { - reserved_infos.MASTERS: options.perf_dashboard_machine_group, - reserved_infos.BOTS: options.bot, - reserved_infos.POINT_ID: options.commit_position, - reserved_infos.BENCHMARKS: options.test_suite, - reserved_infos.WEBRTC_REVISIONS: str(options.webrtc_git_hash), - reserved_infos.BUILD_URLS: options.build_page_url, - } + common_diagnostics = { + reserved_infos.MASTERS: options.perf_dashboard_machine_group, + reserved_infos.BOTS: options.bot, + reserved_infos.POINT_ID: options.commit_position, + reserved_infos.BENCHMARKS: options.test_suite, + reserved_infos.WEBRTC_REVISIONS: str(options.webrtc_git_hash), + reserved_infos.BUILD_URLS: options.build_page_url, + } - for k, v in common_diagnostics.items(): - histograms.AddSharedDiagnosticToAllHistograms( - k.name, generic_set.GenericSet([v])) + for k, v in list(common_diagnostics.items()): + 
histograms.AddSharedDiagnosticToAllHistograms(k.name, + generic_set.GenericSet([v])) def _DumpOutput(histograms, output_file): - with output_file: - json.dump(_ApplyHacks(histograms.AsDicts()), output_file, indent=4) + with output_file: + json.dump(_ApplyHacks(histograms.AsDicts()), output_file, indent=4) def UploadToDashboard(options): - histograms = _LoadHistogramSetFromProto(options) - _AddBuildInfo(histograms, options) + histograms = _LoadHistogramSetFromProto(options) + _AddBuildInfo(histograms, options) - if options.output_json_file: - _DumpOutput(histograms, options.output_json_file) + if options.output_json_file: + _DumpOutput(histograms, options.output_json_file) - response, content = _SendHistogramSet(options.dashboard_url, histograms) + response, content = _SendHistogramSet(options.dashboard_url, histograms) - if response.status != 200: - print('Upload failed with %d: %s\n\n%s' % (response.status, - response.reason, content)) - return 1 - - upload_token = json.loads(content).get('token') - if not options.wait_for_upload or not upload_token: - print('Received 200 from dashboard. ', - 'Not waiting for the upload status confirmation.') - return 0 - - response, resp_json = _WaitForUploadConfirmation( - options.dashboard_url, - upload_token, - datetime.timedelta(seconds=options.wait_timeout_sec), - datetime.timedelta(seconds=options.wait_polling_period_sec)) - - if ((resp_json and resp_json['state'] == 'COMPLETED') or - _CheckFullUploadInfo(options.dashboard_url, upload_token)): - print 'Upload completed.' - return 0 - - if response.status != 200: - print('Upload status poll failed with %d: %s' % (response.status, - response.reason)) - return 1 - - if resp_json['state'] == 'FAILED': - print 'Upload failed.' - return 1 - - print('Upload wasn\'t completed in a given time: %d seconds.' % - options.wait_timeout_sec) + if response.status != 200: + print(('Upload failed with %d: %s\n\n%s' % + (response.status, response.reason, content))) return 1 + + upload_token = json.loads(content).get('token') + if not options.wait_for_upload or not upload_token: + print(('Received 200 from dashboard. ', + 'Not waiting for the upload status confirmation.')) + return 0 + + response, resp_json = _WaitForUploadConfirmation( + options.dashboard_url, upload_token, + datetime.timedelta(seconds=options.wait_timeout_sec), + datetime.timedelta(seconds=options.wait_polling_period_sec)) + + if ((resp_json and resp_json['state'] == 'COMPLETED') + or _CheckFullUploadInfo(options.dashboard_url, upload_token)): + print('Upload completed.') + return 0 + + if response.status != 200: + print(('Upload status poll failed with %d: %s' % + (response.status, response.reason))) + return 1 + + if resp_json['state'] == 'FAILED': + print('Upload failed.') + return 1 + + print(('Upload wasn\'t completed in a given time: %d seconds.' % + options.wait_timeout_sec)) + return 1 diff --git a/tools_webrtc/perf/webrtc_dashboard_upload.py b/tools_webrtc/perf/webrtc_dashboard_upload.py index 19db0250cf..91101cad52 100644 --- a/tools_webrtc/perf/webrtc_dashboard_upload.py +++ b/tools_webrtc/perf/webrtc_dashboard_upload.py @@ -1,4 +1,5 @@ -#!/usr/bin/env vpython +#!/usr/bin/env vpython3 + # Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. 
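The catapult_uploader.py conversion above leaves two Python 3 details worth flagging. First, json.dumps() returns str while Python 3's zlib.compress() accepts only bytes, so the non-localhost branch of _SendHistogramSet would still raise TypeError unless the payload is encoded; the encode call below is an assumed follow-up fix, not code from this patch. Second, conversions such as print(('Received 200 from dashboard. ', '...')) print a tuple instead of one concatenated message, which faithfully reproduces the old py2 print-statement output but is probably not what was intended.

import json
import zlib

serialized = json.dumps({'sampleValues': [1, 2, 3]}, indent=4)  # str in py3

# zlib.compress() requires a bytes-like object under Python 3, so the str
# payload must be encoded first (hypothetical fix, not in the patch):
data = zlib.compress(serialized.encode('utf-8'))
print('Sending %d bytes.' % len(data))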
# # Use of this source code is governed by a BSD-style license @@ -28,95 +29,110 @@ import google.protobuf # pylint: disable=unused-import def _CreateParser(): - parser = argparse.ArgumentParser() - parser.add_argument('--perf-dashboard-machine-group', required=True, - help='The "master" the bots are grouped under. This ' - 'string is the group in the the perf dashboard path ' - 'group/bot/perf_id/metric/subtest.') - parser.add_argument('--bot', required=True, - help='The bot running the test (e.g. ' - 'webrtc-win-large-tests).') - parser.add_argument('--test-suite', required=True, - help='The key for the test in the dashboard (i.e. what ' - 'you select in the top-level test suite selector in ' - 'the dashboard') - parser.add_argument('--webrtc-git-hash', required=True, - help='webrtc.googlesource.com commit hash.') - parser.add_argument('--commit-position', type=int, required=True, - help='Commit pos corresponding to the git hash.') - parser.add_argument('--build-page-url', required=True, - help='URL to the build page for this build.') - parser.add_argument('--dashboard-url', required=True, - help='Which dashboard to use.') - parser.add_argument('--input-results-file', type=argparse.FileType(), - required=True, - help='A HistogramSet proto file with output from ' - 'WebRTC tests.') - parser.add_argument('--output-json-file', type=argparse.FileType('w'), - help='Where to write the output (for debugging).') - parser.add_argument('--outdir', required=True, - help='Path to the local out/ dir (usually out/Default)') - parser.add_argument('--wait-for-upload', action='store_true', - help='If specified, script will wait untill Chrome ' - 'perf dashboard confirms that the data was succesfully ' - 'proccessed and uploaded') - parser.add_argument('--wait-timeout-sec', type=int, default=1200, - help='Used only if wait-for-upload is True. Maximum ' - 'amount of time in seconds that the script will wait ' - 'for the confirmation.') - parser.add_argument('--wait-polling-period-sec', type=int, default=120, - help='Used only if wait-for-upload is True. Status ' - 'will be requested from the Dashboard every ' - 'wait-polling-period-sec seconds.') - return parser + parser = argparse.ArgumentParser() + parser.add_argument('--perf-dashboard-machine-group', + required=True, + help='The "master" the bots are grouped under. This ' + 'string is the group in the perf dashboard path ' + 'group/bot/perf_id/metric/subtest.') + parser.add_argument('--bot', + required=True, + help='The bot running the test (e.g. ' + 'webrtc-win-large-tests).') + parser.add_argument('--test-suite', + required=True, + help='The key for the test in the dashboard (i.e. 
what ' + 'you select in the top-level test suite selector in ' + 'the dashboard)') + parser.add_argument('--webrtc-git-hash', + required=True, + help='webrtc.googlesource.com commit hash.') + parser.add_argument('--commit-position', + type=int, + required=True, + help='Commit pos corresponding to the git hash.') + parser.add_argument('--build-page-url', + required=True, + help='URL to the build page for this build.') + parser.add_argument('--dashboard-url', + required=True, + help='Which dashboard to use.') + parser.add_argument('--input-results-file', + type=argparse.FileType(), + required=True, + help='A HistogramSet proto file with output from ' + 'WebRTC tests.') + parser.add_argument('--output-json-file', + type=argparse.FileType('w'), + help='Where to write the output (for debugging).') + parser.add_argument('--outdir', + required=True, + help='Path to the local out/ dir (usually out/Default)') + parser.add_argument('--wait-for-upload', + action='store_true', + help='If specified, script will wait until Chrome ' + 'perf dashboard confirms that the data was successfully ' + 'processed and uploaded') + parser.add_argument('--wait-timeout-sec', + type=int, + default=1200, + help='Used only if wait-for-upload is True. Maximum ' + 'amount of time in seconds that the script will wait ' + 'for the confirmation.') + parser.add_argument('--wait-polling-period-sec', + type=int, + default=120, + help='Used only if wait-for-upload is True. Status ' + 'will be requested from the Dashboard every ' + 'wait-polling-period-sec seconds.') + return parser def _ConfigurePythonPath(options): - # We just yank the python scripts we require into the PYTHONPATH. You could - # also imagine a solution where we use for instance - # protobuf:py_proto_runtime to copy catapult and protobuf code to out/. - # This is the convention in Chromium and WebRTC python scripts. We do need - # to build histogram_pb2 however, so that's why we add out/ to sys.path - # below. - # - # It would be better if there was an equivalent to py_binary in GN, but - # there's not. - script_dir = os.path.dirname(os.path.realpath(__file__)) - checkout_root = os.path.abspath( - os.path.join(script_dir, os.pardir, os.pardir)) + # We just yank the python scripts we require into the PYTHONPATH. You could + # also imagine a solution where we use for instance + # protobuf:py_proto_runtime to copy catapult and protobuf code to out/. + # This is the convention in Chromium and WebRTC python scripts. We do need + # to build histogram_pb2 however, so that's why we add out/ to sys.path + # below. + # + # It would be better if there was an equivalent to py_binary in GN, but + # there's not. + script_dir = os.path.dirname(os.path.realpath(__file__)) + checkout_root = os.path.abspath(os.path.join(script_dir, os.pardir, + os.pardir)) - sys.path.insert( - 0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing')) - sys.path.insert( - 0, os.path.join(checkout_root, 'third_party', 'protobuf', 'python')) + sys.path.insert( + 0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing')) + sys.path.insert( + 0, os.path.join(checkout_root, 'third_party', 'protobuf', 'python')) - # The webrtc_dashboard_upload gn rule will build the protobuf stub for - # python, so put it in the path for this script before we attempt to import - # it. 
- histogram_proto_path = os.path.join(options.outdir, 'pyproto', 'tracing', - 'tracing', 'proto') - sys.path.insert(0, histogram_proto_path) + # The webrtc_dashboard_upload gn rule will build the protobuf stub for + # python, so put it in the path for this script before we attempt to import + # it. + histogram_proto_path = os.path.join(options.outdir, 'pyproto', 'tracing', + 'tracing', 'proto') + sys.path.insert(0, histogram_proto_path) - # Fail early in case the proto hasn't been built. - from tracing.proto import histogram_proto - if not histogram_proto.HAS_PROTO: - raise ImportError( - 'Could not find histogram_pb2. You need to build the ' - 'webrtc_dashboard_upload target before invoking this ' - 'script. Expected to find ' - 'histogram_pb2.py in %s.' % histogram_proto_path) + # Fail early in case the proto hasn't been built. + from tracing.proto import histogram_proto + if not histogram_proto.HAS_PROTO: + raise ImportError('Could not find histogram_pb2. You need to build the ' + 'webrtc_dashboard_upload target before invoking this ' + 'script. Expected to find ' + 'histogram_pb2.py in %s.' % histogram_proto_path) def main(args): - parser = _CreateParser() - options = parser.parse_args(args) + parser = _CreateParser() + options = parser.parse_args(args) - _ConfigurePythonPath(options) + _ConfigurePythonPath(options) - import catapult_uploader + import catapult_uploader - return catapult_uploader.UploadToDashboard(options) + return catapult_uploader.UploadToDashboard(options) if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + sys.exit(main(sys.argv[1:])) diff --git a/tools_webrtc/presubmit_checks_lib/build_helpers.py b/tools_webrtc/presubmit_checks_lib/build_helpers.py index e276631ed4..86fc1a0bbd 100644 --- a/tools_webrtc/presubmit_checks_lib/build_helpers.py +++ b/tools_webrtc/presubmit_checks_lib/build_helpers.py @@ -1,3 +1,5 @@ +#!/usr/bin/env vpython3 + # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -18,11 +20,11 @@ import tempfile def FindSrcDirPath(): - """Returns the abs path to the src/ dir of the project.""" - src_dir = os.path.dirname(os.path.abspath(__file__)) - while os.path.basename(src_dir) != 'src': - src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) - return src_dir + """Returns the abs path to the src/ dir of the project.""" + src_dir = os.path.dirname(os.path.abspath(__file__)) + while os.path.basename(src_dir) != 'src': + src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) + return src_dir SRC_DIR = FindSrcDirPath() @@ -31,16 +33,16 @@ import find_depot_tools def RunGnCommand(args, root_dir=None): - """Runs `gn` with provided args and return error if any.""" - try: - command = [ - sys.executable, - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py') - ] + args - subprocess.check_output(command, cwd=root_dir) - except subprocess.CalledProcessError as err: - return err.output - return None + """Runs `gn` with provided args and return error if any.""" + try: + command = [ + sys.executable, + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py') + ] + args + subprocess.check_output(command, cwd=root_dir) + except subprocess.CalledProcessError as err: + return err.output + return None # GN_ERROR_RE matches the summary of an error output by `gn check`. 
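In the next hunk, the new error.decode('utf-8') is load-bearing: under Python 3, subprocess.check_output() and the CalledProcessError.output returned by RunGnCommand hold bytes, while GN_ERROR_RE is compiled from a str pattern, and mixing the two makes findall() raise TypeError. A minimal sketch of the same pattern, with a simplified regex and a stand-in command:

import re
import subprocess

# Simplified stand-in for GN_ERROR_RE; the real pattern also captures the
# indented continuation lines of each error block.
SIMPLE_GN_ERROR_RE = re.compile(r'^ERROR .+$', re.MULTILINE)

out = subprocess.check_output(['echo', 'ERROR at //BUILD.gn'])  # bytes (POSIX echo)
errors = SIMPLE_GN_ERROR_RE.findall(out.decode('utf-8'))        # decode before matching
print(errors)  # ['ERROR at //BUILD.gn']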
@@ -50,49 +52,49 @@ GN_ERROR_RE = re.compile(r'^ERROR .+(?:\n.*[^_\n].*$)+', re.MULTILINE) def RunGnCheck(root_dir=None): - """Runs `gn gen --check` with default args to detect mismatches between + """Runs `gn gen --check` with default args to detect mismatches between #includes and dependencies in the BUILD.gn files, as well as general build errors. Returns a list of error summary strings. """ - out_dir = tempfile.mkdtemp('gn') - try: - error = RunGnCommand(['gen', '--check', out_dir], root_dir) - finally: - shutil.rmtree(out_dir, ignore_errors=True) - return GN_ERROR_RE.findall(error) if error else [] + out_dir = tempfile.mkdtemp('gn') + try: + error = RunGnCommand(['gen', '--check', out_dir], root_dir) + finally: + shutil.rmtree(out_dir, ignore_errors=True) + return GN_ERROR_RE.findall(error.decode('utf-8')) if error else [] def RunNinjaCommand(args, root_dir=None): - """Runs ninja quietly. Any failure (e.g. clang not found) is + """Runs ninja quietly. Any failure (e.g. clang not found) is silently discarded, since this is unlikely an error in submitted CL.""" - command = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja')] + args - p = subprocess.Popen(command, - cwd=root_dir, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, _ = p.communicate() - return out + command = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja')] + args + p = subprocess.Popen(command, + cwd=root_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, _ = p.communicate() + return out def GetClangTidyPath(): - """POC/WIP! Use the one we have, even it doesn't match clang's version.""" - tidy = ('third_party/android_ndk/toolchains/' - 'llvm/prebuilt/linux-x86_64/bin/clang-tidy') - return os.path.join(SRC_DIR, tidy) + """POC/WIP! Use the one we have, even it doesn't match clang's version.""" + tidy = ('third_party/android_ndk/toolchains/' + 'llvm/prebuilt/linux-x86_64/bin/clang-tidy') + return os.path.join(SRC_DIR, tidy) def GetCompilationDb(root_dir=None): - """Run ninja compdb tool to get proper flags, defines and include paths.""" - # The compdb tool expect a rule. - commands = json.loads(RunNinjaCommand(['-t', 'compdb', 'cxx'], root_dir)) - # Turns 'file' field into a key. - return {v['file']: v for v in commands} + """Run ninja compdb tool to get proper flags, defines and include paths.""" + # The compdb tool expect a rule. + commands = json.loads(RunNinjaCommand(['-t', 'compdb', 'cxx'], root_dir)) + # Turns 'file' field into a key. + return {v['file']: v for v in commands} def GetCompilationCommand(filepath, gn_args, work_dir): - """Get the whole command used to compile one cc file. + """Get the whole command used to compile one cc file. Typically, clang++ with flags, defines and include paths. Args: @@ -103,30 +105,30 @@ def GetCompilationCommand(filepath, gn_args, work_dir): Returns: Command as a list, ready to be consumed by subprocess.Popen. """ - gn_errors = RunGnCommand(['gen'] + gn_args + [work_dir]) - if gn_errors: - raise (RuntimeError('FYI, cannot complete check due to gn error:\n%s\n' - 'Please open a bug.' % gn_errors)) + gn_errors = RunGnCommand(['gen'] + gn_args + [work_dir]) + if gn_errors: + raise RuntimeError('FYI, cannot complete check due to gn error:\n%s\n' + 'Please open a bug.' % gn_errors) - # Needed for single file compilation. - commands = GetCompilationDb(work_dir) + # Needed for single file compilation. + commands = GetCompilationDb(work_dir) - # Path as referenced by ninja. 
- rel_path = os.path.relpath(os.path.abspath(filepath), work_dir) + # Path as referenced by ninja. + rel_path = os.path.relpath(os.path.abspath(filepath), work_dir) - # Gather defines, include path and flags (such as -std=c++11). - try: - compilation_entry = commands[rel_path] - except KeyError: - raise ValueError('%s: Not found in compilation database.\n' - 'Please check the path.' % filepath) - command = compilation_entry['command'].split() + # Gather defines, include path and flags (such as -std=c++11). + try: + compilation_entry = commands[rel_path] + except KeyError as not_found: + raise ValueError('%s: Not found in compilation database.\n' + 'Please check the path.' % filepath) from not_found + command = compilation_entry['command'].split() - # Remove troublesome flags. May trigger an error otherwise. - if '-MMD' in command: - command.remove('-MMD') - if '-MF' in command: - index = command.index('-MF') - del command[index:index + 2] # Remove filename as well. + # Remove troublesome flags. May trigger an error otherwise. + if '-MMD' in command: + command.remove('-MMD') + if '-MF' in command: + index = command.index('-MF') + del command[index:index + 2] # Remove filename as well. - return command + return command diff --git a/tools_webrtc/presubmit_checks_lib/build_helpers_test.py b/tools_webrtc/presubmit_checks_lib/build_helpers_test.py index 8c2438e8ed..42b94d6c29 100755 --- a/tools_webrtc/presubmit_checks_lib/build_helpers_test.py +++ b/tools_webrtc/presubmit_checks_lib/build_helpers_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # @@ -12,7 +12,6 @@ import re import os import unittest -#pylint: disable=relative-import import build_helpers TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), @@ -26,7 +25,7 @@ class GnCheckTest(unittest.TestCase): expected_error = re.compile('ERROR Dependency cycle') gn_output = build_helpers.RunGnCheck(test_dir) self.assertEqual(1, len(gn_output)) - self.assertRegexpMatches(gn_output[0], expected_error) + self.assertRegex(gn_output[0], expected_error) if __name__ == '__main__': diff --git a/tools_webrtc/presubmit_checks_lib/check_orphan_headers.py b/tools_webrtc/presubmit_checks_lib/check_orphan_headers.py index 29509972e5..ae5b5baf2f 100644 --- a/tools_webrtc/presubmit_checks_lib/check_orphan_headers.py +++ b/tools_webrtc/presubmit_checks_lib/check_orphan_headers.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -9,7 +10,6 @@ import os import re -import string # TARGET_RE matches a GN target, and extracts the target name and the contents. TARGET_RE = re.compile( @@ -26,27 +26,27 @@ SOURCE_FILE_RE = re.compile(r'.*\"(?P.*)\"') class NoBuildGnFoundError(Exception): - pass + pass class WrongFileTypeError(Exception): - pass + pass def _ReadFile(file_path): - """Returns the content of file_path in a string. + """Returns the content of file_path in a string. Args: file_path: the path of the file to read. Returns: A string with the content of the file. """ - with open(file_path) as f: - return f.read() + with open(file_path) as f: + return f.read() def GetBuildGnPathFromFilePath(file_path, file_exists_check, root_dir_path): - """Returns the BUILD.gn file responsible for file_path. + """Returns the BUILD.gn file responsible for file_path. 
Args: file_path: the absolute path to the .h file to check. @@ -58,23 +58,21 @@ def GetBuildGnPathFromFilePath(file_path, file_exists_check, root_dir_path): A string with the absolute path to the BUILD.gn file responsible to include file_path in a target. """ - if not file_path.endswith('.h'): - raise WrongFileTypeError( - 'File {} is not an header file (.h)'.format(file_path)) - candidate_dir = os.path.dirname(file_path) - while candidate_dir.startswith(root_dir_path): - candidate_build_gn_path = os.path.join(candidate_dir, 'BUILD.gn') - if file_exists_check(candidate_build_gn_path): - return candidate_build_gn_path - else: - candidate_dir = os.path.abspath( - os.path.join(candidate_dir, os.pardir)) - raise NoBuildGnFoundError( - 'No BUILD.gn file found for file: `{}`'.format(file_path)) + if not file_path.endswith('.h'): + raise WrongFileTypeError( + 'File {} is not a header file (.h)'.format(file_path)) + candidate_dir = os.path.dirname(file_path) + while candidate_dir.startswith(root_dir_path): + candidate_build_gn_path = os.path.join(candidate_dir, 'BUILD.gn') + if file_exists_check(candidate_build_gn_path): + return candidate_build_gn_path + candidate_dir = os.path.abspath(os.path.join(candidate_dir, os.pardir)) + raise NoBuildGnFoundError( + 'No BUILD.gn file found for file: `{}`'.format(file_path)) def IsHeaderInBuildGn(header_path, build_gn_path): - """Returns True if the header is listed in the BUILD.gn file. + """Returns True if the header is listed in the BUILD.gn file. Args: header_path: the absolute path to the header to check. @@ -85,15 +83,15 @@ at least one GN target in the BUILD.gn file specified by the argument build_gn_path. """ - target_abs_path = os.path.dirname(build_gn_path) - build_gn_content = _ReadFile(build_gn_path) - headers_in_build_gn = GetHeadersInBuildGnFileSources( - build_gn_content, target_abs_path) - return header_path in headers_in_build_gn + target_abs_path = os.path.dirname(build_gn_path) + build_gn_content = _ReadFile(build_gn_path) + headers_in_build_gn = GetHeadersInBuildGnFileSources(build_gn_content, + target_abs_path) + return header_path in headers_in_build_gn def GetHeadersInBuildGnFileSources(file_content, target_abs_path): - """Returns a set with all the .h files in the file_content. + """Returns a set with all the .h files in the file_content. Args: file_content: a string with the content of the BUILD.gn file. @@ -104,15 +102,15 @@ A set with all the headers (.h file) in the file_content. The set contains absolute paths. 
""" - headers_in_sources = set([]) - for target_match in TARGET_RE.finditer(file_content): - target_contents = target_match.group('target_contents') - for sources_match in SOURCES_RE.finditer(target_contents): - sources = sources_match.group('sources') - for source_file_match in SOURCE_FILE_RE.finditer(sources): - source_file = source_file_match.group('source_file') - if source_file.endswith('.h'): - source_file_tokens = string.split(source_file, '/') - headers_in_sources.add( - os.path.join(target_abs_path, *source_file_tokens)) - return headers_in_sources + headers_in_sources = set([]) + for target_match in TARGET_RE.finditer(file_content): + target_contents = target_match.group('target_contents') + for sources_match in SOURCES_RE.finditer(target_contents): + sources = sources_match.group('sources') + for source_file_match in SOURCE_FILE_RE.finditer(sources): + source_file = source_file_match.group('source_file') + if source_file.endswith('.h'): + source_file_tokens = source_file.split('/') + headers_in_sources.add( + os.path.join(target_abs_path, *source_file_tokens)) + return headers_in_sources diff --git a/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py b/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py index 79ac6a4b49..957d7b814a 100755 --- a/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py +++ b/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -11,72 +12,67 @@ import os import sys import unittest -#pylint: disable=relative-import import check_orphan_headers - def _GetRootBasedOnPlatform(): - if sys.platform.startswith('win'): - return 'C:\\' - else: - return '/' + if sys.platform.startswith('win'): + return 'C:\\' + return '/' def _GetPath(*path_chunks): - return os.path.join(_GetRootBasedOnPlatform(), *path_chunks) + return os.path.join(_GetRootBasedOnPlatform(), *path_chunks) class GetBuildGnPathFromFilePathTest(unittest.TestCase): - def testGetBuildGnFromSameDirectory(self): - file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') - expected_build_path = _GetPath('home', 'projects', 'webrtc', 'base', - 'BUILD.gn') - file_exists = lambda p: p == _GetPath('home', 'projects', 'webrtc', - 'base', 'BUILD.gn') - src_dir_path = _GetPath('home', 'projects', 'webrtc') - self.assertEqual( - expected_build_path, - check_orphan_headers.GetBuildGnPathFromFilePath( - file_path, file_exists, src_dir_path)) + def testGetBuildGnFromSameDirectory(self): + file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') + expected_build_path = _GetPath('home', 'projects', 'webrtc', 'base', + 'BUILD.gn') + file_exists = lambda p: p == _GetPath('home', 'projects', 'webrtc', 'base', + 'BUILD.gn') + src_dir_path = _GetPath('home', 'projects', 'webrtc') + self.assertEqual( + expected_build_path, + check_orphan_headers.GetBuildGnPathFromFilePath(file_path, file_exists, + src_dir_path)) - def testGetBuildPathFromParentDirectory(self): - file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') - expected_build_path = _GetPath('home', 'projects', 'webrtc', - 'BUILD.gn') - file_exists = lambda p: p == _GetPath('home', 'projects', 'webrtc', - 'BUILD.gn') - src_dir_path = _GetPath('home', 'projects', 'webrtc') - self.assertEqual( - expected_build_path, - check_orphan_headers.GetBuildGnPathFromFilePath( - file_path, file_exists, 
src_dir_path)) + def testGetBuildPathFromParentDirectory(self): + file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') + expected_build_path = _GetPath('home', 'projects', 'webrtc', 'BUILD.gn') + file_exists = lambda p: p == _GetPath('home', 'projects', 'webrtc', + 'BUILD.gn') + src_dir_path = _GetPath('home', 'projects', 'webrtc') + self.assertEqual( + expected_build_path, + check_orphan_headers.GetBuildGnPathFromFilePath(file_path, file_exists, + src_dir_path)) - def testExceptionIfNoBuildGnFilesAreFound(self): - with self.assertRaises(check_orphan_headers.NoBuildGnFoundError): - file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') - file_exists = lambda p: False - src_dir_path = _GetPath('home', 'projects', 'webrtc') - check_orphan_headers.GetBuildGnPathFromFilePath( - file_path, file_exists, src_dir_path) + def testExceptionIfNoBuildGnFilesAreFound(self): + with self.assertRaises(check_orphan_headers.NoBuildGnFoundError): + file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') + file_exists = lambda p: False + src_dir_path = _GetPath('home', 'projects', 'webrtc') + check_orphan_headers.GetBuildGnPathFromFilePath(file_path, file_exists, + src_dir_path) - def testExceptionIfFilePathIsNotAnHeader(self): - with self.assertRaises(check_orphan_headers.WrongFileTypeError): - file_path = _GetPath('home', 'projects', 'webrtc', 'base', - 'foo.cc') - file_exists = lambda p: False - src_dir_path = _GetPath('home', 'projects', 'webrtc') - check_orphan_headers.GetBuildGnPathFromFilePath( - file_path, file_exists, src_dir_path) + def testExceptionIfFilePathIsNotAnHeader(self): + with self.assertRaises(check_orphan_headers.WrongFileTypeError): + file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.cc') + file_exists = lambda p: False + src_dir_path = _GetPath('home', 'projects', 'webrtc') + check_orphan_headers.GetBuildGnPathFromFilePath(file_path, file_exists, + src_dir_path) class GetHeadersInBuildGnFileSourcesTest(unittest.TestCase): - def testEmptyFileReturnsEmptySet(self): - self.assertEqual( - set([]), - check_orphan_headers.GetHeadersInBuildGnFileSources('', '/a/b')) + def testEmptyFileReturnsEmptySet(self): + self.assertEqual( + set([]), + check_orphan_headers.GetHeadersInBuildGnFileSources('', '/a/b')) - def testReturnsSetOfHeadersFromFileContent(self): - file_content = """ + def testReturnsSetOfHeadersFromFileContent(self): + file_content = """ # Some comments if (is_android) { import("//a/b/c.gni") @@ -101,17 +97,17 @@ class GetHeadersInBuildGnFileSourcesTest(unittest.TestCase): sources = ["baz/foo.h"] } """ - target_abs_path = _GetPath('a', 'b') - self.assertEqual( - set([ - _GetPath('a', 'b', 'foo.h'), - _GetPath('a', 'b', 'bar.h'), - _GetPath('a', 'b', 'public_foo.h'), - _GetPath('a', 'b', 'baz', 'foo.h'), - ]), - check_orphan_headers.GetHeadersInBuildGnFileSources( - file_content, target_abs_path)) + target_abs_path = _GetPath('a', 'b') + self.assertEqual( + set([ + _GetPath('a', 'b', 'foo.h'), + _GetPath('a', 'b', 'bar.h'), + _GetPath('a', 'b', 'public_foo.h'), + _GetPath('a', 'b', 'baz', 'foo.h'), + ]), + check_orphan_headers.GetHeadersInBuildGnFileSources( + file_content, target_abs_path)) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py b/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py index 7d81bae16e..6477a17c39 100644 --- a/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py +++ 
b/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # @@ -33,104 +33,101 @@ class PackageBoundaryViolation( collections.namedtuple( 'PackageBoundaryViolation', 'build_file_path target_name source_file subpackage')): - def __str__(self): - return ERROR_MESSAGE.format(**self._asdict()) + def __str__(self): + return ERROR_MESSAGE.format(**self._asdict()) def _BuildSubpackagesPattern(packages, query): - """Returns a regular expression that matches source files inside subpackages + """Returns a regular expression that matches source files inside subpackages of the given query.""" - query += os.path.sep - length = len(query) - pattern = r'\s*"(?P(?P' - pattern += '|'.join( - re.escape(package[length:].replace(os.path.sep, '/')) - for package in packages if package.startswith(query)) - pattern += r')/[\w\./]*)"' - return re.compile(pattern) + query += os.path.sep + length = len(query) + pattern = r'\s*"(?P(?P' + pattern += '|'.join( + re.escape(package[length:].replace(os.path.sep, '/')) + for package in packages if package.startswith(query)) + pattern += r')/[\w\./]*)"' + return re.compile(pattern) def _ReadFileAndPrependLines(file_path): - """Reads the contents of a file.""" - with open(file_path) as f: - return "".join(f.readlines()) + """Reads the contents of a file.""" + with open(file_path) as f: + return "".join(f.readlines()) def _CheckBuildFile(build_file_path, packages): - """Iterates over all the targets of the given BUILD.gn file, and verifies that + """Iterates over all the targets of the given BUILD.gn file, and verifies that the source files referenced by it don't belong to any of it's subpackages. Returns an iterator over PackageBoundaryViolations for this package. 
""" - package = os.path.dirname(build_file_path) - subpackages_re = _BuildSubpackagesPattern(packages, package) + package = os.path.dirname(build_file_path) + subpackages_re = _BuildSubpackagesPattern(packages, package) - build_file_contents = _ReadFileAndPrependLines(build_file_path) - for target_match in TARGET_RE.finditer(build_file_contents): - target_name = target_match.group('target_name') - target_contents = target_match.group('target_contents') - for sources_match in SOURCES_RE.finditer(target_contents): - sources = sources_match.group('sources') - for subpackages_match in subpackages_re.finditer(sources): - subpackage = subpackages_match.group('subpackage') - source_file = subpackages_match.group('source_file') - if subpackage: - yield PackageBoundaryViolation(build_file_path, - target_name, source_file, - subpackage) + build_file_contents = _ReadFileAndPrependLines(build_file_path) + for target_match in TARGET_RE.finditer(build_file_contents): + target_name = target_match.group('target_name') + target_contents = target_match.group('target_contents') + for sources_match in SOURCES_RE.finditer(target_contents): + sources = sources_match.group('sources') + for subpackages_match in subpackages_re.finditer(sources): + subpackage = subpackages_match.group('subpackage') + source_file = subpackages_match.group('source_file') + if subpackage: + yield PackageBoundaryViolation(build_file_path, target_name, + source_file, subpackage) def CheckPackageBoundaries(root_dir, build_files=None): - packages = [ - root for root, _, files in os.walk(root_dir) if 'BUILD.gn' in files - ] + packages = [ + root for root, _, files in os.walk(root_dir) if 'BUILD.gn' in files + ] - if build_files is not None: - for build_file_path in build_files: - assert build_file_path.startswith(root_dir) - else: - build_files = [ - os.path.join(package, 'BUILD.gn') for package in packages - ] - - messages = [] + if build_files is not None: for build_file_path in build_files: - messages.extend(_CheckBuildFile(build_file_path, packages)) - return messages + assert build_file_path.startswith(root_dir) + else: + build_files = [os.path.join(package, 'BUILD.gn') for package in packages] + + messages = [] + for build_file_path in build_files: + messages.extend(_CheckBuildFile(build_file_path, packages)) + return messages def main(argv): - parser = argparse.ArgumentParser( - description='Script that checks package boundary violations in GN ' - 'build files.') + parser = argparse.ArgumentParser( + description='Script that checks package boundary violations in GN ' + 'build files.') - parser.add_argument('root_dir', - metavar='ROOT_DIR', - help='The root directory that contains all BUILD.gn ' - 'files to be processed.') - parser.add_argument('build_files', - metavar='BUILD_FILE', - nargs='*', - help='A list of BUILD.gn files to be processed. If no ' - 'files are given, all BUILD.gn files under ROOT_DIR ' - 'will be processed.') - parser.add_argument('--max_messages', - type=int, - default=None, - help='If set, the maximum number of violations to be ' - 'displayed.') + parser.add_argument('root_dir', + metavar='ROOT_DIR', + help='The root directory that contains all BUILD.gn ' + 'files to be processed.') + parser.add_argument('build_files', + metavar='BUILD_FILE', + nargs='*', + help='A list of BUILD.gn files to be processed. 
If no ' + 'files are given, all BUILD.gn files under ROOT_DIR ' + 'will be processed.') + parser.add_argument('--max_messages', + type=int, + default=None, + help='If set, the maximum number of violations to be ' + 'displayed.') - args = parser.parse_args(argv) + args = parser.parse_args(argv) - messages = CheckPackageBoundaries(args.root_dir, args.build_files) - messages = messages[:args.max_messages] + messages = CheckPackageBoundaries(args.root_dir, args.build_files) + messages = messages[:args.max_messages] - for i, message in enumerate(messages): - if i > 0: - print - print message + for i, message in enumerate(messages): + if i > 0: + print() + print(message) - return bool(messages) + return bool(messages) if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + sys.exit(main(sys.argv[1:])) diff --git a/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py b/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py index 8d173372c1..611af3a236 100755 --- a/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py +++ b/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # @@ -12,8 +12,7 @@ import ast import os import unittest -#pylint: disable=relative-import -from check_package_boundaries import CheckPackageBoundaries +import check_package_boundaries MSG_FORMAT = 'ERROR:check_package_boundaries.py: Unexpected %s.' TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), @@ -21,54 +20,52 @@ TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), def ReadPylFile(file_path): - with open(file_path) as f: - return ast.literal_eval(f.read()) + with open(file_path) as f: + return ast.literal_eval(f.read()) class UnitTest(unittest.TestCase): - def _RunTest(self, test_dir, check_all_build_files=False): - build_files = [os.path.join(test_dir, 'BUILD.gn')] - if check_all_build_files: - build_files = None + def _RunTest(self, test_dir, check_all_build_files=False): + build_files = [os.path.join(test_dir, 'BUILD.gn')] + if check_all_build_files: + build_files = None - messages = [] - for violation in CheckPackageBoundaries(test_dir, build_files): - build_file_path = os.path.relpath(violation.build_file_path, - test_dir) - build_file_path = build_file_path.replace(os.path.sep, '/') - messages.append( - violation._replace(build_file_path=build_file_path)) + messages = [] + for violation in check_package_boundaries.CheckPackageBoundaries( + test_dir, build_files): + build_file_path = os.path.relpath(violation.build_file_path, test_dir) + build_file_path = build_file_path.replace(os.path.sep, '/') + messages.append(violation._replace(build_file_path=build_file_path)) - expected_messages = ReadPylFile(os.path.join(test_dir, 'expected.pyl')) - self.assertListEqual(sorted(expected_messages), sorted(messages)) + expected_messages = ReadPylFile(os.path.join(test_dir, 'expected.pyl')) + self.assertListEqual(sorted(expected_messages), sorted(messages)) - def testNoErrors(self): - self._RunTest(os.path.join(TESTDATA_DIR, 'no_errors')) + def testNoErrors(self): + self._RunTest(os.path.join(TESTDATA_DIR, 'no_errors')) - def testMultipleErrorsSingleTarget(self): - self._RunTest( - os.path.join(TESTDATA_DIR, 'multiple_errors_single_target')) + def testMultipleErrorsSingleTarget(self): + self._RunTest(os.path.join(TESTDATA_DIR, 'multiple_errors_single_target')) - def 
testMultipleErrorsMultipleTargets(self): - self._RunTest( - os.path.join(TESTDATA_DIR, 'multiple_errors_multiple_targets')) + def testMultipleErrorsMultipleTargets(self): + self._RunTest(os.path.join(TESTDATA_DIR, + 'multiple_errors_multiple_targets')) - def testCommonPrefix(self): - self._RunTest(os.path.join(TESTDATA_DIR, 'common_prefix')) + def testCommonPrefix(self): + self._RunTest(os.path.join(TESTDATA_DIR, 'common_prefix')) - def testAllBuildFiles(self): - self._RunTest(os.path.join(TESTDATA_DIR, 'all_build_files'), True) + def testAllBuildFiles(self): + self._RunTest(os.path.join(TESTDATA_DIR, 'all_build_files'), True) - def testSanitizeFilename(self): - # The `dangerous_filename` test case contains a directory with '++' in its - # name. If it's not properly escaped, a regex error would be raised. - self._RunTest(os.path.join(TESTDATA_DIR, 'dangerous_filename'), True) + def testSanitizeFilename(self): + # The `dangerous_filename` test case contains a directory with '++' in its + # name. If it's not properly escaped, a regex error would be raised. + self._RunTest(os.path.join(TESTDATA_DIR, 'dangerous_filename'), True) - def testRelativeFilename(self): - test_dir = os.path.join(TESTDATA_DIR, 'all_build_files') - with self.assertRaises(AssertionError): - CheckPackageBoundaries(test_dir, ["BUILD.gn"]) + def testRelativeFilename(self): + test_dir = os.path.join(TESTDATA_DIR, 'all_build_files') + with self.assertRaises(AssertionError): + check_package_boundaries.CheckPackageBoundaries(test_dir, ["BUILD.gn"]) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/sslroots/README.md b/tools_webrtc/sslroots/README.md index b81bd4455a..594d9ab4f5 100644 --- a/tools_webrtc/sslroots/README.md +++ b/tools_webrtc/sslroots/README.md @@ -9,7 +9,7 @@ by WebRTC follow this instructions: 2. Launch the script: ``` -$ python tools_webrtc/sslroots/generate_sslroots.py roots.pem +$ vpython3 tools_webrtc/sslroots/generate_sslroots.py roots.pem ``` 3. Step 2 should have generated an ssl_roots.h file right next to roots.pem. diff --git a/tools_webrtc/sslroots/generate_sslroots.py b/tools_webrtc/sslroots/generate_sslroots.py index ff0052e3c7..2cd0c89950 100644 --- a/tools_webrtc/sslroots/generate_sslroots.py +++ b/tools_webrtc/sslroots/generate_sslroots.py @@ -1,3 +1,5 @@ +#!/usr/bin/env vpython3 + # -*- coding:utf-8 -*- # Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. # @@ -17,7 +19,7 @@ Arguments: generated file size. 
""" -import commands +import subprocess from optparse import OptionParser import os import re @@ -39,180 +41,174 @@ _VERBOSE = 'verbose' def main(): - """The main entrypoint.""" - parser = OptionParser('usage %prog FILE') - parser.add_option('-v', '--verbose', dest='verbose', action='store_true') - parser.add_option('-f', - '--full_cert', - dest='full_cert', - action='store_true') - options, args = parser.parse_args() - if len(args) < 1: - parser.error('No crt file specified.') - return - root_dir = _SplitCrt(args[0], options) - _GenCFiles(root_dir, options) - _Cleanup(root_dir) + """The main entrypoint.""" + parser = OptionParser('usage %prog FILE') + parser.add_option('-v', '--verbose', dest='verbose', action='store_true') + parser.add_option('-f', '--full_cert', dest='full_cert', action='store_true') + options, args = parser.parse_args() + if len(args) < 1: + parser.error('No crt file specified.') + return + root_dir = _SplitCrt(args[0], options) + _GenCFiles(root_dir, options) + _Cleanup(root_dir) def _SplitCrt(source_file, options): - sub_file_blocks = [] - label_name = '' - root_dir = os.path.dirname(os.path.abspath(source_file)) + '/' - _PrintOutput(root_dir, options) - f = open(source_file) - for line in f: - if line.startswith('# Label: '): - sub_file_blocks.append(line) - label = re.search(r'\".*\"', line) - temp_label = label.group(0) - end = len(temp_label) - 1 - label_name = _SafeName(temp_label[1:end]) - elif line.startswith('-----END CERTIFICATE-----'): - sub_file_blocks.append(line) - new_file_name = root_dir + _PREFIX + label_name + _EXTENSION - _PrintOutput('Generating: ' + new_file_name, options) - new_file = open(new_file_name, 'w') - for out_line in sub_file_blocks: - new_file.write(out_line) - new_file.close() - sub_file_blocks = [] - else: - sub_file_blocks.append(line) - f.close() - return root_dir + sub_file_blocks = [] + label_name = '' + root_dir = os.path.dirname(os.path.abspath(source_file)) + '/' + _PrintOutput(root_dir, options) + f = open(source_file) + for line in f: + if line.startswith('# Label: '): + sub_file_blocks.append(line) + label = re.search(r'\".*\"', line) + temp_label = label.group(0) + end = len(temp_label) - 1 + label_name = _SafeName(temp_label[1:end]) + elif line.startswith('-----END CERTIFICATE-----'): + sub_file_blocks.append(line) + new_file_name = root_dir + _PREFIX + label_name + _EXTENSION + _PrintOutput('Generating: ' + new_file_name, options) + new_file = open(new_file_name, 'w') + for out_line in sub_file_blocks: + new_file.write(out_line) + new_file.close() + sub_file_blocks = [] + else: + sub_file_blocks.append(line) + f.close() + return root_dir def _GenCFiles(root_dir, options): - output_header_file = open(root_dir + _GENERATED_FILE, 'w') - output_header_file.write(_CreateOutputHeader()) - if options.full_cert: - subject_name_list = _CreateArraySectionHeader(_SUBJECT_NAME_VARIABLE, - _CHAR_TYPE, options) - public_key_list = _CreateArraySectionHeader(_PUBLIC_KEY_VARIABLE, - _CHAR_TYPE, options) - certificate_list = _CreateArraySectionHeader(_CERTIFICATE_VARIABLE, - _CHAR_TYPE, options) - certificate_size_list = _CreateArraySectionHeader( - _CERTIFICATE_SIZE_VARIABLE, _INT_TYPE, options) + output_header_file = open(root_dir + _GENERATED_FILE, 'w') + output_header_file.write(_CreateOutputHeader()) + if options.full_cert: + subject_name_list = _CreateArraySectionHeader(_SUBJECT_NAME_VARIABLE, + _CHAR_TYPE, options) + public_key_list = _CreateArraySectionHeader(_PUBLIC_KEY_VARIABLE, + _CHAR_TYPE, options) + certificate_list = 
_CreateArraySectionHeader(_CERTIFICATE_VARIABLE, + _CHAR_TYPE, options) + certificate_size_list = _CreateArraySectionHeader(_CERTIFICATE_SIZE_VARIABLE, + _INT_TYPE, options) - for _, _, files in os.walk(root_dir): - for current_file in files: - if current_file.startswith(_PREFIX): - prefix_length = len(_PREFIX) - length = len(current_file) - len(_EXTENSION) - label = current_file[prefix_length:length] - filtered_output, cert_size = _CreateCertSection( - root_dir, current_file, label, options) - output_header_file.write(filtered_output + '\n\n\n') - if options.full_cert: - subject_name_list += _AddLabelToArray( - label, _SUBJECT_NAME_ARRAY) - public_key_list += _AddLabelToArray( - label, _PUBLIC_KEY_ARRAY) - certificate_list += _AddLabelToArray(label, _CERTIFICATE_ARRAY) - certificate_size_list += (' %s,\n') % (cert_size) + for _, _, files in os.walk(root_dir): + for current_file in files: + if current_file.startswith(_PREFIX): + prefix_length = len(_PREFIX) + length = len(current_file) - len(_EXTENSION) + label = current_file[prefix_length:length] + filtered_output, cert_size = _CreateCertSection(root_dir, current_file, + label, options) + output_header_file.write(filtered_output + '\n\n\n') + if options.full_cert: + subject_name_list += _AddLabelToArray(label, _SUBJECT_NAME_ARRAY) + public_key_list += _AddLabelToArray(label, _PUBLIC_KEY_ARRAY) + certificate_list += _AddLabelToArray(label, _CERTIFICATE_ARRAY) + certificate_size_list += (' %s,\n') % (cert_size) - if options.full_cert: - subject_name_list += _CreateArraySectionFooter() - output_header_file.write(subject_name_list) - public_key_list += _CreateArraySectionFooter() - output_header_file.write(public_key_list) - certificate_list += _CreateArraySectionFooter() - output_header_file.write(certificate_list) - certificate_size_list += _CreateArraySectionFooter() - output_header_file.write(certificate_size_list) - output_header_file.write(_CreateOutputFooter()) - output_header_file.close() + if options.full_cert: + subject_name_list += _CreateArraySectionFooter() + output_header_file.write(subject_name_list) + public_key_list += _CreateArraySectionFooter() + output_header_file.write(public_key_list) + certificate_list += _CreateArraySectionFooter() + output_header_file.write(certificate_list) + certificate_size_list += _CreateArraySectionFooter() + output_header_file.write(certificate_size_list) + output_header_file.write(_CreateOutputFooter()) + output_header_file.close() def _Cleanup(root_dir): - for f in os.listdir(root_dir): - if f.startswith(_PREFIX): - os.remove(root_dir + f) + for f in os.listdir(root_dir): + if f.startswith(_PREFIX): + os.remove(root_dir + f) def _CreateCertSection(root_dir, source_file, label, options): - command = 'openssl x509 -in %s%s -noout -C' % (root_dir, source_file) - _PrintOutput(command, options) - output = commands.getstatusoutput(command)[1] - renamed_output = output.replace('unsigned char XXX_', - 'const unsigned char ' + label + '_') - filtered_output = '' - cert_block = '^const unsigned char.*?};$' - prog = re.compile(cert_block, re.IGNORECASE | re.MULTILINE | re.DOTALL) - if not options.full_cert: - filtered_output = prog.sub('', renamed_output, count=2) - else: - filtered_output = renamed_output + command = 'openssl x509 -in %s%s -noout -C' % (root_dir, source_file) + _PrintOutput(command, options) + output = subprocess.getstatusoutput(command)[1] + renamed_output = output.replace('unsigned char XXX_', + 'const unsigned char ' + label + '_') + filtered_output = '' + cert_block = '^const 
unsigned char.*?};$' + prog = re.compile(cert_block, re.IGNORECASE | re.MULTILINE | re.DOTALL) + if not options.full_cert: + filtered_output = prog.sub('', renamed_output, count=2) + else: + filtered_output = renamed_output - cert_size_block = r'\d\d\d+' - prog2 = re.compile(cert_size_block, re.MULTILINE | re.VERBOSE) - result = prog2.findall(renamed_output) - cert_size = result[len(result) - 1] + cert_size_block = r'\d\d\d+' + prog2 = re.compile(cert_size_block, re.MULTILINE | re.VERBOSE) + result = prog2.findall(renamed_output) + cert_size = result[len(result) - 1] - return filtered_output, cert_size + return filtered_output, cert_size def _CreateOutputHeader(): - output = ( - '/*\n' - ' * Copyright 2004 The WebRTC Project Authors. All rights ' - 'reserved.\n' - ' *\n' - ' * Use of this source code is governed by a BSD-style license\n' - ' * that can be found in the LICENSE file in the root of the ' - 'source\n' - ' * tree. An additional intellectual property rights grant can be ' - 'found\n' - ' * in the file PATENTS. All contributing project authors may\n' - ' * be found in the AUTHORS file in the root of the source tree.\n' - ' */\n\n' - '#ifndef RTC_BASE_SSL_ROOTS_H_\n' - '#define RTC_BASE_SSL_ROOTS_H_\n\n' - '// This file is the root certificates in C form that are needed to' - ' connect to\n// Google.\n\n' - '// It was generated with the following command line:\n' - '// > python tools_webrtc/sslroots/generate_sslroots.py' - '\n// https://pki.goog/roots.pem\n\n' - '// clang-format off\n' - '// Don\'t bother formatting generated code,\n' - '// also it would breaks subject/issuer lines.\n\n') - return output + output = ('/*\n' + ' * Copyright 2004 The WebRTC Project Authors. All rights ' + 'reserved.\n' + ' *\n' + ' * Use of this source code is governed by a BSD-style license\n' + ' * that can be found in the LICENSE file in the root of the ' + 'source\n' + ' * tree. An additional intellectual property rights grant can be ' + 'found\n' + ' * in the file PATENTS. 
All contributing project authors may\n' + ' * be found in the AUTHORS file in the root of the source tree.\n' + ' */\n\n' + '#ifndef RTC_BASE_SSL_ROOTS_H_\n' + '#define RTC_BASE_SSL_ROOTS_H_\n\n' + '// This file is the root certificates in C form that are needed to' + ' connect to\n// Google.\n\n' + '// It was generated with the following command line:\n' + '// > vpython3 tools_webrtc/sslroots/generate_sslroots.py' + '\n// https://pki.goog/roots.pem\n\n' + '// clang-format off\n' + '// Don\'t bother formatting generated code,\n' + '// also it would break subject/issuer lines.\n\n') + return output def _CreateOutputFooter(): - output = ('// clang-format on\n\n' '#endif // RTC_BASE_SSL_ROOTS_H_\n') - return output + output = ('// clang-format on\n\n#endif // RTC_BASE_SSL_ROOTS_H_\n') + return output def _CreateArraySectionHeader(type_name, type_type, options): - output = ('const %s kSSLCert%sList[] = {\n') % (type_type, type_name) - _PrintOutput(output, options) - return output + output = ('const %s kSSLCert%sList[] = {\n') % (type_type, type_name) + _PrintOutput(output, options) + return output def _AddLabelToArray(label, type_name): - return ' %s_%s,\n' % (label, type_name) + return ' %s_%s,\n' % (label, type_name) def _CreateArraySectionFooter(): - return '};\n\n' + return '};\n\n' def _SafeName(original_file_name): - bad_chars = ' -./\\()áéíőú' - replacement_chars = '' - for _ in bad_chars: - replacement_chars += '_' - translation_table = string.maketrans(bad_chars, replacement_chars) - return original_file_name.translate(translation_table) + bad_chars = ' -./\\()áéíőú' + replacement_chars = '' + for _ in bad_chars: + replacement_chars += '_' + translation_table = str.maketrans(bad_chars, replacement_chars) + return original_file_name.translate(translation_table) def _PrintOutput(output, options): - if options.verbose: - print output + if options.verbose: + print(output) if __name__ == '__main__': - main() + main() diff --git a/tools_webrtc/version_updater/update_version.py b/tools_webrtc/version_updater/update_version.py index 6cefb3f9c6..af1ea0f09f 100644 --- a/tools_webrtc/version_updater/update_version.py +++ b/tools_webrtc/version_updater/update_version.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 + # Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
# # Use of this source code is governed by a BSD-style license @@ -19,11 +20,11 @@ import sys def FindSrcDirPath(): - """Returns the abs path to the src/ dir of the project.""" - src_dir = os.path.dirname(os.path.abspath(__file__)) - while os.path.basename(src_dir) != 'src': - src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) - return src_dir + """Returns the abs path to the src/ dir of the project.""" + src_dir = os.path.dirname(os.path.abspath(__file__)) + while os.path.basename(src_dir) != 'src': + src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) + return src_dir UPDATE_BRANCH_NAME = 'webrtc_version_update' @@ -33,140 +34,132 @@ NOTIFY_EMAIL = 'mbonadei@webrtc.org' def _RemovePreviousUpdateBranch(): - active_branch, branches = _GetBranches() - if active_branch == UPDATE_BRANCH_NAME: - active_branch = 'master' - if UPDATE_BRANCH_NAME in branches: - logging.info('Removing previous update branch (%s)', - UPDATE_BRANCH_NAME) - subprocess.check_call(['git', 'checkout', active_branch]) - subprocess.check_call(['git', 'branch', '-D', UPDATE_BRANCH_NAME]) - logging.info('No branch to remove') + active_branch, branches = _GetBranches() + if active_branch == UPDATE_BRANCH_NAME: + active_branch = 'master' + if UPDATE_BRANCH_NAME in branches: + logging.info('Removing previous update branch (%s)', UPDATE_BRANCH_NAME) + subprocess.check_call(['git', 'checkout', active_branch]) + subprocess.check_call(['git', 'branch', '-D', UPDATE_BRANCH_NAME]) + logging.info('No branch to remove') def _GetLastAuthor(): - """Returns a string with the author of the last commit.""" - author = subprocess.check_output(['git', 'log', - '-1', - '--pretty=format:"%an"']).splitlines() - return author + """Returns a string with the author of the last commit.""" + author = subprocess.check_output( + ['git', 'log', '-1', '--pretty=format:%an']).decode('utf-8') + return author def _GetBranches(): - """Returns a tuple (active, branches). + """Returns a tuple (active, branches). 'active' is a string with name of the currently active branch, while 'branches' is the list of all branches. """ - lines = subprocess.check_output(['git', 'branch']).splitlines() - branches = [] - active = '' - for line in lines: - if '*' in line: - # The assumption is that the first char will always be the '*'. - active = line[1:].strip() - branches.append(active) - else: - branch = line.strip() - if branch: - branches.append(branch) - return active, branches + lines = subprocess.check_output(['git', 'branch']).decode('utf-8').splitlines() + branches = [] + active = '' + for line in lines: + if '*' in line: + # The assumption is that the first char will always be the '*'. 
+ active = line[1:].strip() + branches.append(active) + else: + branch = line.strip() + if branch: + branches.append(branch) + return active, branches def _CreateUpdateBranch(): - logging.info('Creating update branch: %s', UPDATE_BRANCH_NAME) - subprocess.check_call(['git', 'checkout', '-b', UPDATE_BRANCH_NAME]) + logging.info('Creating update branch: %s', UPDATE_BRANCH_NAME) + subprocess.check_call(['git', 'checkout', '-b', UPDATE_BRANCH_NAME]) def _UpdateWebRTCVersion(filename): - with open(filename) as f: - content = f.read() - d = datetime.datetime.utcnow() - # pylint: disable=line-too-long - new_content = re.sub( - r'WebRTC source stamp [0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}', - r'WebRTC source stamp %02d-%02d-%02dT%02d:%02d:%02d' % (d.year, - d.month, - d.day, - d.hour, - d.minute, - d.second), - content, - flags=re.MULTILINE) - # pylint: enable=line-too-long - with open(filename, 'w') as f: - f.write(new_content) + with open(filename) as f: + content = f.read() + d = datetime.datetime.utcnow() + # pylint: disable=line-too-long + new_content = re.sub( + r'WebRTC source stamp [0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}', + r'WebRTC source stamp %02d-%02d-%02dT%02d:%02d:%02d' % + (d.year, d.month, d.day, d.hour, d.minute, d.second), + content, + flags=re.MULTILINE) + # pylint: enable=line-too-long + with open(filename, 'w') as f: + f.write(new_content) def _IsTreeClean(): - stdout = subprocess.check_output(['git', 'status', '--porcelain']) - if len(stdout) == 0: - return True - return False + stdout = subprocess.check_output(['git', 'status', '--porcelain']) + if len(stdout) == 0: + return True + return False def _LocalCommit(): - logging.info('Committing changes locally.') - d = datetime.datetime.utcnow() + logging.info('Committing changes locally.') + d = datetime.datetime.utcnow() - git_author = subprocess.check_output(['git', 'config', - 'user.email']).strip() - commit_msg = ('Update WebRTC code version (%02d-%02d-%02dT%02d:%02d:%02d).' - '\n\nBug: None') - commit_msg = commit_msg % (d.year, d.month, d.day, d.hour, d.minute, - d.second) - subprocess.check_call(['git', 'add', '--update', '.']) - subprocess.check_call(['git', 'commit', '-m', commit_msg]) + commit_msg = ('Update WebRTC code version (%02d-%02d-%02dT%02d:%02d:%02d).' + '\n\nBug: None') + commit_msg = commit_msg % (d.year, d.month, d.day, d.hour, d.minute, d.second) + subprocess.check_call(['git', 'add', '--update', '.']) + subprocess.check_call(['git', 'commit', '-m', commit_msg]) def _UploadCL(commit_queue_mode): - """Upload the committed changes as a changelist to Gerrit. + """Upload the committed changes as a changelist to Gerrit. commit_queue_mode: - 2: Submit to commit queue. - 1: Run trybots but do not submit to CQ. - 0: Skip CQ, upload only. 
""" - cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks', - '--bypass-watchlist'] - if commit_queue_mode >= 2: - logging.info('Sending the CL to the CQ...') - cmd.extend(['-o', 'label=Bot-Commit+1']) - cmd.extend(['-o', 'label=Commit-Queue+2']) - cmd.extend(['--send-mail', '--cc', NOTIFY_EMAIL]) - elif commit_queue_mode >= 1: - logging.info('Starting CQ dry run...') - cmd.extend(['-o', 'label=Commit-Queue+1']) - subprocess.check_call(cmd) + cmd = [ + 'git', 'cl', 'upload', '--force', '--bypass-hooks', '--bypass-watchlist' + ] + if commit_queue_mode >= 2: + logging.info('Sending the CL to the CQ...') + cmd.extend(['-o', 'label=Bot-Commit+1']) + cmd.extend(['-o', 'label=Commit-Queue+2']) + cmd.extend(['--send-mail', '--cc', NOTIFY_EMAIL]) + elif commit_queue_mode >= 1: + logging.info('Starting CQ dry run...') + cmd.extend(['-o', 'label=Commit-Queue+1']) + subprocess.check_call(cmd) def main(): - logging.basicConfig(level=logging.INFO) - p = argparse.ArgumentParser() - p.add_argument('--clean', - action='store_true', - default=False, - help='Removes any previous local update branch.') - opts = p.parse_args() + logging.basicConfig(level=logging.INFO) + p = argparse.ArgumentParser() + p.add_argument('--clean', + action='store_true', + default=False, + help='Removes any previous local update branch.') + opts = p.parse_args() - if opts.clean: - _RemovePreviousUpdateBranch() + if opts.clean: + _RemovePreviousUpdateBranch() - if _GetLastAuthor() == 'webrtc-version-updater': - logging.info('Last commit is a version change, skipping CL.') - return 0 - - version_filename = os.path.join(CHECKOUT_SRC_DIR, 'call', 'version.cc') - _CreateUpdateBranch() - _UpdateWebRTCVersion(version_filename) - if _IsTreeClean(): - logging.info('No WebRTC version change detected, skipping CL.') - else: - _LocalCommit() - logging.info('Uploading CL...') - _UploadCL(2) + if _GetLastAuthor() == 'webrtc-version-updater': + logging.info('Last commit is a version change, skipping CL.') return 0 + version_filename = os.path.join(CHECKOUT_SRC_DIR, 'call', 'version.cc') + _CreateUpdateBranch() + _UpdateWebRTCVersion(version_filename) + if _IsTreeClean(): + logging.info('No WebRTC version change detected, skipping CL.') + else: + _LocalCommit() + logging.info('Uploading CL...') + _UploadCL(2) + return 0 + if __name__ == '__main__': sys.exit(main()) diff --git a/tools_webrtc/vim/webrtc.ycm_extra_conf.py b/tools_webrtc/vim/webrtc.ycm_extra_conf.py index 12a09ed924..d11f79ae24 100644 --- a/tools_webrtc/vim/webrtc.ycm_extra_conf.py +++ b/tools_webrtc/vim/webrtc.ycm_extra_conf.py @@ -1,3 +1,5 @@ +#!/usr/bin/env vpython3 + # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -75,12 +77,8 @@ _EXTENSION_FLAGS = { } -def PathExists(*args): - return os.path.exists(os.path.join(*args)) - - def FindWebrtcSrcFromFilename(filename): - """Searches for the root of the WebRTC checkout. + """Searches for the root of the WebRTC checkout. Simply checks parent directories until it finds .gclient and src/. @@ -90,20 +88,20 @@ def FindWebrtcSrcFromFilename(filename): Returns: (String) Path of 'src/', or None if unable to find. 
""" - curdir = os.path.normpath(os.path.dirname(filename)) - while not (os.path.basename(curdir) == 'src' - and PathExists(curdir, 'DEPS') and - (PathExists(curdir, '..', '.gclient') - or PathExists(curdir, '.git'))): - nextdir = os.path.normpath(os.path.join(curdir, '..')) - if nextdir == curdir: - return None - curdir = nextdir - return curdir + curdir = os.path.normpath(os.path.dirname(filename)) + while not (os.path.basename(curdir) == 'src' + and os.path.exists(os.path.join(curdir, 'DEPS')) and + (os.path.exists(os.path.join(curdir, '..', '.gclient')) + or os.path.exists(os.path.join(curdir, '.git')))): + nextdir = os.path.normpath(os.path.join(curdir, '..')) + if nextdir == curdir: + return None + curdir = nextdir + return curdir def GetDefaultSourceFile(webrtc_root, filename): - """Returns the default source file to use as an alternative to `filename`. + """Returns the default source file to use as an alternative to `filename`. Compile flags used to build the default source file is assumed to be a close-enough approximation for building `filename`. @@ -115,13 +113,13 @@ def GetDefaultSourceFile(webrtc_root, filename): Returns: (String) Absolute path to substitute source file. """ - if 'test.' in filename: - return os.path.join(webrtc_root, 'base', 'logging_unittest.cc') - return os.path.join(webrtc_root, 'base', 'logging.cc') + if 'test.' in filename: + return os.path.join(webrtc_root, 'base', 'logging_unittest.cc') + return os.path.join(webrtc_root, 'base', 'logging.cc') def GetNinjaBuildOutputsForSourceFile(out_dir, filename): - """Returns a list of build outputs for filename. + """Returns a list of build outputs for filename. The list is generated by invoking 'ninja -t query' tool to retrieve a list of inputs and outputs of `filename`. This list is then filtered to only include @@ -135,35 +133,35 @@ def GetNinjaBuildOutputsForSourceFile(out_dir, filename): (List of Strings) List of target names. Will return [] if `filename` doesn't yield any .o or .obj outputs. """ - # Ninja needs the path to the source file relative to the output build - # directory. - rel_filename = os.path.relpath(filename, out_dir) + # Ninja needs the path to the source file relative to the output build + # directory. 
+ rel_filename = os.path.relpath(filename, out_dir) - p = subprocess.Popen(['ninja', '-C', out_dir, '-t', 'query', rel_filename], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) - stdout, _ = p.communicate() - if p.returncode != 0: - return [] + p = subprocess.Popen(['ninja', '-C', out_dir, '-t', 'query', rel_filename], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True) + stdout, _ = p.communicate() + if p.returncode != 0: + return [] - # The output looks like: - # ../../relative/path/to/source.cc: - # outputs: - # obj/reative/path/to/target.source.o - # obj/some/other/target2.source.o - # another/target.txt - # - outputs_text = stdout.partition('\n outputs:\n')[2] - output_lines = [line.strip() for line in outputs_text.split('\n')] - return [ - target for target in output_lines - if target and (target.endswith('.o') or target.endswith('.obj')) - ] + # The output looks like: + # ../../relative/path/to/source.cc: + # outputs: + # obj/relative/path/to/target.source.o + # obj/some/other/target2.source.o + # another/target.txt + # + outputs_text = stdout.partition('\n outputs:\n')[2] + output_lines = [line.strip() for line in outputs_text.split('\n')] + return [ + target for target in output_lines + if target and (target.endswith('.o') or target.endswith('.obj')) + ] def GetClangCommandLineForNinjaOutput(out_dir, build_target): - """Returns the Clang command line for building `build_target` + """Returns the Clang command line for building `build_target` Asks ninja for the list of commands used to build `filename` and returns the final Clang invocation. @@ -176,25 +174,25 @@ (String or None) Clang command line or None if a Clang command line couldn't be determined. """ - p = subprocess.Popen( - ['ninja', '-v', '-C', out_dir, '-t', 'commands', build_target], - stdout=subprocess.PIPE, - universal_newlines=True) - stdout, _ = p.communicate() - if p.returncode != 0: - return None - - # Ninja will return multiple build steps for all dependencies up to - # `build_target`. The build step we want is the last Clang invocation, which - # is expected to be the one that outputs `build_target`. - for line in reversed(stdout.split('\n')): - if 'clang' in line: - return line + p = subprocess.Popen( + ['ninja', '-v', '-C', out_dir, '-t', 'commands', build_target], + stdout=subprocess.PIPE, + universal_newlines=True) + stdout, _ = p.communicate() + if p.returncode != 0: return None + # Ninja will return multiple build steps for all dependencies up to + # `build_target`. The build step we want is the last Clang invocation, which + # is expected to be the one that outputs `build_target`. + for line in reversed(stdout.split('\n')): + if 'clang' in line: + return line + return None + def GetClangCommandLineFromNinjaForSource(out_dir, filename): - """Returns a Clang command line used to build `filename`. + """Returns a Clang command line used to build `filename`. The same source file could be built multiple times using different tool chains. In such cases, this command returns the first Clang invocation. We @@ -210,17 +208,17 @@ (String or None): Command line for Clang invocation using `filename` as a source. Returns None if no such command line could be found. 
""" - build_targets = GetNinjaBuildOutputsForSourceFile(out_dir, filename) - for build_target in build_targets: - command_line = GetClangCommandLineForNinjaOutput(out_dir, build_target) - if command_line: - return command_line - return None + build_targets = GetNinjaBuildOutputsForSourceFile(out_dir, filename) + for build_target in build_targets: + command_line = GetClangCommandLineForNinjaOutput(out_dir, build_target) + if command_line: + return command_line + return None def GetClangOptionsFromCommandLine(clang_commandline, out_dir, additional_flags): - """Extracts relevant command line options from `clang_commandline` + """Extracts relevant command line options from `clang_commandline` Args: clang_commandline: (String) Full Clang invocation. @@ -232,48 +230,46 @@ def GetClangOptionsFromCommandLine(clang_commandline, out_dir, (List of Strings) The list of command line flags for this source file. Can be empty. """ - clang_flags = [] + additional_flags + clang_flags = [] + additional_flags - # Parse flags that are important for YCM's purposes. - clang_tokens = shlex.split(clang_commandline) - for flag_index, flag in enumerate(clang_tokens): - if flag.startswith('-I'): - # Relative paths need to be resolved, because they're relative to - # the output dir, not the source. - if flag[2] == '/': - clang_flags.append(flag) - else: - abs_path = os.path.normpath(os.path.join(out_dir, flag[2:])) - clang_flags.append('-I' + abs_path) - elif flag.startswith('-std'): - clang_flags.append(flag) - elif flag.startswith('-') and flag[1] in 'DWFfmO': - if (flag == '-Wno-deprecated-register' or - flag == '-Wno-header-guard'): - # These flags causes libclang (3.3) to crash. Remove it until - # things are fixed. - continue - clang_flags.append(flag) - elif flag == '-isysroot': - # On Mac -isysroot is used to find the system headers. - # Copy over both flags. - if flag_index + 1 < len(clang_tokens): - clang_flags.append(flag) - clang_flags.append(clang_tokens[flag_index + 1]) - elif flag.startswith('--sysroot='): - # On Linux we use a sysroot image. - sysroot_path = flag.lstrip('--sysroot=') - if sysroot_path.startswith('/'): - clang_flags.append(flag) - else: - abs_path = os.path.normpath(os.path.join( - out_dir, sysroot_path)) - clang_flags.append('--sysroot=' + abs_path) - return clang_flags + # Parse flags that are important for YCM's purposes. + clang_tokens = shlex.split(clang_commandline) + for flag_index, flag in enumerate(clang_tokens): + if flag.startswith('-I'): + # Relative paths need to be resolved, because they're relative to + # the output dir, not the source. + if flag[2] == '/': + clang_flags.append(flag) + else: + abs_path = os.path.normpath(os.path.join(out_dir, flag[2:])) + clang_flags.append('-I' + abs_path) + elif flag.startswith('-std'): + clang_flags.append(flag) + elif flag.startswith('-') and flag[1] in 'DWFfmO': + if flag in ['-Wno-deprecated-register', '-Wno-header-guard']: + # These flags causes libclang (3.3) to crash. Remove it until + # things are fixed. + continue + clang_flags.append(flag) + elif flag == '-isysroot': + # On Mac -isysroot is used to find the system headers. + # Copy over both flags. + if flag_index + 1 < len(clang_tokens): + clang_flags.append(flag) + clang_flags.append(clang_tokens[flag_index + 1]) + elif flag.startswith('--sysroot='): + # On Linux we use a sysroot image. 
+ sysroot_path = flag[len('--sysroot='):] + if sysroot_path.startswith('/'): + clang_flags.append(flag) + else: + abs_path = os.path.normpath(os.path.join(out_dir, sysroot_path)) + clang_flags.append('--sysroot=' + abs_path) + return clang_flags def GetClangOptionsFromNinjaForFilename(webrtc_root, filename): - """Returns the Clang command line options needed for building `filename`. + """Returns the Clang command line options needed for building `filename`. Command line options are based on the command used by ninja for building `filename`. If `filename` is a .h file, uses its companion .cc or .cpp file. @@ -289,55 +285,54 @@ (List of Strings) The list of command line flags for this source file. Can be empty. """ - if not webrtc_root: - return [] + if not webrtc_root: + return [] - # Generally, everyone benefits from including WebRTC's src/, because all of - # WebRTC's includes are relative to that. - additional_flags = ['-I' + os.path.join(webrtc_root)] + # Generally, everyone benefits from including WebRTC's src/, because all of + # WebRTC's includes are relative to that. + additional_flags = ['-I' + os.path.join(webrtc_root)] - # Version of Clang used to compile WebRTC can be newer then version of - # libclang that YCM uses for completion. So it's possible that YCM's - # libclang doesn't know about some used warning options, which causes - # compilation warnings (and errors, because of '-Werror'); - additional_flags.append('-Wno-unknown-warning-option') + # Version of Clang used to compile WebRTC can be newer than the version of + # libclang that YCM uses for completion. So it's possible that YCM's + # libclang doesn't know about some used warning options, which causes + # compilation warnings (and errors, because of '-Werror'); + additional_flags.append('-Wno-unknown-warning-option') - sys.path.append(os.path.join(webrtc_root, 'tools', 'vim')) - from ninja_output import GetNinjaOutputDirectory - out_dir = GetNinjaOutputDirectory(webrtc_root) + sys.path.append(os.path.join(webrtc_root, 'tools', 'vim')) + from ninja_output import GetNinjaOutputDirectory + out_dir = GetNinjaOutputDirectory(webrtc_root) - basename, extension = os.path.splitext(filename) - if extension == '.h': - candidates = [basename + ext for ext in _HEADER_ALTERNATES] - else: - candidates = [filename] + basename, extension = os.path.splitext(filename) + if extension == '.h': + candidates = [basename + ext for ext in _HEADER_ALTERNATES] + else: + candidates = [filename] - clang_line = None - buildable_extension = extension - for candidate in candidates: - clang_line = GetClangCommandLineFromNinjaForSource(out_dir, candidate) - if clang_line: - buildable_extension = os.path.splitext(candidate)[1] - break + clang_line = None + buildable_extension = extension + for candidate in candidates: + clang_line = GetClangCommandLineFromNinjaForSource(out_dir, candidate) + if clang_line: + buildable_extension = os.path.splitext(candidate)[1] + break - additional_flags += _EXTENSION_FLAGS.get(buildable_extension, []) + additional_flags += _EXTENSION_FLAGS.get(buildable_extension, []) - if not clang_line: - # If ninja didn't know about filename or it's companion files, then try - # a default build target. It is possible that the file is new, or - # build.ninja is stale. 
- clang_line = GetClangCommandLineFromNinjaForSource( - out_dir, GetDefaultSourceFile(webrtc_root, filename)) + if not clang_line: + # If ninja didn't know about filename or its companion files, then try + # a default build target. It is possible that the file is new, or + # build.ninja is stale. + clang_line = GetClangCommandLineFromNinjaForSource( + out_dir, GetDefaultSourceFile(webrtc_root, filename)) - if not clang_line: - return additional_flags + if not clang_line: + return additional_flags - return GetClangOptionsFromCommandLine(clang_line, out_dir, - additional_flags) + return GetClangOptionsFromCommandLine(clang_line, out_dir, additional_flags) def FlagsForFile(filename): - """This is the main entry point for YCM. Its interface is fixed. + """This is the main entry point for YCM. Its interface is fixed. Args: filename: (String) Path to source file being edited. @@ -347,16 +342,15 @@ 'flags': (List of Strings) Command line flags. 'do_cache': (Boolean) True if the result should be cached. """ - abs_filename = os.path.abspath(filename) - webrtc_root = FindWebrtcSrcFromFilename(abs_filename) - clang_flags = GetClangOptionsFromNinjaForFilename(webrtc_root, - abs_filename) + abs_filename = os.path.abspath(filename) + webrtc_root = FindWebrtcSrcFromFilename(abs_filename) + clang_flags = GetClangOptionsFromNinjaForFilename(webrtc_root, abs_filename) - # If clang_flags could not be determined, then assume that was due to a - # transient failure. Preventing YCM from caching the flags allows us to - # try to determine the flags again. - should_cache_flags_for_file = bool(clang_flags) + # If clang_flags could not be determined, then assume that was due to a + # transient failure. Preventing YCM from caching the flags allows us to + # try to determine the flags again. + should_cache_flags_for_file = bool(clang_flags) - final_flags = _DEFAULT_FLAGS + clang_flags + final_flags = _DEFAULT_FLAGS + clang_flags - return {'flags': final_flags, 'do_cache': should_cache_flags_for_file} + return {'flags': final_flags, 'do_cache': should_cache_flags_for_file}
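
The compilation-database lookup in build_helpers.py above (GetCompilationDb feeding GetCompilationCommand) reduces to one ninja invocation plus a dict keyed by source file. A minimal standalone sketch of the same pattern, assuming an already-generated GN/ninja build directory; 'out/Default' and the source path below are placeholders:

```
# Sketch of the compdb pattern: ask ninja for the compilation database of
# the 'cxx' rule, then index the entries by file.
import json
import subprocess

raw = subprocess.check_output(
    ['ninja', '-C', 'out/Default', '-t', 'compdb', 'cxx'])
entries = json.loads(raw.decode('utf-8'))
by_file = {entry['file']: entry for entry in entries}

# Paths in the database are relative to the build dir, e.g. '../../foo.cc'.
command = by_file['../../call/version.cc']['command'].split()
print(command[0])  # The compiler binary ninja would invoke.
```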
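On the _SafeName hunk in generate_sslroots.py: Python 3 removed string.maketrans, and str.maketrans is the call that builds the equivalent character-translation table. A quick illustrative check of the Python 3 idiom (the certificate label below is made up):

```
# Python 3 translation tables come from str.maketrans, not the string module.
bad_chars = ' -./\\()'
table = str.maketrans(bad_chars, '_' * len(bad_chars))
print('VeriSign Class 3 (G5).pem'.translate(table))
# -> VeriSign_Class_3__G5__pem
```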
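The FlagsForFile entry point in webrtc.ycm_extra_conf.py can also be exercised outside of Vim, which helps when debugging why completion flags come back empty. A minimal sketch, assuming a WebRTC checkout with a built ninja out directory; the module path and source file below are illustrative:

```
# Load webrtc.ycm_extra_conf.py as a module and query flags for one file.
import importlib.util

spec = importlib.util.spec_from_file_location(
    'webrtc_ycm', 'tools_webrtc/vim/webrtc.ycm_extra_conf.py')
ycm_conf = importlib.util.module_from_spec(spec)
spec.loader.exec_module(ycm_conf)

result = ycm_conf.FlagsForFile('call/version.cc')
print(result['do_cache'])  # False means the flags could not be determined.
for flag in result['flags']:
    print(flag)
```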