Overhaul ansible-test sanity
implementation. (#22177)
- Tests are run to completion instead of stopping on first failure. - Test results are now parsed instead of passing through to the console. - Test results can be saved in junit xml format. - Test results will show up on the Shippable "Tests" result tab. - Added an experimental --lint option for easier integration with other tools. - Code smell tests are now usable with the --list-tests, --test and --skip-test options. - Code split out from executor.py into sanity.py. - Rename download-logs to download.py and add support for test and coverage results. - Miscellaneous improvements.
This commit is contained in:
parent
237411613d
commit
d66ce40ecb
9 changed files with 1070 additions and 459 deletions
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
from __future__ import absolute_import, print_function
|
from __future__ import absolute_import, print_function
|
||||||
|
|
||||||
import glob
|
|
||||||
import os
|
import os
|
||||||
import tempfile
|
import tempfile
|
||||||
import time
|
import time
|
||||||
|
@ -14,7 +13,6 @@ import random
|
||||||
import pipes
|
import pipes
|
||||||
import string
|
import string
|
||||||
import atexit
|
import atexit
|
||||||
import re
|
|
||||||
|
|
||||||
import lib.pytar
|
import lib.pytar
|
||||||
import lib.thread
|
import lib.thread
|
||||||
|
@ -38,7 +36,6 @@ from lib.util import (
|
||||||
MissingEnvironmentVariable,
|
MissingEnvironmentVariable,
|
||||||
display,
|
display,
|
||||||
run_command,
|
run_command,
|
||||||
deepest_path,
|
|
||||||
common_environment,
|
common_environment,
|
||||||
remove_tree,
|
remove_tree,
|
||||||
make_dirs,
|
make_dirs,
|
||||||
|
@ -58,7 +55,6 @@ from lib.target import (
|
||||||
walk_windows_integration_targets,
|
walk_windows_integration_targets,
|
||||||
walk_units_targets,
|
walk_units_targets,
|
||||||
walk_compile_targets,
|
walk_compile_targets,
|
||||||
walk_sanity_targets,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
from lib.changes import (
|
from lib.changes import (
|
||||||
|
@ -137,6 +133,10 @@ def install_command_requirements(args):
|
||||||
if args.coverage:
|
if args.coverage:
|
||||||
cmd += ['coverage']
|
cmd += ['coverage']
|
||||||
|
|
||||||
|
if isinstance(args, SanityConfig):
|
||||||
|
if args.junit:
|
||||||
|
cmd += ['junit-xml']
|
||||||
|
|
||||||
try:
|
try:
|
||||||
run_command(args, cmd)
|
run_command(args, cmd)
|
||||||
except SubprocessError as ex:
|
except SubprocessError as ex:
|
||||||
|
@ -709,298 +709,6 @@ def command_compile(args):
|
||||||
run_command(args, command)
|
run_command(args, command)
|
||||||
|
|
||||||
|
|
||||||
def command_sanity(args):
|
|
||||||
"""
|
|
||||||
:type args: SanityConfig
|
|
||||||
"""
|
|
||||||
changes = get_changes_filter(args)
|
|
||||||
require = (args.require or []) + changes
|
|
||||||
targets = SanityTargets(args.include, args.exclude, require)
|
|
||||||
|
|
||||||
if not targets.include:
|
|
||||||
raise AllTargetsSkipped()
|
|
||||||
|
|
||||||
if args.delegate:
|
|
||||||
raise Delegate(require=changes)
|
|
||||||
|
|
||||||
install_command_requirements(args)
|
|
||||||
|
|
||||||
tests = SANITY_TESTS
|
|
||||||
|
|
||||||
if args.test:
|
|
||||||
tests = [t for t in tests if t.name in args.test]
|
|
||||||
|
|
||||||
if args.skip_test:
|
|
||||||
tests = [t for t in tests if t.name not in args.skip_test]
|
|
||||||
|
|
||||||
for test in tests:
|
|
||||||
if args.list_tests:
|
|
||||||
display.info(test.name)
|
|
||||||
continue
|
|
||||||
|
|
||||||
if test.intercept:
|
|
||||||
versions = SUPPORTED_PYTHON_VERSIONS
|
|
||||||
else:
|
|
||||||
versions = None,
|
|
||||||
|
|
||||||
for version in versions:
|
|
||||||
if args.python and version and version != args.python:
|
|
||||||
continue
|
|
||||||
|
|
||||||
display.info('Sanity check using %s%s' % (test.name, ' with Python %s' % version if version else ''))
|
|
||||||
|
|
||||||
if test.intercept:
|
|
||||||
test.func(args, targets, python_version=version)
|
|
||||||
else:
|
|
||||||
test.func(args, targets)
|
|
||||||
|
|
||||||
|
|
||||||
def command_sanity_code_smell(args, _):
|
|
||||||
"""
|
|
||||||
:type args: SanityConfig
|
|
||||||
:type _: SanityTargets
|
|
||||||
"""
|
|
||||||
with open('test/sanity/code-smell/skip.txt', 'r') as skip_fd:
|
|
||||||
skip_tests = skip_fd.read().splitlines()
|
|
||||||
|
|
||||||
tests = glob.glob('test/sanity/code-smell/*')
|
|
||||||
tests = sorted(p for p in tests
|
|
||||||
if os.access(p, os.X_OK)
|
|
||||||
and os.path.isfile(p)
|
|
||||||
and os.path.basename(p) not in skip_tests)
|
|
||||||
|
|
||||||
env = ansible_environment(args)
|
|
||||||
|
|
||||||
for test in tests:
|
|
||||||
display.info('Code smell check using %s' % os.path.basename(test))
|
|
||||||
run_command(args, [test], env=env)
|
|
||||||
|
|
||||||
|
|
||||||
def command_sanity_validate_modules(args, targets):
|
|
||||||
"""
|
|
||||||
:type args: SanityConfig
|
|
||||||
:type targets: SanityTargets
|
|
||||||
"""
|
|
||||||
env = ansible_environment(args)
|
|
||||||
|
|
||||||
paths = [deepest_path(i.path, 'lib/ansible/modules/') for i in targets.include_external]
|
|
||||||
paths = sorted(set(p for p in paths if p))
|
|
||||||
|
|
||||||
if not paths:
|
|
||||||
display.info('No tests applicable.', verbosity=1)
|
|
||||||
return
|
|
||||||
|
|
||||||
cmd = ['test/sanity/validate-modules/validate-modules'] + paths
|
|
||||||
|
|
||||||
with open('test/sanity/validate-modules/skip.txt', 'r') as skip_fd:
|
|
||||||
skip_paths = skip_fd.read().splitlines()
|
|
||||||
|
|
||||||
skip_paths += [e.path for e in targets.exclude_external]
|
|
||||||
|
|
||||||
if skip_paths:
|
|
||||||
cmd += ['--exclude', '^(%s)' % '|'.join(skip_paths)]
|
|
||||||
|
|
||||||
if args.base_branch:
|
|
||||||
cmd.extend([
|
|
||||||
'--base-branch', args.base_branch,
|
|
||||||
])
|
|
||||||
else:
|
|
||||||
display.warning('Cannot perform module comparison against the base branch. Base branch not detected when running locally.')
|
|
||||||
|
|
||||||
run_command(args, cmd, env=env)
|
|
||||||
|
|
||||||
|
|
||||||
def command_sanity_shellcheck(args, targets):
|
|
||||||
"""
|
|
||||||
:type args: SanityConfig
|
|
||||||
:type targets: SanityTargets
|
|
||||||
"""
|
|
||||||
with open('test/sanity/shellcheck/skip.txt', 'r') as skip_fd:
|
|
||||||
skip_paths = set(skip_fd.read().splitlines())
|
|
||||||
|
|
||||||
with open('test/sanity/shellcheck/exclude.txt', 'r') as exclude_fd:
|
|
||||||
exclude = set(exclude_fd.read().splitlines())
|
|
||||||
|
|
||||||
paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] == '.sh' and i.path not in skip_paths)
|
|
||||||
|
|
||||||
if not paths:
|
|
||||||
display.info('No tests applicable.', verbosity=1)
|
|
||||||
return
|
|
||||||
|
|
||||||
run_command(args, ['shellcheck', '-e', ','.join(sorted(exclude))] + paths)
|
|
||||||
|
|
||||||
|
|
||||||
def command_sanity_pep8(args, targets):
|
|
||||||
"""
|
|
||||||
:type args: SanityConfig
|
|
||||||
:type targets: SanityTargets
|
|
||||||
"""
|
|
||||||
skip_path = 'test/sanity/pep8/skip.txt'
|
|
||||||
legacy_path = 'test/sanity/pep8/legacy-files.txt'
|
|
||||||
|
|
||||||
with open(skip_path, 'r') as skip_fd:
|
|
||||||
skip_paths = set(skip_fd.read().splitlines())
|
|
||||||
|
|
||||||
with open(legacy_path, 'r') as legacy_fd:
|
|
||||||
legacy_paths = set(legacy_fd.read().splitlines())
|
|
||||||
|
|
||||||
with open('test/sanity/pep8/legacy-ignore.txt', 'r') as ignore_fd:
|
|
||||||
legacy_ignore = set(ignore_fd.read().splitlines())
|
|
||||||
|
|
||||||
with open('test/sanity/pep8/current-ignore.txt', 'r') as ignore_fd:
|
|
||||||
current_ignore = sorted(ignore_fd.read().splitlines())
|
|
||||||
|
|
||||||
paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] == '.py' and i.path not in skip_paths)
|
|
||||||
|
|
||||||
if not paths:
|
|
||||||
display.info('No tests applicable.', verbosity=1)
|
|
||||||
return
|
|
||||||
|
|
||||||
cmd = [
|
|
||||||
'pep8',
|
|
||||||
'--max-line-length', '160',
|
|
||||||
'--config', '/dev/null',
|
|
||||||
'--ignore', ','.join(sorted(current_ignore)),
|
|
||||||
] + paths
|
|
||||||
|
|
||||||
try:
|
|
||||||
stdout, stderr = run_command(args, cmd, capture=True)
|
|
||||||
status = 0
|
|
||||||
except SubprocessError as ex:
|
|
||||||
stdout = ex.stdout
|
|
||||||
stderr = ex.stderr
|
|
||||||
status = ex.status
|
|
||||||
|
|
||||||
if stderr:
|
|
||||||
raise SubprocessError(cmd=cmd, status=status, stderr=stderr)
|
|
||||||
|
|
||||||
if args.explain:
|
|
||||||
return
|
|
||||||
|
|
||||||
pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[A-Z0-9]{4}) (?P<message>.*)$'
|
|
||||||
|
|
||||||
results = [re.search(pattern, line).groupdict() for line in stdout.splitlines()]
|
|
||||||
|
|
||||||
for result in results:
|
|
||||||
for key in 'line', 'column':
|
|
||||||
result[key] = int(result[key])
|
|
||||||
|
|
||||||
failed_result_paths = set([result['path'] for result in results])
|
|
||||||
passed_legacy_paths = set([path for path in paths if path in legacy_paths and path not in failed_result_paths])
|
|
||||||
|
|
||||||
errors = []
|
|
||||||
summary = {}
|
|
||||||
|
|
||||||
for path in sorted(passed_legacy_paths):
|
|
||||||
# Keep files out of the list which no longer require the relaxed rule set.
|
|
||||||
errors.append('PEP 8: %s: Passes current rule set. Remove from legacy list (%s).' % (path, legacy_path))
|
|
||||||
|
|
||||||
for path in sorted(skip_paths):
|
|
||||||
if not os.path.exists(path):
|
|
||||||
# Keep files out of the list which no longer exist in the repo.
|
|
||||||
errors.append('PEP 8: %s: Does not exist. Remove from skip list (%s).' % (path, skip_path))
|
|
||||||
|
|
||||||
for path in sorted(legacy_paths):
|
|
||||||
if not os.path.exists(path):
|
|
||||||
# Keep files out of the list which no longer exist in the repo.
|
|
||||||
errors.append('PEP 8: %s: Does not exist. Remove from legacy list (%s).' % (path, legacy_path))
|
|
||||||
|
|
||||||
for result in results:
|
|
||||||
path = result['path']
|
|
||||||
line = result['line']
|
|
||||||
column = result['column']
|
|
||||||
code = result['code']
|
|
||||||
message = result['message']
|
|
||||||
|
|
||||||
msg = 'PEP 8: %s:%s:%s: %s %s' % (path, line, column, code, message)
|
|
||||||
|
|
||||||
if path in legacy_paths:
|
|
||||||
msg += ' (legacy)'
|
|
||||||
else:
|
|
||||||
msg += ' (current)'
|
|
||||||
|
|
||||||
if path in legacy_paths and code in legacy_ignore:
|
|
||||||
# Files on the legacy list are permitted to have errors on the legacy ignore list.
|
|
||||||
# However, we want to report on their existence to track progress towards eliminating these exceptions.
|
|
||||||
display.info(msg, verbosity=3)
|
|
||||||
|
|
||||||
key = '%s %s' % (code, re.sub('[0-9]+', 'NNN', message))
|
|
||||||
|
|
||||||
if key not in summary:
|
|
||||||
summary[key] = 0
|
|
||||||
|
|
||||||
summary[key] += 1
|
|
||||||
else:
|
|
||||||
# Files not on the legacy list and errors not on the legacy ignore list are PEP 8 policy errors.
|
|
||||||
errors.append(msg)
|
|
||||||
|
|
||||||
for error in errors:
|
|
||||||
display.error(error)
|
|
||||||
|
|
||||||
if summary:
|
|
||||||
lines = []
|
|
||||||
count = 0
|
|
||||||
|
|
||||||
for key in sorted(summary):
|
|
||||||
count += summary[key]
|
|
||||||
lines.append('PEP 8: %5d %s' % (summary[key], key))
|
|
||||||
|
|
||||||
display.info('PEP 8: There were %d different legacy issues found (%d total):' %
|
|
||||||
(len(summary), count), verbosity=1)
|
|
||||||
|
|
||||||
display.info('PEP 8: Count Code Message', verbosity=1)
|
|
||||||
|
|
||||||
for line in lines:
|
|
||||||
display.info(line, verbosity=1)
|
|
||||||
|
|
||||||
if errors:
|
|
||||||
raise ApplicationError('PEP 8: There are %d issues which need to be resolved.' % len(errors))
|
|
||||||
|
|
||||||
|
|
||||||
def command_sanity_yamllint(args, targets):
|
|
||||||
"""
|
|
||||||
:type args: SanityConfig
|
|
||||||
:type targets: SanityTargets
|
|
||||||
"""
|
|
||||||
paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] in ('.yml', '.yaml'))
|
|
||||||
|
|
||||||
if not paths:
|
|
||||||
display.info('No tests applicable.', verbosity=1)
|
|
||||||
return
|
|
||||||
|
|
||||||
run_command(args, ['yamllint'] + paths)
|
|
||||||
|
|
||||||
|
|
||||||
def command_sanity_ansible_doc(args, targets, python_version):
|
|
||||||
"""
|
|
||||||
:type args: SanityConfig
|
|
||||||
:type targets: SanityTargets
|
|
||||||
:type python_version: str
|
|
||||||
"""
|
|
||||||
with open('test/sanity/ansible-doc/skip.txt', 'r') as skip_fd:
|
|
||||||
skip_modules = set(skip_fd.read().splitlines())
|
|
||||||
|
|
||||||
modules = sorted(set(m for i in targets.include_external for m in i.modules) -
|
|
||||||
set(m for i in targets.exclude_external for m in i.modules) -
|
|
||||||
skip_modules)
|
|
||||||
|
|
||||||
if not modules:
|
|
||||||
display.info('No tests applicable.', verbosity=1)
|
|
||||||
return
|
|
||||||
|
|
||||||
env = ansible_environment(args)
|
|
||||||
cmd = ['ansible-doc'] + modules
|
|
||||||
|
|
||||||
stdout, stderr = intercept_command(args, cmd, env=env, capture=True, python_version=python_version)
|
|
||||||
|
|
||||||
if stderr:
|
|
||||||
display.error('Output on stderr from ansible-doc is considered an error.')
|
|
||||||
raise SubprocessError(cmd, stderr=stderr)
|
|
||||||
|
|
||||||
if stdout:
|
|
||||||
display.info(stdout.strip(), verbosity=3)
|
|
||||||
|
|
||||||
|
|
||||||
def intercept_command(args, cmd, capture=False, env=None, data=None, cwd=None, python_version=None):
|
def intercept_command(args, cmd, capture=False, env=None, data=None, cwd=None, python_version=None):
|
||||||
"""
|
"""
|
||||||
:type args: TestConfig
|
:type args: TestConfig
|
||||||
|
@ -1306,40 +1014,6 @@ class NoTestsForChanges(ApplicationWarning):
|
||||||
super(NoTestsForChanges, self).__init__('No tests found for detected changes.')
|
super(NoTestsForChanges, self).__init__('No tests found for detected changes.')
|
||||||
|
|
||||||
|
|
||||||
class SanityTargets(object):
|
|
||||||
"""Sanity test target information."""
|
|
||||||
def __init__(self, include, exclude, require):
|
|
||||||
"""
|
|
||||||
:type include: list[str]
|
|
||||||
:type exclude: list[str]
|
|
||||||
:type require: list[str]
|
|
||||||
"""
|
|
||||||
self.all = not include
|
|
||||||
self.targets = tuple(sorted(walk_sanity_targets()))
|
|
||||||
self.include = walk_internal_targets(self.targets, include, exclude, require)
|
|
||||||
self.include_external, self.exclude_external = walk_external_targets(self.targets, include, exclude, require)
|
|
||||||
|
|
||||||
|
|
||||||
class SanityTest(object):
|
|
||||||
"""Sanity test base class."""
|
|
||||||
def __init__(self, name):
|
|
||||||
self.name = name
|
|
||||||
|
|
||||||
|
|
||||||
class SanityFunc(SanityTest):
|
|
||||||
"""Sanity test function information."""
|
|
||||||
def __init__(self, name, func, intercept=True):
|
|
||||||
"""
|
|
||||||
:type name: str
|
|
||||||
:type func: (SanityConfig, SanityTargets) -> None
|
|
||||||
:type intercept: bool
|
|
||||||
"""
|
|
||||||
super(SanityFunc, self).__init__(name)
|
|
||||||
|
|
||||||
self.func = func
|
|
||||||
self.intercept = intercept
|
|
||||||
|
|
||||||
|
|
||||||
class TestConfig(EnvironmentConfig):
|
class TestConfig(EnvironmentConfig):
|
||||||
"""Configuration common to all test commands."""
|
"""Configuration common to all test commands."""
|
||||||
def __init__(self, args, command):
|
def __init__(self, args, command):
|
||||||
|
@ -1384,6 +1058,8 @@ class SanityConfig(TestConfig):
|
||||||
self.test = args.test # type: list [str]
|
self.test = args.test # type: list [str]
|
||||||
self.skip_test = args.skip_test # type: list [str]
|
self.skip_test = args.skip_test # type: list [str]
|
||||||
self.list_tests = args.list_tests # type: bool
|
self.list_tests = args.list_tests # type: bool
|
||||||
|
self.lint = args.lint # type: bool
|
||||||
|
self.junit = args.junit # type: bool
|
||||||
|
|
||||||
if args.base_branch:
|
if args.base_branch:
|
||||||
self.base_branch = args.base_branch # str
|
self.base_branch = args.base_branch # str
|
||||||
|
@ -1485,15 +1161,3 @@ class AllTargetsSkipped(ApplicationWarning):
|
||||||
"""All targets skipped."""
|
"""All targets skipped."""
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(AllTargetsSkipped, self).__init__('All targets skipped.')
|
super(AllTargetsSkipped, self).__init__('All targets skipped.')
|
||||||
|
|
||||||
|
|
||||||
SANITY_TESTS = (
|
|
||||||
# tests which ignore include/exclude (they're so fast it doesn't matter)
|
|
||||||
SanityFunc('code-smell', command_sanity_code_smell, intercept=False),
|
|
||||||
# tests which honor include/exclude
|
|
||||||
SanityFunc('shellcheck', command_sanity_shellcheck, intercept=False),
|
|
||||||
SanityFunc('pep8', command_sanity_pep8, intercept=False),
|
|
||||||
SanityFunc('yamllint', command_sanity_yamllint, intercept=False),
|
|
||||||
SanityFunc('validate-modules', command_sanity_validate_modules, intercept=False),
|
|
||||||
SanityFunc('ansible-doc', command_sanity_ansible_doc),
|
|
||||||
)
|
|
||||||
|
|
827
test/runner/lib/sanity.py
Normal file
827
test/runner/lib/sanity.py
Normal file
|
@ -0,0 +1,827 @@
|
||||||
|
"""Execute Ansible sanity tests."""
|
||||||
|
|
||||||
|
from __future__ import absolute_import, print_function
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import glob
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
|
||||||
|
from xml.etree.ElementTree import (
|
||||||
|
fromstring,
|
||||||
|
Element,
|
||||||
|
)
|
||||||
|
|
||||||
|
from lib.util import (
|
||||||
|
ApplicationError,
|
||||||
|
SubprocessError,
|
||||||
|
display,
|
||||||
|
run_command,
|
||||||
|
deepest_path,
|
||||||
|
)
|
||||||
|
|
||||||
|
from lib.ansible_util import (
|
||||||
|
ansible_environment,
|
||||||
|
)
|
||||||
|
|
||||||
|
from lib.target import (
|
||||||
|
walk_external_targets,
|
||||||
|
walk_internal_targets,
|
||||||
|
walk_sanity_targets,
|
||||||
|
)
|
||||||
|
|
||||||
|
from lib.executor import (
|
||||||
|
get_changes_filter,
|
||||||
|
AllTargetsSkipped,
|
||||||
|
Delegate,
|
||||||
|
install_command_requirements,
|
||||||
|
SUPPORTED_PYTHON_VERSIONS,
|
||||||
|
intercept_command,
|
||||||
|
SanityConfig,
|
||||||
|
)
|
||||||
|
|
||||||
|
PEP8_SKIP_PATH = 'test/sanity/pep8/skip.txt'
|
||||||
|
PEP8_LEGACY_PATH = 'test/sanity/pep8/legacy-files.txt'
|
||||||
|
|
||||||
|
|
||||||
|
def command_sanity(args):
|
||||||
|
"""
|
||||||
|
:type args: SanityConfig
|
||||||
|
"""
|
||||||
|
changes = get_changes_filter(args)
|
||||||
|
require = (args.require or []) + changes
|
||||||
|
targets = SanityTargets(args.include, args.exclude, require)
|
||||||
|
|
||||||
|
if not targets.include:
|
||||||
|
raise AllTargetsSkipped()
|
||||||
|
|
||||||
|
if args.delegate:
|
||||||
|
raise Delegate(require=changes)
|
||||||
|
|
||||||
|
install_command_requirements(args)
|
||||||
|
|
||||||
|
tests = sanity_get_tests()
|
||||||
|
|
||||||
|
if args.test:
|
||||||
|
tests = [t for t in tests if t.name in args.test]
|
||||||
|
|
||||||
|
if args.skip_test:
|
||||||
|
tests = [t for t in tests if t.name not in args.skip_test]
|
||||||
|
|
||||||
|
total = 0
|
||||||
|
failed = []
|
||||||
|
|
||||||
|
for test in tests:
|
||||||
|
if args.list_tests:
|
||||||
|
display.info(test.name)
|
||||||
|
continue
|
||||||
|
|
||||||
|
if test.intercept:
|
||||||
|
versions = SUPPORTED_PYTHON_VERSIONS
|
||||||
|
else:
|
||||||
|
versions = None,
|
||||||
|
|
||||||
|
for version in versions:
|
||||||
|
if args.python and version and version != args.python:
|
||||||
|
continue
|
||||||
|
|
||||||
|
display.info('Sanity check using %s%s' % (test.name, ' with Python %s' % version if version else ''))
|
||||||
|
|
||||||
|
options = ''
|
||||||
|
|
||||||
|
if test.script:
|
||||||
|
result = test.func(args, targets, test.script)
|
||||||
|
elif test.intercept:
|
||||||
|
result = test.func(args, targets, python_version=version)
|
||||||
|
options = ' --python %s' % version
|
||||||
|
else:
|
||||||
|
result = test.func(args, targets)
|
||||||
|
|
||||||
|
result.write(args)
|
||||||
|
|
||||||
|
total += 1
|
||||||
|
|
||||||
|
if isinstance(result, SanityFailure):
|
||||||
|
failed.append(result.test + options)
|
||||||
|
|
||||||
|
if failed:
|
||||||
|
raise ApplicationError('The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
|
||||||
|
len(failed), total, '\n'.join(failed)))
|
||||||
|
|
||||||
|
|
||||||
|
def command_sanity_code_smell(args, _, script):
|
||||||
|
"""
|
||||||
|
:type args: SanityConfig
|
||||||
|
:type _: SanityTargets
|
||||||
|
:type script: str
|
||||||
|
:rtype: SanityResult
|
||||||
|
"""
|
||||||
|
test = os.path.splitext(os.path.basename(script))[0]
|
||||||
|
|
||||||
|
cmd = [script]
|
||||||
|
env = ansible_environment(args)
|
||||||
|
|
||||||
|
# Since the output from scripts end up in other places besides the console, we don't want color here.
|
||||||
|
env.pop('ANSIBLE_FORCE_COLOR')
|
||||||
|
|
||||||
|
try:
|
||||||
|
stdout, stderr = run_command(args, cmd, env=env, capture=True)
|
||||||
|
status = 0
|
||||||
|
except SubprocessError as ex:
|
||||||
|
stdout = ex.stdout
|
||||||
|
stderr = ex.stderr
|
||||||
|
status = ex.status
|
||||||
|
|
||||||
|
if stderr or status:
|
||||||
|
summary = str(SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout))
|
||||||
|
return SanityFailure(test, summary=summary)
|
||||||
|
|
||||||
|
return SanitySuccess(test)
|
||||||
|
|
||||||
|
|
||||||
|
def command_sanity_validate_modules(args, targets):
|
||||||
|
"""
|
||||||
|
:type args: SanityConfig
|
||||||
|
:type targets: SanityTargets
|
||||||
|
:rtype: SanityResult
|
||||||
|
"""
|
||||||
|
test = 'validate-modules'
|
||||||
|
env = ansible_environment(args)
|
||||||
|
|
||||||
|
paths = [deepest_path(i.path, 'lib/ansible/modules/') for i in targets.include_external]
|
||||||
|
paths = sorted(set(p for p in paths if p))
|
||||||
|
|
||||||
|
if not paths:
|
||||||
|
return SanitySkipped(test)
|
||||||
|
|
||||||
|
cmd = [
|
||||||
|
'test/sanity/validate-modules/validate-modules',
|
||||||
|
'--format', 'json',
|
||||||
|
] + paths
|
||||||
|
|
||||||
|
with open('test/sanity/validate-modules/skip.txt', 'r') as skip_fd:
|
||||||
|
skip_paths = skip_fd.read().splitlines()
|
||||||
|
|
||||||
|
skip_paths += [e.path for e in targets.exclude_external]
|
||||||
|
|
||||||
|
if skip_paths:
|
||||||
|
cmd += ['--exclude', '^(%s)' % '|'.join(skip_paths)]
|
||||||
|
|
||||||
|
if args.base_branch:
|
||||||
|
cmd.extend([
|
||||||
|
'--base-branch', args.base_branch,
|
||||||
|
])
|
||||||
|
else:
|
||||||
|
display.warning('Cannot perform module comparison against the base branch. Base branch not detected when running locally.')
|
||||||
|
|
||||||
|
try:
|
||||||
|
stdout, stderr = run_command(args, cmd, env=env, capture=True)
|
||||||
|
status = 0
|
||||||
|
except SubprocessError as ex:
|
||||||
|
stdout = ex.stdout
|
||||||
|
stderr = ex.stderr
|
||||||
|
status = ex.status
|
||||||
|
|
||||||
|
if stderr or status not in (0, 3):
|
||||||
|
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
|
||||||
|
|
||||||
|
if args.explain:
|
||||||
|
return SanitySkipped(test)
|
||||||
|
|
||||||
|
messages = json.loads(stdout)
|
||||||
|
|
||||||
|
results = []
|
||||||
|
|
||||||
|
for filename in messages:
|
||||||
|
output = messages[filename]
|
||||||
|
|
||||||
|
for item in output['errors']:
|
||||||
|
results.append(SanityMessage(
|
||||||
|
path=filename,
|
||||||
|
line=int(item['line']) if 'line' in item else 0,
|
||||||
|
column=int(item['column']) if 'column' in item else 0,
|
||||||
|
level='error',
|
||||||
|
code='E%s' % item['code'],
|
||||||
|
message=item['msg'],
|
||||||
|
))
|
||||||
|
|
||||||
|
if results:
|
||||||
|
return SanityFailure(test, messages=results)
|
||||||
|
|
||||||
|
return SanitySuccess(test)
|
||||||
|
|
||||||
|
|
||||||
|
def command_sanity_shellcheck(args, targets):
|
||||||
|
"""
|
||||||
|
:type args: SanityConfig
|
||||||
|
:type targets: SanityTargets
|
||||||
|
:rtype: SanityResult
|
||||||
|
"""
|
||||||
|
test = 'shellcheck'
|
||||||
|
|
||||||
|
with open('test/sanity/shellcheck/skip.txt', 'r') as skip_fd:
|
||||||
|
skip_paths = set(skip_fd.read().splitlines())
|
||||||
|
|
||||||
|
with open('test/sanity/shellcheck/exclude.txt', 'r') as exclude_fd:
|
||||||
|
exclude = set(exclude_fd.read().splitlines())
|
||||||
|
|
||||||
|
paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] == '.sh' and i.path not in skip_paths)
|
||||||
|
|
||||||
|
if not paths:
|
||||||
|
return SanitySkipped(test)
|
||||||
|
|
||||||
|
cmd = [
|
||||||
|
'shellcheck',
|
||||||
|
'-e', ','.join(sorted(exclude)),
|
||||||
|
'--format', 'checkstyle',
|
||||||
|
] + paths
|
||||||
|
|
||||||
|
try:
|
||||||
|
stdout, stderr = run_command(args, cmd, capture=True)
|
||||||
|
status = 0
|
||||||
|
except SubprocessError as ex:
|
||||||
|
stdout = ex.stdout
|
||||||
|
stderr = ex.stderr
|
||||||
|
status = ex.status
|
||||||
|
|
||||||
|
if stderr or status > 1:
|
||||||
|
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
|
||||||
|
|
||||||
|
if args.explain:
|
||||||
|
return SanitySkipped(test)
|
||||||
|
|
||||||
|
# json output is missing file paths in older versions of shellcheck, so we'll use xml instead
|
||||||
|
root = fromstring(stdout) # type: Element
|
||||||
|
|
||||||
|
results = []
|
||||||
|
|
||||||
|
for item in root: # type: Element
|
||||||
|
for entry in item: # type: Element
|
||||||
|
results.append(SanityMessage(
|
||||||
|
message=entry.attrib['message'],
|
||||||
|
path=item.attrib['name'],
|
||||||
|
line=int(entry.attrib['line']),
|
||||||
|
column=int(entry.attrib['column']),
|
||||||
|
level=entry.attrib['severity'],
|
||||||
|
code=entry.attrib['source'].replace('ShellCheck.', ''),
|
||||||
|
))
|
||||||
|
|
||||||
|
if results:
|
||||||
|
return SanityFailure(test, messages=results)
|
||||||
|
|
||||||
|
return SanitySuccess(test)
|
||||||
|
|
||||||
|
|
||||||
|
def command_sanity_pep8(args, targets):
|
||||||
|
"""
|
||||||
|
:type args: SanityConfig
|
||||||
|
:type targets: SanityTargets
|
||||||
|
:rtype: SanityResult
|
||||||
|
"""
|
||||||
|
test = 'pep8'
|
||||||
|
|
||||||
|
with open(PEP8_SKIP_PATH, 'r') as skip_fd:
|
||||||
|
skip_paths = skip_fd.read().splitlines()
|
||||||
|
|
||||||
|
with open(PEP8_LEGACY_PATH, 'r') as legacy_fd:
|
||||||
|
legacy_paths = legacy_fd.read().splitlines()
|
||||||
|
|
||||||
|
with open('test/sanity/pep8/legacy-ignore.txt', 'r') as ignore_fd:
|
||||||
|
legacy_ignore = set(ignore_fd.read().splitlines())
|
||||||
|
|
||||||
|
with open('test/sanity/pep8/current-ignore.txt', 'r') as ignore_fd:
|
||||||
|
current_ignore = sorted(ignore_fd.read().splitlines())
|
||||||
|
|
||||||
|
skip_paths_set = set(skip_paths)
|
||||||
|
legacy_paths_set = set(legacy_paths)
|
||||||
|
|
||||||
|
paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] == '.py' and i.path not in skip_paths_set)
|
||||||
|
|
||||||
|
if not paths:
|
||||||
|
return SanitySkipped(test)
|
||||||
|
|
||||||
|
cmd = [
|
||||||
|
'pep8',
|
||||||
|
'--max-line-length', '160',
|
||||||
|
'--config', '/dev/null',
|
||||||
|
'--ignore', ','.join(sorted(current_ignore)),
|
||||||
|
] + paths
|
||||||
|
|
||||||
|
try:
|
||||||
|
stdout, stderr = run_command(args, cmd, capture=True)
|
||||||
|
status = 0
|
||||||
|
except SubprocessError as ex:
|
||||||
|
stdout = ex.stdout
|
||||||
|
stderr = ex.stderr
|
||||||
|
status = ex.status
|
||||||
|
|
||||||
|
if stderr:
|
||||||
|
raise SubprocessError(cmd=cmd, status=status, stderr=stderr)
|
||||||
|
|
||||||
|
if args.explain:
|
||||||
|
return SanitySkipped(test)
|
||||||
|
|
||||||
|
pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
|
||||||
|
|
||||||
|
results = [re.search(pattern, line).groupdict() for line in stdout.splitlines()]
|
||||||
|
|
||||||
|
results = [SanityMessage(
|
||||||
|
message=r['message'],
|
||||||
|
path=r['path'],
|
||||||
|
line=int(r['line']),
|
||||||
|
column=int(r['column']),
|
||||||
|
level='warning' if r['code'].startswith('W') else 'error',
|
||||||
|
code=r['code'],
|
||||||
|
) for r in results]
|
||||||
|
|
||||||
|
failed_result_paths = set([result.path for result in results])
|
||||||
|
used_paths = set(paths)
|
||||||
|
|
||||||
|
errors = []
|
||||||
|
summary = {}
|
||||||
|
|
||||||
|
line = 0
|
||||||
|
|
||||||
|
for path in legacy_paths:
|
||||||
|
line += 1
|
||||||
|
|
||||||
|
if not os.path.exists(path):
|
||||||
|
# Keep files out of the list which no longer exist in the repo.
|
||||||
|
errors.append(SanityMessage(
|
||||||
|
code='A101',
|
||||||
|
message='Remove "%s" since it does not exist' % path,
|
||||||
|
path=PEP8_LEGACY_PATH,
|
||||||
|
line=line,
|
||||||
|
column=1,
|
||||||
|
))
|
||||||
|
|
||||||
|
if path in used_paths and path not in failed_result_paths:
|
||||||
|
# Keep files out of the list which no longer require the relaxed rule set.
|
||||||
|
errors.append(SanityMessage(
|
||||||
|
code='A201',
|
||||||
|
message='Remove "%s" since it passes the current rule set' % path,
|
||||||
|
path=PEP8_LEGACY_PATH,
|
||||||
|
line=line,
|
||||||
|
column=1,
|
||||||
|
))
|
||||||
|
|
||||||
|
line = 0
|
||||||
|
|
||||||
|
for path in skip_paths:
|
||||||
|
line += 1
|
||||||
|
|
||||||
|
if not os.path.exists(path):
|
||||||
|
# Keep files out of the list which no longer exist in the repo.
|
||||||
|
errors.append(SanityMessage(
|
||||||
|
code='A101',
|
||||||
|
message='Remove "%s" since it does not exist' % path,
|
||||||
|
path=PEP8_SKIP_PATH,
|
||||||
|
line=line,
|
||||||
|
column=1,
|
||||||
|
))
|
||||||
|
|
||||||
|
for result in results:
|
||||||
|
if result.path in legacy_paths_set and result.code in legacy_ignore:
|
||||||
|
# Files on the legacy list are permitted to have errors on the legacy ignore list.
|
||||||
|
# However, we want to report on their existence to track progress towards eliminating these exceptions.
|
||||||
|
display.info('PEP 8: %s (legacy)' % result, verbosity=3)
|
||||||
|
|
||||||
|
key = '%s %s' % (result.code, re.sub('[0-9]+', 'NNN', result.message))
|
||||||
|
|
||||||
|
if key not in summary:
|
||||||
|
summary[key] = 0
|
||||||
|
|
||||||
|
summary[key] += 1
|
||||||
|
else:
|
||||||
|
# Files not on the legacy list and errors not on the legacy ignore list are PEP 8 policy errors.
|
||||||
|
errors.append(result)
|
||||||
|
|
||||||
|
if summary:
|
||||||
|
lines = []
|
||||||
|
count = 0
|
||||||
|
|
||||||
|
for key in sorted(summary):
|
||||||
|
count += summary[key]
|
||||||
|
lines.append('PEP 8: %5d %s' % (summary[key], key))
|
||||||
|
|
||||||
|
display.info('PEP 8: There were %d different legacy issues found (%d total):' % (len(summary), count), verbosity=1)
|
||||||
|
display.info('PEP 8: Count Code Message', verbosity=1)
|
||||||
|
|
||||||
|
for line in lines:
|
||||||
|
display.info(line, verbosity=1)
|
||||||
|
|
||||||
|
if errors:
|
||||||
|
return SanityFailure(test, messages=errors)
|
||||||
|
|
||||||
|
return SanitySuccess(test)
|
||||||
|
|
||||||
|
|
||||||
|
def command_sanity_yamllint(args, targets):
    """Run yamllint over every YAML file in the target set and report any findings.

    :type args: SanityConfig
    :type targets: SanityTargets
    :rtype: SanityResult
    """
    test = 'yamllint'

    yaml_extensions = ('.yml', '.yaml')
    paths = sorted(target.path for target in targets.include
                   if os.path.splitext(target.path)[1] in yaml_extensions)

    if not paths:
        return SanitySkipped(test)

    cmd = ['yamllint', '--format', 'parsable'] + paths

    try:
        stdout, stderr = run_command(args, cmd, capture=True)
        status = 0
    except SubprocessError as ex:
        stdout, stderr, status = ex.stdout, ex.stderr, ex.status

    if stderr:
        # yamllint reports findings on stdout only; anything on stderr means the tool itself broke.
        raise SubprocessError(cmd=cmd, status=status, stderr=stderr)

    if args.explain:
        return SanitySkipped(test)

    # Parse the "parsable" output format: path:line:column: [level] message
    pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): \[(?P<level>warning|error)\] (?P<message>.*)$'

    messages = []

    for output_line in stdout.splitlines():
        parsed = re.search(pattern, output_line).groupdict()

        messages.append(SanityMessage(
            message=parsed['message'],
            path=parsed['path'],
            line=int(parsed['line']),
            column=int(parsed['column']),
            level=parsed['level'],
        ))

    if messages:
        return SanityFailure(test, messages=messages)

    return SanitySuccess(test)
||||||
|
def command_sanity_ansible_doc(args, targets, python_version):
    """Run ansible-doc against every module under test to verify documentation loads cleanly.

    :type args: SanityConfig
    :type targets: SanityTargets
    :type python_version: str
    :rtype: SanityResult
    """
    test = 'ansible-doc'

    with open('test/sanity/ansible-doc/skip.txt', 'r') as skip_fd:
        skip_modules = set(skip_fd.read().splitlines())

    included = set(m for i in targets.include_external for m in i.modules)
    excluded = set(m for i in targets.exclude_external for m in i.modules)
    modules = sorted(included - excluded - skip_modules)

    if not modules:
        return SanitySkipped(test, python_version=python_version)

    env = ansible_environment(args)
    cmd = ['ansible-doc'] + modules

    try:
        stdout, stderr = intercept_command(args, cmd, env=env, capture=True, python_version=python_version)
        status = 0
    except SubprocessError as ex:
        stdout, stderr, status = ex.stdout, ex.stderr, ex.status

    if status:
        summary = str(SubprocessError(cmd=cmd, status=status, stderr=stderr))
        return SanityFailure(test, summary=summary, python_version=python_version)

    if stdout:
        display.info(stdout.strip(), verbosity=3)

    if stderr:
        # ansible-doc warnings about broken docs land on stderr; treat any of it as a failure.
        summary = 'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
        return SanityFailure(test, summary=summary, python_version=python_version)

    return SanitySuccess(test, python_version=python_version)
||||||
|
def collect_code_smell_tests():
    """Discover executable code-smell scripts, excluding any listed in skip.txt.

    :rtype: tuple(SanityFunc)
    """
    with open('test/sanity/code-smell/skip.txt', 'r') as skip_fd:
        skip_tests = skip_fd.read().splitlines()

    tests = []

    for script_path in sorted(glob.glob('test/sanity/code-smell/*')):
        # Only regular, executable files which are not skipped qualify as tests.
        if not os.path.isfile(script_path):
            continue
        if not os.access(script_path, os.X_OK):
            continue
        if os.path.basename(script_path) in skip_tests:
            continue

        test_name = os.path.splitext(os.path.basename(script_path))[0]
        tests.append(SanityFunc(test_name, command_sanity_code_smell, script=script_path, intercept=False))

    return tuple(tests)
||||||
|
def sanity_init():
    """Build the complete sanity test list, adding code-smell scripts discovered at runtime."""
    global SANITY_TESTS  # pylint: disable=locally-disabled, global-statement
    combined = SANITY_TESTS + collect_code_smell_tests()
    SANITY_TESTS = tuple(sorted(combined, key=lambda test: test.name))
|
def sanity_get_tests():
    """Return the current tuple of sanity tests.

    Includes runtime-discovered code-smell tests only after sanity_init() has run.

    :rtype: tuple(SanityFunc)
    """
    return SANITY_TESTS
||||||
|
class SanityResult(object):
    """Base class for sanity test results."""
    def __init__(self, test, python_version=None):
        """
        :type test: str
        :type python_version: str
        """
        self.test = test
        self.python_version = python_version

        # junit-xml is an optional dependency; remember the module (or None)
        # so output methods can check availability at write time.
        try:
            import junit_xml
        except ImportError:
            junit_xml = None

        self.junit = junit_xml

    def write(self, args):
        """Write results to every output requested by the configuration.

        :type args: SanityConfig
        """
        self.write_console()

        if args.lint:
            self.write_lint()

        if not args.junit:
            return

        if self.junit:
            self.write_junit(args)
        else:
            display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)

    def write_console(self):
        """Write results to console."""
        pass

    def write_lint(self):
        """Write lint results to stdout."""
        pass

    def write_junit(self, args):
        """
        :type args: SanityConfig
        """
        pass

    def save_junit(self, args, test_case, properties=None):
        """Serialize a single junit test case under test/results/junit/.

        :type args: SanityConfig
        :type test_case: junit_xml.TestCase
        :type properties: dict[str, str] | None
        :rtype: str | None
        """
        name = 'ansible-test-%s' % self.test

        if self.python_version:
            name += '-python-%s' % self.python_version

        path = 'test/results/junit/%s.xml' % name

        suite = self.junit.TestSuite(
            name='ansible-test',
            test_cases=[test_case],
            timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
            properties=properties,
        )

        report = self.junit.TestSuite.to_xml_string(test_suites=[suite], prettyprint=True, encoding='utf-8')

        if args.explain:
            return

        with open(path, 'wb') as xml_fd:
            xml_fd.write(report.encode('utf-8', 'strict'))
||||||
|
class SanitySuccess(SanityResult):
    """Sanity test success."""
    def __init__(self, test, python_version=None):
        """
        :type test: str
        :type python_version: str
        """
        super(SanitySuccess, self).__init__(test, python_version)

    def write_junit(self, args):
        """Record a passing junit test case with no failure or skip info.

        :type args: SanityConfig
        """
        self.save_junit(args, self.junit.TestCase(name=self.test))
||||||
|
class SanitySkipped(SanityResult):
    """Sanity test skipped."""
    def __init__(self, test, python_version=None):
        """
        :type test: str
        :type python_version: str
        """
        super(SanitySkipped, self).__init__(test, python_version)

    def write_console(self):
        """Report to the console that nothing was applicable."""
        display.info('No tests applicable.', verbosity=1)

    def write_junit(self, args):
        """Record a skipped junit test case.

        :type args: SanityConfig
        """
        skipped_case = self.junit.TestCase(name=self.test)
        skipped_case.add_skipped_info('No tests applicable.')

        self.save_junit(args, skipped_case)
||||||
|
class SanityFailure(SanityResult):
    """Sanity test failure."""
    def __init__(self, test, python_version=None, messages=None, summary=None):
        """
        :type test: str
        :type python_version: str
        :type messages: list[SanityMessage]
        :type summary: str
        """
        super(SanityFailure, self).__init__(test, python_version)

        self.messages = messages
        self.summary = summary

    def write_console(self):
        """Write failure details to the console."""
        if self.summary:
            display.error(self.summary)
            return

        display.error('Found %d %s issue(s) which need to be resolved:' % (len(self.messages), self.test))

        for message in self.messages:
            display.error(message)

    def write_lint(self):
        """Write lint results to stdout."""
        if not self.summary:
            for message in self.messages:
                print(message)
            return

        # Summaries have no per-file detail, so point the reader at stderr instead.
        command = self.format_command()
        message = 'The test `%s` failed. See stderr output for details.' % command
        path = 'test/runner/ansible-test'
        print(SanityMessage(message, path))

    def write_junit(self, args):
        """Record a failed junit test case including all messages or the summary.

        :type args: SanityConfig
        """
        title = self.format_title()

        # Hack to remove ANSI color reset code from SubprocessError messages.
        output = self.format_block().replace(display.clear, '')

        test_case = self.junit.TestCase(classname='sanity', name=self.test)

        # Include a leading newline to improve readability on Shippable "Tests" tab.
        # Without this, the first line becomes indented.
        test_case.add_failure_info(message=title, output='\n%s' % output)

        self.save_junit(args, test_case)

    def format_command(self):
        """Return the ansible-test command which reproduces this failure.

        :rtype: str
        """
        command = 'ansible-test sanity --test %s' % self.test

        if self.python_version:
            command += ' --python %s' % self.python_version

        return command

    def format_title(self):
        """Return a one-line title describing the failure.

        :rtype: str
        """
        if self.summary or len(self.messages) == 1:
            reason = 'error'
        else:
            reason = 'errors'

        return 'The test `%s` failed with the following %s:' % (self.format_command(), reason)

    def format_block(self):
        """Return the failure detail as a single stripped text block.

        :rtype: str
        """
        block = self.summary if self.summary else '\n'.join(str(m) for m in self.messages)

        return block.strip()
||||||
|
class SanityMessage(object):
    """Single sanity test message for one file."""
    def __init__(self, message, path, line=0, column=0, level='error', code=None):
        """
        :type message: str
        :type path: str
        :type line: int
        :type column: int
        :type level: str
        :type code: str | None
        """
        self.message = message
        self.path = path
        self.line = line
        self.column = column
        self.level = level
        self.code = code

    def __str__(self):
        # Render as "path:line:column: [code ]message" for console and lint output.
        text = '%s %s' % (self.code, self.message) if self.code else self.message

        return '%s:%s:%s: %s' % (self.path, self.line, self.column, text)
||||||
|
class SanityTargets(object):
    """Sanity test target information."""
    def __init__(self, include, exclude, require):
        """
        :type include: list[str]
        :type exclude: list[str]
        :type require: list[str]
        """
        all_targets = tuple(sorted(walk_sanity_targets()))

        # No explicit includes means the tests run against everything.
        self.all = not include
        self.targets = all_targets
        self.include = walk_internal_targets(all_targets, include, exclude, require)
        self.include_external, self.exclude_external = walk_external_targets(all_targets, include, exclude, require)
||||||
|
class SanityTest(object):
    """Sanity test base class."""
    def __init__(self, name):
        """
        :type name: str
        """
        self.name = name
||||||
|
class SanityFunc(SanityTest):
    """Sanity test function information."""
    def __init__(self, name, func, intercept=True, script=None):
        """
        :type name: str
        :type func: (SanityConfig, SanityTargets) -> SanityResult
        :type intercept: bool
        :type script: str | None
        """
        super(SanityFunc, self).__init__(name)

        self.script = script
        self.intercept = intercept
        self.func = func
||||||
|
# Built-in sanity tests. Extended at runtime with code-smell scripts by sanity_init().
SANITY_TESTS = (
    SanityFunc('shellcheck', command_sanity_shellcheck, intercept=False),
    SanityFunc('pep8', command_sanity_pep8, intercept=False),
    SanityFunc('yamllint', command_sanity_yamllint, intercept=False),
    SanityFunc('validate-modules', command_sanity_validate_modules, intercept=False),
    SanityFunc('ansible-doc', command_sanity_ansible_doc),
)
|
|
@ -287,6 +287,8 @@ class Display(object):
|
||||||
self.verbosity = 0
|
self.verbosity = 0
|
||||||
self.color = True
|
self.color = True
|
||||||
self.warnings = []
|
self.warnings = []
|
||||||
|
self.warnings_unique = set()
|
||||||
|
self.info_stderr = False
|
||||||
|
|
||||||
def __warning(self, message):
|
def __warning(self, message):
|
||||||
"""
|
"""
|
||||||
|
@ -304,10 +306,17 @@ class Display(object):
|
||||||
for warning in self.warnings:
|
for warning in self.warnings:
|
||||||
self.__warning(warning)
|
self.__warning(warning)
|
||||||
|
|
||||||
def warning(self, message):
|
def warning(self, message, unique=False):
|
||||||
"""
|
"""
|
||||||
:type message: str
|
:type message: str
|
||||||
|
:type unique: bool
|
||||||
"""
|
"""
|
||||||
|
if unique:
|
||||||
|
if message in self.warnings_unique:
|
||||||
|
return
|
||||||
|
|
||||||
|
self.warnings_unique.add(message)
|
||||||
|
|
||||||
self.__warning(message)
|
self.__warning(message)
|
||||||
self.warnings.append(message)
|
self.warnings.append(message)
|
||||||
|
|
||||||
|
@ -330,7 +339,7 @@ class Display(object):
|
||||||
"""
|
"""
|
||||||
if self.verbosity >= verbosity:
|
if self.verbosity >= verbosity:
|
||||||
color = self.verbosity_colors.get(verbosity, self.yellow)
|
color = self.verbosity_colors.get(verbosity, self.yellow)
|
||||||
self.print_message(message, color=color)
|
self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout)
|
||||||
|
|
||||||
def print_message(self, message, color=None, fd=sys.stdout): # pylint: disable=locally-disabled, invalid-name
|
def print_message(self, message, color=None, fd=sys.stdout): # pylint: disable=locally-disabled, invalid-name
|
||||||
"""
|
"""
|
||||||
|
|
|
@ -24,9 +24,7 @@ from lib.executor import (
|
||||||
command_windows_integration,
|
command_windows_integration,
|
||||||
command_units,
|
command_units,
|
||||||
command_compile,
|
command_compile,
|
||||||
command_sanity,
|
|
||||||
command_shell,
|
command_shell,
|
||||||
SANITY_TESTS,
|
|
||||||
SUPPORTED_PYTHON_VERSIONS,
|
SUPPORTED_PYTHON_VERSIONS,
|
||||||
COMPILE_PYTHON_VERSIONS,
|
COMPILE_PYTHON_VERSIONS,
|
||||||
PosixIntegrationConfig,
|
PosixIntegrationConfig,
|
||||||
|
@ -42,6 +40,12 @@ from lib.executor import (
|
||||||
check_startup,
|
check_startup,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
from lib.sanity import (
|
||||||
|
command_sanity,
|
||||||
|
sanity_init,
|
||||||
|
sanity_get_tests,
|
||||||
|
)
|
||||||
|
|
||||||
from lib.target import (
|
from lib.target import (
|
||||||
find_target_completion,
|
find_target_completion,
|
||||||
walk_posix_integration_targets,
|
walk_posix_integration_targets,
|
||||||
|
@ -64,10 +68,12 @@ def main():
|
||||||
try:
|
try:
|
||||||
git_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
|
git_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
|
||||||
os.chdir(git_root)
|
os.chdir(git_root)
|
||||||
|
sanity_init()
|
||||||
args = parse_args()
|
args = parse_args()
|
||||||
config = args.config(args)
|
config = args.config(args)
|
||||||
display.verbosity = config.verbosity
|
display.verbosity = config.verbosity
|
||||||
display.color = config.color
|
display.color = config.color
|
||||||
|
display.info_stderr = isinstance(config, SanityConfig) and config.lint
|
||||||
check_startup()
|
check_startup()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -267,19 +273,27 @@ def parse_args():
|
||||||
sanity.add_argument('--test',
|
sanity.add_argument('--test',
|
||||||
metavar='TEST',
|
metavar='TEST',
|
||||||
action='append',
|
action='append',
|
||||||
choices=[t.name for t in SANITY_TESTS],
|
choices=[test.name for test in sanity_get_tests()],
|
||||||
help='tests to run')
|
help='tests to run').completer = complete_sanity_test
|
||||||
|
|
||||||
sanity.add_argument('--skip-test',
|
sanity.add_argument('--skip-test',
|
||||||
metavar='TEST',
|
metavar='TEST',
|
||||||
action='append',
|
action='append',
|
||||||
choices=[t.name for t in SANITY_TESTS],
|
choices=[test.name for test in sanity_get_tests()],
|
||||||
help='tests to skip')
|
help='tests to skip').completer = complete_sanity_test
|
||||||
|
|
||||||
sanity.add_argument('--list-tests',
|
sanity.add_argument('--list-tests',
|
||||||
action='store_true',
|
action='store_true',
|
||||||
help='list available tests')
|
help='list available tests')
|
||||||
|
|
||||||
|
sanity.add_argument('--lint',
|
||||||
|
action='store_true',
|
||||||
|
help='write lint output to stdout, everything else stderr')
|
||||||
|
|
||||||
|
sanity.add_argument('--junit',
|
||||||
|
action='store_true',
|
||||||
|
help='write test failures to junit xml files')
|
||||||
|
|
||||||
sanity.add_argument('--python',
|
sanity.add_argument('--python',
|
||||||
metavar='VERSION',
|
metavar='VERSION',
|
||||||
choices=SUPPORTED_PYTHON_VERSIONS,
|
choices=SUPPORTED_PYTHON_VERSIONS,
|
||||||
|
@ -540,5 +554,18 @@ def complete_network_platform(prefix, parsed_args, **_):
|
||||||
return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
|
return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
|
||||||
|
|
||||||
|
|
||||||
|
def complete_sanity_test(prefix, parsed_args, **_):
|
||||||
|
"""
|
||||||
|
:type prefix: unicode
|
||||||
|
:type parsed_args: any
|
||||||
|
:rtype: list[str]
|
||||||
|
"""
|
||||||
|
del parsed_args
|
||||||
|
|
||||||
|
tests = sorted(t.name for t in sanity_get_tests())
|
||||||
|
|
||||||
|
return [i for i in tests if i.startswith(prefix)]
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
main()
|
main()
|
||||||
|
|
|
@ -3,7 +3,9 @@
|
||||||
ITERKEYS_USERS=$(grep -r -I iterkeys . \
|
ITERKEYS_USERS=$(grep -r -I iterkeys . \
|
||||||
--exclude-dir .git \
|
--exclude-dir .git \
|
||||||
--exclude-dir .tox \
|
--exclude-dir .tox \
|
||||||
|
--exclude-dir .idea \
|
||||||
--exclude-dir docsite \
|
--exclude-dir docsite \
|
||||||
|
--exclude-dir results \
|
||||||
| grep -v \
|
| grep -v \
|
||||||
-e lib/ansible/compat/six/_six.py \
|
-e lib/ansible/compat/six/_six.py \
|
||||||
-e lib/ansible/module_utils/six.py \
|
-e lib/ansible/module_utils/six.py \
|
||||||
|
|
|
@ -1,105 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
# (c) 2016 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import os
|
|
||||||
import requests
|
|
||||||
|
|
||||||
from argparse import ArgumentParser
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
api_key = get_api_key()
|
|
||||||
|
|
||||||
parser = ArgumentParser(description='Download logs from all jobs in a Shippable run.')
|
|
||||||
|
|
||||||
parser.add_argument('run_id',
|
|
||||||
help='shippable run id.')
|
|
||||||
|
|
||||||
parser.add_argument('-v', '--verbose',
|
|
||||||
dest='verbose',
|
|
||||||
action='store_true',
|
|
||||||
help='show what is being downloaded')
|
|
||||||
|
|
||||||
parser.add_argument('-t', '--test',
|
|
||||||
dest='test',
|
|
||||||
action='store_true',
|
|
||||||
help='show what would be downloaded without downloading')
|
|
||||||
|
|
||||||
parser.add_argument('--key',
|
|
||||||
dest='api_key',
|
|
||||||
default=api_key,
|
|
||||||
required=api_key is None,
|
|
||||||
help='api key for accessing Shippable')
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
headers = dict(
|
|
||||||
Authorization='apiToken %s' % args.api_key,
|
|
||||||
)
|
|
||||||
|
|
||||||
response = requests.get('https://api.shippable.com/jobs?runIds=%s' % args.run_id, headers=headers)
|
|
||||||
|
|
||||||
if response.status_code != 200:
|
|
||||||
raise Exception(response.content)
|
|
||||||
|
|
||||||
body = response.json()
|
|
||||||
output_dir = args.run_id
|
|
||||||
|
|
||||||
if not args.test:
|
|
||||||
if not os.path.exists(output_dir):
|
|
||||||
os.mkdir(output_dir)
|
|
||||||
|
|
||||||
for j in body:
|
|
||||||
job_id = j['id']
|
|
||||||
job_number = j['jobNumber']
|
|
||||||
path = os.path.join(output_dir, '%s.log' % job_number)
|
|
||||||
url = 'https://api.shippable.com/jobs/%s/consoles?download=true' % job_id
|
|
||||||
|
|
||||||
if args.verbose or args.test:
|
|
||||||
print('%s' % path)
|
|
||||||
|
|
||||||
if os.path.exists(path):
|
|
||||||
continue
|
|
||||||
|
|
||||||
if not args.test:
|
|
||||||
response = requests.get(url, headers=headers)
|
|
||||||
|
|
||||||
if response.status_code != 200:
|
|
||||||
raise Exception(response.content)
|
|
||||||
|
|
||||||
log = response.content
|
|
||||||
|
|
||||||
with open(path, 'w') as f:
|
|
||||||
f.write(log)
|
|
||||||
|
|
||||||
|
|
||||||
def get_api_key():
|
|
||||||
path = os.path.join(os.environ['HOME'], '.shippable.key')
|
|
||||||
|
|
||||||
try:
|
|
||||||
with open(path, 'r') as f:
|
|
||||||
return f.read().strip()
|
|
||||||
except IOError:
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
187
test/utils/shippable/download.py
Executable file
187
test/utils/shippable/download.py
Executable file
|
@ -0,0 +1,187 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
# PYTHON_ARGCOMPLETE_OK
|
||||||
|
|
||||||
|
# (c) 2016 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import requests
|
||||||
|
|
||||||
|
from argparse import ArgumentParser
|
||||||
|
|
||||||
|
try:
|
||||||
|
import argcomplete
|
||||||
|
except ImportError:
|
||||||
|
argcomplete = None
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Download console logs, test results and/or coverage results for a Shippable run."""
    # The key from ~/.shippable.key (if present) becomes the --key default.
    api_key = get_api_key()

    parser = ArgumentParser(description='Download results from a Shippable run.')

    parser.add_argument('run_id',
                        help='shippable run id.')

    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        action='store_true',
                        help='show what is being downloaded')

    parser.add_argument('-t', '--test',
                        dest='test',
                        action='store_true',
                        help='show what would be downloaded without downloading')

    parser.add_argument('--key',
                        dest='api_key',
                        default=api_key,
                        required=api_key is None,
                        help='api key for accessing Shippable')

    parser.add_argument('--console-logs',
                        action='store_true',
                        help='download console logs')

    parser.add_argument('--test-results',
                        action='store_true',
                        help='download test results')

    parser.add_argument('--coverage-results',
                        action='store_true',
                        help='download code coverage results')

    parser.add_argument('--all',
                        action='store_true',
                        help='download everything')

    parser.add_argument('--job-number',
                        action='append',
                        type=int,
                        help='limit downloads to the given job number')

    # Tab completion is best-effort; argcomplete may not be installed.
    if argcomplete:
        argcomplete.autocomplete(parser)

    args = parser.parse_args()

    # --all is shorthand for every download category.
    if args.all:
        args.console_logs = True
        args.test_results = True
        args.coverage_results = True

    if not args.console_logs and not args.test_results and not args.coverage_results:
        parser.error('At least one download option is required: --console-logs, --test-results, --coverage-results')

    headers = dict(
        Authorization='apiToken %s' % args.api_key,
    )

    # Enumerate every job which belongs to the requested run.
    response = requests.get('https://api.shippable.com/jobs?runIds=%s' % args.run_id, headers=headers)

    if response.status_code != 200:
        raise Exception(response.content)

    body = response.json()
    output_dir = args.run_id

    # In test (dry-run) mode nothing is written to disk.
    if not args.test:
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

    for j in body:
        job_id = j['id']
        job_number = j['jobNumber']

        # --job-number restricts downloads to the listed jobs.
        if args.job_number and job_number not in args.job_number:
            continue

        if args.console_logs:
            path = os.path.join(output_dir, '%s-console.log' % job_number)
            url = 'https://api.shippable.com/jobs/%s/consoles?download=true' % job_id
            download(args, headers, path, url)

        if args.test_results:
            path = os.path.join(output_dir, '%s-test.json' % job_number)
            url = 'https://api.shippable.com/jobs/%s/jobTestReports' % job_id
            download(args, headers, path, url)
            extract_contents(args, path, os.path.join(output_dir, '%s-test' % job_number))

        if args.coverage_results:
            path = os.path.join(output_dir, '%s-coverage.json' % job_number)
            url = 'https://api.shippable.com/jobs/%s/jobCoverageReports' % job_id
            download(args, headers, path, url)
            extract_contents(args, path, os.path.join(output_dir, '%s-coverage' % job_number))
||||||
|
def extract_contents(args, path, output_dir):
    """Expand a downloaded Shippable report json file into individual files under output_dir.

    No-op in test (dry-run) mode.
    """
    if not args.test:
        with open(path, 'r') as json_fd:
            items = json.load(json_fd)

        for item in items:
            contents = item['contents']
            # NOTE(review): rebinds the `path` parameter to each extracted file path;
            # the leading slashes are stripped to keep paths inside output_dir.
            path = output_dir + '/' + re.sub('^/*', '', item['path'])

            directory = os.path.dirname(path)

            if not os.path.exists(directory):
                os.makedirs(directory)

            if args.verbose:
                print(path)

            # Never overwrite files from a previous (possibly partial) extraction.
            if not os.path.exists(path):
                with open(path, 'w') as output_fd:
                    output_fd.write(contents)
||||||
|
def download(args, headers, path, url):
    """Download url to path unless it already exists.

    In test (dry-run) mode only prints the path without downloading.
    """
    if args.verbose or args.test:
        print(path)

    # Existing files are kept, so an interrupted run can be resumed.
    if os.path.exists(path):
        return

    if not args.test:
        response = requests.get(url, headers=headers)

        if response.status_code != 200:
            raise Exception(response.content)

        content = response.content

        with open(path, 'w') as f:
            f.write(content)
|
||||||
|
def get_api_key():
    """Read the Shippable API key from ~/.shippable.key, or return None if it cannot be read."""
    key_path = os.path.join(os.environ['HOME'], '.shippable.key')

    try:
        with open(key_path, 'r') as key_fd:
            return key_fd.read().strip()
    except IOError:
        return None
|
|
||||||
|
# Entry point when executed as a script.
if __name__ == '__main__':
    main()
|
|
@ -18,5 +18,5 @@ ln -sf x86_64-linux-gnu-gcc-4.9 /usr/bin/x86_64-linux-gnu-gcc
|
||||||
retry.py pip install tox --disable-pip-version-check
|
retry.py pip install tox --disable-pip-version-check
|
||||||
|
|
||||||
ansible-test compile --color -v
|
ansible-test compile --color -v
|
||||||
ansible-test sanity --color -v --tox --skip-test ansible-doc --python 2.7
|
ansible-test sanity --color -v --junit --tox --skip-test ansible-doc --python 2.7
|
||||||
ansible-test sanity --color -v --tox --test ansible-doc --coverage
|
ansible-test sanity --color -v --junit --tox --test ansible-doc --coverage
|
||||||
|
|
|
@ -30,11 +30,11 @@ function cleanup
|
||||||
{
|
{
|
||||||
if find test/results/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then
|
if find test/results/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then
|
||||||
ansible-test coverage xml --color -v --requirements
|
ansible-test coverage xml --color -v --requirements
|
||||||
cp -av test/results/reports/coverage.xml shippable/codecoverage/coverage.xml
|
cp -a test/results/reports/coverage.xml shippable/codecoverage/coverage.xml
|
||||||
fi
|
fi
|
||||||
|
|
||||||
rmdir shippable/testresults/
|
rmdir shippable/testresults/
|
||||||
cp -av test/results/junit/ shippable/testresults/
|
cp -a test/results/junit/ shippable/testresults/
|
||||||
}
|
}
|
||||||
|
|
||||||
trap cleanup EXIT
|
trap cleanup EXIT
|
||||||
|
|
Loading…
Reference in a new issue