"""
Unit test tasks
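
Example invocations (paver's standard CLI; the paths shown are illustrative):

    paver test_system --system=lms
    paver test_system -t lms/djangoapps/courseware
    paver test_lib --lib=common/lib/xmodule
    paver test_python
    paver test
    paver coverage -b origin/master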
"""
import os
import sys
from optparse import make_option

from paver.easy import sh, task, cmdopts, needs

from pavelib.utils.test import suites
from pavelib.utils.envs import Env

# Colorize console output when pygments is available; otherwise pass text through.
try:
    from pygments.console import colorize
except ImportError:
    colorize = lambda color, text: text  # pylint: disable=invalid-name

__test__ = False  # do not collect


@task
@needs(
    'pavelib.prereqs.install_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    ("system=", "s", "System to act on"),
    ("test_id=", "t", "Test id"),
    ("failed", "f", "Run only failed tests"),
    ("fail_fast", "x", "Run only failed tests"),
    ("fasttest", "a", "Run without collectstatic"),
    make_option("--verbose", action="store_const", const=2, dest="verbosity"),
    make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
    make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
])
def test_system(options):
    """
    Run tests on our djangoapps for lms and cms
    """
    system = getattr(options, 'system', None)
    test_id = getattr(options, 'test_id', None)

    opts = {
        'failed_only': getattr(options, 'failed', None),
        'fail_fast': getattr(options, 'fail_fast', None),
        'fasttest': getattr(options, 'fasttest', None),
        'verbosity': getattr(options, 'verbosity', 1),
    }

    if test_id:
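        # Infer the system from the test_id's first path component when
        # --system was not given.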
        if not system:
            system = test_id.split('/')[0]
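
        # common/ code is shared by both systems; its tests run under the LMS.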
        if system == 'common':
            system = 'lms'
        opts['test_id'] = test_id

    if test_id or system:
        system_tests = [suites.SystemTestSuite(system, **opts)]
    else:
        system_tests = []
        for syst in ('cms', 'lms'):
            system_tests.append(suites.SystemTestSuite(syst, **opts))

    test_suite = suites.PythonTestSuite('python tests', subsuites=system_tests, **opts)
    test_suite.run()


@task
@needs(
    'pavelib.prereqs.install_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    ("lib=", "l", "lib to test"),
    ("test_id=", "t", "Test id"),
    ("failed", "f", "Run only failed tests"),
    ("fail_fast", "x", "Run only failed tests"),
    make_option("--verbose", action="store_const", const=2, dest="verbosity"),
    make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
    make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
])
def test_lib(options):
    """
82
    Run tests for common/lib/ and pavelib/ (paver-tests)
83 84 85 86 87 88 89
    """
    lib = getattr(options, 'lib', None)
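    # A bare --lib value doubles as the test_id when -t is not given.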
    test_id = getattr(options, 'test_id', lib)

    opts = {
        'failed_only': getattr(options, 'failed', None),
        'fail_fast': getattr(options, 'fail_fast', None),
        'verbosity': getattr(options, 'verbosity', 1),
    }

    if test_id:
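        # Recover the lib directory from the test_id: either its first three
        # path components, or a dotted module name under common/lib/.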
        if '/' in test_id:
            lib = '/'.join(test_id.split('/')[0:3])
        else:
            lib = 'common/lib/' + test_id.split('.')[0]
        opts['test_id'] = test_id
        lib_tests = [suites.LibTestSuite(lib, **opts)]
    else:
        lib_tests = [suites.LibTestSuite(d, **opts) for d in Env.LIB_TEST_DIRS]

    test_suite = suites.PythonTestSuite('python tests', subsuites=lib_tests, **opts)
    test_suite.run()


@task
@needs(
    'pavelib.prereqs.install_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    ("failed", "f", "Run only failed tests"),
    ("fail_fast", "x", "Run only failed tests"),
    make_option("--verbose", action="store_const", const=2, dest="verbosity"),
    make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
    make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
])
def test_python(options):
    """
    Run all python tests
    """
    opts = {
        'failed_only': getattr(options, 'failed', None),
        'fail_fast': getattr(options, 'fail_fast', None),
        'verbosity': getattr(options, 'verbosity', 1),
    }

    python_suite = suites.PythonTestSuite('Python Tests', **opts)
    python_suite.run()


@task
@needs(
    'pavelib.prereqs.install_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    make_option("--verbose", action="store_const", const=2, dest="verbosity"),
    make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
    make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
])
def test(options):
    """
    Run all tests
    """
    opts = {
        'verbosity': getattr(options, 'verbosity', 1)
    }
    # Subsuites to be added to the main suite
    python_suite = suites.PythonTestSuite('Python Tests', **opts)
    js_suite = suites.JsTestSuite('JS Tests', mode='run', with_coverage=True)

    # Main suite to be run
    all_unittests_suite = suites.TestSuite('All Tests', subsuites=[js_suite, python_suite])
    all_unittests_suite.run()


@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("compare_branch", "b", "Branch to compare against, defaults to origin/master"),
])
def coverage(options):
    """
    Build the html, xml, and diff coverage reports
    """
    compare_branch = getattr(options, 'compare_branch', 'origin/master')

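    # Build per-directory coverage.py reports wherever test data exists.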
    for directory in Env.LIB_TEST_DIRS + ['cms', 'lms']:
        report_dir = Env.REPORT_DIR / directory

        if (report_dir / '.coverage').isfile():
            # Generate the coverage.py HTML report
            sh("coverage html --rcfile={dir}/.coveragerc".format(dir=directory))

            # Generate the coverage.py XML report
            sh("coverage xml -o {report_dir}/coverage.xml --rcfile={dir}/.coveragerc".format(
                report_dir=report_dir,
                dir=directory
            ))

    # Find all coverage XML files (both Python and JavaScript)
    xml_reports = []

    for filepath in Env.REPORT_DIR.walk():
        if filepath.basename() == 'coverage.xml':
            xml_reports.append(filepath)

    if not xml_reports:
        err_msg = colorize(
            'red',
            "No coverage info found.  Run `paver test` before running `paver coverage`.\n"
        )
        sys.stderr.write(err_msg)
    else:
        xml_report_str = ' '.join(xml_reports)
        diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html')

        # Generate the diff coverage reports (HTML and console)
        sh(
            "diff-cover {xml_report_str} --compare-branch={compare_branch} "
            "--html-report {diff_html_path}".format(
                xml_report_str=xml_report_str,
                compare_branch=compare_branch,
                diff_html_path=diff_html_path,
            )
        )

        print("\n")