Commit c6ab15c6 by Sarina Canelake

Merge pull request #7352 from edx/sarina/pep8-quality-report

Improve pep8 reporting
parents a1e446d4 e77211b7
@@ -24,6 +24,7 @@ from ..tests.helpers import (
     CohortFactory, CourseCohortFactory, CourseCohortSettingsFactory
 )
 
+
 @patch("openedx.core.djangoapps.course_groups.cohorts.tracker")
 class TestCohortSignals(TestCase):
     def setUp(self):
@@ -405,7 +406,6 @@ class TestCohorts(ModuleStoreTestCase):
             "No groups->default cohort for user2"
         )
 
-
     def test_auto_cohorting_randomization(self):
         """
         Make sure cohorts.get_cohort() randomizes properly.
...
@@ -204,7 +204,7 @@ class TestPreferenceAPI(TestCase):
         too_long_key = "x" * 256
         with self.assertRaises(PreferenceValidationError) as context_manager:
-            update_user_preferences(self.user, { too_long_key: "new_value"})
+            update_user_preferences(self.user, {too_long_key: "new_value"})
         errors = context_manager.exception.preference_errors
         self.assertEqual(len(errors.keys()), 1)
         self.assertEqual(
@@ -217,7 +217,7 @@ class TestPreferenceAPI(TestCase):
         for empty_value in ("", " "):
             with self.assertRaises(PreferenceValidationError) as context_manager:
-                update_user_preferences(self.user, { self.test_preference_key: empty_value})
+                update_user_preferences(self.user, {self.test_preference_key: empty_value})
         errors = context_manager.exception.preference_errors
         self.assertEqual(len(errors.keys()), 1)
         self.assertEqual(
@@ -230,7 +230,7 @@ class TestPreferenceAPI(TestCase):
         user_preference_save.side_effect = [Exception, None]
         with self.assertRaises(PreferenceUpdateError) as context_manager:
-            update_user_preferences(self.user, { self.test_preference_key: "new_value"})
+            update_user_preferences(self.user, {self.test_preference_key: "new_value"})
         self.assertEqual(
             context_manager.exception.developer_message,
             u"Save failed for user preference 'test_key' with value 'new_value': "
...
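The three hunks above only tighten dict-literal spacing, but they all rely on the same capture pattern: `assertRaises` used as a context manager, with the raised exception inspected afterwards. A self-contained sketch of that pattern, where `PreferenceValidationError` is a hypothetical stand-in rather than the real import:

```python
# Sketch of the assertRaises-as-context-manager pattern used in these tests.
# PreferenceValidationError here is a minimal stand-in, not the real class.
import unittest


class PreferenceValidationError(Exception):  # hypothetical stand-in
    def __init__(self, preference_errors):
        super(PreferenceValidationError, self).__init__(preference_errors)
        self.preference_errors = preference_errors


class CaptureExample(unittest.TestCase):
    def test_capture(self):
        with self.assertRaises(PreferenceValidationError) as context_manager:
            raise PreferenceValidationError({"too_long_key": "Invalid"})
        # The context manager keeps the exception instance for inspection.
        errors = context_manager.exception.preference_errors
        self.assertEqual(len(errors.keys()), 1)
```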
@@ -204,5 +204,5 @@ class PreferencesDetailView(APIView):
         if not preference_existed:
             return Response(status=status.HTTP_404_NOT_FOUND)
-
+
         return Response(status=status.HTTP_204_NO_CONTENT)
@@ -1686,7 +1686,6 @@ class TestFacebookRegistrationView(
         self._verify_user_existence(user_exists=False, social_link_exists=False)
 
-
 @skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
 class TestGoogleRegistrationView(
         ThirdPartyRegistrationTestMixin, ThirdPartyOAuthTestMixinGoogle, TransactionTestCase
...
@@ -382,8 +382,9 @@ class RegistrationView(APIView):
     # Translators: These instructions appear on the registration form, immediately
     # below a field meant to hold the user's public username.
     username_instructions = _(
-        u"The name that will identify you in your courses - {bold_start}(cannot be changed later){bold_end}").format(bold_start=u'<strong>', bold_end=u'</strong>'
-    )
+        u"The name that will identify you in your courses - "
+        "{bold_start}(cannot be changed later){bold_end}"
+    ).format(bold_start=u'<strong>', bold_end=u'</strong>')
     # Translators: This example username is used as a placeholder in
     # a field on the registration form meant to hold the user's username.
...
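A note on the rewrap above: the new version relies on Python's compile-time concatenation of adjacent string literals, and `.format()` now clearly applies to the translated result instead of dangling off a misplaced parenthesis. A minimal sketch of the pattern, assuming `_` is Django's `ugettext` (the diff only shows the `_` alias):

```python
# Minimal sketch, assuming `_` is Django's ugettext (only `_` appears in the diff).
# Adjacent string literals are joined at compile time, so translators see one
# complete message; .format() then substitutes markup into the translated string.
from django.utils.translation import ugettext as _

username_instructions = _(
    u"The name that will identify you in your courses - "
    "{bold_start}(cannot be changed later){bold_end}"
).format(bold_start=u'<strong>', bold_end=u'</strong>')
```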
"""
Tests for paver quality tasks
"""
import os import os
import tempfile import tempfile
import unittest import unittest
from mock import patch from mock import patch, MagicMock
from ddt import ddt, file_data from ddt import ddt, file_data
import pavelib.quality import pavelib.quality
@@ -11,22 +14,26 @@ from paver.easy import BuildFailure
 @ddt
 class TestPaverQualityViolations(unittest.TestCase):
+    """
+    For testing the paver violations-counting tasks
+    """
     def setUp(self):
+        super(TestPaverQualityViolations, self).setUp()
         self.f = tempfile.NamedTemporaryFile(delete=False)
         self.f.close()
+        self.addCleanup(os.remove, self.f.name)
 
     def test_pylint_parser_other_string(self):
         with open(self.f.name, 'w') as f:
             f.write("hello")
-        num = pavelib.quality._count_pylint_violations(f.name)
+        num = pavelib.quality._count_pylint_violations(f.name)  # pylint: disable=protected-access
         self.assertEqual(num, 0)
 
     def test_pylint_parser_pep8(self):
         # Pep8 violations should be ignored.
         with open(self.f.name, 'w') as f:
             f.write("foo/hello/test.py:304:15: E203 whitespace before ':'")
-        num = pavelib.quality._count_pylint_violations(f.name)
+        num = pavelib.quality._count_pylint_violations(f.name)  # pylint: disable=protected-access
         self.assertEqual(num, 0)
 
     @file_data('pylint_test_list.json')
@@ -38,18 +45,15 @@ class TestPaverQualityViolations(unittest.TestCase):
         """
         with open(self.f.name, 'w') as f:
             f.write(value)
-        num = pavelib.quality._count_pylint_violations(f.name)
+        num = pavelib.quality._count_pylint_violations(f.name)  # pylint: disable=protected-access
         self.assertEqual(num, 1)
 
     def test_pep8_parser(self):
         with open(self.f.name, 'w') as f:
             f.write("hello\nhithere")
-        num = pavelib.quality._count_pep8_violations(f.name)
+        num, _violations = pavelib.quality._pep8_violations(f.name)  # pylint: disable=protected-access
         self.assertEqual(num, 2)
 
-    def tearDown(self):
-        os.remove(self.f.name)
-
 
 class TestPaverRunQuality(unittest.TestCase):
     """
@@ -57,27 +61,33 @@ class TestPaverRunQuality(unittest.TestCase):
     """
 
     def setUp(self):
+        super(TestPaverRunQuality, self).setUp()
         # mock the @needs decorator to skip it
         self._mock_paver_needs = patch.object(pavelib.quality.run_quality, 'needs').start()
         self._mock_paver_needs.return_value = 0
-        self._mock_paver_sh = patch('pavelib.quality.sh').start()
-        self.addCleanup(self._mock_paver_sh.stop())
-        self.addCleanup(self._mock_paver_needs.stop())
+        patcher = patch('pavelib.quality.sh')
+        self._mock_paver_sh = patcher.start()
+        self.addCleanup(patcher.stop)
+        self.addCleanup(self._mock_paver_needs.stop)
 
     def test_failure_on_diffquality_pep8(self):
         """
-        If pep8 diff-quality fails due to the percentage threshold, pylint
-        should still be run
+        If pep8 finds errors, pylint should still be run
         """
-        # Underlying sh call must fail when it is running the pep8 diff-quality task
-        self._mock_paver_sh.side_effect = CustomShMock().fail_on_pep8
-        with self.assertRaises(SystemExit):
-            pavelib.quality.run_quality("")
-            self.assertRaises(BuildFailure)
+        # Mock _get_pep8_violations to return a violation
+        _mock_pep8_violations = MagicMock(
+            return_value=(1, ['lms/envs/common.py:32:2: E225 missing whitespace around operator'])
+        )
+        with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations):
+            with self.assertRaises(SystemExit):
+                pavelib.quality.run_quality("")
+                self.assertRaises(BuildFailure)
 
-        # Test that both pep8 and pylint were called by counting the calls
-        self.assertEqual(self._mock_paver_sh.call_count, 2)
+        # Test that both pep8 and pylint were called by counting the calls to _get_pep8_violations
+        # (for pep8) and sh (for diff-quality pylint)
+        self.assertEqual(_mock_pep8_violations.call_count, 1)
+        self.assertEqual(self._mock_paver_sh.call_count, 1)
 
     def test_failure_on_diffquality_pylint(self):
         """
@@ -86,11 +96,16 @@ class TestPaverRunQuality(unittest.TestCase):
         # Underlying sh call must fail when it is running the pylint diff-quality task
         self._mock_paver_sh.side_effect = CustomShMock().fail_on_pylint
-        with self.assertRaises(SystemExit):
-            pavelib.quality.run_quality("")
-            self.assertRaises(BuildFailure)
+        _mock_pep8_violations = MagicMock(return_value=(0, []))
+        with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations):
+            with self.assertRaises(SystemExit):
+                pavelib.quality.run_quality("")
+                self.assertRaises(BuildFailure)
 
         # Test that both pep8 and pylint were called by counting the calls
-        self.assertEqual(self._mock_paver_sh.call_count, 2)
+        # Assert that _get_pep8_violations (which calls "pep8") is called once
+        self.assertEqual(_mock_pep8_violations.call_count, 1)
+        # And assert that sh was called once (for the call to "pylint")
+        self.assertEqual(self._mock_paver_sh.call_count, 1)
 
     def test_other_exception(self):
         """
@@ -105,8 +120,13 @@ class TestPaverRunQuality(unittest.TestCase):
     def test_no_diff_quality_failures(self):
         # Assert nothing is raised
-        pavelib.quality.run_quality("")
-        self.assertEqual(self._mock_paver_sh.call_count, 2)
+        _mock_pep8_violations = MagicMock(return_value=(0, []))
+        with patch('pavelib.quality._get_pep8_violations', _mock_pep8_violations):
+            pavelib.quality.run_quality("")
+        # Assert that _get_pep8_violations (which calls "pep8") is called once
+        self.assertEqual(_mock_pep8_violations.call_count, 1)
+        # And assert that sh was called once (for the call to "pylint")
+        self.assertEqual(self._mock_paver_sh.call_count, 1)
 
 
 class CustomShMock(object):
@@ -115,17 +135,6 @@ class CustomShMock(object):
     of them need to have certain responses.
     """
 
-    def fail_on_pep8(self, arg):
-        """
-        For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what
-        is going to fail when we pass in a percentage ("p") requirement.
-        """
-        if "pep8" in arg:
-            # Essentially mock diff-quality exiting with 1
-            paver.easy.sh("exit 1")
-        else:
-            return
-
     def fail_on_pylint(self, arg):
         """
         For our tests, we need the call for diff-quality running pep8 reports to fail, since that is what
...
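The `setUp` change above also fixes a latent bug: the old code passed `self._mock_paver_sh.stop()` to `addCleanup`, but `_mock_paver_sh` is the started `MagicMock`, so `.stop()` was just a mock attribute call and the real patcher was never stopped. Keeping a reference to the patcher and registering its bound `stop` method fixes the leak. A standalone sketch of the corrected pattern (class and test names hypothetical):

```python
# Sketch of the patcher/addCleanup pattern adopted above (names hypothetical).
import unittest
from mock import patch


class PatcherCleanupExample(unittest.TestCase):
    def setUp(self):
        super(PatcherCleanupExample, self).setUp()
        patcher = patch('pavelib.quality.sh')
        self.mock_sh = patcher.start()
        # Pass the bound method itself; addCleanup invokes it after the test.
        self.addCleanup(patcher.stop)
        # Buggy old form: self.addCleanup(started_mock.stop()) calls .stop()
        # on the MagicMock (a no-op child mock), so the patch never ends.

    def test_sh_is_mocked(self):
        import pavelib.quality
        pavelib.quality.sh('pep8 .')
        self.assertEqual(self.mock_sh.call_count, 1)
```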
@@ -56,7 +56,7 @@ def find_fixme(options):
         num_fixme += _count_pylint_violations(
             "{report_dir}/pylint_fixme.report".format(report_dir=report_dir))
 
-    print("Number of pylint fixmes: " + str(num_fixme))
+    print "Number of pylint fixmes: " + str(num_fixme)
 
 
 @task
@@ -75,7 +75,7 @@ def run_pylint(options):
     violations_limit = int(getattr(options, 'limit', -1))
     errors = getattr(options, 'errors', False)
     systems = getattr(options, 'system', ALL_SYSTEMS).split(',')
-
+
     # Make sure the metrics subdirectory exists
     Env.METRICS_DIR.makedirs_p()
@@ -119,7 +119,7 @@ def run_pylint(options):
     # Print number of violations to log
     violations_count_str = "Number of pylint violations: " + str(num_violations)
-    print(violations_count_str)
+    print violations_count_str
 
     # Also write the number of violations to a file
     with open(Env.METRICS_DIR / "pylint", "w") as f:
@@ -139,7 +139,7 @@ def _count_pylint_violations(report_file):
     # An example string:
     # common/lib/xmodule/xmodule/tests/test_conditional.py:21: [C0111(missing-docstring), DummySystem] Missing docstring
     # More examples can be found in the unit tests for this method
-    pylint_pattern = re.compile(".(\d+):\ \[(\D\d+.+\]).")
+    pylint_pattern = re.compile(r".(\d+):\ \[(\D\d+.+\]).")
 
     for line in open(report_file):
         violation_list_for_line = pylint_pattern.split(line)
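The raw-string prefix added above silences pylint's anomalous-backslash warning (W1401) without changing the pattern, since `\d`, `\[`, and friends reach the regex engine identically either way. A quick self-contained check against the example string from the comment in the hunk:

```python
# Quick check (a sketch): the raw-string pattern still splits the example
# report line, which is how the counting loop recognizes a violation.
import re

pylint_pattern = re.compile(r".(\d+):\ \[(\D\d+.+\]).")
line = ("common/lib/xmodule/xmodule/tests/test_conditional.py:21: "
        "[C0111(missing-docstring), DummySystem] Missing docstring")
parts = pylint_pattern.split(line)
print(len(parts) > 1)  # True -> the line is counted as one violation
```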
@@ -150,52 +150,67 @@ def _count_pylint_violations(report_file):
     return num_violations_report
 
 
-@task
-@needs('pavelib.prereqs.install_python_prereqs')
-@cmdopts([
-    ("system=", "s", "System to act on"),
-])
-def run_pep8(options):
+def _get_pep8_violations():
     """
-    Run pep8 on system code.
-    Fail the task if any violations are found.
+    Runs pep8. Returns a tuple of (number_of_violations, violations_string)
+    where violations_string is a string of all pep8 violations found, separated
+    by new lines.
     """
-    systems = getattr(options, 'system', ALL_SYSTEMS).split(',')
     report_dir = (Env.REPORT_DIR / 'pep8')
     report_dir.rmtree(ignore_errors=True)
     report_dir.makedirs_p()
 
     # Make sure the metrics subdirectory exists
     Env.METRICS_DIR.makedirs_p()
 
-    for system in systems:
-        sh('pep8 {system} | tee {report_dir}/pep8.report -a'.format(system=system, report_dir=report_dir))
+    sh('pep8 . | tee {report_dir}/pep8.report -a'.format(report_dir=report_dir))
 
-    count = _count_pep8_violations(
+    count, violations_list = _pep8_violations(
         "{report_dir}/pep8.report".format(report_dir=report_dir)
     )
 
+    return (count, violations_list)
+
+
+def _pep8_violations(report_file):
+    """
+    Returns a tuple of (num_violations, violations_list) for all
+    pep8 violations in the given report_file.
+    """
+    with open(report_file) as f:
+        violations_list = f.readlines()
+    num_lines = len(violations_list)
+    return num_lines, violations_list
+
+
+@task
+@needs('pavelib.prereqs.install_python_prereqs')
+@cmdopts([
+    ("system=", "s", "System to act on"),
+])
+def run_pep8(options):  # pylint: disable=unused-argument
+    """
+    Run pep8 on system code.
+    Fail the task if any violations are found.
+    """
+    (count, violations_list) = _get_pep8_violations()
+    violations_list = ''.join(violations_list)
+
     # Print number of violations to log
     violations_count_str = "Number of pep8 violations: {count}".format(count=count)
-    print(violations_count_str)
+    print violations_count_str
+    print violations_list
 
     # Also write the number of violations to a file
     with open(Env.METRICS_DIR / "pep8", "w") as f:
-        f.write(violations_count_str)
+        f.write(violations_count_str + '\n\n')
+        f.write(violations_list)
 
     # Fail if any violations are found
     if count:
-        raise Exception(
-            "Too many pep8 violations. Number of violations found: {count}.".format(
-                count=count
-            )
-        )
-
-
-def _count_pep8_violations(report_file):
-    num_lines = sum(1 for line in open(report_file))
-    return num_lines
 
+        failure_string = "Too many pep8 violations. " + violations_count_str
+        failure_string += "\n\nViolations:\n{violations_list}".format(violations_list=violations_list)
+        raise Exception(failure_string)
 
 
 @task
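The split above into `_get_pep8_violations` (runs pep8, builds the report) and `_pep8_violations` (parses a report file, one violation per line) is what lets the tests mock out the expensive half. A small usage sketch of the parser against a throwaway report file, written for Python 2 to match the codebase:

```python
# Usage sketch for _pep8_violations: it counts report lines and returns
# them, so one pep8 message per line yields (count, lines). Python 2 style.
import os
import tempfile

import pavelib.quality

report = tempfile.NamedTemporaryFile(suffix='.report', delete=False)
report.write("lms/envs/common.py:32:2: E225 missing whitespace around operator\n")
report.close()

count, violations = pavelib.quality._pep8_violations(report.name)  # pylint: disable=protected-access
print(count)                # 1
print(''.join(violations))  # the raw pep8 line(s), ready to print or embed
os.remove(report.name)
```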
@@ -232,12 +247,60 @@ def run_quality(options):
     quality of the branch vs the compare branch is less than 80%, then this task will fail.
     This threshold would be applied to both pep8 and pylint.
     """
     # Directory to put the diff reports in.
     # This makes the folder if it doesn't already exist.
     dquality_dir = (Env.REPORT_DIR / "diff_quality").makedirs_p()
     diff_quality_percentage_failure = False
 
+    def _pep8_output(count, violations_list, is_html=False):
+        """
+        Given a count & list of pep8 violations, pretty-print the pep8 output.
+        If `is_html`, will print out with HTML markup.
+        """
+        if is_html:
+            lines = ['<body>\n']
+            sep = '-------------<br/>\n'
+            title = "<h1>Quality Report: pep8</h1>\n"
+            violations_bullets = ''.join(
+                ['<li>{violation}</li><br/>\n'.format(violation=violation) for violation in violations_list]
+            )
+            violations_str = '<ul>\n{bullets}</ul>\n'.format(bullets=violations_bullets)
+            violations_count_str = "<b>Violations</b>: {count}<br/>\n"
+            fail_line = "<b>FAILURE</b>: pep8 count should be 0<br/>\n"
+        else:
+            lines = []
+            sep = '-------------\n'
+            title = "Quality Report: pep8\n"
+            violations_str = ''.join(violations_list)
+            violations_count_str = "Violations: {count}\n"
+            fail_line = "FAILURE: pep8 count should be 0\n"
+
+        violations_count_str = violations_count_str.format(count=count)
+
+        lines.extend([sep, title, sep, violations_str, sep, violations_count_str])
+        if count > 0:
+            lines.append(fail_line)
+        lines.append(sep + '\n')
+        if is_html:
+            lines.append('</body>')
+
+        return ''.join(lines)
+
+    # Run pep8 directly since we have 0 violations on master
+    (count, violations_list) = _get_pep8_violations()
+
+    # Print number of violations to log
+    print _pep8_output(count, violations_list)
+
+    # Also write the number of violations to a file
+    with open(dquality_dir / "diff_quality_pep8.html", "w") as f:
+        f.write(_pep8_output(count, violations_list, is_html=True))
+
+    if count > 0:
+        diff_quality_percentage_failure = True
+
+    # ----- Set up for diff-quality pylint call -----
+
     # Set the string, if needed, to be used for the diff-quality --compare-branch switch.
     compare_branch = getattr(options, 'compare_branch', None)
     compare_branch_string = ''
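For reference, the plain-text branch of `_pep8_output` assembles the report in the order separator, title, separator, violations, separator, count, plus the failure line when the count is non-zero. A sketch of the call and its console output for a single violation (the helper is nested inside `run_quality`, so imagine it extracted to module level):

```python
# Sketch only: _pep8_output is a nested helper in run_quality, shown here
# as if extracted to module level. Output reconstructed from its line order.
violations = ["lms/envs/common.py:32:2: E225 missing whitespace around operator\n"]
print(_pep8_output(1, violations))
# -------------
# Quality Report: pep8
# -------------
# lms/envs/common.py:32:2: E225 missing whitespace around operator
# -------------
# Violations: 1
# FAILURE: pep8 count should be 0
# -------------
```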
@@ -250,29 +313,6 @@ def run_quality(options):
     if diff_threshold > -1:
         percentage_string = '--fail-under={0}'.format(diff_threshold)
 
-    # Generate diff-quality html report for pep8, and print to console
-    # If pep8 reports exist, use those
-    # Otherwise, `diff-quality` will call pep8 itself
-    pep8_files = get_violations_reports("pep8")
-    pep8_reports = u' '.join(pep8_files)
-
-    try:
-        sh(
-            "diff-quality --violations=pep8 {pep8_reports} {percentage_string} "
-            "{compare_branch_string} --html-report {dquality_dir}/diff_quality_pep8.html".format(
-                pep8_reports=pep8_reports,
-                percentage_string=percentage_string,
-                compare_branch_string=compare_branch_string,
-                dquality_dir=dquality_dir
-            )
-        )
-    except BuildFailure, error_message:
-        if is_percentage_failure(error_message):
-            diff_quality_percentage_failure = True
-        else:
-            raise BuildFailure(error_message)
-
     # Generate diff-quality html report for pylint, and print to console
     # If pylint reports exist, use those
     # Otherwise, `diff-quality` will call pylint itself
...