edx-platform · Commit 5ea529ae

Revert: Enhance Jenkins integration of safe template linting

Authored May 11, 2016 by Robert Raposa
Parent: 8340703f

Showing 7 changed files with 38 additions and 420 deletions (+38 −420):
pavelib/paver_tests/test_paver_quality.py    +0   −59
pavelib/paver_tests/test_safecommit.py       +0   −44
pavelib/paver_tests/test_safelint.py         +15  −71
pavelib/quality.py                           +21  −195
scripts/all-tests.sh                         +1   −1
scripts/generic-ci-tests.sh                  +1   −3
scripts/safelint_thresholds.sh               +0   −47
pavelib/paver_tests/test_paver_quality.py

@@ -4,7 +4,6 @@ Tests for paver quality tasks
 import os
-from path import Path as path
 import tempfile
 import textwrap
 import unittest
 from mock import patch, MagicMock, mock_open
 from ddt import ddt, file_data
@@ -133,64 +132,6 @@ class TestPaverReportViolationsCounts(unittest.TestCase):
         actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo")  # pylint: disable=protected-access
         self.assertEqual(actual_count, None)
 
-    def test_get_safelint_counts_happy(self):
-        report = textwrap.dedent("""
-            test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos);
-            javascript-concat-html: 310 violations
-            javascript-escape: 7 violations
-            2608 violations total
-        """)
-        with open(self.f.name, 'w') as f:
-            f.write(report)
-        counts = pavelib.quality._get_safelint_counts(self.f.name)  # pylint: disable=protected-access
-        self.assertDictEqual(counts, {
-            'rules': {
-                'javascript-concat-html': 310,
-                'javascript-escape': 7,
-            },
-            'total': 2608,
-        })
-
-    def test_get_safelint_counts_bad_counts(self):
-        report = textwrap.dedent("""
-            javascript-concat-html: violations
-        """)
-        with open(self.f.name, 'w') as f:
-            f.write(report)
-        counts = pavelib.quality._get_safelint_counts(self.f.name)  # pylint: disable=protected-access
-        self.assertDictEqual(counts, {
-            'rules': {},
-            'total': None,
-        })
-
-    def test_get_safecommit_count_happy(self):
-        report = textwrap.dedent("""
-            Linting lms/templates/navigation.html:
-            2 violations total
-            Linting scripts/tests/templates/test.underscore:
-            3 violations total
-        """)
-        with open(self.f.name, 'w') as f:
-            f.write(report)
-        count = pavelib.quality._get_safecommit_count(self.f.name)  # pylint: disable=protected-access
-        self.assertEqual(count, 5)
-
-    def test_get_safecommit_count_bad_counts(self):
-        report = textwrap.dedent("""
-            Linting lms/templates/navigation.html:
-        """)
-        with open(self.f.name, 'w') as f:
-            f.write(report)
-        count = pavelib.quality._get_safecommit_count(self.f.name)  # pylint: disable=protected-access
-        self.assertIsNone(count)
 
 class TestPrepareReportDir(unittest.TestCase):
     """
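The deleted tests above double as documentation for the report format consumed by the counting helpers removed from pavelib/quality.py later in this diff. A minimal standalone sketch, reusing the two regexes from the deleted _get_safelint_counts helper, shows how the happy-path report reduces to per-rule counts plus a total:

    import re

    RULE_COUNT_REGEX = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
    TOTAL_COUNT_REGEX = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)

    report = (
        "test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos);\n"
        "javascript-concat-html: 310 violations\n"
        "javascript-escape: 7 violations\n"
        "2608 violations total\n"
    )

    # Per-rule lines look like "<rule-id>: <count> violations"; the summary
    # line is "<count> violations total". Individual violation lines (the
    # first line above) match neither pattern and are skipped.
    rules = {match.group('rule_id'): int(match.group('count'))
             for match in RULE_COUNT_REGEX.finditer(report)}
    total = int(TOTAL_COUNT_REGEX.search(report).group('count'))

    print(rules)  # {'javascript-concat-html': 310, 'javascript-escape': 7}
    print(total)  # 2608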
pavelib/paver_tests/test_safecommit.py (deleted, 100644 → 0)

"""
Tests for paver safecommit quality tasks
"""
from mock import patch

import pavelib.quality
from paver.easy import call_task

from .utils import PaverTestCase


class PaverSafeCommitTest(PaverTestCase):
    """
    Test run_safecommit_report with a mocked environment in order to pass in
    opts.
    """

    def setUp(self):
        super(PaverSafeCommitTest, self).setUp()
        self.reset_task_messages()

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_safecommit_count')
    def test_safecommit_violation_number_not_found(self, _mock_count, _mock_report_dir, _mock_write_metric):
        """
        run_safecommit_report encounters an error parsing the safecommit output
        log.
        """
        _mock_count.return_value = None
        with self.assertRaises(SystemExit):
            call_task('pavelib.quality.run_safecommit_report')

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_safecommit_count')
    def test_safecommit_vanilla(self, _mock_count, _mock_report_dir, _mock_write_metric):
        """
        run_safecommit_report finds violations.
        """
        _mock_count.return_value = 0
        call_task('pavelib.quality.run_safecommit_report')
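The deleted _get_safecommit_count helper (removed from pavelib/quality.py below) sums one "N violations total" line per linted file, which is why test_get_safecommit_count_happy in test_paver_quality.py above expects 2 + 3 == 5 for a two-file report. A quick sketch of that accumulation, assuming the same regex as the deleted helper:

    import re

    FILE_COUNT_REGEX = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)

    report = (
        "Linting lms/templates/navigation.html:\n"
        "2 violations total\n"
        "Linting scripts/tests/templates/test.underscore:\n"
        "3 violations total\n"
    )

    # Sum the per-file totals; stay None when no totals are present so the
    # caller can distinguish "no parsable output" from "zero violations".
    counts = [int(match.group('count')) for match in FILE_COUNT_REGEX.finditer(report)]
    total = sum(counts) if counts else None
    print(total)  # 5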
pavelib/paver_tests/test_safelint.py

@@ -1,5 +1,5 @@
 """
-Tests for paver safelint quality tasks
+Tests for paver quality tasks
 """
 from mock import patch
@@ -20,99 +20,43 @@ class PaverSafeLintTest(PaverTestCase):
     @patch.object(pavelib.quality, '_write_metric')
     @patch.object(pavelib.quality, '_prepare_report_dir')
-    @patch.object(pavelib.quality, '_get_safelint_counts')
-    def test_safelint_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+    @patch.object(pavelib.quality, '_get_count_from_last_line')
+    def test_safelint_violation_number_not_found(self, _mock_count, _mock_report_dir, _mock_write_metric):
         """
         run_safelint encounters an error parsing the safelint output log
         """
-        _mock_counts.return_value = {}
+        _mock_count.return_value = None
         with self.assertRaises(SystemExit):
             call_task('pavelib.quality.run_safelint')
 
     @patch.object(pavelib.quality, '_write_metric')
     @patch.object(pavelib.quality, '_prepare_report_dir')
-    @patch.object(pavelib.quality, '_get_safelint_counts')
-    def test_safelint_vanilla(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+    @patch.object(pavelib.quality, '_get_count_from_last_line')
+    def test_safelint_vanilla(self, _mock_count, _mock_report_dir, _mock_write_metric):
         """
         run_safelint finds violations, but a limit was not set
         """
-        _mock_counts.return_value = {'total': 0}
+        _mock_count.return_value = 1
         call_task('pavelib.quality.run_safelint')
 
-    @patch.object(pavelib.quality, '_write_metric')
-    @patch.object(pavelib.quality, '_prepare_report_dir')
-    @patch.object(pavelib.quality, '_get_safelint_counts')
-    def test_safelint_invalid_thresholds_option(self, _mock_counts, _mock_report_dir, _mock_write_metric):
-        """
-        run_safelint fails when thresholds option is poorly formatted
-        """
-        _mock_counts.return_value = {'total': 0}
-        with self.assertRaises(SystemExit):
-            call_task('pavelib.quality.run_safelint', options={"thresholds": "invalid"})
-
-    @patch.object(pavelib.quality, '_write_metric')
-    @patch.object(pavelib.quality, '_prepare_report_dir')
-    @patch.object(pavelib.quality, '_get_safelint_counts')
-    def test_safelint_invalid_thresholds_option_key(self, _mock_counts, _mock_report_dir, _mock_write_metric):
-        """
-        run_safelint fails when thresholds option is poorly formatted
-        """
-        _mock_counts.return_value = {'total': 0}
-        with self.assertRaises(SystemExit):
-            call_task('pavelib.quality.run_safelint', options={"thresholds": '{"invalid": 3}'})
-
     @patch.object(pavelib.quality, '_write_metric')
     @patch.object(pavelib.quality, '_prepare_report_dir')
-    @patch.object(pavelib.quality, '_get_safelint_counts')
-    def test_safelint_too_many_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+    @patch.object(pavelib.quality, '_get_count_from_last_line')
+    def test_safelint_too_many_violations(self, _mock_count, _mock_report_dir, _mock_write_metric):
         """
         run_safelint finds more violations than are allowed
         """
-        _mock_counts.return_value = {'total': 4}
+        _mock_count.return_value = 4
         with self.assertRaises(SystemExit):
-            call_task('pavelib.quality.run_safelint', options={"thresholds": '{"total": 3}'})
+            call_task('pavelib.quality.run_safelint', options={"limit": "3"})
 
     @patch.object(pavelib.quality, '_write_metric')
     @patch.object(pavelib.quality, '_prepare_report_dir')
-    @patch.object(pavelib.quality, '_get_safelint_counts')
-    def test_safelint_under_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+    @patch.object(pavelib.quality, '_get_count_from_last_line')
+    def test_safelint_under_limit(self, _mock_count, _mock_report_dir, _mock_write_metric):
         """
         run_safelint finds fewer violations than are allowed
         """
-        _mock_counts.return_value = {'total': 4}
+        _mock_count.return_value = 4
        # No System Exit is expected
-        call_task('pavelib.quality.run_safelint', options={"thresholds": '{"total": 5}'})
-
-    @patch.object(pavelib.quality, '_write_metric')
-    @patch.object(pavelib.quality, '_prepare_report_dir')
-    @patch.object(pavelib.quality, '_get_safelint_counts')
-    def test_safelint_rule_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
-        """
-        run_safelint encounters an error parsing the safelint output log for a
-        given rule threshold that was set.
-        """
-        _mock_counts.return_value = {'total': 4}
-        with self.assertRaises(SystemExit):
-            call_task('pavelib.quality.run_safelint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})
-
-    @patch.object(pavelib.quality, '_write_metric')
-    @patch.object(pavelib.quality, '_prepare_report_dir')
-    @patch.object(pavelib.quality, '_get_safelint_counts')
-    def test_safelint_too_many_rule_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
-        """
-        run_safelint finds more rule violations than are allowed
-        """
-        _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
-        with self.assertRaises(SystemExit):
-            call_task('pavelib.quality.run_safelint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})
-
-    @patch.object(pavelib.quality, '_write_metric')
-    @patch.object(pavelib.quality, '_prepare_report_dir')
-    @patch.object(pavelib.quality, '_get_safelint_counts')
-    def test_safelint_under_rule_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
-        """
-        run_safelint finds fewer rule violations than are allowed
-        """
-        _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
-        # No System Exit is expected
-        call_task('pavelib.quality.run_safelint', options={"thresholds": '{"rules": {"javascript-escape": 5}}'})
+        call_task('pavelib.quality.run_safelint', options={"limit": "5"})
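The two deleted invalid-thresholds tests pin down the option validation that the reverted run_safelint performed before doing any linting. A condensed sketch of that check (valid_thresholds is a hypothetical helper name for illustration, not part of the codebase):

    import json

    def valid_thresholds(thresholds_option):
        # The option must parse as a JSON object whose keys are drawn only
        # from "total" and "rules"; anything else fails the build.
        try:
            violation_thresholds = json.loads(thresholds_option)
        except ValueError:
            return False
        return isinstance(violation_thresholds, dict) and all(
            key in ("total", "rules") for key in violation_thresholds
        )

    print(valid_thresholds('invalid'))         # False: not JSON at all
    print(valid_thresholds('{"invalid": 3}'))  # False: unknown key
    print(valid_thresholds('{"total": 100, "rules": {"javascript-escape": 0}}'))  # True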
pavelib/quality.py

@@ -2,7 +2,6 @@
 Check code quality using pep8, pylint, and diff_quality.
 """
 from paver.easy import sh, task, cmdopts, needs, BuildFailure
-import json
 import os
 import re
@@ -136,8 +135,8 @@ def run_pylint(options):
# Fail number of violations is greater than the limit
if
num_violations
>
violations_limit
>
-
1
:
raise
BuildFailure
(
"Failed. Too many pylint violations. "
"The limit is {violations_limit}."
.
format
(
violations_limit
=
violations_limit
))
raise
Exception
(
"Failed. Too many pylint violations. "
"The limit is {violations_limit}."
.
format
(
violations_limit
=
violations_limit
))
def
_count_pylint_violations
(
report_file
):
...
...
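A side note on the restored failure check: num_violations > violations_limit > -1 is a Python chained comparison, so the task only fails when a limit was actually configured (the default of -1 never trips it) and that limit was exceeded:

    num_violations, violations_limit = 10, 5
    print(num_violations > violations_limit > -1)  # True: over a configured limit

    violations_limit = -1  # the default, meaning "no limit set"
    print(num_violations > violations_limit > -1)  # False: never fails the build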
@@ -219,7 +218,7 @@ def run_pep8(options):  # pylint: disable=unused-argument
     if count:
         failure_string = "Too many pep8 violations. " + violations_count_str
         failure_string += "\n\nViolations:\n{violations_list}".format(violations_list=violations_list)
-        raise BuildFailure(failure_string)
+        raise Exception(failure_string)
 
 
 @task
@@ -294,7 +293,7 @@ def run_jshint(options):
     # Fail if number of violations is greater than the limit
     if num_violations > violations_limit > -1:
-        raise BuildFailure(
+        raise Exception(
             "JSHint Failed. Too many violations ({count}).\nThe limit is {violations_limit}.".format(
                 count=num_violations, violations_limit=violations_limit)
@@ -304,151 +303,47 @@ def run_jshint(options):
 @task
 @needs('pavelib.prereqs.install_python_prereqs')
 @cmdopts([
-    ("thresholds=", "t", "json containing limit for number of acceptable violations per rule"),
+    ("limit=", "l", "limit for number of acceptable violations"),
 ])
 def run_safelint(options):
     """
     Runs safe_template_linter.py on the codebase
     """
-    thresholds_option = getattr(options, 'thresholds', '{}')
-    try:
-        violation_thresholds = json.loads(thresholds_option)
-    except ValueError:
-        violation_thresholds = None
-    if isinstance(violation_thresholds, dict) is False or \
-            any(key not in ("total", "rules") for key in violation_thresholds.keys()):
-        raise BuildFailure(
-            """Error. Thresholds option "{thresholds_option}" was not supplied using proper format.\n"""
-            """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """
-            """with property names in double-quotes.""".format(thresholds_option=thresholds_option)
-        )
-
-    safelint_script = "safe_template_linter.py"
+    violations_limit = int(getattr(options, 'limit', -1))
+
     safelint_report_dir = (Env.REPORT_DIR / "safelint")
     safelint_report = safelint_report_dir / "safelint.report"
     _prepare_report_dir(safelint_report_dir)
 
     sh(
-        "{repo_root}/scripts/{safelint_script} --rule-totals >> {safelint_report}".format(
+        "{repo_root}/scripts/safe_template_linter.py >> {safelint_report}".format(
             repo_root=Env.REPO_ROOT,
-            safelint_script=safelint_script,
             safelint_report=safelint_report,
         ),
         ignore_error=True
     )
 
-    safelint_counts = _get_safelint_counts(safelint_report)
-
     try:
-        metrics_str = "Number of {safelint_script} violations: {num_violations}\n".format(
-            safelint_script=safelint_script, num_violations=int(safelint_counts['total'])
-        )
-        if 'rules' in safelint_counts and any(safelint_counts['rules']):
-            metrics_str += "\n"
-            rule_keys = sorted(safelint_counts['rules'].keys())
-            for rule in rule_keys:
-                metrics_str += "{rule} violations: {count}\n".format(
-                    rule=rule, count=int(safelint_counts['rules'][rule])
-                )
+        num_violations = int(_get_count_from_last_line(safelint_report, "safelint"))
     except TypeError:
         raise BuildFailure(
-            "Error. Number of {safelint_script} violations could not be found in {safelint_report}".format(
-                safelint_script=safelint_script, safelint_report=safelint_report
+            "Error. Number of safelint violations could not be found in {safelint_report}".format(
+                safelint_report=safelint_report
             )
         )
 
-    metrics_report = (Env.METRICS_DIR / "safelint")
     # Record the metric
-    _write_metric(metrics_str, metrics_report)
-    # Print number of violations to log.
-    sh("cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
-
-    error_message = ""
-
-    # Test total violations against threshold.
-    if 'total' in violation_thresholds.keys():
-        if violation_thresholds['total'] < safelint_counts['total']:
-            error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format(
-                count=safelint_counts['total'], violations_limit=violation_thresholds['total']
-            )
-
-    # Test rule violations against thresholds.
-    if 'rules' in violation_thresholds:
-        threshold_keys = sorted(violation_thresholds['rules'].keys())
-        for threshold_key in threshold_keys:
-            if threshold_key not in safelint_counts['rules']:
-                error_message += (
-                    "\nNumber of {safelint_script} violations for {rule} could not be found in "
-                    "{safelint_report}."
-                ).format(
-                    safelint_script=safelint_script, rule=threshold_key, safelint_report=safelint_report
-                )
-            elif violation_thresholds['rules'][threshold_key] < safelint_counts['rules'][threshold_key]:
-                error_message += \
-                    "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format(
-                        rule=threshold_key, count=safelint_counts['rules'][threshold_key],
-                        violations_limit=violation_thresholds['rules'][threshold_key],
-                    )
-
-    if error_message is not "":
-        raise BuildFailure(
-            "SafeTemplateLinter Failed.\n{error_message}\n"
-            "See {safelint_report} or run the following command to hone in on the problem:\n"
-            "  ./scripts/safe-commit-linter.sh -h".format(
-                error_message=error_message, safelint_report=safelint_report
-            )
-        )
-
-
-@task
-@needs('pavelib.prereqs.install_python_prereqs')
-def run_safecommit_report():
-    """
-    Runs safe-commit-linter.sh on the current branch.
-    """
-    safecommit_script = "safe-commit-linter.sh"
-    safecommit_report_dir = (Env.REPORT_DIR / "safecommit")
-    safecommit_report = safecommit_report_dir / "safecommit.report"
-    _prepare_report_dir(safecommit_report_dir)
-
-    sh(
-        "{repo_root}/scripts/{safecommit_script} >> {safecommit_report}".format(
-            repo_root=Env.REPO_ROOT,
-            safecommit_script=safecommit_script,
-            safecommit_report=safecommit_report,
-        ),
-        ignore_error=True
-    )
-
-    # Output report to console.
-    sh("cat {safecommit_report}".format(safecommit_report=safecommit_report), ignore_error=True)
-
-    safecommit_count = _get_safecommit_count(safecommit_report)
-
-    try:
-        num_violations = int(safecommit_count)
-    except TypeError:
-        raise BuildFailure(
-            "Error. Number of {safecommit_script} violations could not be found in {safecommit_report}".format(
-                safecommit_script=safecommit_script, safecommit_report=safecommit_report
-            )
-        )
-
-    # Print number of violations to log.
-    violations_count_str = "Number of {safecommit_script} violations: {num_violations}\n".format(
-        safecommit_script=safecommit_script, num_violations=num_violations
-    )
-
-    # Record the metric
-    metrics_report = (Env.METRICS_DIR / "safecommit")
-    _write_metric(violations_count_str, metrics_report)
-    # Output report to console.
-    sh("cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
+    _write_metric(num_violations, (Env.METRICS_DIR / "safelint"))
+
+    # Fail if number of violations is greater than the limit
+    if num_violations > violations_limit > -1:
+        raise Exception(
+            "SafeTemplateLinter Failed. Too many violations ({count}).\nThe limit is {violations_limit}.".format(
+                count=num_violations, violations_limit=violations_limit
+            )
+        )
 
 
 def _write_metric(metric, filename):
     """
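For contrast with the simple limit check that survives the revert, here is a condensed, standalone sketch of the per-rule threshold comparison being deleted above, using the counts dict shape returned by _get_safelint_counts:

    violation_thresholds = {'total': 2565, 'rules': {'javascript-escape': 7}}
    safelint_counts = {'total': 2608, 'rules': {'javascript-escape': 7}}

    errors = []
    # The total only fails when a "total" threshold was supplied and exceeded.
    if 'total' in violation_thresholds and violation_thresholds['total'] < safelint_counts['total']:
        errors.append('Too many violations total ({count}).'.format(count=safelint_counts['total']))
    # Each rule threshold is checked independently; a missing count is itself an error.
    for rule, limit in sorted(violation_thresholds.get('rules', {}).items()):
        count = safelint_counts['rules'].get(rule)
        if count is None:
            errors.append('Count for {rule} could not be found.'.format(rule=rule))
        elif limit < count:
            errors.append('Too many {rule} violations ({count}).'.format(rule=rule, count=count))

    print(errors)  # ['Too many violations total (2608).']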
@@ -468,28 +363,15 @@ def _prepare_report_dir(dir_name):
     dir_name.mkdir_p()
 
 
-def _get_report_contents(filename, last_line_only=False):
+def _get_last_report_line(filename):
     """
-    Returns the contents of the given file. Use last_line_only to only return
-    the last line, which can be used for getting output from quality output
-    files.
-
-    Arguments:
-        last_line_only: True to return the last line only, False to return a
-            string with full contents.
-
-    Returns:
-        String containing full contents of the report, or the last line.
+    Returns the last line of a given file. Used for getting output from quality output files.
     """
     file_not_found_message = "The following log file could not be found: {file}".format(file=filename)
 
     if os.path.isfile(filename):
         with open(filename, 'r') as report_file:
-            if last_line_only:
-                lines = report_file.readlines()
-                return lines[len(lines) - 1]
-            else:
-                return report_file.read()
+            lines = report_file.readlines()
+            return lines[len(lines) - 1]
     else:
         # Raise a build error if the file is not found
         raise BuildFailure(file_not_found_message)
@@ -500,7 +382,7 @@ def _get_count_from_last_line(filename, file_type):
     This will return the number in the last line of a file.
     It is returning only the value (as a floating number).
     """
-    last_line = _get_report_contents(filename, last_line_only=True)
+    last_line = _get_last_report_line(filename)
 
     if file_type is "python_complexity":
         # Example of the last line of a complexity report: "Average complexity: A (1.93953443446)"
         regex = r'\d+.\d+'
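The "python_complexity" branch in the context above extracts the floating-point figure from a line like the one shown in the comment; for example:

    import re

    last_line = "Average complexity: A (1.93953443446)"
    match = re.search(r'\d+.\d+', last_line)  # the regex from _get_count_from_last_line
    print(float(match.group()))  # 1.93953443446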
@@ -516,62 +398,6 @@ def _get_count_from_last_line(filename, file_type):
     return None
 
 
-def _get_safelint_counts(filename):
-    """
-    This returns a dict of violations from the safelint report.
-
-    Arguments:
-        filename: The name of the safelint report.
-
-    Returns:
-        A dict containing the following:
-            rules: A dict containing the count for each rule as follows:
-                violation-rule-id: N, where N is the number of violations
-            total: M, where M is the number of total violations
-    """
-    report_contents = _get_report_contents(filename)
-
-    rule_count_regex = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
-    total_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)
-
-    violations = {'rules': {}}
-    for violation_match in rule_count_regex.finditer(report_contents):
-        try:
-            violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count'))
-        except ValueError:
-            violations['rules'][violation_match.group('rule_id')] = None
-
-    try:
-        violations['total'] = int(total_count_regex.search(report_contents).group('count'))
-    # An AttributeError will occur if the regex finds no matches.
-    # A ValueError will occur if the returned regex cannot be cast as a float.
-    except (AttributeError, ValueError):
-        violations['total'] = None
-
-    return violations
-
-
-def _get_safecommit_count(filename):
-    """
-    Returns the violation count from the safecommit report.
-
-    Arguments:
-        filename: The name of the safecommit report.
-
-    Returns:
-        The count of safecommit violations, or None if there is a problem.
-    """
-    report_contents = _get_report_contents(filename)
-    validation_count = None
-
-    file_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)
-
-    try:
-        for count_match in file_count_regex.finditer(report_contents):
-            if validation_count is None:
-                validation_count = 0
-            validation_count += int(count_match.group('count'))
-    except ValueError:
-        validation_count = None
-
-    return validation_count
-
-
 @task
 @needs('pavelib.prereqs.install_python_prereqs')
 @cmdopts([
scripts/all-tests.sh

@@ -13,7 +13,7 @@ set -e
 # Violations thresholds for failing the build
 export PYLINT_THRESHOLD=4175
 export JSHINT_THRESHOLD=7550
-source scripts/safelint_thresholds.sh
+export SAFELINT_THRESHOLD=2700
 
 doCheckVars() {
     if [ -n "$CIRCLECI" ] ; then
scripts/generic-ci-tests.sh

@@ -85,9 +85,7 @@ case "$TEST_SUITE" in
         echo "Running code complexity report (python)."
         paver run_complexity > reports/code_complexity.log || echo "Unable to calculate code complexity. Ignoring error."
         echo "Running safe template linter report."
-        paver run_safelint -t $SAFELINT_THRESHOLDS > safelint.log || { cat safelint.log; EXIT=1; }
-        echo "Running safe commit linter report."
-        paver run_safecommit_report > safecommit.log || { cat safecommit.log; EXIT=1; }
+        paver run_safelint -l $SAFELINT_THRESHOLD > safelint.log || { cat safelint.log; EXIT=1; }
         # Run quality task. Pass in the 'fail-under' percentage to diff-quality
         echo "Running diff quality."
         paver run_quality -p 100 || EXIT=1
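The CI script now passes a single integer via -l instead of a JSON blob via -t. For reference, a minimal sketch of how a Paver cmdopts entry like the restored ("limit=", "l", ...) surfaces inside a task (demo_task is a made-up name, not part of this codebase):

    from paver.easy import task, cmdopts

    @task
    @cmdopts([
        ("limit=", "l", "limit for number of acceptable violations"),
    ])
    def demo_task(options):
        # `paver demo_task -l 2700` sets options.limit to the string "2700";
        # absent the flag, getattr falls back to -1, meaning "no limit".
        violations_limit = int(getattr(options, 'limit', -1))
        print(violations_limit)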
scripts/safelint_thresholds.sh (deleted, 100755 → 0)

#!/usr/bin/env bash
set -e

###############################################################################
#
#   safelint_thresholds.sh
#
#   The thresholds used for paver run_safelint when used with various CI
#   systems.
#
###############################################################################

# Violations thresholds for failing the build
export SAFELINT_THRESHOLDS='
    {
        "rules": {
            "javascript-concat-html": 313,
            "javascript-escape": 7,
            "javascript-interpolate": 71,
            "javascript-jquery-append": 120,
            "javascript-jquery-html": 313,
            "javascript-jquery-insert-into-target": 26,
            "javascript-jquery-insertion": 30,
            "javascript-jquery-prepend": 12,
            "mako-html-entities": 0,
            "mako-invalid-html-filter": 33,
            "mako-invalid-js-filter": 249,
            "mako-js-html-string": 0,
            "mako-js-missing-quotes": 0,
            "mako-missing-default": 248,
            "mako-multiple-page-tags": 0,
            "mako-unknown-context": 0,
            "mako-unparseable-expression": 0,
            "mako-unwanted-html-filter": 0,
            "python-close-before-format": 0,
            "python-concat-html": 28,
            "python-custom-escape": 13,
            "python-deprecated-display-name": 53,
            "python-interpolate-html": 68,
            "python-parse-error": 0,
            "python-requires-html-or-text": 0,
            "python-wrap-html": 289,
            "underscore-not-escaped": 709
        },
        "total": 2565
    }'

export SAFELINT_THRESHOLDS=${SAFELINT_THRESHOLDS//[[:space:]]/}
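The final line of the deleted script strips every whitespace character from the JSON (a bash ${var//pattern/} substitution) so the thresholds can travel as a single shell word on the paver command line. A rough Python equivalent of that expansion, shown here only to illustrate the effect:

    import re

    thresholds = '''
    {
        "rules": {
            "javascript-escape": 7
        },
        "total": 2565
    }'''
    # bash ${SAFELINT_THRESHOLDS//[[:space:]]/} removes all whitespace
    compact = re.sub(r'\s+', '', thresholds)
    print(compact)  # {"rules":{"javascript-escape":7},"total":2565}

Since none of the rule names contain whitespace, the compacted string is still valid JSON.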