Commit 143a22fb by Robert Raposa

Enhance Jenkins integration of safe templates, take 2

- Handle the case when no files are linted
- Skip deleted files in the safe commit script
- Add a verbose option to the safe commit script
parent ebc11849
......@@ -4,6 +4,7 @@ Tests for paver quality tasks
import os
from path import Path as path
import tempfile
import textwrap
import unittest
from mock import patch, MagicMock, mock_open
from ddt import ddt, file_data
......@@ -59,7 +60,8 @@ class TestPaverQualityViolations(unittest.TestCase):
class TestPaverReportViolationsCounts(unittest.TestCase):
"""
For testing run_jshint and run_complexity utils
For testing the utility functions that extract violation counts from the
reports for run_jshint, run_complexity, run_safelint, and run_safecommit_report.
"""
def setUp(self):
......@@ -132,6 +134,91 @@ class TestPaverReportViolationsCounts(unittest.TestCase):
actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access
self.assertEqual(actual_count, None)
def test_get_safelint_counts_happy(self):
"""
Test happy path getting violation counts from safelint report.
"""
report = textwrap.dedent("""
test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos);
javascript-concat-html: 310 violations
javascript-escape: 7 violations
2608 violations total
""")
with open(self.f.name, 'w') as f:
f.write(report)
counts = pavelib.quality._get_safelint_counts(self.f.name) # pylint: disable=protected-access
self.assertDictEqual(counts, {
'rules': {
'javascript-concat-html': 310,
'javascript-escape': 7,
},
'total': 2608,
})
def test_get_safelint_counts_bad_counts(self):
"""
Test getting violation counts from truncated and malformed safelint
report.
"""
report = textwrap.dedent("""
javascript-concat-html: violations
""")
with open(self.f.name, 'w') as f:
f.write(report)
counts = pavelib.quality._get_safelint_counts(self.f.name) # pylint: disable=protected-access
self.assertDictEqual(counts, {
'rules': {},
'total': None,
})
def test_get_safecommit_count_happy(self):
"""
Test happy path getting violation count from safecommit report.
"""
report = textwrap.dedent("""
Linting lms/templates/navigation.html:
2 violations total
Linting scripts/tests/templates/test.underscore:
3 violations total
""")
with open(self.f.name, 'w') as f:
f.write(report)
count = pavelib.quality._get_safecommit_count(self.f.name) # pylint: disable=protected-access
self.assertEqual(count, 5)
def test_get_safecommit_count_bad_counts(self):
"""
Test getting violation count from truncated safecommit report.
"""
report = textwrap.dedent("""
Linting lms/templates/navigation.html:
""")
with open(self.f.name, 'w') as f:
f.write(report)
count = pavelib.quality._get_safecommit_count(self.f.name) # pylint: disable=protected-access
self.assertIsNone(count)
def test_get_safecommit_count_no_files(self):
"""
Test getting violation count from safecommit report where no files were
linted.
"""
report = textwrap.dedent("""
No files linted.
""")
with open(self.f.name, 'w') as f:
f.write(report)
count = pavelib.quality._get_safecommit_count(self.f.name) # pylint: disable=protected-access
self.assertEqual(count, 0)
class TestPrepareReportDir(unittest.TestCase):
"""
......
"""
Tests for paver safecommit quality tasks
"""
from mock import patch
import pavelib.quality
from paver.easy import call_task
from .utils import PaverTestCase
class PaverSafeCommitTest(PaverTestCase):
"""
Test run_safecommit_report with a mocked environment in order to pass in
opts.
"""
def setUp(self):
super(PaverSafeCommitTest, self).setUp()
self.reset_task_messages()
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_safecommit_count')
def test_safecommit_violation_number_not_found(self, _mock_count, _mock_report_dir, _mock_write_metric):
"""
run_safecommit_report encounters an error parsing the safecommit output
log.
"""
_mock_count.return_value = None
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_safecommit_report')
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_safecommit_count')
def test_safecommit_vanilla(self, _mock_count, _mock_report_dir, _mock_write_metric):
"""
run_safecommit_report runs cleanly when the violation count is found.
"""
_mock_count.return_value = 0
call_task('pavelib.quality.run_safecommit_report')
"""
Tests for paver quality tasks
Tests for paver safelint quality tasks
"""
from mock import patch
......@@ -20,43 +20,99 @@ class PaverSafeLintTest(PaverTestCase):
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_count_from_last_line')
def test_safelint_violation_number_not_found(self, _mock_count, _mock_report_dir, _mock_write_metric):
@patch.object(pavelib.quality, '_get_safelint_counts')
def test_safelint_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
"""
run_safelint encounters an error parsing the safelint output log
"""
_mock_count.return_value = None
_mock_counts.return_value = {}
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_safelint')
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_count_from_last_line')
def test_safelint_vanilla(self, _mock_count, _mock_report_dir, _mock_write_metric):
@patch.object(pavelib.quality, '_get_safelint_counts')
def test_safelint_vanilla(self, _mock_counts, _mock_report_dir, _mock_write_metric):
"""
run_safelint finds violations, but a limit was not set
"""
_mock_count.return_value = 1
_mock_counts.return_value = {'total': 0}
call_task('pavelib.quality.run_safelint')
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_count_from_last_line')
def test_safelint_too_many_violations(self, _mock_count, _mock_report_dir, _mock_write_metric):
@patch.object(pavelib.quality, '_get_safelint_counts')
def test_safelint_invalid_thresholds_option(self, _mock_counts, _mock_report_dir, _mock_write_metric):
"""
run_safelint fails when thresholds option is poorly formatted
"""
_mock_counts.return_value = {'total': 0}
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_safelint', options={"thresholds": "invalid"})
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_safelint_counts')
def test_safelint_invalid_thresholds_option_key(self, _mock_counts, _mock_report_dir, _mock_write_metric):
"""
run_safelint fails when the thresholds option contains an unrecognized key
"""
_mock_counts.return_value = {'total': 0}
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_safelint', options={"thresholds": '{"invalid": 3}'})
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_safelint_counts')
def test_safelint_too_many_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
"""
run_safelint finds more violations than are allowed
"""
_mock_count.return_value = 4
_mock_counts.return_value = {'total': 4}
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_safelint', options={"limit": "3"})
call_task('pavelib.quality.run_safelint', options={"thresholds": '{"total": 3}'})
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_count_from_last_line')
def test_safelint_under_limit(self, _mock_count, _mock_report_dir, _mock_write_metric):
@patch.object(pavelib.quality, '_get_safelint_counts')
def test_safelint_under_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
"""
run_safelint finds fewer violations than are allowed
"""
_mock_count.return_value = 4
_mock_counts.return_value = {'total': 4}
# No SystemExit is expected
call_task('pavelib.quality.run_safelint', options={"thresholds": '{"total": 5}'})
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_safelint_counts')
def test_safelint_rule_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
"""
run_safelint encounters an error parsing the safelint output log for a
given rule threshold that was set.
"""
_mock_counts.return_value = {'total': 4}
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_safelint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_safelint_counts')
def test_safelint_too_many_rule_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
"""
run_safelint finds more rule violations than are allowed
"""
_mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_safelint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_safelint_counts')
def test_safelint_under_rule_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
"""
run_safelint finds fewer rule violations than are allowed
"""
_mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
# No SystemExit is expected
call_task('pavelib.quality.run_safelint', options={"limit": "5"})
call_task('pavelib.quality.run_safelint', options={"thresholds": '{"rules": {"javascript-escape": 5}}'})
......@@ -2,6 +2,7 @@
Check code quality using pep8, pylint, and diff_quality.
"""
from paver.easy import sh, task, cmdopts, needs, BuildFailure
import json
import os
import re
......@@ -135,8 +136,8 @@ def run_pylint(options):
# Fail if the number of violations is greater than the limit
if num_violations > violations_limit > -1:
raise Exception("Failed. Too many pylint violations. "
"The limit is {violations_limit}.".format(violations_limit=violations_limit))
raise BuildFailure("Failed. Too many pylint violations. "
"The limit is {violations_limit}.".format(violations_limit=violations_limit))
def _count_pylint_violations(report_file):
......@@ -218,7 +219,7 @@ def run_pep8(options): # pylint: disable=unused-argument
if count:
failure_string = "Too many pep8 violations. " + violations_count_str
failure_string += "\n\nViolations:\n{violations_list}".format(violations_list=violations_list)
raise Exception(failure_string)
raise BuildFailure(failure_string)
@task
......@@ -293,7 +294,7 @@ def run_jshint(options):
# Fail if number of violations is greater than the limit
if num_violations > violations_limit > -1:
raise Exception(
raise BuildFailure(
"JSHint Failed. Too many violations ({count}).\nThe limit is {violations_limit}.".format(
count=num_violations, violations_limit=violations_limit
)
......@@ -303,48 +304,150 @@ def run_jshint(options):
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
("limit=", "l", "limit for number of acceptable violations"),
("thresholds=", "t", "json containing limit for number of acceptable violations per rule"),
])
def run_safelint(options):
"""
Runs safe_template_linter.py on the codebase
"""
violations_limit = int(getattr(options, 'limit', -1))
thresholds_option = getattr(options, 'thresholds', '{}')
try:
violation_thresholds = json.loads(thresholds_option)
except ValueError:
violation_thresholds = None
if not isinstance(violation_thresholds, dict) or \
any(key not in ("total", "rules") for key in violation_thresholds.keys()):
raise BuildFailure(
"""Error. Thresholds option "{thresholds_option}" was not supplied using proper format.\n"""
"""Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """
"""with property names in double-quotes.""".format(
thresholds_option=thresholds_option
)
)
safelint_script = "safe_template_linter.py"
safelint_report_dir = (Env.REPORT_DIR / "safelint")
safelint_report = safelint_report_dir / "safelint.report"
_prepare_report_dir(safelint_report_dir)
sh(
"{repo_root}/scripts/safe_template_linter.py >> {safelint_report}".format(
"{repo_root}/scripts/{safelint_script} --rule-totals >> {safelint_report}".format(
repo_root=Env.REPO_ROOT,
safelint_script=safelint_script,
safelint_report=safelint_report,
),
ignore_error=True
)
safelint_counts = _get_safelint_counts(safelint_report)
try:
num_violations = int(_get_count_from_last_line(safelint_report, "safelint"))
metrics_str = "Number of {safelint_script} violations: {num_violations}\n".format(
safelint_script=safelint_script, num_violations=int(safelint_counts['total'])
)
if 'rules' in safelint_counts and any(safelint_counts['rules']):
metrics_str += "\n"
rule_keys = sorted(safelint_counts['rules'].keys())
for rule in rule_keys:
metrics_str += "{rule} violations: {count}\n".format(
rule=rule,
count=int(safelint_counts['rules'][rule])
)
except TypeError:
raise BuildFailure(
"Error. Number of safelint violations could not be found in {safelint_report}".format(
safelint_report=safelint_report
"Error. Number of {safelint_script} violations could not be found in {safelint_report}".format(
safelint_script=safelint_script, safelint_report=safelint_report
)
)
metrics_report = (Env.METRICS_DIR / "safelint")
# Record the metric
_write_metric(num_violations, (Env.METRICS_DIR / "safelint"))
_write_metric(metrics_str, metrics_report)
# Print number of violations to log.
sh("cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
# Fail if number of violations is greater than the limit
if num_violations > violations_limit > -1:
raise Exception(
"SafeTemplateLinter Failed. Too many violations ({count}).\nThe limit is {violations_limit}.".format(
count=num_violations, violations_limit=violations_limit
error_message = ""
# Test total violations against threshold.
if 'total' in violation_thresholds:
if violation_thresholds['total'] < safelint_counts['total']:
error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format(
count=safelint_counts['total'], violations_limit=violation_thresholds['total']
)
# Test rule violations against thresholds.
if 'rules' in violation_thresholds:
threshold_keys = sorted(violation_thresholds['rules'].keys())
for threshold_key in threshold_keys:
if threshold_key not in safelint_counts['rules']:
error_message += (
"\nNumber of {safelint_script} violations for {rule} could not be found in "
"{safelint_report}."
).format(
safelint_script=safelint_script, rule=threshold_key, safelint_report=safelint_report
)
elif violation_thresholds['rules'][threshold_key] < safelint_counts['rules'][threshold_key]:
error_message += \
"\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format(
rule=threshold_key, count=safelint_counts['rules'][threshold_key],
violations_limit=violation_thresholds['rules'][threshold_key],
)
if error_message != "":
raise BuildFailure(
"SafeTemplateLinter Failed.\n{error_message}\n"
"See {safelint_report} or run the following command to hone in on the problem:\n"
" ./scripts/safe-commit-linter.sh -h".format(
error_message=error_message, safelint_report=safelint_report
)
)
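A hedged sketch of driving the new thresholds option locally (the option name comes from the cmdopts above; the JSON shape follows the format shown in the validation error message, and the subprocess call is illustrative, not part of this commit):

    # Sketch: invoke run_safelint with per-rule thresholds. The dict shape
    # ({"total": ..., "rules": {...}}) matches run_safelint's validation;
    # a failed build (nonzero exit) raises CalledProcessError here.
    import json
    import subprocess

    thresholds = {"total": 100, "rules": {"javascript-escape": 0}}
    subprocess.check_call(["paver", "run_safelint", "-t", json.dumps(thresholds)])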
@task
@needs('pavelib.prereqs.install_python_prereqs')
def run_safecommit_report():
"""
Runs safe-commit-linter.sh on the current branch.
"""
safecommit_script = "safe-commit-linter.sh"
safecommit_report_dir = (Env.REPORT_DIR / "safecommit")
safecommit_report = safecommit_report_dir / "safecommit.report"
_prepare_report_dir(safecommit_report_dir)
sh(
"{repo_root}/scripts/{safecommit_script} | tee {safecommit_report}".format(
repo_root=Env.REPO_ROOT,
safecommit_script=safecommit_script,
safecommit_report=safecommit_report,
),
ignore_error=True
)
safecommit_count = _get_safecommit_count(safecommit_report)
try:
num_violations = int(safecommit_count)
except TypeError:
raise BuildFailure(
"Error. Number of {safecommit_script} violations could not be found in {safecommit_report}".format(
safecommit_script=safecommit_script, safecommit_report=safecommit_report
)
)
# Print number of violations to log.
violations_count_str = "Number of {safecommit_script} violations: {num_violations}\n".format(
safecommit_script=safecommit_script, num_violations=num_violations
)
# Record the metric
metrics_report = (Env.METRICS_DIR / "safecommit")
_write_metric(violations_count_str, metrics_report)
# Output report to console.
sh("cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
def _write_metric(metric, filename):
"""
Write a given metric to a given file
......@@ -363,15 +466,28 @@ def _prepare_report_dir(dir_name):
dir_name.mkdir_p()
def _get_last_report_line(filename):
def _get_report_contents(filename, last_line_only=False):
"""
Returns the last line of a given file. Used for getting output from quality output files.
Returns the contents of the given file. Use last_line_only to only return
the last line, which can be used for getting output from quality output
files.
Arguments:
last_line_only: True to return the last line only, False to return a
string with full contents.
Returns:
String containing full contents of the report, or the last line.
"""
file_not_found_message = "The following log file could not be found: {file}".format(file=filename)
if os.path.isfile(filename):
with open(filename, 'r') as report_file:
lines = report_file.readlines()
return lines[len(lines) - 1]
if last_line_only:
lines = report_file.readlines()
return lines[-1]
else:
return report_file.read()
else:
# Raise a build error if the file is not found
raise BuildFailure(file_not_found_message)
......@@ -382,7 +498,7 @@ def _get_count_from_last_line(filename, file_type):
Returns the number in the last line of a file, as a float.
"""
last_line = _get_last_report_line(filename)
last_line = _get_report_contents(filename, last_line_only=True)
if file_type == "python_complexity":
# Example of the last line of a complexity report: "Average complexity: A (1.93953443446)"
regex = r'\d+\.\d+'
......@@ -398,6 +514,66 @@ def _get_count_from_last_line(filename, file_type):
return None
def _get_safelint_counts(filename):
"""
This returns a dict of violations from the safelint report.
Arguments:
filename: The name of the safelint report.
Returns:
A dict containing the following:
rules: A dict containing the count for each rule as follows:
violation-rule-id: N, where N is the number of violations
total: M, where M is the number of total violations
"""
report_contents = _get_report_contents(filename)
rule_count_regex = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
total_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)
violations = {'rules': {}}
for violation_match in rule_count_regex.finditer(report_contents):
try:
violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count'))
except ValueError:
violations['rules'][violation_match.group('rule_id')] = None
try:
violations['total'] = int(total_count_regex.search(report_contents).group('count'))
# An AttributeError will occur if the regex finds no matches.
# A ValueError will occur if the matched count cannot be cast as an int.
except (AttributeError, ValueError):
violations['total'] = None
return violations
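For reference, a minimal usage sketch of the parser above, adapted from the happy-path test in this commit (the tempfile usage is illustrative; assumes pavelib.quality is importable):

    # Sketch: _get_safelint_counts on a report with per-rule counts and a total.
    import tempfile
    import pavelib.quality

    report = (
        "javascript-concat-html: 310 violations\n"
        "javascript-escape: 7 violations\n"
        "2608 violations total\n"
    )
    with tempfile.NamedTemporaryFile(mode='w', suffix='.report', delete=False) as f:
        f.write(report)
    counts = pavelib.quality._get_safelint_counts(f.name)  # pylint: disable=protected-access
    assert counts == {'rules': {'javascript-concat-html': 310, 'javascript-escape': 7}, 'total': 2608}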
def _get_safecommit_count(filename):
"""
Returns the violation count from the safecommit report.
Arguments:
filename: The name of the safecommit report.
Returns:
The count of safecommit violations, or None if there is a problem.
"""
report_contents = _get_report_contents(filename)
if 'No files linted' in report_contents:
return 0
file_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)
try:
validation_count = None
for count_match in file_count_regex.finditer(report_contents):
if validation_count is None:
validation_count = 0
validation_count += int(count_match.group('count'))
return validation_count
except ValueError:
return None
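Similarly, a hedged sketch mirroring the safecommit tests above: per-file "N violations total" lines are summed, and a report containing "No files linted." maps to zero (tempfile usage again illustrative):

    # Sketch: _get_safecommit_count sums the per-file totals in the report.
    import tempfile
    import pavelib.quality

    report = (
        "Linting lms/templates/navigation.html:\n"
        "2 violations total\n"
        "Linting scripts/tests/templates/test.underscore:\n"
        "3 violations total\n"
    )
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write(report)
    assert pavelib.quality._get_safecommit_count(f.name) == 5  # pylint: disable=protected-access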
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
......
......@@ -13,7 +13,9 @@ set -e
# Violations thresholds for failing the build
export PYLINT_THRESHOLD=4175
export JSHINT_THRESHOLD=7550
export SAFELINT_THRESHOLD=2700
SAFELINT_THRESHOLDS=`cat scripts/safelint_thresholds.json`
export SAFELINT_THRESHOLDS=${SAFELINT_THRESHOLDS//[[:space:]]/}
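The ${SAFELINT_THRESHOLDS//[[:space:]]/} expansion strips all whitespace so the multi-line JSON file can be passed as a single shell word. A minimal Python sketch of the same transformation, for clarity (file path as read above):

    # Sketch: equivalent of bash's ${VAR//[[:space:]]/} -- remove all
    # whitespace so the JSON survives shell word-splitting.
    import re

    with open("scripts/safelint_thresholds.json") as thresholds_file:
        compact = re.sub(r"\s+", "", thresholds_file.read())
    # compact now looks like '{"rules":{"javascript-concat-html":313,...},"total":2509}'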
doCheckVars() {
if [ -n "$CIRCLECI" ] ; then
......
......@@ -85,7 +85,9 @@ case "$TEST_SUITE" in
echo "Running code complexity report (python)."
paver run_complexity > reports/code_complexity.log || echo "Unable to calculate code complexity. Ignoring error."
echo "Running safe template linter report."
paver run_safelint -l $SAFELINT_THRESHOLD > safelint.log || { cat safelint.log; EXIT=1; }
paver run_safelint -t $SAFELINT_THRESHOLDS > safelint.log || { cat safelint.log; EXIT=1; }
echo "Running safe commit linter report."
paver run_safecommit_report > safecommit.log || { cat safecommit.log; EXIT=1; }
# Run quality task. Pass in the 'fail-under' percentage to diff-quality
echo "Running diff quality."
paver run_quality -p 100 || EXIT=1
......
......@@ -19,6 +19,7 @@ show_help() {
echo " -m, --main-branch=COMMIT Run against files changed between the"
echo " current branch and this commit."
echo " Defaults to origin/master."
echo " -v, --verbose Output details of git commands run."
echo ""
echo "This scripts does not give a grand total. Be sure to check for"
echo "0 violations on each file."
......@@ -30,12 +31,25 @@ show_help() {
}
show_verbose() {
echo "Files linted is based on the following:"
echo "- Current commit: ${current_branch_hash}"
echo "- Main commit: ${MAIN_COMMIT}"
echo "- Merge base command: ${merge_base_command}"
echo "- Merge base: ${merge_base}"
echo "- Diff command: ${diff_command}"
}
for i in "$@"; do
case $i in
-m=*|--main-branch=*)
MAIN_COMMIT="${i#*=}"
shift # past argument=value
;;
-v|--verbose)
VERBOSE=true
;;
-h|--help|*)
# help or unknown option
show_help
......@@ -51,11 +65,24 @@ if [ -z "${MAIN_COMMIT+x}" ]; then
MAIN_COMMIT="origin/master"
fi
merge_base=`git merge-base "$current_branch_hash" "$MAIN_COMMIT"`
diff_files=`git diff --name-only "$current_branch_hash" "$merge_base"`
merge_base_command="git merge-base $current_branch_hash $MAIN_COMMIT"
merge_base=$(${merge_base_command})
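# Skip deleted files: --diff-filter=ACM limits the diff to Added, Copied, and Modified files.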
diff_command="git diff --name-only --diff-filter=ACM $current_branch_hash $merge_base"
diff_files=$(${diff_command})
if [ "$diff_files" = "" ]; then
    # When no files are found, automatically display verbose details to help
    # understand why.
    show_verbose
    echo ""
    echo "No files linted."
else
    if [ ${VERBOSE} ] ; then
        show_verbose
    fi
    for f in $diff_files; do
        echo ""
        echo "Linting $f:"
        ./scripts/safe_template_linter.py $f
    done
fi
{
"rules": {
"javascript-concat-html": 313,
"javascript-escape": 7,
"javascript-interpolate": 71,
"javascript-jquery-append": 120,
"javascript-jquery-html": 313,
"javascript-jquery-insert-into-target": 26,
"javascript-jquery-insertion": 30,
"javascript-jquery-prepend": 12,
"mako-html-entities": 0,
"mako-invalid-html-filter": 33,
"mako-invalid-js-filter": 222,
"mako-js-html-string": 0,
"mako-js-missing-quotes": 0,
"mako-missing-default": 234,
"mako-multiple-page-tags": 0,
"mako-unknown-context": 0,
"mako-unparseable-expression": 0,
"mako-unwanted-html-filter": 0,
"python-close-before-format": 0,
"python-concat-html": 28,
"python-custom-escape": 13,
"python-deprecated-display-name": 53,
"python-interpolate-html": 68,
"python-parse-error": 0,
"python-requires-html-or-text": 0,
"python-wrap-html": 279,
"underscore-not-escaped": 709
},
"total": 2509
}