Commit def3467e by clytwynec

Merge pull request #5672 from benpatterson/testeng/rebase-upstream

Testeng/rebase upstream
parents 2bb44a00 03b9ed74
import multiprocessing
import os
import sys
from textwrap import dedent

from bs4 import BeautifulSoup
FIRST = dedent(
    '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
<title>CMS Python Test Coverage Report</title>
<link rel='stylesheet' href='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/style.css' type='text/css'>
<script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/jquery.min.js'></script>
<script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/jquery.tablesorter.min.js'></script>
<script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/jquery.hotkeys.js'></script>
<script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/coverage_html.js'></script>
<script type='text/javascript' charset='utf-8'>
    jQuery(document).ready(coverage.index_ready);
</script>
<style>
    .hide-content {
        display: none;
    }
</style>
</head>''')
LAST = dedent(
    '''<script type="text/javascript">
String.prototype.replaceAll = function (find, replace) {
    var str = this;
    return str.replace(new RegExp(find, 'g'), replace);
};
$('.file a').click(function(event) {
    event.preventDefault();
    var id = "#" + event.currentTarget.innerHTML.replaceAll('/', '_');
    if (typeof window.last_source_file_id !== 'undefined') {
        $(window.last_source_file_id).addClass("hide-content");
    }
    window.last_source_file_id = id;
    $(id).removeClass("hide-content");
    location.href = id;
});
</script>
</body>
</html>''')
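
# FIRST and LAST form the static shell of each merged report: FIRST supplies
# the <head> (coverage.py's styles and scripts), and LAST appends the click
# handler that reveals the <div> for the selected source file and hides the
# one viewed previously.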
class ReportMerge(object):
    """Merge multiple html coverage reports."""

    DESTINATION = os.path.join(os.environ['HOME'], 'results', os.environ['TDDIUM_SESSION_ID'], 'session')

    def __init__(self):
        self.reports_dir = os.path.realpath(__file__).replace("scripts/cov_merge.py", "reports/")

    def _files(self, cover_path):
        """
        Return the list of report file paths in `cover_path`, excluding index.html.
        `cover_path` will be something like */reports/cms/cover.
        """
        include = lambda f: f.endswith('.html') and os.path.basename(f) != 'index.html'
        return [os.path.join(cover_path, f) for f in os.listdir(cover_path) if include(f)]
    def merge(self, modules, output_file=None):
        """
        Merge reports for `modules`.

        Arguments:
            modules (list): module report directories to scan for `cover` subdirectories
            output_file (str): name of output report file -- only used for bok_choy reports
        """
        for module in modules:
            for (path, _, _) in os.walk(os.path.join(self.reports_dir, module)):
                if os.path.basename(path) == 'cover':
                    self.merge_report(path, output_file)
    def merge_report(self, path, output_file):
        """
        Collect the multiple parts of a report and join them into a single report.

        Arguments:
            path (str): directory containing the report files to be merged
            output_file (str): name of output report file -- only used for bok_choy reports
        """
        content = list()

        # Extract total coverage percentage and file links table
        index_html = os.path.join(path, 'index.html')
        with open(index_html) as index_file:
            soup = BeautifulSoup(index_file)
            total_percentage = soup.find('div', id='header')
            total_percentage.find('img').decompose()
            index_table = soup.find('div', id='index')

        # Extract file names
        files = [os.path.join(path, name['href']) for name in index_table.find_all('a')]
        if not files:
            return

        print 'Merging Report for {}'.format(path)

        # Collect different parts of html report
        content.append(FIRST)
        content.append('<body>')
        content.append(str(total_percentage))
        content.append(str(index_table))
        for html in files:
            content.append(self._html_content(html))
        content.append(LAST)

        if output_file:
            report_path = os.path.join(self.DESTINATION, output_file)
        else:
            report_filename = path.split('reports/')[1].split('/cover')[0].replace('/', '_')
            report_path = os.path.join(self.DESTINATION, report_filename + '_coverage.html')

        # Write everything to a single report file
        with open(report_path, 'w') as report_file:
            report_file.write('\n'.join(content))

        print 'Report Merged for {}'.format(path)
    def _html_content(self, html):
        """
        Return the html tags of interest for the file specified by `html`.
        """
        # Create an id matching this file's link in the file links table
        navigate_div_id = os.path.basename(html).split('.')[0].replace('/', '_')
        navigate_div_start = "<div id='{}' class='hide-content'>\n".format(navigate_div_id)
        navigate_div_close = "\n</div>"

        content = list()
        content.append(navigate_div_start)

        with open(html) as html_file:
            soup = BeautifulSoup(html_file)
            header = soup.find('div', id='header')
            header.find('img').decompose()
            source = soup.find('div', id='source')
            source_img = source.find('img')
            if source_img:
                source_img.decompose()
            content.append(str(header))
            content.append(str(source))

        content.append(navigate_div_close)
        return '\n'.join(content)
if __name__ == '__main__':
    args = sys.argv
    if 'bok_choy' in args[1]:
        paths = ['bok_choy']
        rm = ReportMerge()
        rm.merge(paths, output_file=args[2])
    elif 'unit' in args[1]:
        # Merge each suite's reports in a separate process
        paths = ['common', 'cms', 'lms']
        for pth in paths:
            rm = ReportMerge()
            mp = multiprocessing.Process(target=rm.merge, args=([pth],))
            mp.start()
    else:
        print 'Unsupported Test Suite'
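
# Usage, as invoked by scripts/coverage.sh and the tddium unit-test command
# elsewhere in this commit:
#
#   python ./scripts/cov_merge.py unit
#   python ./scripts/cov_merge.py bok_choy bok_choy_shard1_coverage.html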
#!/bin/bash
case $1 in
    "shard1")
        echo "Collecting Coverage for Bok-Choy Shard1"
        paver bokchoy_coverage
        echo "Merging Coverage into a Single HTML File for Bok-Choy Shard1"
        python ./scripts/cov_merge.py bok_choy bok_choy_shard1_coverage.html
        ;;
    "shard2")
        echo "Collecting Coverage for Bok-Choy Shard2"
        paver bokchoy_coverage
        echo "Merging Coverage into a Single HTML File for Bok-Choy Shard2"
        python ./scripts/cov_merge.py bok_choy bok_choy_shard2_coverage.html
        ;;
    "shard3")
        echo "Collecting Coverage for Bok-Choy Shard3"
        paver bokchoy_coverage
        echo "Merging Coverage into a Single HTML File for Bok-Choy Shard3"
        python ./scripts/cov_merge.py bok_choy bok_choy_shard3_coverage.html
        ;;
    *)
        echo "Invalid Bok-Choy Shard Value!"
        ;;
esac
{
    "acceptance.lms": "lms/*.py",
    "acceptance.studio": "cms/*.py",
    "acceptance.common_lib": "common/lib/*.py",
    "acceptance.common_app": "common/djangoapps/*.py"
}
"""
Aggregate coverage data from XML reports.

groups.json is a JSON-encoded dict mapping group names to source file glob patterns:

    {
        "group_1": "group1/*.py",
        "group_2": "group2/*.py"
    }

This would calculate the line coverage percentage for the source files in each
group, and send those metrics to DataDog:

    test_eng.coverage.group_1 ==> 89.123
    test_eng.coverage.group_2 ==> 45.523

The tool uses the *union* of covered lines across all of the input coverage
XML reports: if a line is covered *anywhere*, it's considered covered.
"""
import fnmatch
import json
from lxml import etree
class CoverageParseError(Exception):
    """
    Error occurred while parsing a coverage report.
    """
    pass
class CoverageData(object):
    """
    Aggregate coverage reports.
    """

    def __init__(self):
        """
        Initialize the coverage data, which has no information until you add a report.
        """
        self._coverage = dict()

    def add_report(self, report_str):
        """
        Add the coverage information from the XML `report_str` to the aggregate data.

        Raises a `CoverageParseError` if the report XML is not a valid coverage report.
        """
        try:
            root = etree.fromstring(report_str)
        except etree.XMLSyntaxError:
            raise CoverageParseError("Warning: Could not parse report as XML")

        if root is not None:
            # Get all classes (source files) in the report
            for class_node in root.xpath('//class'):
                class_filename = class_node.get('filename')
                if class_filename is None:
                    continue

                # If we haven't seen this source file before, create a dict
                # to store its coverage information.
                if class_filename not in self._coverage:
                    self._coverage[class_filename] = dict()

                # Store info for each line in the source file
                for line in class_node.xpath('lines/line'):
                    hits = line.get('hits')
                    line_num = line.get('number')

                    # Ignore lines that do not have the right attributes
                    if line_num is not None and hits is not None:
                        try:
                            line_num = int(line_num)
                            hits = int(hits)
                        except ValueError:
                            pass
                        else:
                            # If any report says the line is covered, mark it covered
                            if hits > 0:
                                self._coverage[class_filename][line_num] = 1
                            # Otherwise, if the line is not already covered, mark it uncovered
                            elif line_num not in self._coverage[class_filename]:
                                self._coverage[class_filename][line_num] = 0
    def coverage(self, source_pattern="*"):
        """
        Calculate the line coverage percentage (float) for source files that match
        `source_pattern` (a fnmatch-style glob pattern).

        If coverage could not be calculated (e.g. because no source files match
        the pattern), returns None.
        """
        num_covered = 0
        total = 0

        # Find source files that match the pattern, then tally total lines and lines covered
        for filename in fnmatch.filter(self._coverage.keys(), source_pattern):
            num_covered += sum(self._coverage[filename].values())
            total += len(self._coverage[filename])

        # Calculate the percentage
        if total > 0:
            return float(num_covered) / float(total) * 100.0
        else:
            print u"Warning: No lines found in source files that match {}".format(source_pattern)
            return None
    @staticmethod
    def _parse_report(report_path):
        """
        Parse the coverage report as XML and return the resulting tree.
        If the report could not be found or parsed, return None.
        """
        try:
            return etree.parse(report_path)
        except IOError:
            print u"Warning: Could not open report at '{path}'".format(path=report_path)
            return None
        except etree.XMLSyntaxError:
            print u"Warning: Could not parse report at '{path}' as XML".format(path=report_path)
            return None
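
# A minimal sketch of the union rule, using cobertura-style reports trimmed to
# the elements `add_report` actually reads (the file name and hit counts are
# illustrative):
#
#   data = CoverageData()
#   data.add_report('<coverage><class filename="foo.py"><lines>'
#                   '<line number="1" hits="0"/></lines></class></coverage>')
#   data.add_report('<coverage><class filename="foo.py"><lines>'
#                   '<line number="1" hits="3"/></lines></class></coverage>')
#   data.coverage('foo.py')  # -> 100.0: covered in any report counts as covered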
class CoverageMetrics(object):
    """
    Collect coverage metrics for DataDog.
    """

    def __init__(self, group_json_path, report_paths):
        self._group_json_path = group_json_path
        self._report_paths = report_paths

    def coverage_metrics(self):
        """
        Find, parse, and create coverage metrics to be sent to DataDog.
        """
        print "Loading group definitions..."
        group_dict = self.load_group_defs(self._group_json_path)

        print "Parsing reports..."
        metrics = self.parse_reports(self._report_paths)

        print "Creating metrics..."
        stats = self.create_metrics(metrics, group_dict)

        print "Done."
        return stats

    @staticmethod
    def load_group_defs(group_json_path):
        """
        Load the dictionary mapping group names to source file patterns
        from the file located at `group_json_path`.

        Prints an error message and re-raises if the file could not be
        opened or parsed.
        """
        try:
            with open(group_json_path) as json_file:
                return json.load(json_file)
        except IOError:
            print u"Could not open group definition file at '{}'".format(group_json_path)
            raise
        except ValueError:
            print u"Could not parse group definitions in '{}'".format(group_json_path)
            raise

    @staticmethod
    def parse_reports(report_paths):
        """
        Parse each coverage report in `report_paths` and return
        a `CoverageData` object containing the aggregate coverage information.
        """
        data = CoverageData()
        for path in report_paths:
            try:
                with open(path) as report_file:
                    data.add_report(report_file.read())
            except IOError:
                print u"Warning: could not open {}".format(path)
            except CoverageParseError:
                print u"Warning: could not parse {} as an XML coverage report".format(path)
        return data

    @staticmethod
    def create_metrics(data, groups):
        """
        Given a `CoverageData` object, create coverage percentages for each group.

        `groups` is a dict mapping aggregate group names to source file patterns.
        Group names are used in the name of the metric sent to DataDog.
        """
        metrics = {}
        for group_name, pattern in groups.iteritems():
            metric = 'test_eng.coverage.{group}'.format(group=group_name.replace(' ', '_'))
            percent = data.coverage(pattern)
            if percent is not None:
                print u"Computed {} ==> {}%".format(metric, percent)
                metrics[metric] = percent
        return metrics
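
# Usage sketch (paths and numbers are hypothetical; the real entry point is
# the publish script below, which feeds in unit_test_groups.json and the
# coverage.xml files found under reports/):
#
#   cov = CoverageMetrics('unit_test_groups.json', ['reports/lms/coverage.xml'])
#   stats = cov.coverage_metrics()   # e.g. {'test_eng.coverage.unit.lms': 89.1}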
"""
Publish build stats to DataDog.
"""
import os
import subprocess

from dogapi import dog_http_api

from coverage_metrics import CoverageMetrics


class PublishStats(object):
    """
    Publish stats to DataDog.
    """

    def __init__(self, api_key):
        dog_http_api.api_key = api_key

    @staticmethod
    def report_metrics(metrics):
        """
        Send metrics to DataDog.

        Arguments:
            metrics (dict): data to publish
        """
        for key, value in metrics.iteritems():
            print u"Sending {} ==> {}%".format(key, value)
            dog_http_api.metric(key, value)


def main(api_key):
    """
    Send stats for everything to DataDog.
    """
    dir_path = os.path.dirname(os.path.relpath(__file__))

    # `find` all unit-test coverage reports; check_output returns a single
    # newline-separated string, so split it into a list of paths
    unit_reports_cmd = ['find', 'reports', '-name', 'coverage.xml']
    unit_report_paths = subprocess.check_output(unit_reports_cmd).split()

    cov_metrics = CoverageMetrics(os.path.join(dir_path, 'unit_test_groups.json'), unit_report_paths)
    coverage_metrics = cov_metrics.coverage_metrics()

    # Publish coverage stats to DataDog
    PublishStats(api_key).report_metrics(coverage_metrics)


if __name__ == "__main__":
    API_KEY = os.environ.get('DATADOG_API_KEY')
    if API_KEY:
        main(API_KEY)
    else:
        print 'SKIP: Publish Stats to Datadog'
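
# Run from the repository root (see the tddium unit-test command below);
# publishing is skipped unless DATADOG_API_KEY is set in the environment:
#
#   DATADOG_API_KEY=<key> python ./scripts/metrics/publish.py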
{
    "unit.lms": "lms/*.py",
    "unit.studio": "cms/*.py",
    "unit.javascript": "*.js",
    "unit.common_lib": "common/lib/*.py",
    "unit.common_app": "common/djangoapps/*.py"
}
current_path=`pwd`
reports_path=$current_path/reports
dest_path=$HOME/results/$TDDIUM_SESSION_ID/session/

echo "Getting Quality Reports... "
pep8_rpt=$reports_path/diff_quality/diff_quality_pep8.html
pylint_rpt=$reports_path/diff_quality/diff_quality_pylint.html

cp -f $pep8_rpt $dest_path
cp -f $pylint_rpt $dest_path
echo "Reports can be found in $dest_path"
import tarfile
import os
import shutil

full_path = os.path.realpath(__file__)
source_dir = full_path.replace("scripts/post_worker.py", "reports/")
output_filename = full_path.replace("post_worker.py", "reports.tar.gz")
print "source dir:", source_dir

count = 0
# Walk every subdirectory and add each folder that is not empty.
# Note: arcname flattens the archive to each directory's basename, so
# same-named directories from different branches of the tree will collide.
with tarfile.open(output_filename, "w:gz") as tar:
    for (path, dirs, files) in os.walk(source_dir):
        if len(files) > 0:
            print "tarring:", path
            tar.add(path, arcname=os.path.basename(path))
            count += 1

session_path = os.path.join(
    os.environ['HOME'],
    'results',
    os.environ['TDDIUM_SESSION_ID'],
    'session')
file_dest = os.path.join(session_path, 'reports.tar.gz')

# If the tar file is not empty, copy it to the proper place
if count > 0:
    print 'copying tar file to:', file_dest
    shutil.copyfile(output_filename, file_dest)

# Attach any screenshots or log files left by failed tests
print 'attaching failed screenshots and logs (if any)'
for (path, dirs, files) in os.walk('test_root/log'):
    for filename in files:
        if filename.find('png') != -1 or filename.find('log') != -1:
            filepath = os.path.join(path, filename)
            print 'copying file:', filepath
            destpath = os.path.join(session_path, filename)
            print 'destination:', destpath
            shutil.copyfile(filepath, destpath)

print 'TDDIUM_SESSION_ID:', os.environ['TDDIUM_SESSION_ID']
# Fetch JSCover (JavaScript coverage), export JSCOVER_JAR so the test runner
# can find it, then run the paver test suite
mkdir -p jscover-dist \
    && wget http://files.edx.org/testeng/JSCover-1.0.2.zip -P jscover-dist \
    && unzip jscover-dist/JSCover-1.0.2.zip -d jscover-dist/ \
    && cp jscover-dist/target/dist/JSCover-all.jar jscover-dist \
    && export JSCOVER_JAR=$PWD/jscover-dist/JSCover-all.jar \
    && paver test
echo '******************************************************'
echo 'Collecting Coverage...'
paver coverage
echo 'Coverage Collection Completed'
current_path=`pwd`
reports_path=$current_path/reports
dest_path=$HOME/results/$TDDIUM_SESSION_ID/session/
unit_combined_rpt=$reports_path/diff_coverage_combined.html
echo 'Copying '$unit_combined_rpt' to '$dest_path
cp -f $unit_combined_rpt $dest_path
echo '******************************************************'
tddium:
  :firefox: '28.0'
  :timeout: 3600
  :timeout_hook: 900
  :tool_config:
    git:
      :version: "1.8.5.5"
  :hooks:
    :pre_setup: "virtualenv $HOME/python-env && $HOME/python-env/bin/pip install -r requirements/edx/paver.txt && $HOME/python-env/bin/pip install -r requirements/edx/pre.txt && $HOME/python-env/bin/pip install -r requirements/edx/base.txt && $HOME/python-env/bin/pip install -r requirements/edx/github.txt && $HOME/python-env/bin/pip install -r requirements/edx/local.txt && $HOME/python-env/bin/pip install -r requirements/edx/post.txt"
    :post_worker: 'python ./scripts/post_worker.py'
  :cache:
    :key_paths:
      - requirements/edx/paver.txt
      - requirements/edx/pre.txt
      - requirements/edx/base.txt
      - requirements/edx/github.txt
      - requirements/edx/local.txt
      - requirements/edx/post.txt
      - package.json
  :mongodb:
    :version: "2.6.4"
  :mysql:
    :version: "5.5"
  :python:
    :python_version: "2.7"
  :headless: true
  :java:
    :java_version: "java-7-openjdk"
  :test_pattern: 'none'
  :tests:
    - :type: custom
      :command: paver run_quality && bash ./scripts/post_quality_build.sh
      :invocation: single
      :output: exit-status
      :report_files:
        - "reports/diff_quality/diff_quality_pylint.html"
    - :type: junit
      :mode: basic
      :command: bash ./scripts/run_ut.sh && python ./scripts/cov_merge.py unit && python ./scripts/metrics/publish.py
      :invocation: single
      :output: exit-status
      :report_files:
        - "reports/cms/nosetests.xml"
        - "reports/common/lib/capa/nosetests.xml"
        - "reports/common/lib/calc/nosetests.xml"
        - "reports/common/lib/chem/nosetests.xml"
        - "reports/common/lib/sandbox-packages/nosetests.xml"
        - "reports/common/lib/symmath/nosetests.xml"
        - "reports/common/lib/xmodule/nosetests.xml"
        - "reports/lms/nosetests.xml"
        - "reports/javascript/javascript_xunit.xml"
    - :type: junit
      :mode: basic
      :command: paver test_acceptance -s lms --extra_args="-v 3 --tag shard_1"
      :invocation: single
      :output: exit-status
      :report_files:
        - "reports/acceptance/lms.xml"
    - :type: junit
      :mode: basic
      :command: paver test_acceptance -s lms --extra_args="-v 3 --tag shard_2"
      :invocation: single
      :output: exit-status
      :report_files:
        - "reports/acceptance/lms.xml"
    - :type: junit
      :mode: basic
      :command: paver test_acceptance -s cms --extra_args="-v 3 --tag shard_1"
      :invocation: single
      :output: exit-status
      :report_files:
        - "reports/acceptance/cms.xml"
    - :type: junit
      :mode: basic
      :command: paver test_acceptance -s cms --extra_args="-v 3 --tag shard_2 --tag shard_3"
      :invocation: single
      :output: exit-status
      :report_files:
        - "reports/acceptance/cms.xml"
    - :type: junit
      :mode: basic
      :command: paver test_bokchoy --extra_args="-a shard_1" && bash ./scripts/coverage.sh shard1
      :invocation: single
      :output: exit-status
      :report_files:
        - "reports/bok_choy/xunit.xml"
    - :type: junit
      :mode: basic
      :command: paver test_bokchoy --extra_args="-a shard_2" && bash ./scripts/coverage.sh shard2
      :invocation: single
      :output: exit-status
      :report_files:
        - "reports/bok_choy/xunit.xml"
    - :type: junit
      :mode: basic
      # shard 3 runs the bok-choy tests not attributed to shards 1 or 2
      :command: paver test_bokchoy --extra_args="-a shard_1=False,shard_2=False" && bash ./scripts/coverage.sh shard3
      :invocation: single
      :output: exit-status
      :report_files:
        - "reports/bok_choy/xunit.xml"