Commit 7713bed4 by Jesse Zoldak

Clean up unused CI scripts

Most of these were originally created under TE-465 for
aggregating results in Solano. See PR #5672.
parent 205f3417
#!/usr/bin/env bash
set -e
###############################################################################
#
# edx-acceptance.sh
#
# Execute acceptance (lettuce/Selenium) tests for edx-platform.
#
# This script can be called from a Jenkins job that defines these
# environment variables:
#
#   TEST_SUITE   - which acceptance test suite to run (required):
#                    "lms": acceptance (Selenium) tests for the LMS
#                    "cms": acceptance (Selenium) tests for Studio
#   FEATURE_PATH - path to the lettuce .feature file containing the tests
#                  to run; if empty, run all the tests.
#
# Other assumptions:
#
# - The edx-platform git repository is checked out by the Jenkins git plugin.
# - Jenkins logs in as user "jenkins".
# - The Jenkins file system root is "/home/jenkins".
# - An init script creates a virtualenv at "/home/jenkins/edx-venv"
#   with some requirements pre-installed (such as scipy).
#
# Jenkins worker setup:
# See the edx/configuration repo for Jenkins worker provisioning scripts.
# The provisioning scripts install requirements that this script depends on!
#
###############################################################################

source "$HOME/jenkins_env"

# Clean up previous builds
git clean -qxfd

# Clear the mongo database
# Note that this prevents us from running jobs in parallel on a single worker.
mongo --quiet --eval 'db.getMongo().getDBNames().forEach(function(i){db.getSiblingDB(i).dropDatabase()})'

# Ensure we have fetched origin/master
# Some of the reporting tools compare the checked out branch to origin/master;
# depending on how the GitHub plugin refspec is configured, this may
# not already be fetched.
git fetch origin master:refs/remotes/origin/master

# Reset the jenkins worker's ruby environment back to
# the state it was in when the instance was spun up.
if [ -e "$HOME/edx-rbenv_clean.tar.gz" ]; then
  rm -rf "$HOME/.rbenv"
  tar -C "$HOME" -xf "$HOME/edx-rbenv_clean.tar.gz"
fi

# Bootstrap Ruby requirements so we can run the tests
bundle install

# Ensure the Ruby environment contains no stray gems
bundle clean --force

# Reset the jenkins worker's virtualenv back to the
# state it was in when the instance was spun up.
if [ -e "$HOME/edx-venv_clean.tar.gz" ]; then
  rm -rf "$HOME/edx-venv"
  tar -C "$HOME" -xf "$HOME/edx-venv_clean.tar.gz"
fi

# Activate the Python virtualenv
source "$HOME/edx-venv/bin/activate"

# Fail fast with a clear message rather than passing an empty suite to paver.
: "${TEST_SUITE:?TEST_SUITE must be set to 'lms' or 'cms'}"

# FEATURE_PATH may legitimately be empty/unset (= run all tests).
paver test_acceptance -s "${TEST_SUITE}" --extra_args="-v 3 ${FEATURE_PATH:-}"
import os
import sys
from textwrap import dedent
from bs4 import BeautifulSoup
import multiprocessing
# Static HTML <head> boilerplate prepended to every merged coverage report.
# The stylesheet and JS helpers (jquery, tablesorter, hotkeys, coverage_html)
# are the assets coverage.py's HTML report normally ships next to index.html,
# served here from a shared Google Drive host.  The .hide-content rule is used
# to toggle the per-file sections of the merged single-page report.
FIRST = dedent(
'''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
<title>CMS Python Test Coverage Report</title>
<link rel='stylesheet' href='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/style.css' type='text/css'>
<script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/jquery.min.js'></script>
<script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/jquery.tablesorter.min.js'></script>
<script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/jquery.hotkeys.js'></script>
<script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/coverage_html.js'></script>
<script type='text/javascript' charset='utf-8'>
jQuery(document).ready(coverage.index_ready);
</script>
<style>
.hide-content {
display: none;
}
</style>
</head>''')

# Closing boilerplate appended after all per-file sections: a click handler on
# the index table's file links that reveals the matching per-file <div>
# (element ids are the file paths with '/' replaced by '_') and hides the
# previously shown one.
LAST = dedent(
'''<script type="text/javascript">
String.prototype.replaceAll = function (find, replace) {
var str = this;
return str.replace(new RegExp(find, 'g'), replace);
};
$('.file a').click(function(event) {
event.preventDefault();
var id = "#" + event.currentTarget.innerHTML.replaceAll('/', '_');
if (typeof window.last_source_file_id !== 'undefined'){
$(window.last_source_file_id).addClass( "hide-content" );
}
window.last_source_file_id = id;
$(id).removeClass( "hide-content" );
location.href = id;
});
</script>
</body>
</html>''')
class ReportMerge(object):
    """Merge multiple html coverage reports into one single-page report."""

    # Destination directory for merged reports.  Evaluated at import time, so
    # HOME and TDDIUM_SESSION_ID must both be set in the environment
    # (TDDIUM_* is supplied by the Solano/tddium CI worker).
    DESTINATION = os.path.join(os.environ['HOME'], 'results', os.environ['TDDIUM_SESSION_ID'], 'session')

    def __init__(self):
        # Derive the reports directory from this script's own location:
        # <repo>/scripts/cov_merge.py -> <repo>/reports/
        self.reports_dir = os.path.realpath(__file__).replace("scripts/cov_merge.py", "reports/")

    def _files(self, cover_path):
        """
        Return list of file paths in `cover_path`. `cover_path` will be something like */reports/cms/cover
        """
        # Keep only per-source-file HTML pages; index.html is handled separately.
        # NOTE(review): this helper is not referenced elsewhere in this file.
        include = lambda f: f.endswith('.html') and os.path.basename(f) != 'index.html'
        return [os.path.join(cover_path, f) for f in os.listdir(cover_path) if include(f)]

    def merge(self, modules, output_file=None):
        """
        Merge reports for `modules`.

        Arguments:
            modules (list): module directory names under the reports dir
                (e.g. ['cms', 'lms'])
            output_file (str): name of output report file -- only used for bok_choy reports
        """
        # Walk each module's report tree and merge every 'cover' directory found.
        for module in modules:
            for (path, _, _) in os.walk(os.path.join(self.reports_dir, module)):
                if os.path.basename(path) == 'cover':
                    self.merge_report(path, output_file)

    def merge_report(self, path, output_file):
        """
        Collect multiple parts of a report and join them to create a single report.

        Arguments:
            path (str): path where multiple files are located to be merged
            output_file (str): name of output report file -- only used for bok_choy reports
        """
        content = list()

        # Extract total coverage percentage and file links table
        index_html = os.path.join(path, 'index.html')
        with open(index_html) as index_file:
            soup = BeautifulSoup(index_file)
            total_percentage = soup.find('div', id='header')
            # Drop the embedded image; it is not wanted in the merged page.
            total_percentage.find('img').decompose()
            index_table = soup.find('div', id='index')

        # Extract file names
        files = [os.path.join(path, name['href']) for name in index_table.find_all('a')]
        if not files:
            return

        print 'Merging Report for {}'.format(path)

        # Collect different parts of html report
        content.append(FIRST)
        content.append('<body>')
        content.append(str(total_percentage))
        content.append(str(index_table))
        for html in files:
            content.append(self._html_content(html))
        content.append(LAST)

        # Pick the destination filename: an explicit name for bok-choy runs,
        # otherwise derive it from the report path
        # (e.g. .../reports/cms/cover -> cms_coverage.html).
        if output_file:
            report_path = os.path.join(self.DESTINATION, output_file)
        else:
            report_filename = path.split('reports/')[1].split('/cover')[0].replace('/', '_')
            report_path = os.path.join(self.DESTINATION, report_filename + '_coverage.html')

        # Write everything to single report file
        with open(report_path, 'w') as report_file:
            report_file.write('\n'.join(content))

        print 'Report Merged for {}'.format(path)

    def _html_content(self, html):
        """
        Returns html tags of interest for file specified by `html`
        """
        # Create id for each link in file links table; matches the id scheme
        # used by the click handler in LAST.
        navigate_div_id = os.path.basename(html).split('.')[0].replace('/', '_')
        navigate_div_start = "<div id='{}' class='hide-content'>\n".format(navigate_div_id)
        # NOTE(review): this format() call is a no-op (no placeholder in the
        # string) but is harmless.
        navigate_div_close = "\n</div>".format(navigate_div_id)

        content = list()
        content.append(navigate_div_start)
        with open(html) as html_file:
            soup = BeautifulSoup(html_file)
            header = soup.find('div', id='header')
            header.find('img').decompose()
            source = soup.find('div', id='source')
            # The source pane may or may not contain an image; remove if present.
            source_img = source.find('img')
            if source_img:
                source_img.decompose()
            content.append(str(header))
            content.append(str(source))
        content.append(navigate_div_close)
        return '\n'.join(content)
if __name__ == '__main__':
    # Usage:
    #   cov_merge.py bok_choy <output_file.html>   merge bok-choy coverage
    #   cov_merge.py unit                          merge unit-test coverage
    args = sys.argv

    if len(args) < 2:
        # Guard against IndexError when no suite argument is supplied.
        print('Unsupported Test Suite')
    elif 'bok_choy' in args[1]:
        paths = ['bok_choy']
        # The output filename is optional; ReportMerge.merge derives a name
        # from the report path when it is omitted.
        output = args[2] if len(args) > 2 else None
        rm = ReportMerge()
        rm.merge(paths, output_file=output)
    elif 'unit' in args[1]:
        paths = ['common', 'cms', 'lms']
        # Merge each report group in its own process to speed things up.
        for pth in paths:
            rm = ReportMerge()
            mp = multiprocessing.Process(target=rm.merge, args=([pth],))
            mp.start()
    else:
        # Fixed typo in the message: 'Suit' -> 'Suite'.
        print('Unsupported Test Suite')
#!/bin/bash
# Collect and merge bok-choy coverage for one shard.
#
# Usage: $0 shard1|shard2|shard3
#
# The three shards were previously three copy-pasted case arms differing only
# in the shard number; they are collapsed here into one arm.
case "$1" in
  shard1|shard2|shard3)
    # Extract the trailing digit, e.g. "shard2" -> "2".
    shard_num=${1#shard}
    echo "Collecting Coverage for Bok-Choy Shard${shard_num}"
    paver bokchoy_coverage
    echo "Merging Coverage into a Single HTML File for Bok-Choy Shard${shard_num}"
    python ./scripts/cov_merge.py bok_choy "bok_choy_shard${shard_num}_coverage.html"
    ;;
  *)
    echo "Invalid Bok-Choy Shard Value!"
    ;;
esac
{
"acceptance.lms": "lms/*.py",
"acceptance.studio": "cms/*.py",
"acceptance.common_lib": "common/lib/*.py",
"acceptance.common_app": "common/djangoapps/*.py"
}
"""
Aggregate coverage data from XML reports.
groups.json is a JSON-encoded dict mapping group names to source file glob patterns:
{
"group_1": "group1/*.py",
"group_2": "group2/*.py"
}
This would calculate line coverage percentages for source files in each group,
and send those metrics to DataDog:
testeng.coverage.group_1 ==> 89.123
testeng.coverage.group_2 ==> 45.523
The tool uses the *union* of covered lines across each of the input
coverage XML reports. If a line is covered *anywhere*, it's considered covered.
"""
import fnmatch
import json
from lxml import etree
class CoverageParseError(Exception):
    """Raised when a coverage report cannot be parsed as a coverage XML document."""
    pass
class CoverageData(object):
    """
    Aggregate line-coverage information from multiple XML coverage reports.

    A line counts as covered if *any* added report marks it as hit.
    """

    def __init__(self):
        """
        Initialize the coverage data, which has no information until you add a report.
        """
        # Maps source filename -> {line number -> 1 (covered) or 0 (uncovered)}
        self._coverage = dict()

    def add_report(self, report_str):
        """
        Add the coverage information from the XML `report_str` to the aggregate data.
        Raises a `CoverageParseError` if the report XML is not a valid coverage report.
        """
        try:
            root = etree.fromstring(report_str)
        except etree.XMLSyntaxError:
            raise CoverageParseError("Warning: Could not parse report as XML")

        if root is not None:
            # Get all classes (source files) in the report
            for class_node in root.xpath('//class'):
                class_filename = class_node.get('filename')
                if class_filename is None:
                    continue

                # If we haven't seen this source file before, create a dict
                # to store its coverage information.
                if class_filename not in self._coverage:
                    self._coverage[class_filename] = dict()

                # Store info for each line in the source file
                for line in class_node.xpath('lines/line'):
                    hits = line.get('hits')
                    line_num = line.get('number')

                    # Ignore lines that do not have the right attributes.
                    # BUGFIX: a missing 'hits' attribute yields None, and
                    # int(None) raises TypeError (not ValueError), which
                    # previously crashed the parse -- catch both.
                    if line_num is not None:
                        try:
                            line_num = int(line_num)
                            hits = int(hits)
                        except (TypeError, ValueError):
                            pass
                        else:
                            # If any report says the line is covered, set it to covered
                            if hits > 0:
                                self._coverage[class_filename][line_num] = 1
                            # Otherwise if the line is not already covered, set it to uncovered
                            elif line_num not in self._coverage[class_filename]:
                                self._coverage[class_filename][line_num] = 0

    def coverage(self, source_pattern="*"):
        """
        Calculate line coverage percentage (float) for source files that match
        `source_pattern` (a fnmatch-style glob pattern).

        If coverage could not be calculated (e.g. because no source files match
        the pattern), returns None.
        """
        num_covered = 0
        total = 0

        # Find source files that match the pattern then calculate total lines
        # and number covered.
        for filename in fnmatch.filter(self._coverage.keys(), source_pattern):
            num_covered += sum(self._coverage[filename].values())
            total += len(self._coverage[filename])

        # Calculate the percentage
        if total > 0:
            return float(num_covered) / float(total) * 100.0
        else:
            # Single-argument print(...) works under both Python 2 and 3.
            print(u"Warning: No lines found in source files that match {}".format(source_pattern))
            return None

    @staticmethod
    def _parse_report(report_path):
        """
        Parse the coverage report as XML and return the resulting tree.
        If the report could not be found or parsed, return None.
        """
        try:
            return etree.parse(report_path)
        except IOError:
            print(u"Warning: Could not open report at '{path}'".format(path=report_path))
            return None
        # BUGFIX: lxml raises XMLSyntaxError (a SyntaxError subclass, not a
        # ValueError) for malformed XML, so the old `except ValueError` never
        # fired; catch both to keep the documented "return None" contract.
        except (etree.XMLSyntaxError, ValueError):
            print(u"Warning: Could not parse report at '{path}' as XML".format(path=report_path))
            return None
class CoverageMetrics(object):
    """
    Collect coverage metrics for DataDog from group definitions and XML reports.
    """

    def __init__(self, group_json_path, report_paths):
        """
        Arguments:
            group_json_path (str): path to the JSON file mapping group names
                to source file glob patterns
            report_paths (iterable): paths of coverage XML reports to aggregate
        """
        self._group_json_path = group_json_path
        self._report_paths = report_paths

    def coverage_metrics(self):
        """
        Find, parse, and create coverage metrics to be sent to DataDog.
        """
        # Single-argument print(...) works under both Python 2 and 3.
        print("Loading group definitions...")
        group_dict = self.load_group_defs(self._group_json_path)
        print("Parsing reports...")
        metrics = self.parse_reports(self._report_paths)
        print("Creating metrics...")
        stats = self.create_metrics(metrics, group_dict)
        print("Done.")
        return stats

    @staticmethod
    def load_group_defs(group_json_path):
        """
        Load the dictionary mapping group names to source file patterns
        from the file located at `group_json_path`.

        Re-raises IOError/ValueError (with a diagnostic message) if the
        groups could not be read or parsed.
        """
        try:
            with open(group_json_path) as json_file:
                return json.load(json_file)
        except IOError:
            print(u"Could not open group definition file at '{}'".format(group_json_path))
            raise
        except ValueError:
            print(u"Could not parse group definitions in '{}'".format(group_json_path))
            raise

    @staticmethod
    def parse_reports(report_paths):
        """
        Parses each coverage report in `report_paths` and returns
        a `CoverageData` object containing the aggregate coverage information.

        Unreadable or unparseable reports are skipped with a warning.
        """
        data = CoverageData()
        for path in report_paths:
            try:
                with open(path) as report_file:
                    data.add_report(report_file.read())
            except IOError:
                print(u"Warning: could not open {}".format(path))
            except CoverageParseError:
                print(u"Warning: could not parse {} as an XML coverage report".format(path))
        return data

    @staticmethod
    def create_metrics(data, groups):
        """
        Given a `CoverageData` object, create coverage percentages for each group.

        `groups` is a dict mapping aggregate group names to source file patterns.
        Group names are used in the name of the metric sent to DataDog;
        groups with no matching source files are omitted.
        """
        metrics = {}
        # .items() works under both Python 2 and 3 (iteritems() is py2-only).
        for group_name, pattern in groups.items():
            metric = 'test_eng.coverage.{group}'.format(group=group_name.replace(' ', '_'))
            percent = data.coverage(pattern)
            if percent is not None:
                print(u"Sending {} ==> {}%".format(metric, percent))
                metrics[metric] = percent
        return metrics
"""
Publish Build Stats.
"""
import os
import subprocess
from dogapi import dog_http_api
from coverage_metrics import CoverageMetrics
class PublishStats(object):
    """
    Publish build statistics (metric name -> value) to DataDog.
    """

    def __init__(self, api_key):
        """
        Arguments:
            api_key (str): DataDog API key, stored on the shared dogapi
                HTTP client.
        """
        dog_http_api.api_key = api_key

    @staticmethod
    def report_metrics(metrics):
        """
        Send metrics to DataDog.

        Arguments:
            metrics (dict): mapping of metric name to numeric value to publish
        """
        # .items() works under both Python 2 and 3 (iteritems() is py2-only);
        # single-argument print(...) is likewise compatible with both.
        for key, value in metrics.items():
            print(u"Sending {} ==> {}%".format(key, value))
            dog_http_api.metric(key, value)
def main(api_key):
    """
    Aggregate unit-test coverage from every `coverage.xml` under ./reports
    and publish the resulting metrics to DataDog.

    Arguments:
        api_key (str): DataDog API key
    """
    dir_path = os.path.dirname(os.path.relpath(__file__))

    # BUGFIX: pass the pattern WITHOUT embedded quotes -- subprocess does not
    # invoke a shell, so '"coverage.xml"' was matched literally (quotes and
    # all) and found nothing.
    unit_reports_cmd = ['find', 'reports', '-name', 'coverage.xml']

    # BUGFIX: check_output returns one newline-separated blob; the old code
    # passed it straight to CoverageMetrics, which then iterated over it
    # character by character.  Split it into a list of report paths.
    find_output = subprocess.check_output(unit_reports_cmd)
    if isinstance(find_output, bytes):
        find_output = find_output.decode('utf-8')
    unit_report_paths = [line for line in find_output.splitlines() if line]

    cov_metrics = CoverageMetrics(os.path.join(dir_path, 'unit_test_groups.json'), unit_report_paths)
    coverage_metrics = cov_metrics.coverage_metrics()

    # Publish Coverage Stats to DataDog
    PublishStats(api_key).report_metrics(coverage_metrics)
if __name__ == "__main__":
    # Publish only when DataDog credentials are present in the environment;
    # otherwise skip quietly so the build does not fail.
    API_KEY = os.environ.get('DATADOG_API_KEY')
    if not API_KEY:
        print('SKIP: Publish Stats to Datadog')
    else:
        main(API_KEY)
{
"unit.lms": "lms/*.py",
"unit.studio": "cms/*.py",
"unit.javascript": "*.js",
"unit.common_lib": "common/lib/*.py",
"unit.common_app": "common/djangoapps/*.py"
}
#!/bin/sh
# Overall build status: store_exit_code records the most recent failing
# step's exit code here, and the script exits with this value at the end.
EXIT=0
store_exit_code() {
  # $? must be read first: it refers to the command that ran immediately
  # before this function was invoked.
  last_status=$?
  # Record the most recent failure; successful steps leave EXIT untouched.
  if [ "$last_status" -ne 0 ]; then
    EXIT=$last_status
  fi
}
echo 'Configuring jscover...'
# Download JSCover and export the jar location for the JS coverage tooling.
# The whole chain is one step: a failure anywhere is recorded once.
mkdir -p jscover-dist \
    && wget http://files.edx.org/testeng/JSCover-1.0.2.zip -P jscover-dist \
    && unzip jscover-dist/JSCover-1.0.2.zip -d jscover-dist/ \
    && cp jscover-dist/target/dist/JSCover-all.jar jscover-dist \
    && export JSCOVER_JAR="$PWD/jscover-dist/JSCover-all.jar"
store_exit_code
echo 'jscover configured'

paver test
store_exit_code

echo 'Collecting Coverage...'
paver coverage
store_exit_code
echo 'Coverage Collection Completed'

# Copy the combined diff-coverage report into the Solano results directory.
current_path=$(pwd)
reports_path="${current_path}/reports"
dest_path="${HOME}/results/${TDDIUM_SESSION_ID}/session/"
unit_combined_rpt="${reports_path}/diff_coverage_combined.html"
echo "Copying ${unit_combined_rpt} to ${dest_path}"
cp -f "${unit_combined_rpt}" "${dest_path}"
store_exit_code
echo "Copied ${unit_combined_rpt}"

echo 'Merging unit coverage reports...'
python ./scripts/cov_merge.py unit && python ./scripts/metrics/publish.py
store_exit_code
echo 'Unit coverage reports merged'

# Exit with the first recorded failure's code (0 if every step succeeded).
exit "${EXIT}"
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment