cov_merge.py
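"""
Merge the per-file HTML coverage reports produced by coverage.py into a
single self-contained report per module (common, cms, lms or bok_choy).
"""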
import os
import sys
from textwrap import dedent
from bs4 import BeautifulSoup
import multiprocessing


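# Static <head> fragment for the merged report. The stylesheet and the
# coverage.py JavaScript helpers (tablesorter, hotkeys, coverage_html.js)
# are loaded from an external Google Drive host instead of being copied
# alongside the report.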
FIRST = dedent(
    '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
    <html>
    <head>
    <meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
    <title>CMS Python Test Coverage Report</title>
    <link rel='stylesheet' href='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/style.css' type='text/css'>

    <script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/jquery.min.js'></script>
    <script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/jquery.tablesorter.min.js'></script>
    <script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/jquery.hotkeys.js'></script>
    <script type='text/javascript' src='https://googledrive.com/host/0B0bNP036USIkLWdyRFFlSDNzZHc/coverage_html.js'></script>
    <script type='text/javascript' charset='utf-8'>
        jQuery(document).ready(coverage.index_ready);
    </script>
    <style>
        .hide-content {
            display: none;
        }
    </style>
    </head>''')


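# Closing fragment. Each source file's report is wrapped in a hidden <div>
# (see _html_content below); this click handler reveals the <div> matching
# the link clicked in the index table and re-hides the previously shown one.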
LAST = dedent(
    '''<script type="text/javascript">
    String.prototype.replaceAll = function (find, replace) {
        var str = this;
        return str.replace(new RegExp(find, 'g'), replace);
    };

    $('.file a').click(function(event) {
        event.preventDefault();
        var id = "#" + event.currentTarget.innerHTML.replaceAll('/', '_');
        if (typeof window.last_source_file_id !== 'undefined'){
            $(window.last_source_file_id).addClass( "hide-content" );
        }
        window.last_source_file_id = id;
        $(id).removeClass( "hide-content" );
        location.href = id;
    });
    </script>

    </body>
    </html>''')


class ReportMerge(object):
    """Merge multiple html coverage reports"""

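    # Merged reports are written to the per-session CI results directory;
    # HOME and TDDIUM_SESSION_ID are assumed to be set by the Tddium/Solano
    # CI environment that runs this script.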
    DESTINATION = os.path.join(os.environ['HOME'], 'results', os.environ['TDDIUM_SESSION_ID'], 'session')

    def __init__(self):
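        # The reports/ directory is assumed to sit next to scripts/ in the
        # repository root; derive its path from this file's own location.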
        self.reports_dir = os.path.realpath(__file__).replace("scripts/cov_merge.py", "reports/")

    def _files(self, cover_path):
        """
        Return the list of per-file HTML report paths in `cover_path`, excluding index.html.
        `cover_path` will be something like */reports/cms/cover
        """
        include = lambda f: f.endswith('.html') and os.path.basename(f) != 'index.html'
        return [os.path.join(cover_path, f) for f in os.listdir(cover_path) if include(f)]

    def merge(self, modules, output_file=None):
        """
        Merge reports for `modules`

        Arguments:
            modules (list): names of report sub-directories (e.g. 'cms', 'bok_choy') to scan for `cover` directories
            output_file (str): name of output report file -- only used for bok_choy reports

        """
        for module in modules:
            for (path, _, _) in os.walk(os.path.join(self.reports_dir, module)):
                if os.path.basename(path) == 'cover':
                    self.merge_report(path, output_file)

    def merge_report(self, path, output_file):
        """
        Collect multiple parts of a report and join them to create a single report.

        Arguments:
            path (str): path where multiple files are located to be merged
            output_file (str): name of output report file -- only used for bok_choy reports

        """
        content = list()

        # Extract total coverage percentage and file links table
        index_html = os.path.join(path, 'index.html')
        with open(index_html) as index_file:
            soup = BeautifulSoup(index_file, 'html.parser')
            total_percentage = soup.find('div', id='header')
            total_percentage.find('img').decompose()
            index_table = soup.find('div', id='index')

        # Extract file names
        files = [os.path.join(path, name['href']) for name in index_table.find_all('a')]
        if not files:
            return

        print('Merging Report for {}'.format(path))

        # Collect different parts of html report
        content.append(FIRST)
        content.append('<body>')
        content.append(str(total_percentage))
        content.append(str(index_table))
        for html in files:
            content.append(self._html_content(html))

        content.append(LAST)

        if output_file:
            report_path = os.path.join(self.DESTINATION, output_file)
        else:
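            # Derive the file name from the report path,
            # e.g. .../reports/cms/cover -> cms_coverage.html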
            report_filename = path.split('reports/')[1].split('/cover')[0].replace('/', '_')
            report_path = os.path.join(self.DESTINATION, report_filename + '_coverage.html')

        # Write everything to single report file
        with open(report_path, 'w') as report_file:
            report_file.write('\n'.join(content))

        print('Report Merged for {}'.format(path))

    def _html_content(self, html):
        """
        Return the header and source sections of the report file `html`, wrapped
        in a hidden <div> that the index links can toggle.
        """
        # Create id for each link in file links table
        navigate_div_id = os.path.basename(html).split('.')[0].replace('/', '_')
        navigate_div_start = "<div id='{}' class='hide-content'>\n".format(navigate_div_id)
        navigate_div_close = "\n</div>"

        content = list()
        content.append(navigate_div_start)

        with open(html) as html_file:
            soup = BeautifulSoup(html_file, 'html.parser')
            header = soup.find('div', id='header')
            header.find('img').decompose()
            source = soup.find('div', id='source')
            source_img = source.find('img')
            if source_img:
                source_img.decompose()

            content.append(str(header))
            content.append(str(source))

        content.append(navigate_div_close)

        return '\n'.join(content)

if __name__ == '__main__':
    args = sys.argv

    if len(args) > 2 and 'bok_choy' in args[1]:
        paths = ['bok_choy']
        rm = ReportMerge()
        rm.merge(paths, output_file=args[2])
    elif len(args) > 1 and 'unit' in args[1]:
        paths = ['common', 'cms', 'lms']
        for pth in paths:
            rm = ReportMerge()
            mp = multiprocessing.Process(target=rm.merge, args=([pth],))
            mp.start()
    else:
        print('Unsupported Test Suite')
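
# Example invocations (argument values are illustrative):
#   python scripts/cov_merge.py unit                            # merge common, cms and lms reports in parallel
#   python scripts/cov_merge.py bok_choy bok_choy_coverage.html  # merge bok_choy reports into the named file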