Commit 64e49168 by Gabriel Falcão

Merge pull request #359 from infoxchange/subunit-output

Subunit output
parents 4454a23f 855dc4e8
......@@ -220,8 +220,39 @@ that it's colorful.
Continuous-Integration_ server, like Hudson_. You may choose
verbosity level 1, 2 or 3 so that the output won't look messy.
***************************************
integrating with continuous integration
***************************************
Lettuce can use Subunit_ to output test results.
Subunit is a stream format that can be multiplexed, viewed in real time or
converted to many other formats, such as xUnit/jUnit XML.
.. highlight:: bash
::
user@machine:~/projects/myproj$ lettuce --with-subunit
user@machine:~/projects/myproj$ subunit2junitxml < subunit.bin > lettucetests.xml
The `--subunit-file` flag can be used to specify a filename other than
`subunit.bin`; this is important if you're combining several test runs.
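For example, to report on two separate runs together (a sketch: the
`foo.bin` and `bar.bin` filenames are only illustrative, and it assumes
your Subunit tooling accepts concatenated v2 streams, which the packet
format is designed to allow):

.. highlight:: bash

::

user@machine:~/projects/myproj$ lettuce apps/foo --with-subunit --subunit-file foo.bin
user@machine:~/projects/myproj$ lettuce apps/bar --with-subunit --subunit-file bar.bin
user@machine:~/projects/myproj$ cat foo.bin bar.bin | subunit2junitxml > lettucetests.xml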
including coverage
==================
You can also get test coverage information using the `coverage` package.
.. highlight:: bash
::
user@machine:~/projects/myproj$ coverage run lettuce --with-subunit
user@machine:~/projects/myproj$ coverage xml
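The XML report is written to `coverage.xml` in Cobertura format, which
Hudson_ and most other CI servers can consume alongside the converted
Subunit results.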
***********************
getting help from shell
***********************
.. highlight:: bash
......@@ -234,3 +265,4 @@ Shows all the options described here.
.. _Continuous-Integration: http://www.martinfowler.com/articles/continuousIntegration.html
.. _Hudson: http://hudson-ci.org/
.. _Subunit: https://launchpad.net/subunit
......@@ -43,6 +43,7 @@ from lettuce.registry import CALLBACK_REGISTRY
from lettuce.exceptions import StepLoadingError
from lettuce.plugins import (
xunit_output,
subunit_output,
autopdb
)
from lettuce import fs
......@@ -87,8 +88,9 @@ class Runner(object):
features and step definitions on there.
"""
def __init__(self, base_path, scenarios=None, verbosity=0, random=False,
enable_xunit=False, xunit_filename=None, tags=None,
failfast=False, auto_pdb=False):
enable_xunit=False, xunit_filename=None,
enable_subunit=False, subunit_filename=None,
tags=None, failfast=False, auto_pdb=False):
""" lettuce.Runner will try to find a terrain.py file and
import it from within `base_path`
"""
......@@ -126,6 +128,9 @@ class Runner(object):
if enable_xunit:
xunit_output.enable(filename=xunit_filename)
if enable_subunit:
subunit_output.enable(filename=subunit_filename)
reload(output)
self.output = output
......
......@@ -66,6 +66,18 @@ def main(args=sys.argv[1:]):
help='Write JUnit XML to this file. Defaults to '
'lettucetests.xml')
parser.add_option("--with-subunit",
dest="enable_subunit",
action="store_true",
default=False,
help='Output Subunit test results to a file')
parser.add_option("--subunit-file",
dest="subunit_filename",
default=None,
help='Write Subunit data to this file. Defaults to '
'subunit.bin')
parser.add_option("--failfast",
dest="failfast",
default=False,
......@@ -98,6 +110,8 @@ def main(args=sys.argv[1:]):
random=options.random,
enable_xunit=options.enable_xunit,
xunit_filename=options.xunit_file,
enable_subunit=options.enable_subunit,
subunit_filename=options.subunit_filename,
failfast=options.failfast,
auto_pdb=options.auto_pdb,
tags=tags,
......
......@@ -458,6 +458,8 @@ class Step(object):
if run_callbacks:
call_hook('before_each', 'step', step)
call_hook('before_output', 'step', step)
if not steps_failed and not steps_undefined:
step.run(ignore_case)
steps_passed.append(step)
......@@ -473,6 +475,9 @@ class Step(object):
finally:
all_steps.append(step)
call_hook('after_output', 'step', step)
if run_callbacks:
call_hook('after_each', 'step', step)
......
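For plugin authors, the practical effect of the two new hooks is an ordering
guarantee around every step. A minimal sketch (hypothetical terrain code,
not part of this change) that would log the sequence:

from lettuce.terrain import before, after

@before.each_step
def log_before_each(step):
    print 'before_each'      # 1: fires first, before the step runs

@before.step_output
def log_before_output(step):
    print 'before_output'    # 2: just before the step runs and reports

@after.step_output
def log_after_output(step):
    print 'after_output'     # 3: in the finally block, even on failure

@after.each_step
def log_after_each(step):
    print 'after_each'       # 4: fires last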
......@@ -79,6 +79,18 @@ class Command(BaseCommand):
make_option('--xunit-file', action='store', dest='xunit_file', default=None,
help='Write JUnit XML to this file. Defaults to lettucetests.xml'),
make_option('--with-subunit',
action='store_true',
dest='enable_subunit',
default=False,
help='Output Subunit test results to a file'),
make_option('--subunit-file',
action='store',
dest='subunit_file',
default=None,
help='Write Subunit data to this file. Defaults to subunit.bin'),
make_option("--failfast", dest="failfast", default=False,
action="store_true", help='Stop running in the first failure'),
......@@ -161,7 +173,9 @@ class Command(BaseCommand):
runner = Runner(path, options.get('scenarios'), verbosity,
enable_xunit=options.get('enable_xunit'),
enable_subunit=options.get('enable_subunit'),
xunit_filename=options.get('xunit_file'),
subunit_filename=options.get('subunit_file'),
tags=tags, failfast=failfast, auto_pdb=auto_pdb)
result = runner.run()
......@@ -181,7 +195,7 @@ class Command(BaseCommand):
finally:
registry.call_hook('after', 'harvest', results)
if test_database:
self._testrunner.teardown_databases(self._old_db_config)
......
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import sys
from StringIO import StringIO
from lettuce.terrain import before, after
from subunit.v2 import StreamResultToBytes
from subunit.iso8601 import Utc
def open_file(filename):
    """
    Open a subunit output file in binary mode (subunit v2 is a binary
    packet stream).

    This is not a context manager because it is used asynchronously by
    hooks. It lives outside the scope of enable() so that it can be
    patched in our tests.
    """
    filename = filename or 'subunit.bin'
    return open(filename, 'wb')
def close_file(file_):
    """
    Close the subunit output file (patched in our tests so the stream
    can be inspected instead).
    """
    file_.close()
def enable(filename=None):
file_ = open_file(filename)
streamresult = StreamResultToBytes(file_)
streamresult.startTestRun()
real_stdout = sys.stdout
real_stderr = sys.stderr
@before.each_scenario
def before_scenario(scenario):
# create redirects for stdout and stderr
scenario.stdout = StringIO()
scenario.stderr = StringIO()
try:
test_tags = scenario.tags
except AttributeError:
test_tags = ()
streamresult.status(test_id=get_test_id(scenario),
test_status='inprogress',
test_tags=test_tags,
timestamp=now())
@before.step_output
def capture_output(step):
# skip steps that aren't attached to a scenario
if not step.scenario:
return
sys.stdout = step.scenario.stdout
sys.stderr = step.scenario.stderr
@after.step_output
def uncapture_output(step):
sys.stdout = real_stdout
sys.stderr = real_stderr
@after.each_scenario
def after_scenario(scenario):
streamresult.status(test_id=get_test_id(scenario),
file_name='stdout',
file_bytes=scenario.stdout.getvalue().encode('utf-8'),
mime_type='text/plain; charset=utf8',
eof=True)
streamresult.status(test_id=get_test_id(scenario),
file_name='stderr',
file_bytes=scenario.stderr.getvalue().encode('utf-8'),
mime_type='text/plain; charset=utf8',
eof=True)
if scenario.passed:
streamresult.status(test_id=get_test_id(scenario),
test_status='success',
timestamp=now())
else:
streamresult.status(test_id=get_test_id(scenario),
test_status='fail',
timestamp=now())
@after.each_step
def after_step(step):
# skip steps that aren't attached to a scenario
if not step.scenario:
return
test_id = get_test_id(step.scenario)
if step.passed:
    marker = u'✔'
elif not step.defined_at:
    marker = u'?'
elif step.failed:
    marker = u'❌'
    # attach the traceback, when the failure captured one
    try:
        streamresult.status(test_id=test_id,
                            file_name='traceback',
                            file_bytes=step.why.traceback.encode('utf-8'),
                            mime_type='text/plain; charset=utf8')
    except AttributeError:
        pass
elif not step.ran:
    marker = u' '
else:
    raise AssertionError("Internal error")
steps = u'{marker} {sentence}\n'.format(
marker=marker,
sentence=step.sentence)
streamresult.status(test_id=test_id,
file_name='steps',
file_bytes=steps.encode('utf-8'),
mime_type='text/plain; charset=utf8')
@after.all
def after_all(total):
streamresult.stopTestRun()
close_file(file_)
def get_test_id(scenario):
    """
    Build the subunit test id; backgrounds have no name attribute.
    """
try:
return '{feature}: {scenario}'.format(
feature=scenario.feature.name,
scenario=scenario.name)
except AttributeError:
return '{feature}: Background'.format(
feature=scenario.feature.name)
def now():
"""
A timestamp suitable for subunit
"""
return datetime.datetime.now(tz=Utc())
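For reference, the plugin above boils down to a handful of
StreamResultToBytes.status() calls. A minimal standalone sketch (the test id
and output are made up; it uses the same python-subunit v2 API imported
above) that emits one passing test with attached stdout:

import datetime
from io import BytesIO
from subunit.v2 import StreamResultToBytes
from subunit.iso8601 import Utc

stream = BytesIO()
result = StreamResultToBytes(stream)
result.startTestRun()
# mark the test as running, attach its output, then record the verdict
result.status(test_id='some feature: some scenario',
              test_status='inprogress',
              timestamp=datetime.datetime.now(tz=Utc()))
result.status(test_id='some feature: some scenario',
              file_name='stdout',
              file_bytes='Badger\n'.encode('utf-8'),
              mime_type='text/plain; charset=utf8',
              eof=True)
result.status(test_id='some feature: some scenario',
              test_status='success',
              timestamp=datetime.datetime.now(tz=Utc()))
result.stopTestRun()
# stream.getvalue() now holds the binary packets that subunit2junitxml reads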
......@@ -93,6 +93,8 @@ CALLBACK_REGISTRY = CallbackDict(
'step': {
'before_each': [],
'after_each': [],
'before_output': [],
'after_output': [],
},
'scenario': {
'before_each': [],
......
......@@ -53,6 +53,7 @@ class Main(object):
for name, where, when in (
('all', 'all', '%(0)s'),
('each_step', 'step', '%(0)s_each'),
('step_output', 'step', '%(0)s_output'),
('each_scenario', 'scenario', '%(0)s_each'),
('each_background', 'background', '%(0)s_each'),
('each_feature', 'feature', '%(0)s_each'),
......
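The '%(0)s_output' template is what maps the @before.step_output and
@after.step_output decorators onto the new 'before_output' and
'after_output' slots registered in CALLBACK_REGISTRY above.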
......@@ -10,6 +10,7 @@ lxml
mock==1.0b1
mox==0.5.3
nose==1.1.2
python-subunit==0.0.13
sure==1.1.7
tornado==2.3
tox==1.4.2
......
......@@ -29,7 +29,7 @@ def get_packages():
return packages
required_modules = ['sure', 'fuzzywuzzy']
required_modules = ['sure', 'fuzzywuzzy', 'python-subunit']
if sys.version_info[:2] < (2, 6):
required_modules.append('multiprocessing')
......
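Note that requirements pins python-subunit==0.0.13 while setup.py leaves the
dependency unpinned; presumably any release exposing the subunit.v2
StreamResultToBytes API will do.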
# -*- coding: utf-8 -*-
from lettuce import step
@step(u'Given I do nothing')
def given_i_do_nothing(step):
pass
@step(u'Then I see that the test passes')
def then_i_see_that_the_test_passes(step):
pass
@step(u'Then I should not see "([^"]+)"')
def then_should_not_see(step, email):
pass
@step(u'Given some email addresses')
def given_email_addresses(step):
pass
Feature: ignore slow steps
As a python developer
I want to run only the fast tests
So that I can be really happy
@slow-ish
Scenario: this one is kinda slow
Given I do nothing
Then I see that the test passes
@fast-ish
Scenario: this one is fast!!
Given I do nothing
Then I see that the test passes
Scenario: this scenario is not tagged
Given I do nothing
Then I should not see "harvey@nom.cat"
Scenario: this scenario is also not tagged
Given some email addresses
| email |
| harvey@nom.cat |
Then I see that the test passes
......@@ -2,9 +2,9 @@
from lettuce import step
@step(u'my dæmi that passes')
def given_my_daemi_that_passes(step, d):
step.given(u'my "INNSKRÁ" that blows a exception')
def given_my_daemi_that_passes(step):
pass
@step('my "(.*)" that blows an exception')
def given_my_daemi_that_blows_a_exception(step, name):
assert False
Feature: Writes to console
As a test developer
I want to capture my console output
So that I can get the debugging my system wrote
Scenario: Write to stdout
When I write to stdout
Then I am happy
Scenario: write to stderr
When I write to stderr
Then I am happy
import sys
from lettuce import step
@step('When I write to stdout')
def write_stdout(step):
print >> sys.stdout, "Badger"
@step('When I write to stderr')
def write_stderr(step):
print >> sys.stderr, "Mushroom"
@step('Then I am happy')
def happy(step):
pass
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from cStringIO import StringIO
from nose.tools import with_setup, assert_equal
from subunit.v2 import ByteStreamToStreamResult
from testtools import StreamToDict
from lettuce import Runner, registry
from lettuce.plugins import subunit_output
from tests.asserts import prepare_stdout
from tests.functional.test_runner import feature_name
class Includes(object):
def __init__(self, d):
self.d = d
def __eq__(self, a):
return all((v == a[k] for k, v in self.d.iteritems()))
def __repr__(self):
return '{klass}({d})'.format(
klass=self.__class__.__name__,
d=self.d)
class Keys(object):
def __init__(self, *keys):
self.keys = keys
def __eq__(self, a):
return set(a.keys()) == set(self.keys)
class ContentContains(object):
def __init__(self, text):
self.text = text
def __eq__(self, a):
return self.text in a.as_text()
def __repr__(self):
return '{klass}({text})'.format(
klass=self.__class__.__name__,
text=self.text)
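In other words (illustrative values only): Includes matches when every
listed key/value pair is present, Keys matches the exact key set, and
ContentContains searches a testtools content object's text:

Includes({'status': 'success'}) == {'status': 'success', 'id': 'x'}  # True
Keys('stdout', 'stderr') == {'stdout': 1, 'stderr': 2}               # True
ContentContains('Badger') == content  # True when 'Badger' is in content.as_text()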
class State(object):
expect = []
def handle_dict(self, test):
try:
d = self.expect.pop(0)
except IndexError:
raise AssertionError("Unexpected {test}".format(test=test))
assert_equal(d, test)
def close_file(self, file_):
"""
Close and check the file
"""
file_.seek(0)
case = ByteStreamToStreamResult(file_)
result = StreamToDict(self.handle_dict)
result.startTestRun()
case.run(result)
result.stopTestRun()
file_.close()
def setup(self):
"""
Set up the test case
"""
prepare_stdout()
output = StringIO()
self.patch = (subunit_output.open_file, subunit_output.close_file)
subunit_output.open_file = lambda f: output
subunit_output.close_file = self.close_file
def teardown(self):
"""
Tear down the test case
"""
subunit_output.open_file, subunit_output.close_file = self.patch
assert_equal(len(self.expect), 0, "Expected results left")
registry.clear()
state = State()
@with_setup(state.setup, state.teardown)
def test_subunit_output_with_no_errors():
"""
Test Subunit output with no errors
"""
state.expect = [
Includes({
'id': 'one commented scenario: Do nothing',
'status': 'success',
'details': Keys('stdout', 'stderr', 'steps'),
}),
]
runner = Runner(feature_name('commented_feature'), enable_subunit=True)
runner.run()
@with_setup(state.setup, state.teardown)
def test_subunit_output_with_one_error():
"""
Test Subunit output with one error
"""
state.expect = [
Includes({
'status': 'success',
'details': Keys('stdout', 'stderr', 'steps'),
}),
Includes({
'status': 'fail',
'details': Keys('stdout', 'stderr', 'traceback', 'steps'),
}),
]
runner = Runner(feature_name('error_traceback'), enable_subunit=True)
runner.run()
@with_setup(state.setup, state.teardown)
def test_subunit_output_with_tags():
"""
Test Subunit output with tags
"""
state.expect = [
Includes({
'status': 'success',
'tags': set(['slow-ish']),
}),
Includes({
'status': 'success',
'tags': set(['fast-ish']),
}),
Includes({
'status': 'success',
'tags': set(),
}),
Includes({
'status': 'success',
'tags': set(),
}),
]
runner = Runner(feature_name('tagged_features'), enable_subunit=True)
runner.run()
@with_setup(state.setup, state.teardown)
def test_subunit_output_unicode():
"""
Test Subunit output with unicode traceback
"""
state.expect = [
Includes({
'status': 'success',
}),
Includes({
'status': 'fail',
'details': Includes({
'traceback': ContentContains('given_my_daemi_that_blows_a_exception'),
}),
}),
]
runner = Runner(feature_name('unicode_traceback'), enable_subunit=True)
runner.run()
@with_setup(state.setup, state.teardown)
def test_subunit_output_console():
"""
Test Subunit output to console
"""
state.expect = [
Includes({
'status': 'success',
'details': Includes({
'stdout': ContentContains('Badger'),
}),
}),
Includes({
'status': 'success',
'details': Includes({
'stderr': ContentContains('Mushroom'),
}),
}),
]
runner = Runner(feature_name('writes_to_console'), enable_subunit=True)
runner.run()
@with_setup(state.setup, state.teardown)
def test_subunit_output_undefined_steps():
"""
Test Subunit output with undefined steps
"""
state.expect = [
Includes({
'status': 'fail',
'details': Includes({
'steps': ContentContains('? When this test step is undefined\n'),
}),
}),
Includes({
'status': 'fail',
'details': Includes({
'steps': ContentContains('? When this test step is undefined\n'),
}),
}),
]
runner = Runner(feature_name('undefined_steps'), enable_subunit=True)
runner.run()