Commit 2d19136b by Brian Beggs, committed by GitHub

Merge pull request #16009 from edx/jmbowman/pytest_unit_tests

PLAT-1677 Switch to pytest for unit tests
Parents: c1bdc75c ca97e946
@@ -17,6 +17,7 @@ omit =
     cms/djangoapps/contentstore/views/dev.py
     cms/djangoapps/*/migrations/*
     cms/djangoapps/*/features/*
+    cms/lib/*/migrations/*
     lms/debug/*
     lms/envs/*
     lms/djangoapps/*/migrations/*
@@ -25,6 +26,7 @@ omit =
     common/djangoapps/*/migrations/*
     openedx/core/djangoapps/*/migrations/*
     openedx/core/djangoapps/debug/*
+    openedx/features/*/migrations/*
 concurrency=multiprocessing
...
@@ -64,6 +64,7 @@ conf/locale/messages.mo
 .testids/
 .noseids
 nosetests.xml
+.cache/
 .coverage
 .coverage.*
 coverage.xml
...
"""
Studio unit test configuration and fixtures.
This module needs to exist because the pytest.ini in the cms package stops
pytest from looking for the conftest.py module in the parent directory when
only running cms tests.
"""
from __future__ import absolute_import, unicode_literals

import importlib
import os

import contracts
import pytest


def pytest_configure(config):
"""
Do core setup operations from manage.py before collecting tests.
"""
if config.getoption('help'):
return
enable_contracts = os.environ.get('ENABLE_CONTRACTS', False)
if not enable_contracts:
contracts.disable_all()
settings_module = os.environ.get('DJANGO_SETTINGS_MODULE')
startup_module = 'cms.startup' if settings_module.startswith('cms') else 'lms.startup'
startup = importlib.import_module(startup_module)
startup.run()
@pytest.fixture(autouse=True, scope='function')
def _django_clear_site_cache():
"""
pytest-django uses this fixture to automatically clear the Site object
cache by replacing it with a new dictionary. edx-django-sites-extensions
grabs the cache dictionary at startup, and uses that one for all lookups
from then on. Our CacheIsolationMixin class tries to clear the cache by
grabbing the current dictionary from the site models module and clearing
it. Long story short: if you use this all together, neither cache
clearing mechanism actually works. So override this fixture to not mess
with what has been working for us so far.
"""
pass
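The clash described in that docstring is easy to reproduce in miniature. This is an illustrative sketch with stand-in names, not the actual pytest-django or edx-django-sites-extensions code:

::

    # Stand-in for django.contrib.sites.models.SITE_CACHE
    SITE_CACHE = {'example.com': object()}

    # edx-django-sites-extensions effectively captures the dict at startup
    captured = SITE_CACHE

    # Rebinding the name (what the default fixture does) strands `captured`
    SITE_CACHE = {}
    assert captured is not SITE_CACHE  # lookups now hit a stale dict

    # Clearing in place keeps every reference coherent
    captured.clear()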
[pytest]
DJANGO_SETTINGS_MODULE = cms.envs.test
addopts = --nomigrations --reuse-db --durations=20 -p no:randomly
norecursedirs = envs
python_classes =
python_files = tests.py test_*.py *_tests.py
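Taken together with the conftest above, this ini already selects the Studio settings module, so only the contracts switch is left to the environment. A hypothetical invocation (the path is an example, not mandated by this change):

::

    # pytest.ini supplies DJANGO_SETTINGS_MODULE=cms.envs.test;
    # ENABLE_CONTRACTS is read by cms/conftest.py (any non-empty value enables it)
    ENABLE_CONTRACTS=1 pytest cms/djangoapps/contentstore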
from django.conf import settings


def pytest_configure():
"""
Use Django's default settings for tests in common/lib.
"""
settings.configure()
[pytest]
addopts = --nomigrations --reuse-db --durations=20
norecursedirs = .cache
python_classes =
python_files = tests.py test_*.py tests_*.py *_tests.py __init__.py
@@ -13,12 +13,6 @@ import uuid
 import ddt
 from contracts import contract
 from nose.plugins.attrib import attr
-# For the cache tests to work, we need to be using the Django default
-# settings (not our usual cms or lms test settings) and they need to
-# be configured before importing from django.core.cache
-from django.conf import settings
-if not settings.configured:
-    settings.configure()
 from django.core.cache import caches, InvalidCacheBackendError
 from openedx.core.lib import tempdir
...
[pytest]
addopts = -p no:randomly --durations=20
norecursedirs = .cache
"""
Default unit test configuration and fixtures.
"""
from __future__ import absolute_import, unicode_literals

# Import hooks and fixture overrides from the cms package to
# avoid duplicating the implementation
from cms.conftest import _django_clear_site_cache, pytest_configure  # pylint: disable=unused-import
@@ -130,14 +130,15 @@ however, run any acceptance tests.
 Note -
 `paver` is a scripting tool. To get information about various options, you can run this command.
 
 ::
 
     paver -h
 
 Running Python Unit tests
 -------------------------
 
-We use `nose <https://nose.readthedocs.org/en/latest/>`__ through the
-`django-nose plugin <https://pypi.python.org/pypi/django-nose>`__ to run
-the test suite.
+We use `pytest <https://pytest.org/>`__ to run the test suite.
 
 For example, this command runs all the python test scripts.
@@ -194,7 +195,7 @@ To run a single django test class use this command.
 
 ::
 
-    paver test_system -t lms/djangoapps/courseware/tests/tests.py:ActivateLoginTest
+    paver test_system -t lms/djangoapps/courseware/tests/tests.py::ActivateLoginTest
 
 When developing tests, it is often helpful to be able to run just a
 single test without the overhead of PIP installs, UX builds, etc. At
@@ -204,23 +205,23 @@ the time of this writing, the command is the following.
 
 ::
 
-    python ./manage.py lms test --verbosity=1 lms/djangoapps/courseware/tests/test_courses.py --traceback --settings=test
+    pytest lms/djangoapps/courseware/tests/test_courses.py
 
 To run a single test, format the command like this.
 
 ::
 
-    paver test_system -t lms/djangoapps/courseware/tests/tests.py:ActivateLoginTest.test_activate_login
+    paver test_system -t lms/djangoapps/courseware/tests/tests.py::ActivateLoginTest::test_activate_login
 
-The ``lms`` suite of tests runs with randomized order, by default.
-You can override these by using ``--no-randomize`` to disable randomization.
+You can use ``--randomize`` to randomize the test case sequence. In the
+short term, this is likely to reveal bugs in our test setup and teardown;
+please fix (or at least file tickets for) any such issues you encounter.
 
 You can also enable test concurrency with the ``--processes=N`` flag (where ``N``
 is the number of processes to run tests with, and ``-1`` means one process per
-available core). Note, however, that when running concurrently, breakpoints may
-not work correctly, and you will not be able to run single test methods (only
-single test classes).
+available core). Note, however, that when running concurrently, breakpoints may
+not work correctly.
 
 For example:
@@ -239,44 +240,44 @@ To re-run all failing django tests from lms or cms, use the
 
     paver test_system -s lms --failed
     paver test_system -s cms --failed
 
-There is also a ``--fail_fast``, ``-x`` option that will stop nosetests
+There is also a ``--exitfirst``, ``-x`` option that will stop pytest
 after the first failure.
 
 common/lib tests are tested with the ``test_lib`` task, which also
-accepts the ``--failed`` and ``--fail_fast`` options.
+accepts the ``--failed`` and ``--exitfirst`` options.
 
 ::
 
     paver test_lib -l common/lib/calc
     paver test_lib -l common/lib/xmodule --failed
 
-For example, this command runs a single nose test file.
+For example, this command runs a single python unit test file.
 
 ::
 
-    nosetests common/lib/xmodule/xmodule/tests/test_stringify.py
+    pytest common/lib/xmodule/xmodule/tests/test_stringify.py
 
-This command runs a single nose test within a specified file.
+This command runs a single python unit test within a specified file.
 
 ::
 
-    nosetests common/lib/xmodule/xmodule/tests/test_stringify.py:test_stringify
+    pytest common/lib/xmodule/xmodule/tests/test_stringify.py::test_stringify
 
-This is an example of how to run a single test and get stdout, with proper env config.
+This is an example of how to run a single test and get stdout shown immediately, with proper env config.
 
 ::
 
-    python manage.py cms --settings test test contentstore.tests.test_import_nostatic -s
+    pytest cms/djangoapps/contentstore/tests/test_import.py -s
 
-These are examples of how to run a single test and get stdout and get coverage.
+These are examples of how to run a single test and get coverage.
 
 ::
 
-    python -m coverage run `which ./manage.py` cms --settings test test --traceback --logging-clear-handlers --liveserver=localhost:8000-9000 contentstore.tests.test_import_nostatic -s  # cms example
-    python -m coverage run `which ./manage.py` lms --settings test test --traceback --logging-clear-handlers --liveserver=localhost:8000-9000 courseware.tests.test_module_render -s  # lms example
+    pytest cms/djangoapps/contentstore/tests/test_import.py --cov  # cms example
+    pytest lms/djangoapps/courseware/tests/test_module_render.py --cov  # lms example
 
-Use this command to generate coverage report.
+Use this command to generate a coverage report.
 
 ::
@@ -297,31 +298,32 @@ you can run one of these commands.
 
 ::
 
     paver test_system -s cms -t common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
-    python -m coverage run `which ./manage.py` cms --settings test test --traceback common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
+    pytest common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
 
 Very handy: if you pass the ``--pdb`` flag to a paver test function, or
 uncomment the ``pdb=1`` line in ``setup.cfg``, the test runner
 will drop you into pdb on error. This lets you go up and down the stack
 and see what the values of the variables are. Check out `the pdb
-documentation <http://docs.python.org/library/pdb.html>`__
+documentation <http://docs.python.org/library/pdb.html>`__. Note that this
+only works if you aren't collecting coverage statistics (pdb and coverage.py
+use the same mechanism to trace code execution).
 
 Use this command to put a temporary debugging breakpoint in a test.
 If you check this in, your tests will hang on Jenkins.
 
 ::
 
-    from nose.tools import set_trace; set_trace()
+    import pdb; pdb.set_trace()
 
-Note: More on the ``--failed`` functionality
+Note: More on the ``--failed`` functionality:
 
 * In order to use this, you must run the tests first. If you haven't already
   run the tests, or if no tests failed in the previous run, then using the
   ``--failed`` switch will result in **all** of the tests being run. See more
-  about this in the `nose documentation
-  <http://nose.readthedocs.org/en/latest/plugins/testid.html#looping-over-failed-tests>`__.
+  about this in the `pytest documentation
+  <https://docs.pytest.org/en/latest/cache.html>`__.
 
-* Note that ``paver test_python`` calls nosetests separately for cms and lms.
+* Note that ``paver test_python`` calls pytest separately for cms and lms.
   This means that if tests failed only in lms on the previous run, then calling
   ``paver test_python --failed`` will run **all of the tests for cms** in
   addition to the previously failing lms tests. If you want it to run only the
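Under pytest, the machinery behind ``--failed`` is the built-in cache plugin, so the same behavior is also available directly (flags per pytest 3.x; shown as an aside, not part of this change):

::

    pytest --lf lms/djangoapps/courseware           # rerun only last run's failures
    pytest --ff lms/djangoapps/courseware           # run failures first, then the rest
    pytest --cache-show                             # inspect the recorded failures
    pytest --cache-clear lms/djangoapps/courseware  # start with a clean slate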
@@ -501,7 +503,7 @@ To run all the bok choy accessibility tests use this command.
 
     paver test_a11y
 
-To run specific tests, use the ``-t`` flag to specify a nose-style test spec
+To run specific tests, use the ``-t`` flag to specify a pytest-style test spec
 relative to the ``common/test/acceptance/tests`` directory. This is an example:
 
 ::
@@ -565,7 +567,7 @@ Note if setup has already been done, you can run::
 
 You must run BOTH `--testsonly` and `--fasttest`.
 
 3. When done, you can kill your servers in the first terminal/ssh session with
-   Control-C. *Warning*: Only hit Control-C one time so the nose test framework can
+   Control-C. *Warning*: Only hit Control-C one time so the pytest framework can
    properly clean up.
 
 Running Lettuce Acceptance Tests
@@ -644,7 +646,7 @@ Running Tests on Paver Scripts
 
 To run tests on the scripts that power the various Paver commands, use the following command::
 
-    nosetests pavelib
+    pytest pavelib
 
 Testing internationalization with dummy translations
@@ -814,7 +816,7 @@ To view JavaScript code style quality run this command.
 
 ::
 
     paver run_eslint --limit=50000
...
@@ -207,16 +207,17 @@ class CertificateDownloadableStatusTests(WebCertificateTestMixin, ModuleStoreTes
         )
 
     @ddt.data(
-        (False, datetime.now(pytz.UTC) + timedelta(days=2), False),
-        (False, datetime.now(pytz.UTC) - timedelta(days=2), True),
-        (True, datetime.now(pytz.UTC) + timedelta(days=2), True)
+        (False, timedelta(days=2), False),
+        (False, -timedelta(days=2), True),
+        (True, timedelta(days=2), True)
     )
     @ddt.unpack
     @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
-    def test_cert_api_return(self, self_paced, cert_avail_date, cert_downloadable_status):
+    def test_cert_api_return(self, self_paced, cert_avail_delta, cert_downloadable_status):
        """
        Test 'downloadable status'
        """
+        cert_avail_date = datetime.now(pytz.UTC) + cert_avail_delta
         self.course.self_paced = self_paced
         self.course.certificate_available_date = cert_avail_date
         self.course.save()
...
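The change above is worth generalizing: arguments to ``@ddt.data`` are evaluated once, at import time, so a ``datetime.now()`` baked into the decorator goes stale by the time the test runs, especially under randomized ordering or a long-lived process. Passing a delta and resolving it inside the test body avoids that. A self-contained sketch of the pattern (illustrative names):

::

    from datetime import datetime, timedelta
    from unittest import TestCase

    import ddt
    import pytz


    @ddt.ddt
    class AvailabilityExample(TestCase):
        # Deltas, not absolute datetimes, in the decorator.
        @ddt.data(timedelta(days=2), -timedelta(days=2))
        def test_availability(self, delta):
            # The clock is read when the test runs, not at import time.
            available_at = datetime.now(pytz.UTC) + delta
            self.assertEqual(available_at <= datetime.now(pytz.UTC),
                             delta < timedelta(0))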
-from unittest import TestCase
 import ddt
+from django.test import TestCase
 from mock import call, patch
 from opaque_keys.edx.keys import CourseKey
-from nose.tools import assert_true
 from lms.djangoapps.certificates.tasks import generate_certificate
 from student.tests.factories import UserFactory
...
@@ -257,6 +257,7 @@ class OverrideFieldData(FieldData):
 
 class OverrideModulestoreFieldData(OverrideFieldData):
     """Apply field data overrides at the modulestore level. No student context required."""
+    provider_classes = None
 
     @classmethod
     def wrap(cls, block, field_data):  # pylint: disable=arguments-differ
...
@@ -133,6 +133,7 @@ class ModuleRenderTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
         Set up the course and user context
         """
         super(ModuleRenderTestCase, self).setUp()
+        OverrideFieldData.provider_classes = None
 
         self.mock_user = UserFactory()
         self.mock_user.id = 1
@@ -154,6 +155,10 @@ class ModuleRenderTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
             )
         )
 
+    def tearDown(self):
+        OverrideFieldData.provider_classes = None
+        super(ModuleRenderTestCase, self).tearDown()
+
     def test_get_module(self):
         self.assertEqual(
             None,
...
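Both hunks serve the same goal: ``provider_classes`` is a lazily populated class-level cache, and under randomized test ordering a value cached by one test (with its patched settings) can leak into the next. The general shape of the hazard, reduced to a sketch with made-up names:

::

    class ProviderRegistry(object):
        # Class-level cache, shared by every test in the process.
        provider_classes = None

        @classmethod
        def providers(cls, configured):
            if cls.provider_classes is None:       # populated exactly once...
                cls.provider_classes = list(configured)
            return cls.provider_classes            # ...so later changes are ignored

    # Hence the setUp/tearDown resets in the diff above:
    ProviderRegistry.provider_classes = None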
@@ -1707,28 +1707,24 @@ class GroupModeratorPermissionsTestCase(ModuleStoreTestCase):
         # cohorted_user (who is in the cohort but not the verified enrollment track),
         # and plain_user (who is neither in the cohort nor the verified enrollment track)
         self.group_moderator = UserFactory(username='group_moderator', email='group_moderator@edx.org')
-        self.group_moderator.id = 1
         CourseEnrollmentFactory(
             course_id=self.course.id,
             user=self.group_moderator,
             mode=verified_coursemode
         )
         self.verified_user = UserFactory(username='verified', email='verified@edx.org')
-        self.verified_user.id = 2
         CourseEnrollmentFactory(
             course_id=self.course.id,
             user=self.verified_user,
             mode=verified_coursemode
         )
         self.cohorted_user = UserFactory(username='cohort', email='cohort@edx.org')
-        self.cohorted_user.id = 3
         CourseEnrollmentFactory(
             course_id=self.course.id,
             user=self.cohorted_user,
             mode=audit_coursemode
         )
         self.plain_user = UserFactory(username='plain', email='plain@edx.org')
-        self.plain_user.id = 4
         CourseEnrollmentFactory(
             course_id=self.course.id,
             user=self.plain_user,
@@ -1737,7 +1733,7 @@ class GroupModeratorPermissionsTestCase(ModuleStoreTestCase):
         CohortFactory(
             course_id=self.course.id,
             name='Test Cohort',
-            users=[self.verified_user, self.cohorted_user]
+            users=[self.group_moderator, self.cohorted_user]
         )
 
         # Give group moderator permissions to group_moderator
...
@@ -260,6 +260,11 @@ class ScoreChangedSignalRelayTest(TestCase):
 
 @ddt.ddt
 class RecalculateUserGradeSignalsTest(TestCase):
+    SIGNALS = {
+        'COHORT_MEMBERSHIP_UPDATED': COHORT_MEMBERSHIP_UPDATED,
+        'ENROLLMENT_TRACK_UPDATED': ENROLLMENT_TRACK_UPDATED,
+    }
+
     def setUp(self):
         super(RecalculateUserGradeSignalsTest, self).setUp()
         self.user = UserFactory()
@@ -267,9 +272,10 @@ class RecalculateUserGradeSignalsTest(TestCase):
     @patch('lms.djangoapps.grades.signals.handlers.CourseGradeFactory.update')
     @patch('lms.djangoapps.grades.signals.handlers.CourseGradeFactory.read')
-    @ddt.data(*itertools.product((COHORT_MEMBERSHIP_UPDATED, ENROLLMENT_TRACK_UPDATED), (True, False), (True, False)))
+    @ddt.data(*itertools.product(('COHORT_MEMBERSHIP_UPDATED', 'ENROLLMENT_TRACK_UPDATED'),
+                                 (True, False), (True, False)))
     @ddt.unpack
-    def test_recalculate_on_signal(self, signal, write_only_if_engaged, has_grade, read_mock, update_mock):
+    def test_recalculate_on_signal(self, signal_name, write_only_if_engaged, has_grade, read_mock, update_mock):
         """
         Tests the grades handler for signals that trigger regrading.
         The handler should call CourseGradeFactory.update() with the
@@ -279,6 +285,7 @@ class RecalculateUserGradeSignalsTest(TestCase):
         if not has_grade:
             read_mock.return_value = None
         with waffle().override(WRITE_ONLY_IF_ENGAGED, active=write_only_if_engaged):
+            signal = self.SIGNALS[signal_name]
             signal.send(sender=None, user=self.user, course_key=self.course_key)
 
         if not write_only_if_engaged and not has_grade:
...
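Handing ``@ddt.data`` plain strings and resolving them through ``SIGNALS`` inside the test is most likely about test ids: ddt derives the generated test names from its arguments, and Django ``Signal`` objects stringify unstably (their repr includes memory addresses), which upsets tools that need reproducible ids, such as the newly added pytest-randomly and pytest-xdist. A stripped-down sketch of the idiom:

::

    from unittest import TestCase

    import ddt


    @ddt.ddt
    class LookupByNameExample(TestCase):
        # Stable string keys stand in for objects whose repr() is unstable.
        THINGS = {'first': object(), 'second': object()}

        @ddt.data('first', 'second')
        def test_lookup(self, name):
            self.assertIn(name, self.THINGS)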
 from contextlib import contextmanager
 from datetime import datetime, timedelta
 import itertools
-from unittest import TestCase
 
 import ddt
 import pytz
 import waffle
+from django.test import TestCase
 
 from course_modes.models import CourseMode
 from openedx.core.djangoapps.certificates import api
...
@@ -549,11 +549,15 @@ class CreditProviderCallbackViewTests(UserMixin, TestCase):
         self.assertEqual(response.status_code, 403)
 
     @ddt.data(
-        to_timestamp(datetime.datetime.now(pytz.UTC) - datetime.timedelta(0, 60 * 15 + 1)),
+        -datetime.timedelta(0, 60 * 15 + 1),
         'invalid'
     )
-    def test_post_with_invalid_timestamp(self, timestamp):
+    def test_post_with_invalid_timestamp(self, timedelta):
         """ Verify HTTP 400 is returned for requests with an invalid timestamp. """
+        if timedelta == 'invalid':
+            timestamp = timedelta
+        else:
+            timestamp = to_timestamp(datetime.datetime.now(pytz.UTC) + timedelta)
         request_uuid = self._create_credit_request_and_get_uuid()
         response = self._credit_provider_callback(request_uuid, 'approved', timestamp=timestamp)
         self.assertEqual(response.status_code, 400)
...
 import datetime
 import itertools
+from copy import deepcopy
 from unittest import skipUnless
 
 import attr
@@ -201,10 +202,8 @@ class TestSendRecurringNudge(CacheIsolationTestCase):
 
     @ddt.data(*itertools.product((1, 10, 100), (3, 10)))
     @ddt.unpack
-    @override_settings()
     def test_templates(self, message_count, day):
-        settings.TEMPLATES[0]['OPTIONS']['string_if_invalid'] = "TEMPLATE WARNING - MISSING VARIABLE [%s]"
         user = UserFactory.create()
         schedules = [
             ScheduleFactory.create(
@@ -226,20 +225,23 @@ class TestSendRecurringNudge(CacheIsolationTestCase):
         sent_messages = []
 
-        with patch.object(tasks, '_recurring_nudge_schedule_send') as mock_schedule_send:
-            mock_schedule_send.apply_async = lambda args, *_a, **_kw: sent_messages.append(args)
+        templates_override = deepcopy(settings.TEMPLATES)
+        templates_override[0]['OPTIONS']['string_if_invalid'] = "TEMPLATE WARNING - MISSING VARIABLE [%s]"
+        with self.settings(TEMPLATES=templates_override):
+            with patch.object(tasks, '_recurring_nudge_schedule_send') as mock_schedule_send:
+                mock_schedule_send.apply_async = lambda args, *_a, **_kw: sent_messages.append(args)
 
-            with self.assertNumQueries(2):
-                tasks.recurring_nudge_schedule_hour(
-                    self.site_config.site.id, day, test_time_str, [schedules[0].enrollment.course.org],
-                )
+                with self.assertNumQueries(2):
+                    tasks.recurring_nudge_schedule_hour(
+                        self.site_config.site.id, day, test_time_str, [schedules[0].enrollment.course.org],
+                    )
 
         self.assertEqual(len(sent_messages), 1)
 
         for args in sent_messages:
             tasks._recurring_nudge_schedule_send(*args)
 
         self.assertEqual(mock_channel.deliver.call_count, 1)
         for (_name, (_msg, email), _kwargs) in mock_channel.deliver.mock_calls:
             for template in attr.astuple(email):
                 self.assertNotIn("TEMPLATE WARNING", template)
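The rewritten test stops mutating ``settings.TEMPLATES`` in place, which would leak the sentinel string into every test that runs afterwards; instead it deep-copies the setting, edits the copy, and scopes it with ``self.settings()``. The idiom in isolation (assuming a configured Django template backend):

::

    from copy import deepcopy

    from django.conf import settings
    from django.test import TestCase


    class ScopedTemplatesOverride(TestCase):
        def test_override_is_scoped(self):
            templates = deepcopy(settings.TEMPLATES)   # never edit the original
            templates[0]['OPTIONS']['string_if_invalid'] = 'MISSING [%s]'
            with self.settings(TEMPLATES=templates):
                value = settings.TEMPLATES[0]['OPTIONS']['string_if_invalid']
                self.assertEqual(value, 'MISSING [%s]')
            # Outside the block, settings.TEMPLATES is untouched.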
@@ -47,7 +47,6 @@ class TestPaverBokChoyCmd(unittest.TestCase):
             "-m",
             "pytest",
             "{}/common/test/acceptance/{}".format(REPO_DIR, name),
-            "--durations=20",
             "--junitxml={}/reports/bok_choy{}/xunit.xml".format(REPO_DIR, shard_str),
             "--verbose",
         ]
...
@@ -31,11 +31,15 @@ __test__ = False  # do not collect
     ("fail-fast", "x", "Fail suite on first failed test"),
     ("fasttest", "a", "Run without collectstatic"),
     make_option(
+        "--eval-attr", dest="eval_attr",
+        help="Only run tests matching given attribute expression."
+    ),
+    make_option(
         '-c', '--cov-args', default='',
         help='adds as args to coverage for the test run'
     ),
     ('skip-clean', 'C', 'skip cleaning repository before running tests'),
-    ('processes=', 'p', 'number of processes to use running tests'),
+    make_option('-p', '--processes', dest='processes', default=0, help='number of processes to use running tests'),
     make_option('-r', '--randomize', action='store_true', help='run the tests in a random order'),
     make_option('--no-randomize', action='store_false', dest='randomize', help="don't run the tests in a random order"),
     make_option("--verbose", action="store_const", const=2, dest="verbosity"),
@@ -53,14 +57,6 @@ __test__ = False  # do not collect
         dest='disable_migrations',
         help="Create tables by applying migrations."
     ),
-    ("fail_fast", None, "deprecated in favor of fail-fast"),
-    ("test_id=", None, "deprecated in favor of test-id"),
-    ('cov_args=', None, 'deprecated in favor of cov-args'),
-    make_option(
-        "-e", "--extra_args", default="",
-        help="deprecated, pass extra options directly in the paver commandline"
-    ),
-    ('skip_clean', None, 'deprecated in favor of skip-clean'),
 ], share_with=['pavelib.utils.test.utils.clean_reports_dir'])
 @PassthroughTask
 @timed
@@ -119,14 +115,6 @@ def test_system(options, passthrough_options):
     make_option("--verbose", action="store_const", const=2, dest="verbosity"),
     make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
     make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
-    ('cov_args=', None, 'deprecated in favor of cov-args'),
-    make_option(
-        '-e', '--extra_args', default='',
-        help='deprecated, pass extra options directly in the paver commandline'
-    ),
-    ("fail_fast", None, "deprecated in favor of fail-fast"),
-    ('skip_clean', None, 'deprecated in favor of skip-clean'),
-    ("test_id=", None, "deprecated in favor of test-id"),
 ], share_with=['pavelib.utils.test.utils.clean_reports_dir'])
 @PassthroughTask
 @timed
@@ -153,8 +141,9 @@ def test_lib(options, passthrough_options):
         suites.LibTestSuite(
             d,
             passthrough_options=passthrough_options,
+            append_coverage=(i != 0),
             **options.test_lib
-        ) for d in Env.LIB_TEST_DIRS
+        ) for i, d in enumerate(Env.LIB_TEST_DIRS)
     ]
 
     test_suite = suites.PythonTestSuite(
@@ -186,12 +175,6 @@ def test_lib(options, passthrough_options):
         dest='disable_migrations',
         help="Create tables directly from apps' models. Can also be used by exporting DISABLE_MIGRATIONS=1."
     ),
-    ('cov_args=', None, 'deprecated in favor of cov-args'),
-    make_option(
-        '-e', '--extra_args', default='',
-        help='deprecated, pass extra options directly in the paver commandline'
-    ),
-    ("fail_fast", None, "deprecated in favor of fail-fast"),
 ])
 @PassthroughTask
 @timed
@@ -220,11 +203,6 @@ def test_python(options, passthrough_options):
     make_option("--verbose", action="store_const", const=2, dest="verbosity"),
     make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
     make_option("-v", "--verbosity", action="count", dest="verbosity", default=1),
-    ('cov_args=', None, 'deprecated in favor of cov-args'),
-    make_option(
-        '-e', '--extra_args', default='',
-        help='deprecated, pass extra options directly in the paver commandline'
-    ),
 ])
 @PassthroughTask
 @timed
@@ -249,7 +227,6 @@ def test(options, passthrough_options):
 @needs('pavelib.prereqs.install_coverage_prereqs')
 @cmdopts([
     ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
-    ("compare_branch=", None, "deprecated in favor of compare-branch"),
 ])
 @timed
 def coverage():
@@ -287,7 +264,6 @@ def coverage():
 @needs('pavelib.prereqs.install_coverage_prereqs')
 @cmdopts([
     ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
-    ("compare_branch=", None, "deprecated in favor of compare-branch"),
 ], share_with=['coverage'])
 @timed
 def diff_coverage(options):
...
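``append_coverage=(i != 0)`` suggests the first common/lib suite starts a fresh coverage data file while every subsequent suite adds to it rather than clobbering it. With pytest-cov (added to the requirements below) the same effect is available from the command line; this mapping is an inference, not shown in this excerpt:

::

    pytest common/lib/calc --cov                  # first suite: fresh data file
    pytest common/lib/xmodule --cov --cov-append  # later suites: append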
@@ -2,7 +2,7 @@
 TestSuite class and subclasses
 """
 from .suite import TestSuite
-from .nose_suite import NoseTestSuite, SystemTestSuite, LibTestSuite
+from .pytest_suite import PytestSuite, SystemTestSuite, LibTestSuite
 from .python_suite import PythonTestSuite
 from .js_suite import JsTestSuite
 from .acceptance_suite import AcceptanceTestSuite
...
@@ -343,7 +343,6 @@ class BokChoyTestSuite(TestSuite):
             "-m",
             "pytest",
             test_spec,
-            "--durations=20",
         ] + self.verbosity_processes_command
         if self.extra_args:
             cmd.append(self.extra_args)
...
@@ -5,7 +5,7 @@ import os
 
 from pavelib.utils.test import utils as test_utils
 from pavelib.utils.test.suites.suite import TestSuite
-from pavelib.utils.test.suites.nose_suite import LibTestSuite, SystemTestSuite
+from pavelib.utils.test.suites.pytest_suite import LibTestSuite, SystemTestSuite
 from pavelib.utils.envs import Env
 
 __test__ = False  # do not collect
...
@@ -61,6 +61,14 @@ class TestSuite(object):
         """
         return None
 
+    @staticmethod
+    def is_success(exit_code):
+        """
+        Determine if the given exit code represents a success of the test
+        suite. By default, only a zero counts as a success.
+        """
+        return exit_code == 0
+
     def run_test(self):
         """
         Runs a self.cmd in a subprocess and waits for it to finish.
@@ -88,7 +96,7 @@ class TestSuite(object):
         try:
             process = subprocess.Popen(cmd, **kwargs)
-            return (process.wait() == 0)
+            return self.is_success(process.wait())
         except KeyboardInterrupt:
             kill_process(process)
             sys.exit(1)
...
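The ``is_success`` hook exists so subclasses can accept more than one exit code. One plausible use: pytest, unlike nose, exits with code 5 when it collects no tests, which a suite filtered by an attribute expression may legitimately hit. The subclass behavior isn't shown in this excerpt, so treat this as a sketch:

::

    class PytestBackedSuite(TestSuite):
        @staticmethod
        def is_success(exit_code):
            # 0: tests ran and passed; 5: pytest collected nothing, which is
            # acceptable for a shard whose filter expression matches no tests.
            return exit_code in (0, 5)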
@@ -48,7 +48,7 @@
 # Third-party:
 git+https://github.com/jazzband/django-pipeline.git@d068a019169c9de5ee20ece041a6dea236422852#egg=django-pipeline==1.5.3
-git+https://github.com/edx/django-wiki.git@v0.0.11#egg=django-wiki==0.0.11
+git+https://github.com/edx/django-wiki.git@v0.0.14#egg=django-wiki==0.0.14
 git+https://github.com/edx/django-openid-auth.git@0.14#egg=django-openid-auth==0.14
 git+https://github.com/edx/MongoDBProxy.git@25b99097615bda06bd7cdfe5669ed80dc2a7fed0#egg=MongoDBProxy==0.1.0
 git+https://github.com/edx/nltk.git@2.0.6#egg=nltk==2.0.6
...
@@ -15,5 +15,8 @@ pysqlite==2.8.3
 pytest==3.1.3
 pytest-attrib==0.1.3
 pytest-catchlog==1.2.2
+pytest-cov==2.5.1
 pytest-django==3.1.2
-pytest-xdist==1.18.1
+pytest-forked==0.2
+pytest-randomly==1.2.1
+pytest-xdist==1.20.0
@@ -29,7 +29,7 @@ set -e
 # `SHARD` is a number indicating which subset of the tests to build.
 #
 # For "bok-choy" and "lms-unit", the tests are put into shard groups
-# using the nose 'attr' decorator (e.g. "@attr(shard=1)"). Anything with
+# using the 'attr' decorator (e.g. "@attr(shard=1)"). Anything with
 # the 'shard=n' attribute will run in the nth shard. If there isn't a
 # shard explicitly assigned, the test will run in the last shard.
 #
@@ -68,8 +68,9 @@ function emptyxunit {
 END
 }
 
-PAVER_ARGS="--cov-args='-p' --with-xunitmp -v"
+PAVER_ARGS="-v"
 PARALLEL="--processes=-1"
+export SUBSET_JOB=$JOB_NAME
 
 case "$TEST_SUITE" in
 
     "quality")
@@ -108,10 +109,10 @@ case "$TEST_SUITE" in
             paver test_system -s lms $PAVER_ARGS $PARALLEL 2> lms-tests.log
             ;;
         [1-3])
-            paver test_system -s lms --attr="shard=$SHARD" $PAVER_ARGS $PARALLEL 2> lms-tests.$SHARD.log
+            paver test_system -s lms --eval-attr="shard==$SHARD" $PAVER_ARGS $PARALLEL 2> lms-tests.$SHARD.log
             ;;
         4|"noshard")
-            paver test_system -s lms --attr='!shard' $PAVER_ARGS $PARALLEL 2> lms-tests.4.log
+            paver test_system -s lms --eval-attr='not shard' $PAVER_ARGS $PARALLEL 2> lms-tests.4.log
             ;;
         *)
             # If no shard is specified, rather than running all tests, create an empty xunit file. This is a
...
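The ``--eval-attr`` expressions flow through to pytest-attrib (pinned in the requirements above), which evaluates the given Python expression against attributes set on each test, such as those applied by the ``@attr(shard=n)`` decorator mentioned in the comment. Roughly, for direct invocation (illustrative paths):

::

    pytest -a "shard==2" lms/djangoapps     # only tests decorated @attr(shard=2)
    pytest -a "not shard" lms/djangoapps    # tests with no shard attribute at all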
@@ -15,7 +15,11 @@ process-timeout=300
 #pdb=1
 
 [tool:pytest]
-norecursedirs = .git conf node_modules test_root cms/envs lms/envs
+DJANGO_SETTINGS_MODULE = lms.envs.test
+addopts = --nomigrations --reuse-db --durations=20
+norecursedirs = .* *.egg build conf dist node_modules test_root cms/envs lms/envs
+python_classes =
+python_files = tests.py test_*.py tests_*.py *_tests.py __init__.py
 
 [pep8]
 # error codes: http://pep8.readthedocs.org/en/latest/intro.html#error-codes
...