Commit bc599a06 by Brian Wilson

Update tests with more complete coverage.

parent e75dd465
......@@ -153,7 +153,11 @@ class CourseEmailTemplate(models.Model):
If one isn't stored, an exception is thrown.
"""
return CourseEmailTemplate.objects.get()
try:
return CourseEmailTemplate.objects.get()
except CourseEmailTemplate.DoesNotExist:
log.exception("Attempting to fetch a non-existent course email template")
raise
@staticmethod
def _render(format_string, message_body, context):
......
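The new `get_template` wraps the singleton fetch so a missing template is logged with its traceback before the exception propagates. Below is a minimal standalone sketch of that pattern, using a plain dict and a hypothetical `fetch_template` helper instead of the real Django model, to show why `log.exception` plus a bare `raise` is preferable to formatting the traceback by hand.

```python
import logging

log = logging.getLogger(__name__)

def fetch_template(store):
    """Return the single stored template, logging a traceback if it is missing."""
    try:
        return store["course_email_template"]
    except KeyError:
        # log.exception records the active traceback automatically, and the bare
        # `raise` re-raises the same exception so callers can still handle it.
        log.exception("Attempting to fetch a non-existent course email template")
        raise

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    try:
        fetch_template({})
    except KeyError:
        print("missing template handled by caller")
```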
......@@ -35,16 +35,16 @@ from django.core.urlresolvers import reverse
from bulk_email.models import (
CourseEmail, Optout, CourseEmailTemplate,
SEND_TO_MYSELF, SEND_TO_STAFF, SEND_TO_ALL,
SEND_TO_MYSELF, SEND_TO_ALL, TO_OPTIONS,
)
from courseware.access import _course_staff_group_name, _course_instructor_group_name
from courseware.courses import get_course_by_id, course_image_url
from courseware.courses import get_course, course_image_url
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
update_subtask_status,
create_subtask_status,
increment_subtask_status,
update_instructor_task_for_subtasks,
initialize_subtask_info,
)
log = get_task_logger(__name__)
......@@ -82,9 +82,13 @@ def _get_recipient_queryset(user_id, to_option, course_id, course_location):
Recipients who are in more than one category (e.g. enrolled in the course and are staff or self)
will be properly deduped.
"""
if to_option not in TO_OPTIONS:
log.error("Unexpected bulk email TO_OPTION found: %s", to_option)
raise Exception("Unexpected bulk email TO_OPTION found: {0}".format(to_option))
if to_option == SEND_TO_MYSELF:
recipient_qset = User.objects.filter(id=user_id)
elif to_option == SEND_TO_ALL or to_option == SEND_TO_STAFF:
else:
staff_grpname = _course_staff_group_name(course_location)
staff_group, _ = Group.objects.get_or_create(name=staff_grpname)
staff_qset = staff_group.user_set.all()
......@@ -102,9 +106,7 @@ def _get_recipient_queryset(user_id, to_option, course_id, course_location):
)
recipient_qset = recipient_qset | enrollment_qset
recipient_qset = recipient_qset.distinct()
else:
log.error("Unexpected bulk email TO_OPTION found: %s", to_option)
raise Exception("Unexpected bulk email TO_OPTION found: {0}".format(to_option))
recipient_qset = recipient_qset.order_by('pk')
return recipient_qset
......@@ -146,7 +148,7 @@ def perform_delegate_email_batches(entry_id, course_id, task_input, action_name)
# Perfunctory check, since expansion is made for convenience of other task
# code that doesn't need the entry_id.
if course_id != entry.course_id:
format_msg = "Course id conflict: explicit value %s does not match task value %s"
format_msg = "Course id conflict: explicit value {} does not match task value {}"
raise ValueError(format_msg.format(course_id, entry.course_id))
email_id = task_input['email_id']
......@@ -162,14 +164,14 @@ def perform_delegate_email_batches(entry_id, course_id, task_input, action_name)
# Sanity check that course for email_obj matches that of the task referencing it.
if course_id != email_obj.course_id:
format_msg = "Course id conflict: explicit value %s does not match email value %s"
format_msg = "Course id conflict: explicit value {} does not match email value {}"
raise ValueError(format_msg.format(course_id, email_obj.course_id))
try:
course = get_course_by_id(course_id, depth=1)
except Http404 as exc:
log.exception("Task %s: get_course_by_id failed: %s", task_id, exc.args[0])
raise ValueError("Course not found: " + exc.args[0])
course = get_course(course_id)
except ValueError:
log.exception("Task %s: course not found: %s", task_id, course_id)
raise
global_email_context = _get_course_email_context(course)
recipient_qset = _get_recipient_queryset(user_id, to_option, course_id, course.location)
......@@ -222,7 +224,7 @@ def perform_delegate_email_batches(entry_id, course_id, task_input, action_name)
raise Exception(error_msg)
# Update the InstructorTask with information about the subtasks we've defined.
progress = update_instructor_task_for_subtasks(entry, action_name, total_num_emails, subtask_id_list)
progress = initialize_subtask_info(entry, action_name, total_num_emails, subtask_id_list)
num_subtasks = len(subtask_id_list)
log.info("Preparing to queue %d email tasks (%d emails) for course %s, email %s, to %s",
num_subtasks, total_num_emails, course_id, email_id, to_option)
......@@ -298,15 +300,13 @@ def send_course_email(entry_id, email_id, to_list, global_email_context, subtask
)
except Exception:
# Unexpected exception. Try to write out the failure to the entry before failing.
_, send_exception, traceback = exc_info()
traceback_string = format_exc(traceback) if traceback is not None else ''
log.error("Send-email task %s: failed unexpectedly: %s %s", current_task_id, send_exception, traceback_string)
log.exception("Send-email task %s: failed unexpectedly!", current_task_id)
# We got here for really unexpected reasons. Since we don't know how far
# the task got in emailing, we count all recipients as having failed.
# It at least keeps the counts consistent.
new_subtask_status = increment_subtask_status(subtask_status, failed=num_to_send, state=FAILURE)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
raise send_exception
raise
if send_exception is None:
# Update the InstructorTask object that is storing its progress.
......@@ -318,11 +318,11 @@ def send_course_email(entry_id, email_id, to_list, global_email_context, subtask
# was encountered has already been updated before the retry call was made,
# so we only log here.
log.warning("Send-email task %s: being retried", current_task_id)
raise send_exception
raise send_exception # pylint: disable=E0702
else:
log.error("Send-email task %s: failed: %s", current_task_id, send_exception)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
raise send_exception
raise send_exception # pylint: disable=E0702
log.info("Send-email task %s: returning status %s", current_task_id, new_subtask_status)
return new_subtask_status
......@@ -406,7 +406,16 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
course_title = global_email_context['course_title']
subject = "[" + course_title + "] " + course_email.subject
course_title_no_quotes = re.sub(r'"', '', course_title)
from_addr = '"{0}" Course Staff <{1}>'.format(course_title_no_quotes, settings.DEFAULT_BULK_FROM_EMAIL)
course_num = course_email.course_id.split('/')[1] # course_id = 'org/course_num/run'
# Substitute a '_' anywhere a character other than a word character (alphanumeric or underscore), period, or dash appears.
INVALID_CHARS = re.compile(r"[^\w.-]")
course_num = INVALID_CHARS.sub('_', course_num)
# Make a unique from name and address for each course, eg
# "COURSE_TITLE" Course Staff <coursenum-no-reply@courseupdates.edx.org>
from_addr = '"{0}" Course Staff <{1}-{2}>'.format(
course_title_no_quotes, course_num, settings.DEFAULT_BULK_FROM_EMAIL
)
course_email_template = CourseEmailTemplate.get_template()
try:
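The new from address embeds the course number, sanitized so that only word characters, periods, and dashes survive, ahead of the configured bulk sender. Here is a small self-contained sketch of that construction, with a literal address standing in for `settings.DEFAULT_BULK_FROM_EMAIL`.

```python
import re

DEFAULT_BULK_FROM_EMAIL = 'no-reply@courseupdates.edx.org'  # stand-in for the Django setting
INVALID_CHARS = re.compile(r"[^\w.-]")

def build_from_addr(course_id, course_title):
    """Build '"Title" Course Staff <coursenum-no-reply@...>' from 'org/course_num/run'."""
    course_num = course_id.split('/')[1]
    # Replace anything that is not a word character, period, or dash with '_'.
    course_num = INVALID_CHARS.sub('_', course_num)
    course_title_no_quotes = re.sub(r'"', '', course_title)
    return '"{0}" Course Staff <{1}-{2}>'.format(
        course_title_no_quotes, course_num, DEFAULT_BULK_FROM_EMAIL
    )

print(build_from_addr('MITx/6.00x/2013_Spring', 'Intro to "CS"'))
# -> "Intro to CS" Course Staff <6.00x-no-reply@courseupdates.edx.org>
```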
......@@ -440,9 +449,13 @@ def _send_course_email(entry_id, email_id, to_list, global_email_context, subtas
)
email_msg.attach_alternative(html_msg, 'text/html')
# Throttle if we have gotten the rate limiter
# Throttle if we have gotten the rate limiter. This is not very high-tech,
# but if a task has been retried for rate-limiting reasons, then we sleep
# for a period of time between all emails within this task. Choice of
# the value depends on the number of workers that might be sending email in
# parallel, and what the SES throttle rate is.
if throttle:
sleep(0.2)
sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)
try:
log.debug('Email with id %s to be sent to %s', email_id, email)
......
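The expanded comment above explains the throttle: once a task has been retried for rate-limiting reasons, every send inside that task sleeps for a configurable interval. A rough sketch of that loop shape, with a hypothetical `send_one` callable and a literal delay standing in for `settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS`:

```python
from time import sleep

RETRY_DELAY_BETWEEN_SENDS = 0.02  # stand-in for the BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS setting

def send_batch(addresses, send_one, throttle=False):
    """Send to each address, pausing between sends when the task is throttled."""
    for address in addresses:
        if throttle:
            # Crude rate limiting: the right delay depends on how many workers
            # send in parallel and on the provider's (e.g. SES) allowed rate.
            sleep(RETRY_DELAY_BETWEEN_SENDS)
        send_one(address)

def demo_send(address):
    """Trivial sender used only for the usage example below."""
    print("sent to %s" % address)

send_batch(['a@example.com', 'b@example.com'], demo_send, throttle=True)
```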
......@@ -33,7 +33,7 @@ class MockCourseEmailResult(object):
"""Wrapper for mock email function."""
def mock_increment_subtask_status(original_status, **kwargs): # pylint: disable=W0613
"""Increments count of number of emails sent."""
self.emails_sent += kwargs['succeeded']
self.emails_sent += kwargs.get('succeeded', 0)
return increment_subtask_status(original_status, **kwargs)
return mock_increment_subtask_status
......
......@@ -2,7 +2,7 @@
Unit tests for handling email sending errors
"""
from itertools import cycle
from mock import patch, Mock
from mock import patch
from smtplib import SMTPDataError, SMTPServerDisconnected, SMTPConnectError
from django.test.utils import override_settings
......@@ -16,9 +16,10 @@ from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from bulk_email.models import CourseEmail
from bulk_email.tasks import perform_delegate_email_batches
from bulk_email.models import CourseEmail, SEND_TO_ALL
from bulk_email.tasks import perform_delegate_email_batches, send_course_email
from instructor_task.models import InstructorTask
from instructor_task.subtasks import create_subtask_status
class EmailTestException(Exception):
......@@ -139,7 +140,7 @@ class TestEmailErrors(ModuleStoreTestCase):
@patch('bulk_email.tasks.increment_subtask_status')
@patch('bulk_email.tasks.log')
def test_nonexist_email(self, mock_log, result):
def test_nonexistent_email(self, mock_log, result):
"""
Tests retries when the email doesn't exist
"""
......@@ -155,8 +156,7 @@ class TestEmailErrors(ModuleStoreTestCase):
self.assertEqual(email_id, -1)
self.assertFalse(result.called)
@patch('bulk_email.tasks.log')
def test_nonexist_course(self, mock_log):
def test_nonexistent_course(self):
"""
Tests exception when the course in the email doesn't exist
"""
......@@ -165,14 +165,10 @@ class TestEmailErrors(ModuleStoreTestCase):
email.save()
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=E1101
with self.assertRaises(Exception):
with self.assertRaisesRegexp(ValueError, "Course not found"):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") # pylint: disable=E1101
((log_str, _, _), _) = mock_log.exception.call_args
self.assertTrue(mock_log.exception.called)
self.assertIn('get_course_by_id failed:', log_str)
@patch('bulk_email.tasks.log')
def test_nonexist_to_option(self, mock_log):
def test_nonexistent_to_option(self):
"""
Tests exception when the to_option in the email doesn't exist
"""
......@@ -180,9 +176,42 @@ class TestEmailErrors(ModuleStoreTestCase):
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=E1101
with self.assertRaises(Exception):
with self.assertRaisesRegexp(Exception, 'Unexpected bulk email TO_OPTION found: IDONTEXIST'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=E1101
def test_wrong_course_id_in_task(self):
"""
Tests exception when the course_id in task is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id=self.course.id, to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create("bogus_task_id", "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=E1101
with self.assertRaisesRegexp(ValueError, 'does not match task value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=E1101
def test_wrong_course_id_in_email(self):
"""
Tests exception when the course_id in CourseEmail is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id="bogus_course_id", to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=E1101
with self.assertRaisesRegexp(ValueError, 'does not match email value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=E1101
((log_str, opt_str), _) = mock_log.error.call_args
self.assertTrue(mock_log.error.called)
self.assertIn('Unexpected bulk email TO_OPTION found', log_str)
self.assertEqual("IDONTEXIST", opt_str)
def test_send_email_undefined_email(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=E1101
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
subtask_status = create_subtask_status(subtask_id)
bogus_email_id = 1001
with self.assertRaises(CourseEmail.DoesNotExist):
# we skip the call that updates subtask status, since we've not set up the InstructorTask
# for the subtask, and it's not important to the test.
with patch('bulk_email.tasks.update_subtask_status'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status)
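`test_send_email_undefined_email` above calls the task body directly and silences `update_subtask_status`, since no InstructorTask row was set up for the subtask. A condensed, runnable sketch of that shape, stacking `assertRaises` with a patch context manager, is shown below against toy stand-ins (the real paths are in bulk_email.tasks; here `time.sleep` is patched purely as a stand-in collaborator).

```python
import time
import unittest
from unittest import mock  # this codebase imports the `mock` backport on Python 2

class MissingRecord(Exception):
    """Stand-in for CourseEmail.DoesNotExist in this sketch."""

def send_course_email_stub(email_id, known_ids):
    """Toy task body: looks up the email record and 'sleeps' between sends."""
    if email_id not in known_ids:
        raise MissingRecord(email_id)
    time.sleep(0.01)

class SendEmailErrorTest(unittest.TestCase):
    def test_undefined_email(self):
        # The outer context asserts the expected failure; the inner patch silences
        # a collaborator that the test never set up state for.
        with self.assertRaises(MissingRecord):
            with mock.patch('time.sleep'):
                send_course_email_stub(1001, known_ids=[1, 2, 3])

if __name__ == '__main__':
    unittest.main()
```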
"""
Unit tests for bulk-email-related models.
"""
from django.test import TestCase
from django.core.management import call_command
from student.tests.factories import UserFactory
from bulk_email.models import CourseEmail, SEND_TO_STAFF, CourseEmailTemplate
class CourseEmailTest(TestCase):
"""Test the CourseEmail model."""
def test_creation(self):
course_id = 'abc/123/doremi'
sender = UserFactory.create()
to_option = SEND_TO_STAFF
subject = "dummy subject"
html_message = "<html>dummy message</html>"
email = CourseEmail.create(course_id, sender, to_option, subject, html_message)
self.assertEquals(email.course_id, course_id)
self.assertEquals(email.to_option, SEND_TO_STAFF)
self.assertEquals(email.subject, subject)
self.assertEquals(email.html_message, html_message)
self.assertEquals(email.sender, sender)
def test_bad_to_option(self):
course_id = 'abc/123/doremi'
sender = UserFactory.create()
to_option = "fake"
subject = "dummy subject"
html_message = "<html>dummy message</html>"
with self.assertRaises(ValueError):
CourseEmail.create(course_id, sender, to_option, subject, html_message)
class NoCourseEmailTemplateTest(TestCase):
"""Test the CourseEmailTemplate model without loading the template data."""
def test_get_missing_template(self):
with self.assertRaises(CourseEmailTemplate.DoesNotExist):
CourseEmailTemplate.get_template()
class CourseEmailTemplateTest(TestCase):
"""Test the CourseEmailTemplate model."""
def setUp(self):
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
def _get_sample_plain_context(self):
"""Provide sample context sufficient for rendering plaintext template"""
context = {
'course_title': "Bogus Course Title",
'course_url': "/location/of/course/url",
'account_settings_url': "/location/of/account/settings/url",
'platform_name': 'edX',
'email': 'your-email@test.com',
}
return context
def _get_sample_html_context(self):
"""Provide sample context sufficient for rendering HTML template"""
context = self._get_sample_plain_context()
context['course_image_url'] = "/location/of/course/image/url"
return context
def test_get_template(self):
template = CourseEmailTemplate.get_template()
self.assertIsNotNone(template.html_template)
self.assertIsNotNone(template.plain_template)
def test_render_html_without_context(self):
template = CourseEmailTemplate.get_template()
base_context = self._get_sample_html_context()
for keyname in base_context:
context = dict(base_context)
del context[keyname]
with self.assertRaises(KeyError):
template.render_htmltext("My new html text.", context)
def test_render_plaintext_without_context(self):
template = CourseEmailTemplate.get_template()
base_context = self._get_sample_plain_context()
for keyname in base_context:
context = dict(base_context)
del context[keyname]
with self.assertRaises(KeyError):
template.render_plaintext("My new plain text.", context)
def test_render_html(self):
template = CourseEmailTemplate.get_template()
context = self._get_sample_html_context()
template.render_htmltext("My new html text.", context)
def test_render_plain(self):
template = CourseEmailTemplate.get_template()
context = self._get_sample_plain_context()
template.render_plaintext("My new plain text.", context)
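The two `*_without_context` tests above iterate over the sample context, drop one key at a time, and expect a `KeyError`, which verifies that every key is actually required by the template. A compact standalone sketch of that loop against a plain `str.format` template (not the real CourseEmailTemplate) looks like this:

```python
import unittest

PLAIN_TEMPLATE = (
    "{course_title}\n{course_url}\n{account_settings_url}\n{platform_name}\n{email}\n"
)

def render_plaintext(message, context):
    """Toy renderer: every placeholder in the template must be present in context."""
    return PLAIN_TEMPLATE.format(**context) + message

class RequiredContextKeysTest(unittest.TestCase):
    def test_each_key_is_required(self):
        base_context = {
            'course_title': "Bogus Course Title",
            'course_url': "/location/of/course/url",
            'account_settings_url': "/location/of/account/settings/url",
            'platform_name': 'edX',
            'email': 'your-email@test.com',
        }
        for keyname in base_context:
            context = dict(base_context)   # copy so the loop's base stays intact
            del context[keyname]
            with self.assertRaises(KeyError):
                render_plaintext("My new plain text.", context)

if __name__ == '__main__':
    unittest.main()
```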
......@@ -50,8 +50,8 @@ def my_update_subtask_status(entry_id, current_task_id, new_subtask_status):
and run to completion before control is returned to the code that
invoked the retry. If the retries eventually end in failure (e.g. due to
a maximum number of retries being attempted), the "eager" code will return
the error for each retry that is on the stack. We want to just ignore the
later updates that are called as the result of the earlier retries.
the error for each retry as it is popped off the stack. We want to just ignore
the later updates that are called as the result of the earlier retries.
This should not be an issue in production, where status is updated before
a task is retried, and is then updated afterwards if the retry fails.
......@@ -93,7 +93,7 @@ class TestBulkEmailInstructorTask(InstructorTaskCourseTestCase):
to_option = SEND_TO_ALL
course_id = course_id or self.course.id
course_email = CourseEmail.create(course_id, self.instructor, to_option, "Test Subject", "<p>This is a test message</p>")
task_input = {'email_id': course_email.id}
task_input = {'email_id': course_email.id} # pylint: disable=E1101
task_id = str(uuid4())
instructor_task = InstructorTaskFactory.create(
course_id=course_id,
......@@ -106,13 +106,13 @@ class TestBulkEmailInstructorTask(InstructorTaskCourseTestCase):
def _run_task_with_mock_celery(self, task_class, entry_id, task_id):
"""Submit a task and mock how celery provides a current_task."""
self.current_task = Mock()
self.current_task.max_retries = settings.BULK_EMAIL_MAX_RETRIES
self.current_task.default_retry_delay = settings.BULK_EMAIL_DEFAULT_RETRY_DELAY
mock_current_task = Mock()
mock_current_task.max_retries = settings.BULK_EMAIL_MAX_RETRIES
mock_current_task.default_retry_delay = settings.BULK_EMAIL_DEFAULT_RETRY_DELAY
task_args = [entry_id, {}]
with patch('bulk_email.tasks._get_current_task') as mock_get_task:
mock_get_task.return_value = self.current_task
mock_get_task.return_value = mock_current_task
return task_class.apply(task_args, task_id=task_id).get()
def test_email_missing_current_task(self):
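The test now builds a local `mock_current_task` and patches `bulk_email.tasks._get_current_task`, the thin wrapper this commit documents further down, kept around `celery.current_task` precisely so tests have a seam to mock. A minimal sketch of the wrapper-plus-patch idea, self-contained and patching the wrapper in the running module rather than a real edX path:

```python
from unittest import mock   # the edx-platform tests use the `mock` backport on Python 2

def _get_current_task():
    """Thin wrapper around celery.current_task so tests can substitute their own object."""
    from celery import current_task
    return current_task

def describe_retry_policy():
    """Code under test: reads retry settings off whatever task object the wrapper returns."""
    task = _get_current_task()
    return (task.max_retries, task.default_retry_delay)

if __name__ == '__main__':
    fake_task = mock.Mock()
    fake_task.max_retries = 5
    fake_task.default_retry_delay = 15
    # Patch the wrapper in this module rather than celery.current_task itself.
    with mock.patch('__main__._get_current_task', return_value=fake_task):
        print(describe_retry_policy())   # -> (5, 15)
```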
......@@ -126,12 +126,21 @@ class TestBulkEmailInstructorTask(InstructorTaskCourseTestCase):
with self.assertRaises(ValueError):
self._run_task_with_mock_celery(send_bulk_course_email, task_entry.id, task_entry.task_id)
def test_bad_task_id_on_update(self):
task_entry = self._create_input_entry()
def dummy_update_subtask_status(entry_id, _current_task_id, new_subtask_status):
"""Passes a bad value for task_id to test update_subtask_status"""
bogus_task_id = "this-is-bogus"
update_subtask_status(entry_id, bogus_task_id, new_subtask_status)
with self.assertRaises(ValueError):
with patch('bulk_email.tasks.update_subtask_status', dummy_update_subtask_status):
send_bulk_course_email(task_entry.id, {}) # pylint: disable=E1101
def _create_students(self, num_students):
"""Create students, a problem, and StudentModule objects for testing"""
students = [
self.create_student('robot%d' % i) for i in xrange(num_students)
]
return students
"""Create students for testing"""
return [self.create_student('robot%d' % i) for i in xrange(num_students)]
def _assert_single_subtask_status(self, entry, succeeded, failed=0, skipped=0, retried_nomax=0, retried_withmax=0):
"""Compare counts with 'subtasks' entry in InstructorTask table."""
......@@ -139,23 +148,22 @@ class TestBulkEmailInstructorTask(InstructorTaskCourseTestCase):
# verify subtask-level counts:
self.assertEquals(subtask_info.get('total'), 1)
self.assertEquals(subtask_info.get('succeeded'), 1 if succeeded > 0 else 0)
self.assertEquals(subtask_info['failed'], 0 if succeeded > 0 else 1)
# self.assertEquals(subtask_info['retried'], retried_nomax + retried_withmax)
self.assertEquals(subtask_info.get('failed'), 0 if succeeded > 0 else 1)
# verify individual subtask status:
subtask_status_info = subtask_info['status']
subtask_status_info = subtask_info.get('status')
task_id_list = subtask_status_info.keys()
self.assertEquals(len(task_id_list), 1)
task_id = task_id_list[0]
subtask_status = subtask_status_info.get(task_id)
print("Testing subtask status: {}".format(subtask_status))
self.assertEquals(subtask_status['task_id'], task_id)
self.assertEquals(subtask_status['attempted'], succeeded + failed)
self.assertEquals(subtask_status['succeeded'], succeeded)
self.assertEquals(subtask_status['skipped'], skipped)
self.assertEquals(subtask_status['failed'], failed)
self.assertEquals(subtask_status['retried_nomax'], retried_nomax)
self.assertEquals(subtask_status['retried_withmax'], retried_withmax)
self.assertEquals(subtask_status['state'], SUCCESS if succeeded > 0 else FAILURE)
self.assertEquals(subtask_status.get('task_id'), task_id)
self.assertEquals(subtask_status.get('attempted'), succeeded + failed)
self.assertEquals(subtask_status.get('succeeded'), succeeded)
self.assertEquals(subtask_status.get('skipped'), skipped)
self.assertEquals(subtask_status.get('failed'), failed)
self.assertEquals(subtask_status.get('retried_nomax'), retried_nomax)
self.assertEquals(subtask_status.get('retried_withmax'), retried_withmax)
self.assertEquals(subtask_status.get('state'), SUCCESS if succeeded > 0 else FAILURE)
def _test_run_with_task(self, task_class, action_name, total, succeeded, failed=0, skipped=0, retried_nomax=0, retried_withmax=0):
"""Run a task and check the number of emails processed."""
......@@ -171,8 +179,8 @@ class TestBulkEmailInstructorTask(InstructorTaskCourseTestCase):
status = json.loads(entry.task_output)
self.assertEquals(status.get('attempted'), succeeded + failed)
self.assertEquals(status.get('succeeded'), succeeded)
self.assertEquals(status['skipped'], skipped)
self.assertEquals(status['failed'], failed)
self.assertEquals(status.get('skipped'), skipped)
self.assertEquals(status.get('failed'), failed)
self.assertEquals(status.get('total'), total)
self.assertEquals(status.get('action_name'), action_name)
self.assertGreater(status.get('duration_ms'), 0)
......
......@@ -36,11 +36,31 @@ def get_request_for_thread():
del frame
def get_course(course_id, depth=0):
"""
Given a course id, return the corresponding course descriptor.
If course_id is not valid, raises a ValueError. This is appropriate
for internal use.
depth: The number of levels of children for the modulestore to cache.
None means infinite depth. Default is to fetch no children.
"""
try:
course_loc = CourseDescriptor.id_to_location(course_id)
return modulestore().get_instance(course_id, course_loc, depth=depth)
except (KeyError, ItemNotFoundError):
raise ValueError("Course not found: {}".format(course_id))
except InvalidLocationError:
raise ValueError("Invalid location: {}".format(course_id))
def get_course_by_id(course_id, depth=0):
"""
Given a course id, return the corresponding course descriptor.
If course_id is not valid, raises a 404.
depth: The number of levels of children for the modulestore to cache. None means infinite depth
"""
try:
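The new `get_course` raises `ValueError` so non-view code (such as the bulk email task) does not have to catch an HTTP-specific exception, while `get_course_by_id` keeps its `Http404` behaviour for views. One way to picture the relationship is a view-facing helper acting as a translation layer over the internal one; the sketch below shows that shape only, it is not the actual implementation, which calls the modulestore directly in both functions.

```python
class Http404(Exception):
    """Stand-in for django.http.Http404 in this sketch."""

_FAKE_MODULESTORE = {'MITx/6.00x/2013_Spring': object()}

def get_course(course_id):
    """Internal lookup: signal a bad or unknown course id with ValueError."""
    try:
        return _FAKE_MODULESTORE[course_id]
    except KeyError:
        raise ValueError("Course not found: {}".format(course_id))

def get_course_by_id(course_id):
    """View-facing lookup: translate the internal error into a 404."""
    try:
        return get_course(course_id)
    except ValueError:
        raise Http404("Course not found: {}".format(course_id))

if __name__ == '__main__':
    try:
        get_course_by_id('MITx/foobar/doesnotexist')
    except Http404 as exc:
        print(exc)
```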
......@@ -51,6 +71,7 @@ def get_course_by_id(course_id, depth=0):
except InvalidLocationError:
raise Http404("Invalid location")
def get_course_with_access(user, course_id, action, depth=0):
"""
Given a course_id, look up the corresponding course descriptor,
......@@ -182,7 +203,6 @@ def get_course_about_section(course, section_key):
raise KeyError("Invalid about key " + str(section_key))
def get_course_info_section(request, course, section_key):
"""
This returns the snippet of html to be rendered on the course info page,
......@@ -194,8 +214,6 @@ def get_course_info_section(request, course, section_key):
- updates
- guest_updates
"""
loc = Location(course.location.tag, course.location.org, course.location.course, 'course_info', section_key)
# Use an empty cache
......
......@@ -2,15 +2,18 @@
from django.test import TestCase
from django.http import Http404
from django.test.utils import override_settings
from courseware.courses import get_course_by_id, get_cms_course_link_by_id
from courseware.courses import get_course_by_id, get_course, get_cms_course_link_by_id
CMS_BASE_TEST = 'testcms'
class CoursesTest(TestCase):
"""Test methods related to fetching courses."""
def test_get_course_by_id_invalid_chars(self):
"""
Test that `get_course_by_id` throws a 404, rather than
an exception, when faced with unexpected characters
an exception, when faced with unexpected characters
(such as unicode characters, and symbols such as = and ' ')
"""
with self.assertRaises(Http404):
......@@ -18,6 +21,17 @@ class CoursesTest(TestCase):
get_course_by_id('MITx/foobar/business and management')
get_course_by_id('MITx/foobar/NiñøJoséMaríáßç')
def test_get_course_invalid_chars(self):
"""
Test that `get_course` throws a ValueError, rather than
a 404, when faced with unexpected characters
(such as unicode characters, and symbols such as = and ' ')
"""
with self.assertRaises(ValueError):
get_course('MITx/foobar/statistics=introduction')
get_course('MITx/foobar/business and management')
get_course('MITx/foobar/NiñøJoséMaríáßç')
@override_settings(CMS_BASE=CMS_BASE_TEST)
def test_get_cms_course_link_by_id(self):
"""
......
......@@ -90,10 +90,16 @@ def _update_instructor_task(instructor_task, task_result):
is usually not saved. In general, tasks that have finished (either with
success or failure) should have their entries updated by the task itself,
so are not updated here. Tasks that are still running are not updated
while they run. So the one exception to the no-save rule are tasks that
and saved while they run. The one exception to the no-save rule is tasks that
are in a "revoked" state. This may mean that the task never had the
opportunity to update the InstructorTask entry.
Tasks that are in progress and have subtasks doing the processing do not look
to the task's AsyncResult object. When subtasks are running, the
InstructorTask object itself is updated with the subtasks' progress,
not any AsyncResult object. In this case, the InstructorTask is
not updated at all.
Calculates json to store in "task_output" field of the `instructor_task`,
as well as updating the task_state.
......@@ -110,10 +116,12 @@ def _update_instructor_task(instructor_task, task_result):
returned_result = task_result.result
result_traceback = task_result.traceback
# Assume we don't always update the InstructorTask entry if we don't have to:
# Assume we don't always save the InstructorTask entry if we don't have to,
# but that in most cases we will update the InstructorTask in-place with its
# current progress.
entry_needs_updating = True
entry_needs_saving = False
task_output = None
entry_needs_updating = True
if instructor_task.task_state == PROGRESS and len(instructor_task.subtasks) > 0:
# This happens when running subtasks: the result object is marked with SUCCESS,
......
......@@ -5,7 +5,7 @@ from time import time
import json
from celery.utils.log import get_task_logger
from celery.states import SUCCESS, RETRY, READY_STATES
from celery.states import SUCCESS, READY_STATES
from django.db import transaction
......@@ -87,14 +87,7 @@ def increment_subtask_status(subtask_result, succeeded=0, failed=0, skipped=0, r
return new_result
# def _get_retry_count(subtask_result):
# """Return the number of retries counted for the given subtask."""
# retry_count = subtask_result.get('retried_nomax', 0)
# retry_count += subtask_result.get('retried_withmax', 0)
# return retry_count
def update_instructor_task_for_subtasks(entry, action_name, total_num, subtask_id_list):
def initialize_subtask_info(entry, action_name, total_num, subtask_id_list):
"""
Store initial subtask information to InstructorTask object.
......
......@@ -112,9 +112,9 @@ class BaseInstructorTask(Task):
except InstructorTask.DoesNotExist:
# if the InstructorTask object does not exist, then there's no point
# trying to update it.
pass
TASK_LOG.error("Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
else:
TASK_LOG.warning("background task (%s) failed: %s %s", task_id, einfo.exception, einfo.traceback)
TASK_LOG.warning("Task (%s) failed: %s %s", task_id, einfo.exception, einfo.traceback)
entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
entry.task_state = FAILURE
entry.save_now()
......@@ -131,7 +131,15 @@ class UpdateProblemModuleStateError(Exception):
def _get_current_task():
"""Stub to make it easier to test without actually running Celery"""
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
......
......@@ -47,8 +47,24 @@ class InstructorTaskReportTest(InstructorTaskTestCase):
expected_ids.append(self._create_success_entry().task_id)
expected_ids.append(self._create_progress_entry().task_id)
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(TEST_COURSE_ID, self.problem_url)]
in get_instructor_task_history(TEST_COURSE_ID, problem_url=self.problem_url)]
self.assertEquals(set(task_ids), set(expected_ids))
# make the same call using explicit task_type:
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(
TEST_COURSE_ID,
problem_url=self.problem_url,
task_type='rescore_problem'
)]
self.assertEquals(set(task_ids), set(expected_ids))
# make the same call using a non-existent task_type:
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(
TEST_COURSE_ID,
problem_url=self.problem_url,
task_type='dummy_type'
)]
self.assertEquals(set(task_ids), set())
class InstructorTaskModuleSubmitTest(InstructorTaskModuleTestCase):
......
......@@ -7,7 +7,6 @@ paths actually work.
"""
import json
from uuid import uuid4
from unittest import skip
from mock import Mock, MagicMock, patch
......@@ -97,16 +96,17 @@ class TestInstructorTasks(InstructorTaskModuleTestCase):
with self.assertRaises(ItemNotFoundError):
self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)
def _test_run_with_task(self, task_class, action_name, expected_num_succeeded):
def _test_run_with_task(self, task_class, action_name, expected_num_succeeded, expected_num_skipped=0):
"""Run a task and check the number of StudentModules processed."""
task_entry = self._create_input_entry()
status = self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)
# check return value
self.assertEquals(status.get('attempted'), expected_num_succeeded)
self.assertEquals(status.get('attempted'), expected_num_succeeded + expected_num_skipped)
self.assertEquals(status.get('succeeded'), expected_num_succeeded)
self.assertEquals(status.get('total'), expected_num_succeeded)
self.assertEquals(status.get('skipped'), expected_num_skipped)
self.assertEquals(status.get('total'), expected_num_succeeded + expected_num_skipped)
self.assertEquals(status.get('action_name'), action_name)
self.assertGreater('duration_ms', 0)
self.assertGreater(status.get('duration_ms'), 0)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(json.loads(entry.task_output), status)
......@@ -220,7 +220,6 @@ class TestRescoreInstructorTask(TestInstructorTasks):
def test_rescore_with_short_error_msg(self):
self._test_run_with_short_error_msg(rescore_problem)
@skip
def test_rescoring_unrescorable(self):
input_state = json.dumps({'done': True})
num_students = 1
......@@ -228,9 +227,7 @@ class TestRescoreInstructorTask(TestInstructorTasks):
task_entry = self._create_input_entry()
mock_instance = MagicMock()
del mock_instance.rescore_problem
# TODO: figure out why this patch isn't working, when it seems to work fine for
# the test_rescoring_success test below. Weird.
with patch('courseware.module_render.get_module_for_descriptor_internal') as mock_get_module:
with patch('instructor_task.tasks_helper.get_module_for_descriptor_internal') as mock_get_module:
mock_get_module.return_value = mock_instance
with self.assertRaises(UpdateProblemModuleStateError):
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
......@@ -247,8 +244,8 @@ class TestRescoreInstructorTask(TestInstructorTasks):
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry()
mock_instance = Mock()
mock_instance.rescore_problem = Mock({'success': 'correct'})
with patch('courseware.module_render.get_module_for_descriptor_internal') as mock_get_module:
mock_instance.rescore_problem = Mock(return_value={'success': 'correct'})
with patch('instructor_task.tasks_helper.get_module_for_descriptor_internal') as mock_get_module:
mock_get_module.return_value = mock_instance
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
# check return value
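Two fixes land in this hunk: `Mock({'success': 'correct'})` passed the dict as Mock's first positional argument, which is its `spec`, so calling `rescore_problem()` never actually returned the dict; `Mock(return_value=...)` does. The patch target also moves to `instructor_task.tasks_helper`, the module in whose namespace the helper is looked up at call time, rather than the module where it is defined. A short, standalone illustration of the `return_value` point, independent of the edX modules:

```python
from unittest import mock   # `from mock import Mock` on the Python 2 codebase

# A dict as the first positional argument becomes the mock's spec, not its result:
spec_mock = mock.Mock({'success': 'correct'})
print(spec_mock())                     # -> another Mock, not the dict

# return_value is what makes the call produce the intended result:
result_mock = mock.Mock(return_value={'success': 'correct'})
print(result_mock())                   # -> {'success': 'correct'}
```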
......@@ -258,7 +255,47 @@ class TestRescoreInstructorTask(TestInstructorTasks):
self.assertEquals(output.get('succeeded'), num_students)
self.assertEquals(output.get('total'), num_students)
self.assertEquals(output.get('action_name'), 'rescored')
self.assertGreater('duration_ms', 0)
self.assertGreater(output.get('duration_ms'), 0)
def test_rescoring_bad_result(self):
# Confirm that rescoring does not succeed if "success" key is not an expected value.
input_state = json.dumps({'done': True})
num_students = 10
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry()
mock_instance = Mock()
mock_instance.rescore_problem = Mock(return_value={'success': 'bogus'})
with patch('instructor_task.tasks_helper.get_module_for_descriptor_internal') as mock_get_module:
mock_get_module.return_value = mock_instance
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
# check return value
entry = InstructorTask.objects.get(id=task_entry.id)
output = json.loads(entry.task_output)
self.assertEquals(output.get('attempted'), num_students)
self.assertEquals(output.get('succeeded'), 0)
self.assertEquals(output.get('total'), num_students)
self.assertEquals(output.get('action_name'), 'rescored')
self.assertGreater(output.get('duration_ms'), 0)
def test_rescoring_missing_result(self):
# Confirm that rescoring does not succeed if "success" key is not returned.
input_state = json.dumps({'done': True})
num_students = 10
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry()
mock_instance = Mock()
mock_instance.rescore_problem = Mock(return_value={'bogus': 'value'})
with patch('instructor_task.tasks_helper.get_module_for_descriptor_internal') as mock_get_module:
mock_get_module.return_value = mock_instance
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
# check return value
entry = InstructorTask.objects.get(id=task_entry.id)
output = json.loads(entry.task_output)
self.assertEquals(output.get('attempted'), num_students)
self.assertEquals(output.get('succeeded'), 0)
self.assertEquals(output.get('total'), num_students)
self.assertEquals(output.get('action_name'), 'rescored')
self.assertGreater(output.get('duration_ms'), 0)
class TestResetAttemptsInstructorTask(TestInstructorTasks):
......@@ -297,6 +334,18 @@ class TestResetAttemptsInstructorTask(TestInstructorTasks):
# check that entries were reset
self._assert_num_attempts(students, 0)
def test_reset_with_zero_attempts(self):
initial_attempts = 0
input_state = json.dumps({'attempts': initial_attempts})
num_students = 10
students = self._create_students_with_state(num_students, input_state)
# check that entries were set correctly
self._assert_num_attempts(students, initial_attempts)
# run the task
self._test_run_with_task(reset_problem_attempts, 'reset', 0, expected_num_skipped=num_students)
# check that entries were reset
self._assert_num_attempts(students, 0)
def _test_reset_with_student(self, use_email):
"""Run a reset task for one student, with several StudentModules for the problem defined."""
num_students = 10
......@@ -323,7 +372,8 @@ class TestResetAttemptsInstructorTask(TestInstructorTasks):
self.assertEquals(status.get('succeeded'), 1)
self.assertEquals(status.get('total'), 1)
self.assertEquals(status.get('action_name'), 'reset')
self.assertGreater('duration_ms', 0)
self.assertGreater(status.get('duration_ms'), 0)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(json.loads(entry.task_output), status)
......
......@@ -154,6 +154,7 @@ def get_task_completion_info(instructor_task):
if instructor_task.task_state == PROGRESS:
# special message for providing progress updates:
# Translators: {action} is a past-tense verb that is localized separately. {attempted} and {succeeded} are counts.
msg_format = _("Progress: {action} {succeeded} of {attempted} so far")
elif student is not None and problem_url is not None:
# this reports on actions on problems for a particular student:
......
......@@ -140,12 +140,13 @@ PAID_COURSE_REGISTRATION_CURRENCY = ENV_TOKENS.get('PAID_COURSE_REGISTRATION_CUR
# Bulk Email overrides
DEFAULT_BULK_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_BULK_FROM_EMAIL', DEFAULT_BULK_FROM_EMAIL)
EMAILS_PER_TASK = ENV_TOKENS.get('EMAILS_PER_TASK', 100)
EMAILS_PER_QUERY = ENV_TOKENS.get('EMAILS_PER_QUERY', 1000)
EMAILS_PER_TASK = ENV_TOKENS.get('EMAILS_PER_TASK', EMAILS_PER_TASK)
EMAILS_PER_QUERY = ENV_TOKENS.get('EMAILS_PER_QUERY', EMAILS_PER_QUERY)
BULK_EMAIL_DEFAULT_RETRY_DELAY = ENV_TOKENS.get('BULK_EMAIL_DEFAULT_RETRY_DELAY', BULK_EMAIL_DEFAULT_RETRY_DELAY)
BULK_EMAIL_MAX_RETRIES = ENV_TOKENS.get('BULK_EMAIL_MAX_RETRIES', BULK_EMAIL_MAX_RETRIES)
BULK_EMAIL_INFINITE_RETRY_CAP = ENV_TOKENS.get('BULK_EMAIL_INFINITE_RETRY_CAP', BULK_EMAIL_INFINITE_RETRY_CAP)
BULK_EMAIL_LOG_SENT_EMAILS = ENV_TOKENS.get('BULK_EMAIL_LOG_SENT_EMAILS', BULK_EMAIL_LOG_SENT_EMAILS)
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = ENV_TOKENS.get('BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS', BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
......
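The aws.py change above switches EMAILS_PER_TASK and EMAILS_PER_QUERY from hard-coded fallbacks to using the values already defined in the common settings as their defaults, keeping a single source of truth when the env file does not override them. A tiny sketch of that pattern, with an example ENV_TOKENS dict in place of the real JSON-loaded config:

```python
# common.py-style defaults (single source of truth):
EMAILS_PER_TASK = 100
EMAILS_PER_QUERY = 1000

# env-specific overrides, as they might arrive from a JSON config file:
ENV_TOKENS = {'EMAILS_PER_TASK': 50}   # EMAILS_PER_QUERY deliberately not overridden

# aws.py-style pattern: fall back to the value inherited from the common settings
# module, instead of repeating the literal default here.
EMAILS_PER_TASK = ENV_TOKENS.get('EMAILS_PER_TASK', EMAILS_PER_TASK)
EMAILS_PER_QUERY = ENV_TOKENS.get('EMAILS_PER_QUERY', EMAILS_PER_QUERY)

print("{} {}".format(EMAILS_PER_TASK, EMAILS_PER_QUERY))   # -> 50 1000
```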
......@@ -822,6 +822,12 @@ BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
# a bulk email message.
BULK_EMAIL_LOG_SENT_EMAILS = False
# Delay in seconds to sleep between individual mail messages being sent,
# when a bulk email task is retried for rate-related reasons. Choose this
# value depending on the number of workers that might be sending email in
# parallel, and what the SES rate is.
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02
################################### APPS ######################################
INSTALLED_APPS = (
# Standard ones that are always installed...
......