Commit ea0027f3 by J. Cliff Dyer Committed by Sanford Student

refactor CAPA to use scorable xblock mixin

for TNL-6594
parent bf8aef33
......@@ -304,26 +304,25 @@ class LoncapaProblem(object):
maxscore += responder.get_max_score()
return maxscore
def get_score(self):
def calculate_score(self, correct_map=None):
"""
Compute score for this problem. The score is the number of points awarded.
Returns a dictionary {'score': integer, from 0 to get_max_score(),
'total': get_max_score()}.
Takes an optional correctness map for use in the rescore workflow.
"""
if correct_map is None:
correct_map = self.correct_map
correct = 0
for key in self.correct_map:
for key in correct_map:
try:
correct += self.correct_map.get_npoints(key)
correct += correct_map.get_npoints(key)
except Exception:
log.error('key=%s, correct_map = %s', key, self.correct_map)
log.error('key=%s, correct_map = %s', key, correct_map)
raise
if (not self.student_answers) or len(self.student_answers) == 0:
return {'score': 0,
'total': self.get_max_score()}
else:
return {'score': correct,
'total': self.get_max_score()}
return {'score': correct, 'total': self.get_max_score()}
def update_score(self, score_msg, queuekey):
"""
......@@ -397,7 +396,9 @@ class LoncapaProblem(object):
# if answers include File objects, convert them to filenames.
self.student_answers = convert_files_to_filenames(answers)
return self._grade_answers(answers)
new_cmap = self.get_grade_from_current_answers(answers)
self.correct_map = new_cmap
return self.correct_map
def supports_rescoring(self):
"""
......@@ -418,16 +419,10 @@ class LoncapaProblem(object):
"""
return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())
def rescore_existing_answers(self):
"""
Rescore student responses. Called by capa_module.rescore_problem.
"""
return self._grade_answers(None)
def _grade_answers(self, student_answers):
def get_grade_from_current_answers(self, student_answers):
"""
Internal grading call used for checking new 'student_answers' and also
rescoring existing student_answers.
Gets the grade for the currently-saved problem state, but does not save it
to the block.
For new student_answers being graded, `student_answers` is a dict of all the
entries from request.POST, but with the first part of each key removed
......@@ -462,7 +457,6 @@ class LoncapaProblem(object):
results = responder.evaluate_answers(self.student_answers, oldcmap)
newcmap.update(results)
self.correct_map = newcmap
return newcmap
def get_question_answers(self):
......
......@@ -324,6 +324,7 @@ class CapaDescriptor(CapaFields, RawDescriptor):
hint_button = module_attr('hint_button')
handle_problem_html_error = module_attr('handle_problem_html_error')
handle_ungraded_response = module_attr('handle_ungraded_response')
has_submitted_answer = module_attr('has_submitted_answer')
is_attempted = module_attr('is_attempted')
is_correct = module_attr('is_correct')
is_past_due = module_attr('is_past_due')
......@@ -332,7 +333,7 @@ class CapaDescriptor(CapaFields, RawDescriptor):
make_dict_of_responses = module_attr('make_dict_of_responses')
new_lcp = module_attr('new_lcp')
publish_grade = module_attr('publish_grade')
rescore_problem = module_attr('rescore_problem')
rescore = module_attr('rescore')
reset_problem = module_attr('reset_problem')
save_problem = module_attr('save_problem')
set_state_from_lcp = module_attr('set_state_from_lcp')
......
......@@ -29,6 +29,7 @@ from xmodule.capa_module import CapaModule, CapaDescriptor, ComplexEncoder
from opaque_keys.edx.locations import Location
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xblock.scorable import Score
from . import get_test_system
from pytz import UTC
......@@ -130,9 +131,9 @@ class CapaFactory(object):
if override_get_score:
if correct:
# TODO: probably better to actually set the internal state properly, but...
module.get_score = lambda: {'score': 1, 'total': 1}
module.score = Score(raw_earned=1, raw_possible=1)
else:
module.get_score = lambda: {'score': 0, 'total': 1}
module.score = Score(raw_earned=0, raw_possible=1)
module.graded = 'False'
return module
......@@ -196,10 +197,10 @@ class CapaModuleTest(unittest.TestCase):
def test_import(self):
module = CapaFactory.create()
self.assertEqual(module.get_score()['score'], 0)
self.assertEqual(module.get_score().raw_earned, 0)
other_module = CapaFactory.create()
self.assertEqual(module.get_score()['score'], 0)
self.assertEqual(module.get_score().raw_earned, 0)
self.assertNotEqual(module.url_name, other_module.url_name,
"Factory should be creating unique names for each problem")
......@@ -208,31 +209,33 @@ class CapaModuleTest(unittest.TestCase):
Check that the factory creates correct and incorrect problems properly.
"""
module = CapaFactory.create()
self.assertEqual(module.get_score()['score'], 0)
self.assertEqual(module.get_score().raw_earned, 0)
other_module = CapaFactory.create(correct=True)
self.assertEqual(other_module.get_score()['score'], 1)
self.assertEqual(other_module.get_score().raw_earned, 1)
def test_get_score(self):
"""
Do 1 test where the internals of get_score are properly set
@jbau Note: this obviously depends on a particular implementation of get_score, but I think this is actually
useful as unit-code coverage for this current implementation. I don't see a layer where LoncapaProblem
is tested directly
Tests the internals of get_score. In keeping with the ScorableXBlock spec,
Capa modules store their score independently of the LCP internals, so it must
be explicitly updated.
"""
student_answers = {'1_2_1': 'abcd'}
correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=0.9)
module = CapaFactory.create(correct=True, override_get_score=False)
module.lcp.correct_map = correct_map
module.lcp.student_answers = student_answers
self.assertEqual(module.get_score()['score'], 0.9)
self.assertEqual(module.get_score().raw_earned, 0.0)
module.set_score(module.score_from_lcp())
self.assertEqual(module.get_score().raw_earned, 0.9)
other_correct_map = CorrectMap(answer_id='1_2_1', correctness="incorrect", npoints=0.1)
other_module = CapaFactory.create(correct=False, override_get_score=False)
other_module.lcp.correct_map = other_correct_map
other_module.lcp.student_answers = student_answers
self.assertEqual(other_module.get_score()['score'], 0.1)
self.assertEqual(other_module.get_score().raw_earned, 0.0)
other_module.set_score(other_module.score_from_lcp())
self.assertEqual(other_module.get_score().raw_earned, 0.1)
def test_showanswer_default(self):
"""
......@@ -1007,14 +1010,15 @@ class CapaModuleTest(unittest.TestCase):
# Simulate that all answers are marked correct, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
result = module.rescore_problem(only_if_higher=False)
mock_evaluate_answers.return_value = CorrectMap(
answer_id=CapaFactory.answer_key(),
correctness='correct',
npoints=1,
)
module.rescore(only_if_higher=False)
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
# Expect that we get no HTML
self.assertNotIn('contents', result)
self.assertEqual(module.is_correct(), True)
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 1)
......@@ -1028,10 +1032,10 @@ class CapaModuleTest(unittest.TestCase):
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
result = module.rescore_problem(only_if_higher=False)
module.rescore(only_if_higher=False)
# Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
self.assertEqual(module.is_correct(), False)
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 0)
......@@ -1042,7 +1046,7 @@ class CapaModuleTest(unittest.TestCase):
# Try to rescore the problem, and get exception
with self.assertRaises(xmodule.exceptions.NotFoundError):
module.rescore_problem(only_if_higher=False)
module.rescore(only_if_higher=False)
def test_rescore_problem_not_supported(self):
module = CapaFactory.create(done=True)
......@@ -1051,7 +1055,7 @@ class CapaModuleTest(unittest.TestCase):
with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
mock_supports_rescoring.return_value = False
with self.assertRaises(NotImplementedError):
module.rescore_problem(only_if_higher=False)
module.rescore(only_if_higher=False)
def _rescore_problem_error_helper(self, exception_class):
"""Helper to allow testing all errors that rescoring might return."""
......@@ -1059,13 +1063,10 @@ class CapaModuleTest(unittest.TestCase):
module = CapaFactory.create(attempts=1, done=True)
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
mock_rescore.side_effect = exception_class(u'test error \u03a9')
result = module.rescore_problem(only_if_higher=False)
# Expect an AJAX alert message in 'success'
expected_msg = u'Error: test error \u03a9'
self.assertEqual(result['success'], expected_msg)
with self.assertRaises(exception_class):
module.rescore(only_if_higher=False)
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
......
......@@ -18,6 +18,7 @@ from xmodule.capa_module import CapaModule
from opaque_keys.edx.locations import Location
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xblock.scorable import Score
from . import get_test_system
from pytz import UTC
......@@ -111,9 +112,9 @@ class CapaFactoryWithDelay(object):
if correct:
# Could set the internal state formally, but here we just jam in the score.
module.get_score = lambda: {'score': 1, 'total': 1}
module.score = Score(raw_earned=1, raw_possible=1)
else:
module.get_score = lambda: {'score': 0, 'total': 1}
module.score = Score(raw_earned=0, raw_possible=1)
return module
......
......@@ -2,6 +2,7 @@
Grades related signals.
"""
from contextlib import contextmanager
from crum import get_current_user
from logging import getLogger
from django.dispatch import receiver
......@@ -32,6 +33,8 @@ from ..tasks import recalculate_subsection_grade_v3, RECALCULATE_GRADE_DELAY
log = getLogger(__name__)
# define values to be used in grading events
GRADES_RESCORE_EVENT_TYPE = 'edx.grades.problem.rescored'
PROBLEM_SUBMITTED_EVENT_TYPE = 'edx.grades.problem.submitted'
......@@ -209,7 +212,7 @@ def enqueue_subsection_update(sender, **kwargs): # pylint: disable=unused-argum
Handles the PROBLEM_WEIGHTED_SCORE_CHANGED signal by
enqueueing a subsection update operation to occur asynchronously.
"""
_emit_problem_submitted_event(kwargs)
_emit_event(kwargs)
result = recalculate_subsection_grade_v3.apply_async(
kwargs=dict(
user_id=kwargs['user_id'],
......@@ -241,12 +244,14 @@ def recalculate_course_grade(sender, course, course_structure, user, **kwargs):
CourseGradeFactory().update(user, course=course, course_structure=course_structure)
def _emit_problem_submitted_event(kwargs):
def _emit_event(kwargs):
"""
Emits a problem submitted event only if
there is no current event transaction type,
i.e. we have not reached this point in the
code via a rescore or student state deletion.
Emits a problem submitted event only if there is no current event
transaction type, i.e. we have not reached this point in the code via a
rescore or student state deletion.
If the event transaction type has already been set and the transaction is
a rescore, emits a problem rescored event.
"""
root_type = get_event_transaction_type()
......@@ -267,3 +272,24 @@ def _emit_problem_submitted_event(kwargs):
'weighted_possible': kwargs.get('weighted_possible'),
}
)
if root_type == 'edx.grades.problem.rescored':
current_user = get_current_user()
if current_user is not None and hasattr(current_user, 'id'):
instructor_id = unicode(current_user.id)
else:
instructor_id = None
tracker.emit(
unicode(GRADES_RESCORE_EVENT_TYPE),
{
'course_id': unicode(kwargs['course_id']),
'user_id': unicode(kwargs['user_id']),
'problem_id': unicode(kwargs['usage_id']),
'new_weighted_earned': kwargs.get('weighted_earned'),
'new_weighted_possible': kwargs.get('weighted_possible'),
'only_if_higher': kwargs.get('only_if_higher'),
'instructor_id': instructor_id,
'event_transaction_id': unicode(get_event_transaction_id()),
'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
}
)
......@@ -160,10 +160,9 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
}
)
@patch('lms.djangoapps.instructor_task.tasks_helper.module_state.tracker')
@patch('lms.djangoapps.grades.signals.handlers.tracker')
@patch('lms.djangoapps.grades.models.tracker')
def test_rescoring_events(self, models_tracker, handlers_tracker, instructor_task_tracker):
def test_rescoring_events(self, models_tracker, handlers_tracker):
# submit answer
self.submit_question_answer('p1', {'2_1': 'choice_choice_3'})
models_tracker.reset_mock()
......@@ -187,11 +186,8 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
)
# check logging to make sure id's are tracked correctly across
# events
event_transaction_id = instructor_task_tracker.emit.mock_calls[0][1][1]['event_transaction_id']
self.assertEqual(
instructor_task_tracker.get_tracker().context.call_args[0],
('edx.grades.problem.rescored', {'course_id': unicode(self.course.id), 'org_id': unicode(self.course.org)})
)
event_transaction_id = handlers_tracker.emit.mock_calls[0][1][1]['event_transaction_id']
# make sure the id is propagated throughout the event flow
for call in models_tracker.emit.mock_calls:
self.assertEqual(event_transaction_id, call[1][1]['event_transaction_id'])
......@@ -206,7 +202,7 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
handlers_tracker.assert_not_called()
instructor_task_tracker.emit.assert_called_with(
handlers_tracker.emit.assert_called_with(
unicode(RESCORE_TYPE),
{
'course_id': unicode(self.course.id),
......
......@@ -100,6 +100,17 @@ def _get_xmodule_instance_args(request, task_id):
return xmodule_instance_args
def _supports_rescore(descriptor):
"""
Helper method to determine whether a given item supports rescoring.
In order to accommodate both XModules and XBlocks, we have to check
the descriptor itself then fall back on its module class.
"""
return hasattr(descriptor, 'rescore') or (
hasattr(descriptor, 'module_class') and hasattr(descriptor.module_class, 'rescore')
)
def _update_instructor_task(instructor_task, task_result):
"""
Updates and possibly saves a InstructorTask entry based on a task Result.
......@@ -246,10 +257,7 @@ def check_arguments_for_rescoring(usage_key):
corresponding module doesn't support rescoring calls.
"""
descriptor = modulestore().get_item(usage_key)
# TODO: Clean this up as part of TNL-6594 when CAPA uses the ScorableXBlockMixin
if (
not hasattr(descriptor, 'module_class') or not hasattr(descriptor.module_class, 'rescore_problem')
) and not hasattr(descriptor, 'rescore'):
if not _supports_rescore(descriptor):
msg = "Specified module does not support rescoring."
raise NotImplementedError(msg)
......@@ -264,8 +272,7 @@ def check_entrance_exam_problems_for_rescoring(exam_key): # pylint: disable=inv
any of the problem in entrance exam doesn't support re-scoring calls.
"""
problems = get_problems_in_section(exam_key).values()
if any(not hasattr(problem, 'module_class') or not hasattr(problem.module_class, 'rescore_problem')
for problem in problems):
if any(not _supports_rescore(problem) for problem in problems):
msg = _("Not all problems in entrance exam support re-scoring.")
raise NotImplementedError(msg)
......
......@@ -10,6 +10,7 @@ from time import time
from eventtracking import tracker
from opaque_keys.edx.keys import UsageKey
from xmodule.modulestore.django import modulestore
from capa.responsetypes import StudentInputError, ResponseError, LoncapaProblemError
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.models import StudentModule
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
......@@ -174,77 +175,30 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
TASK_LOG.warning(msg)
return UPDATE_STATUS_FAILED
# TODO: (TNL-6594) Remove this rescore_problem switch
# once CAPA uses ScorableXBlockMixin.
for method in ['rescore', 'rescore_problem']:
rescore_method = getattr(instance, method, None)
if rescore_method is not None:
break
else: # for-else: Neither method exists on the block.
if not hasattr(instance, 'rescore'):
# This should not happen, since it should be already checked in the
# caller, but check here to be sure.
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
# TODO: Remove the first part of this if-else with TNL-6594
# We check here to see if the problem has any submissions. If it does not, we don't want to rescore it
if hasattr(instance, "done"):
if not instance.done:
return UPDATE_STATUS_SKIPPED
elif not instance.has_submitted_answer():
return UPDATE_STATUS_SKIPPED
if not instance.has_submitted_answer():
return UPDATE_STATUS_SKIPPED
# Set the tracking info before this call, because it makes downstream
# calls that create events. We retrieve and store the id here because
# the request cache will be erased during downstream calls.
event_transaction_id = create_new_event_transaction_id()
create_new_event_transaction_id()
set_event_transaction_type(GRADES_RESCORE_EVENT_TYPE)
result = rescore_method(only_if_higher=task_input['only_if_higher'])
instance.save()
if result is None or result.get(u'success') in {u'correct', u'incorrect'}:
TASK_LOG.debug(
u"successfully processed rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s",
dict(
course=course_id,
loc=usage_key,
student=student
)
)
if result is not None: # Only for CAPA. This will get moved to the grade handler.
new_weighted_earned, new_weighted_possible = weighted_score(
result['new_raw_earned'] if result else None,
result['new_raw_possible'] if result else None,
module_descriptor.weight,
)
# TODO: remove this context manager after completion of AN-6134
context = course_context_from_course_id(course_id)
with tracker.get_tracker().context(GRADES_RESCORE_EVENT_TYPE, context):
tracker.emit(
unicode(GRADES_RESCORE_EVENT_TYPE),
{
'course_id': unicode(course_id),
'user_id': unicode(student.id),
'problem_id': unicode(usage_key),
'new_weighted_earned': new_weighted_earned,
'new_weighted_possible': new_weighted_possible,
'only_if_higher': task_input['only_if_higher'],
'instructor_id': unicode(xmodule_instance_args['request_info']['user_id']),
'event_transaction_id': unicode(event_transaction_id),
'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
}
)
return UPDATE_STATUS_SUCCEEDED
else:
# specific events from CAPA are not propagated up the stack. Do we want this?
try:
instance.rescore(only_if_higher=task_input['only_if_higher'])
except (LoncapaProblemError, StudentInputError, ResponseError):
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
u"and student %(student)s",
dict(
msg=result.get('success', result),
course=course_id,
loc=usage_key,
student=student
......@@ -252,6 +206,19 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
)
return UPDATE_STATUS_FAILED
instance.save()
TASK_LOG.debug(
u"successfully processed rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s",
dict(
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_SUCCEEDED
@outer_atomic
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module, _task_input):
......
......@@ -18,6 +18,7 @@ from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from openedx.core.djangoapps.util.testing import TestConditionalContent
from openedx.core.djangolib.testing.utils import get_mock_request
from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
CustomResponseXMLFactory)
from xmodule.modulestore.tests.factories import ItemFactory
......@@ -275,7 +276,7 @@ class TestRescoringTask(TestIntegrationTask):
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
expected_message = "bad things happened"
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
mock_rescore.side_effect = ZeroDivisionError(expected_message)
instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)
......@@ -293,7 +294,7 @@ class TestRescoringTask(TestIntegrationTask):
# return an input error as if it were a numerical response, with an embedded unicode character:
expected_message = u"Could not interpret '2/3\u03a9' as a number"
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
mock_rescore.side_effect = StudentInputError(expected_message)
instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
......
......@@ -307,30 +307,21 @@ class TestRescoreInstructorTask(TestInstructorTasks):
action_name='rescored'
)
@ddt.data(
('rescore', None),
('rescore_problem', {'success': 'correct', 'new_raw_earned': 1, 'new_raw_possible': 1})
)
@ddt.unpack
def test_rescoring_success(self, rescore_method, rescore_result):
def test_rescoring_success(self):
"""
Tests rescores a problem in a course, for all students succeeds.
"""
mock_instance = MagicMock()
other_method = ({'rescore', 'rescore_problem'} - {rescore_method}).pop()
getattr(mock_instance, rescore_method).return_value = rescore_result
delattr(mock_instance, other_method)
if rescore_method == 'rescore':
del mock_instance.done
mock_instance.has_submitted_answer.return_value = True
else:
mock_instance.done = True
getattr(mock_instance, 'rescore').return_value = None
mock_instance.has_submitted_answer.return_value = True
del mock_instance.done # old CAPA code used to use this value so we delete it here to be sure
num_students = 10
self._create_students_with_state(num_students)
task_entry = self._create_input_entry()
with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
with patch(
'lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal'
) as mock_get_module:
mock_get_module.return_value = mock_instance
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
......@@ -344,56 +335,6 @@ class TestRescoreInstructorTask(TestInstructorTasks):
action_name='rescored'
)
def test_rescoring_bad_result(self):
"""
Tests and confirm that rescoring does not succeed if "success" key is not an expected value.
"""
input_state = json.dumps({'done': True})
num_students = 10
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry()
mock_instance = Mock()
mock_instance.rescore_problem = Mock(return_value={'success': 'bogus'})
del mock_instance.rescore
with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
mock_get_module.return_value = mock_instance
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
self.assert_task_output(
output=self.get_task_output(task_entry.id),
total=num_students,
attempted=num_students,
succeeded=0,
skipped=0,
failed=num_students,
action_name='rescored'
)
def test_rescoring_missing_result(self):
"""
Tests and confirm that rescoring does not succeed if "success" key is not returned.
"""
input_state = json.dumps({'done': True})
num_students = 10
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry()
mock_instance = Mock()
mock_instance.rescore_problem = Mock(return_value={'bogus': 'value'})
del mock_instance.rescore
with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
mock_get_module.return_value = mock_instance
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
self.assert_task_output(
output=self.get_task_output(task_entry.id),
total=num_students,
attempted=num_students,
succeeded=0,
skipped=0,
failed=num_students,
action_name='rescored'
)
@attr(shard=3)
class TestResetAttemptsInstructorTask(TestInstructorTasks):
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment