Commit ea0027f3 by J. Cliff Dyer, committed by Sanford Student

refactor CAPA to use scorable xblock mixin

for TNL-6594
parent bf8aef33
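For orientation, the change swaps CAPA's ad-hoc score dicts ({'score': ..., 'total': ...}) and the rescore_problem entry point for the ScorableXBlockMixin contract. The sketch below is illustrative only (the toy class and values are hypothetical); it shows the hooks the mixin relies on, using only names that appear in the diff (Score, raw_earned, raw_possible, has_submitted_answer, set_score, get_score, calculate_score):

# Illustrative sketch only; the real implementations are in CapaMixin further down.
from xblock.scorable import Score  # carries raw_earned / raw_possible


class ToyScorableBlock(object):
    """Hypothetical block showing the hooks ScorableXBlockMixin relies on."""

    def __init__(self):
        self.done = False                                 # set when the learner submits
        self.score = Score(raw_earned=0, raw_possible=1)

    def has_submitted_answer(self):
        # Replaces the old direct checks on `self.done` in the rescore task.
        return self.done

    def set_score(self, score):
        # The score is persisted on the block instead of recomputed from the LCP.
        self.score = score

    def get_score(self):
        return self.score

    def calculate_score(self):
        # A real block recomputes this from its problem state (see score_from_lcp below).
        return Score(raw_earned=1, raw_possible=1)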
@@ -304,26 +304,25 @@ class LoncapaProblem(object):
             maxscore += responder.get_max_score()
         return maxscore

-    def get_score(self):
+    def calculate_score(self, correct_map=None):
         """
         Compute score for this problem. The score is the number of points awarded.
         Returns a dictionary {'score': integer, from 0 to get_max_score(),
         'total': get_max_score()}.
+
+        Takes an optional correctness map for use in the rescore workflow.
         """
+        if correct_map is None:
+            correct_map = self.correct_map
         correct = 0
-        for key in self.correct_map:
+        for key in correct_map:
             try:
-                correct += self.correct_map.get_npoints(key)
+                correct += correct_map.get_npoints(key)
             except Exception:
-                log.error('key=%s, correct_map = %s', key, self.correct_map)
+                log.error('key=%s, correct_map = %s', key, correct_map)
                 raise

-        if (not self.student_answers) or len(self.student_answers) == 0:
-            return {'score': 0,
-                    'total': self.get_max_score()}
-        else:
-            return {'score': correct,
-                    'total': self.get_max_score()}
+        return {'score': correct, 'total': self.get_max_score()}

     def update_score(self, score_msg, queuekey):
         """
@@ -397,7 +396,9 @@ class LoncapaProblem(object):
         # if answers include File objects, convert them to filenames.
         self.student_answers = convert_files_to_filenames(answers)

-        return self._grade_answers(answers)
+        new_cmap = self.get_grade_from_current_answers(answers)
+        self.correct_map = new_cmap
+        return self.correct_map

     def supports_rescoring(self):
         """
@@ -418,16 +419,10 @@ class LoncapaProblem(object):
         """
         return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())

-    def rescore_existing_answers(self):
-        """
-        Rescore student responses. Called by capa_module.rescore_problem.
-        """
-        return self._grade_answers(None)
-
-    def _grade_answers(self, student_answers):
+    def get_grade_from_current_answers(self, student_answers):
         """
-        Internal grading call used for checking new 'student_answers' and also
-        rescoring existing student_answers.
+        Gets the grade for the currently-saved problem state, but does not save it
+        to the block.

         For new student_answers being graded, `student_answers` is a dict of all the
         entries from request.POST, but with the first part of each key removed
@@ -462,7 +457,6 @@ class LoncapaProblem(object):
         results = responder.evaluate_answers(self.student_answers, oldcmap)
         newcmap.update(results)

-        self.correct_map = newcmap
-
         return newcmap

     def get_question_answers(self):
......
@@ -24,6 +24,7 @@ from capa.inputtypes import Status
 from capa.responsetypes import StudentInputError, ResponseError, LoncapaProblemError
 from capa.util import convert_files_to_filenames, get_inner_html_from_xpath
 from xblock.fields import Boolean, Dict, Float, Integer, Scope, String, XMLString
+from xblock.scorable import ScorableXBlockMixin, Score
 from xmodule.capa_base_constants import RANDOMIZATION, SHOWANSWER, SHOW_CORRECTNESS
 from xmodule.exceptions import NotFoundError
 from .fields import Date, Timedelta
@@ -219,7 +220,7 @@ class CapaFields(object):
     )


-class CapaMixin(CapaFields):
+class CapaMixin(ScorableXBlockMixin, CapaFields):
     """
     Core logic for Capa Problem, which can be used by XModules or XBlocks.
     """
@@ -290,6 +291,8 @@ class CapaMixin(CapaFields):
             self.set_state_from_lcp()

+        self.set_score(self.score_from_lcp())
+
         assert self.seed is not None

     def choose_new_seed(self):
@@ -372,32 +375,28 @@ class CapaMixin(CapaFields):
         """
         self.last_submission_time = datetime.datetime.now(utc)

-    def get_score(self):
-        """
-        Access the problem's score
-        """
-        return self.lcp.get_score()
-
     def get_progress(self):
         """
-        For now, just return score / max_score
+        For now, just return weighted earned / weighted possible
         """
-        score_dict = self.get_score()
-        score = score_dict['score']
-        total = score_dict['total']
+        score = self.get_score()
+        raw_earned = score.raw_earned
+        raw_possible = score.raw_possible

-        if total > 0:
+        if raw_possible > 0:
             if self.weight is not None:
                 # Progress objects expect total > 0
                 if self.weight == 0:
                     return None

                 # scale score and total by weight/total:
-                score = score * self.weight / total
-                total = self.weight
+                weighted_earned = raw_earned * self.weight / raw_possible
+                weighted_possible = self.weight
+            else:
+                weighted_earned = raw_earned
+                weighted_possible = raw_possible

             try:
-                return Progress(score, total)
+                return Progress(weighted_earned, weighted_possible)
             except (TypeError, ValueError):
                 log.exception("Got bad progress")
                 return None
@@ -572,7 +571,7 @@ class CapaMixin(CapaFields):
         # Next, generate a fresh LoncapaProblem
         self.lcp = self.new_lcp(None)
         self.set_state_from_lcp()
+        self.set_score(self.score_from_lcp())

         # Prepend a scary warning to the student
         _ = self.runtime.service(self, "i18n").ugettext
         warning_msg = _("Warning: The problem has been reset to its initial state!")
@@ -879,8 +878,7 @@ class CapaMixin(CapaFields):
         """
         True iff full points
         """
-        score_dict = self.get_score()
-        return score_dict['score'] == score_dict['total']
+        return self.score.raw_earned == self.score.raw_possible

     def answer_available(self):
         """
@@ -949,6 +947,7 @@ class CapaMixin(CapaFields):
         score_msg = data['xqueue_body']
         self.lcp.update_score(score_msg, queuekey)
         self.set_state_from_lcp()
+        self.set_score(self.score_from_lcp())
         self.publish_grade()

         return dict()  # No AJAX return is needed
@@ -1132,18 +1131,17 @@ class CapaMixin(CapaFields):
         """
         Publishes the student's current grade to the system as an event
         """
-        score = self.lcp.get_score()
         self.runtime.publish(
             self,
             'grade',
             {
-                'value': score['score'],
-                'max_value': score['total'],
+                'value': self.score.raw_earned,
+                'max_value': self.score.raw_possible,
                 'only_if_higher': only_if_higher,
             }
         )

-        return {'grade': score['score'], 'max_grade': score['total']}
+        return {'grade': self.score.raw_earned, 'max_grade': self.score.raw_possible}

     # pylint: disable=too-many-statements
     def submit_problem(self, data, override_time=False):
@@ -1216,6 +1214,7 @@ class CapaMixin(CapaFields):
             self.attempts = self.attempts + 1
             self.lcp.done = True
             self.set_state_from_lcp()
+            self.set_score(self.score_from_lcp())
             self.set_last_submission_time()

         except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
@@ -1227,6 +1226,7 @@ class CapaMixin(CapaFields):
             # Save the user's state before failing
             self.set_state_from_lcp()
+            self.set_score(self.score_from_lcp())

             # If the user is a staff member, include
             # the full exception, including traceback,
@@ -1245,6 +1245,7 @@ class CapaMixin(CapaFields):
         except Exception as err:
             # Save the user's state before failing
             self.set_state_from_lcp()
+            self.set_score(self.score_from_lcp())

             if self.runtime.DEBUG:
                 msg = u"Error checking problem: {}".format(err.message)
@@ -1457,93 +1458,6 @@ class CapaMixin(CapaFields):
        return input_metadata
def rescore_problem(self, only_if_higher):
"""
Checks whether the existing answers to a problem are correct.
This is called when the correct answer to a problem has been changed,
and the grade should be re-evaluated.
If only_if_higher is True, the answer and grade are updated
only if the resulting score is higher than before.
Returns a dict with one key:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
Raises NotFoundError if called on a problem that has not yet been
answered, or NotImplementedError if it's a problem that cannot be rescored.
Returns the error messages for exceptions occurring while performing
the rescoring, rather than throwing them.
"""
event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.to_deprecated_string()}
_ = self.runtime.service(self, "i18n").ugettext
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.track_function_unmask('problem_rescore_fail', event_info)
# pylint: disable=line-too-long
# Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
raise NotImplementedError(_("Problem's definition does not support rescoring."))
# pylint: enable=line-too-long
if not self.done:
event_info['failure'] = 'unanswered'
self.track_function_unmask('problem_rescore_fail', event_info)
raise NotFoundError(_("Problem must be answered before it can be graded again."))
# get old score, for comparison:
orig_score = self.lcp.get_score()
event_info['orig_score'] = orig_score['score']
event_info['orig_total'] = orig_score['total']
try:
correct_map = self.lcp.rescore_existing_answers()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("Input error in capa_module:problem_rescore", exc_info=True)
event_info['failure'] = 'input_error'
self.track_function_unmask('problem_rescore_fail', event_info)
return {'success': u"Error: {0}".format(inst.message)}
except Exception as err:
event_info['failure'] = 'unexpected'
self.track_function_unmask('problem_rescore_fail', event_info)
if self.runtime.DEBUG:
msg = u"Error checking problem: {0}".format(err.message)
msg += u'\nTraceback:\n' + traceback.format_exc()
return {'success': msg}
raise
# rescoring should have no effect on attempts, so don't
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
self.publish_grade(only_if_higher)
new_score = self.lcp.get_score()
event_info['new_score'] = new_score['score']
event_info['new_total'] = new_score['total']
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.track_function_unmask('problem_rescore', event_info)
return {
'success': success,
'new_raw_earned': new_score['score'],
'new_raw_possible': new_score['total'],
}
    def save_problem(self, data):
        """
        Save the passed in answers.
@@ -1584,6 +1498,7 @@ class CapaMixin(CapaFields):
         self.lcp.has_saved_answers = True
         self.set_state_from_lcp()
+        self.set_score(self.score_from_lcp())

         self.track_function_unmask('save_problem_success', event_info)
         msg = _("Your answers have been saved.")
@@ -1642,6 +1557,7 @@ class CapaMixin(CapaFields):
         # Pull in the new problem seed
         self.set_state_from_lcp()
+        self.set_score(self.score_from_lcp())

         # Grade may have changed, so publish new value
         self.publish_grade()
@@ -1653,3 +1569,116 @@ class CapaMixin(CapaFields):
            'success': True,
            'html': self.get_problem_html(encapsulate=False),
        }
# ScorableXBlockMixin methods
def rescore(self, only_if_higher=False):
"""
Checks whether the existing answers to a problem are correct.
This is called when the correct answer to a problem has been changed,
and the grade should be re-evaluated.
If only_if_higher is True, the answer and grade are updated
only if the resulting score is higher than before.
Returns a dict with one key:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
Raises NotFoundError if called on a problem that has not yet been
answered, or NotImplementedError if it's a problem that cannot be rescored.
Returns the error messages for exceptions occurring while performing
the rescoring, rather than throwing them.
"""
event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.to_deprecated_string()}
_ = self.runtime.service(self, "i18n").ugettext
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.track_function_unmask('problem_rescore_fail', event_info)
# pylint: disable=line-too-long
# Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
raise NotImplementedError(_("Problem's definition does not support rescoring."))
# pylint: enable=line-too-long
if not self.done:
event_info['failure'] = 'unanswered'
self.track_function_unmask('problem_rescore_fail', event_info)
raise NotFoundError(_("Problem must be answered before it can be graded again."))
# get old score, for comparison:
orig_score = self.get_score()
event_info['orig_score'] = orig_score.raw_earned
event_info['orig_total'] = orig_score.raw_possible
try:
calculated_score = self.calculate_score()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("Input error in capa_module:problem_rescore", exc_info=True)
event_info['failure'] = 'input_error'
self.track_function_unmask('problem_rescore_fail', event_info)
raise
except Exception:
event_info['failure'] = 'unexpected'
self.track_function_unmask('problem_rescore_fail', event_info)
raise
# rescoring should have no effect on attempts, so don't
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
self.set_score(calculated_score)
self.publish_grade(only_if_higher)
event_info['new_score'] = calculated_score.raw_earned
event_info['new_total'] = calculated_score.raw_possible
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in self.lcp.correct_map:
if not self.lcp.correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['correct_map'] = self.lcp.correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.track_function_unmask('problem_rescore', event_info)
def has_submitted_answer(self):
return self.done
def set_score(self, score):
"""
Sets the internal score for the problem. This is not derived directly
from the internal LCP in keeping with the ScorableXBlock spec.
"""
self.score = score
def get_score(self):
"""
Returns the score currently set on the block.
"""
return self.score
def calculate_score(self):
"""
Returns the score calculated from the current problem state.
Operates by creating a new correctness map based on the current
state of the LCP, and having the LCP generate a score from that.
"""
new_correctness = self.lcp.get_grade_from_current_answers(None)
new_score = self.lcp.calculate_score(new_correctness)
return Score(raw_earned=new_score['score'], raw_possible=new_score['total'])
def score_from_lcp(self):
"""
Returns the score associated with the correctness map
currently stored by the LCP.
"""
lcp_score = self.lcp.calculate_score()
return Score(raw_earned=lcp_score['score'], raw_possible=lcp_score['total'])
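The repeated pattern in the CapaMixin hunks above is that every sync of LCP state onto the block is now paired with a refresh of the stored Score, so publish_grade() and is_correct() can read self.score directly. A hedged sketch of that idiom (block is a hypothetical CapaMixin-derived instance; the helper name is made up for illustration):

def sync_state_and_score(block):
    """
    Illustrative only: mirrors the set_state_from_lcp()/set_score() pairing
    added throughout the diff above.
    """
    block.set_state_from_lcp()                # persist LoncapaProblem state onto block fields
    block.set_score(block.score_from_lcp())   # refresh the stored Score from the LCP correctness map
    return block.publish_grade()              # emits the 'grade' event from block.score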
@@ -324,6 +324,7 @@ class CapaDescriptor(CapaFields, RawDescriptor):
     hint_button = module_attr('hint_button')
     handle_problem_html_error = module_attr('handle_problem_html_error')
     handle_ungraded_response = module_attr('handle_ungraded_response')
+    has_submitted_answer = module_attr('has_submitted_answer')
     is_attempted = module_attr('is_attempted')
     is_correct = module_attr('is_correct')
     is_past_due = module_attr('is_past_due')
@@ -332,7 +333,7 @@ class CapaDescriptor(CapaFields, RawDescriptor):
     make_dict_of_responses = module_attr('make_dict_of_responses')
     new_lcp = module_attr('new_lcp')
     publish_grade = module_attr('publish_grade')
-    rescore_problem = module_attr('rescore_problem')
+    rescore = module_attr('rescore')
     reset_problem = module_attr('reset_problem')
     save_problem = module_attr('save_problem')
     set_state_from_lcp = module_attr('set_state_from_lcp')
......
@@ -29,6 +29,7 @@ from xmodule.capa_module import CapaModule, CapaDescriptor, ComplexEncoder
 from opaque_keys.edx.locations import Location
 from xblock.field_data import DictFieldData
 from xblock.fields import ScopeIds
+from xblock.scorable import Score
 from . import get_test_system
 from pytz import UTC
@@ -130,9 +131,9 @@ class CapaFactory(object):
         if override_get_score:
             if correct:
                 # TODO: probably better to actually set the internal state properly, but...
-                module.get_score = lambda: {'score': 1, 'total': 1}
+                module.score = Score(raw_earned=1, raw_possible=1)
             else:
-                module.get_score = lambda: {'score': 0, 'total': 1}
+                module.score = Score(raw_earned=0, raw_possible=1)
         module.graded = 'False'
         return module
@@ -196,10 +197,10 @@ class CapaModuleTest(unittest.TestCase):
     def test_import(self):
         module = CapaFactory.create()
-        self.assertEqual(module.get_score()['score'], 0)
+        self.assertEqual(module.get_score().raw_earned, 0)

         other_module = CapaFactory.create()
-        self.assertEqual(module.get_score()['score'], 0)
+        self.assertEqual(module.get_score().raw_earned, 0)
         self.assertNotEqual(module.url_name, other_module.url_name,
                             "Factory should be creating unique names for each problem")
@@ -208,31 +209,33 @@ class CapaModuleTest(unittest.TestCase):
         Check that the factory creates correct and incorrect problems properly.
         """
         module = CapaFactory.create()
-        self.assertEqual(module.get_score()['score'], 0)
+        self.assertEqual(module.get_score().raw_earned, 0)

         other_module = CapaFactory.create(correct=True)
-        self.assertEqual(other_module.get_score()['score'], 1)
+        self.assertEqual(other_module.get_score().raw_earned, 1)

     def test_get_score(self):
         """
-        Do 1 test where the internals of get_score are properly set
-        @jbau Note: this obviously depends on a particular implementation of get_score, but I think this is
-        useful as unit-code coverage for this current implementation. I don't see a layer where LoncapaProblem
-        is tested directly
+        Tests the internals of get_score. In keeping with the ScorableXBlock spec,
+        Capa modules store their score independently of the LCP internals, so it must
+        actually be explicitly updated.
         """
         student_answers = {'1_2_1': 'abcd'}
         correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=0.9)
         module = CapaFactory.create(correct=True, override_get_score=False)
         module.lcp.correct_map = correct_map
         module.lcp.student_answers = student_answers
-        self.assertEqual(module.get_score()['score'], 0.9)
+        self.assertEqual(module.get_score().raw_earned, 0.0)
+        module.set_score(module.score_from_lcp())
+        self.assertEqual(module.get_score().raw_earned, 0.9)

         other_correct_map = CorrectMap(answer_id='1_2_1', correctness="incorrect", npoints=0.1)
         other_module = CapaFactory.create(correct=False, override_get_score=False)
         other_module.lcp.correct_map = other_correct_map
         other_module.lcp.student_answers = student_answers
-        self.assertEqual(other_module.get_score()['score'], 0.1)
+        self.assertEqual(other_module.get_score().raw_earned, 0.0)
+        other_module.set_score(other_module.score_from_lcp())
+        self.assertEqual(other_module.get_score().raw_earned, 0.1)

     def test_showanswer_default(self):
         """
@@ -1007,14 +1010,15 @@ class CapaModuleTest(unittest.TestCase):
         # Simulate that all answers are marked correct, no matter
         # what the input is, by patching LoncapaResponse.evaluate_answers()
         with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
-            mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
-            result = module.rescore_problem(only_if_higher=False)
+            mock_evaluate_answers.return_value = CorrectMap(
+                answer_id=CapaFactory.answer_key(),
+                correctness='correct',
+                npoints=1,
+            )
+            module.rescore(only_if_higher=False)

         # Expect that the problem is marked correct
-        self.assertEqual(result['success'], 'correct')
-
-        # Expect that we get no HTML
-        self.assertNotIn('contents', result)
+        self.assertEqual(module.is_correct(), True)

         # Expect that the number of attempts is not incremented
         self.assertEqual(module.attempts, 1)
@@ -1028,10 +1032,10 @@ class CapaModuleTest(unittest.TestCase):
         # what the input is, by patching LoncapaResponse.evaluate_answers()
         with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
             mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
-            result = module.rescore_problem(only_if_higher=False)
+            module.rescore(only_if_higher=False)

         # Expect that the problem is marked incorrect
-        self.assertEqual(result['success'], 'incorrect')
+        self.assertEqual(module.is_correct(), False)

         # Expect that the number of attempts is not incremented
         self.assertEqual(module.attempts, 0)
@@ -1042,7 +1046,7 @@ class CapaModuleTest(unittest.TestCase):
         # Try to rescore the problem, and get exception
         with self.assertRaises(xmodule.exceptions.NotFoundError):
-            module.rescore_problem(only_if_higher=False)
+            module.rescore(only_if_higher=False)

     def test_rescore_problem_not_supported(self):
         module = CapaFactory.create(done=True)
@@ -1051,7 +1055,7 @@ class CapaModuleTest(unittest.TestCase):
         with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
             mock_supports_rescoring.return_value = False
             with self.assertRaises(NotImplementedError):
-                module.rescore_problem(only_if_higher=False)
+                module.rescore(only_if_higher=False)

     def _rescore_problem_error_helper(self, exception_class):
         """Helper to allow testing all errors that rescoring might return."""
@@ -1059,13 +1063,10 @@ class CapaModuleTest(unittest.TestCase):
         module = CapaFactory.create(attempts=1, done=True)

         # Simulate answering a problem that raises the exception
-        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
             mock_rescore.side_effect = exception_class(u'test error \u03a9')
-            result = module.rescore_problem(only_if_higher=False)
-
-        # Expect an AJAX alert message in 'success'
-        expected_msg = u'Error: test error \u03a9'
-        self.assertEqual(result['success'], expected_msg)
+            with self.assertRaises(exception_class):
+                module.rescore(only_if_higher=False)

         # Expect that the number of attempts is NOT incremented
         self.assertEqual(module.attempts, 1)
......
@@ -18,6 +18,7 @@ from xmodule.capa_module import CapaModule
 from opaque_keys.edx.locations import Location
 from xblock.field_data import DictFieldData
 from xblock.fields import ScopeIds
+from xblock.scorable import Score
 from . import get_test_system
 from pytz import UTC
@@ -111,9 +112,9 @@ class CapaFactoryWithDelay(object):
         if correct:
             # Could set the internal state formally, but here we just jam in the score.
-            module.get_score = lambda: {'score': 1, 'total': 1}
+            module.score = Score(raw_earned=1, raw_possible=1)
         else:
-            module.get_score = lambda: {'score': 0, 'total': 1}
+            module.score = Score(raw_earned=0, raw_possible=1)

         return module
......
@@ -2,6 +2,7 @@
 Grades related signals.
 """
 from contextlib import contextmanager
+from crum import get_current_user
 from logging import getLogger

 from django.dispatch import receiver
@@ -32,6 +33,8 @@ from ..tasks import recalculate_subsection_grade_v3, RECALCULATE_GRADE_DELAY
 log = getLogger(__name__)

+# define values to be used in grading events
+GRADES_RESCORE_EVENT_TYPE = 'edx.grades.problem.rescored'
 PROBLEM_SUBMITTED_EVENT_TYPE = 'edx.grades.problem.submitted'
@@ -209,7 +212,7 @@ def enqueue_subsection_update(sender, **kwargs):  # pylint: disable=unused-argum
     Handles the PROBLEM_WEIGHTED_SCORE_CHANGED signal by
     enqueueing a subsection update operation to occur asynchronously.
     """
-    _emit_problem_submitted_event(kwargs)
+    _emit_event(kwargs)
     result = recalculate_subsection_grade_v3.apply_async(
         kwargs=dict(
             user_id=kwargs['user_id'],
@@ -241,12 +244,14 @@ def recalculate_course_grade(sender, course, course_structure, user, **kwargs):
     CourseGradeFactory().update(user, course=course, course_structure=course_structure)


-def _emit_problem_submitted_event(kwargs):
+def _emit_event(kwargs):
     """
-    Emits a problem submitted event only if
-    there is no current event transaction type,
-    i.e. we have not reached this point in the
-    code via a rescore or student state deletion.
+    Emits a problem submitted event only if there is no current event
+    transaction type, i.e. we have not reached this point in the code via a
+    rescore or student state deletion.
+
+    If the event transaction type has already been set and the transaction is
+    a rescore, emits a problem rescored event.
     """
     root_type = get_event_transaction_type()
@@ -267,3 +272,24 @@ def _emit_problem_submitted_event(kwargs):
                'weighted_possible': kwargs.get('weighted_possible'),
            }
        )
if root_type == 'edx.grades.problem.rescored':
current_user = get_current_user()
if current_user is not None and hasattr(current_user, 'id'):
instructor_id = unicode(current_user.id)
else:
instructor_id = None
tracker.emit(
unicode(GRADES_RESCORE_EVENT_TYPE),
{
'course_id': unicode(kwargs['course_id']),
'user_id': unicode(kwargs['user_id']),
'problem_id': unicode(kwargs['usage_id']),
'new_weighted_earned': kwargs.get('weighted_earned'),
'new_weighted_possible': kwargs.get('weighted_possible'),
'only_if_higher': kwargs.get('only_if_higher'),
'instructor_id': instructor_id,
'event_transaction_id': unicode(get_event_transaction_id()),
'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
}
)
@@ -160,10 +160,9 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
             }
         )

-    @patch('lms.djangoapps.instructor_task.tasks_helper.module_state.tracker')
     @patch('lms.djangoapps.grades.signals.handlers.tracker')
     @patch('lms.djangoapps.grades.models.tracker')
-    def test_rescoring_events(self, models_tracker, handlers_tracker, instructor_task_tracker):
+    def test_rescoring_events(self, models_tracker, handlers_tracker):
         # submit answer
         self.submit_question_answer('p1', {'2_1': 'choice_choice_3'})
         models_tracker.reset_mock()
@@ -187,11 +186,8 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
         )
         # check logging to make sure id's are tracked correctly across
         # events
-        event_transaction_id = instructor_task_tracker.emit.mock_calls[0][1][1]['event_transaction_id']
-        self.assertEqual(
-            instructor_task_tracker.get_tracker().context.call_args[0],
-            ('edx.grades.problem.rescored', {'course_id': unicode(self.course.id), 'org_id': unicode(self.course.org)})
-        )
+        event_transaction_id = handlers_tracker.emit.mock_calls[0][1][1]['event_transaction_id']

         # make sure the id is propagated throughout the event flow
         for call in models_tracker.emit.mock_calls:
             self.assertEqual(event_transaction_id, call[1][1]['event_transaction_id'])
@@ -206,7 +202,7 @@ class GradesEventIntegrationTest(ProblemSubmissionTestMixin, SharedModuleStoreTe
         handlers_tracker.assert_not_called()

-        instructor_task_tracker.emit.assert_called_with(
+        handlers_tracker.emit.assert_called_with(
             unicode(RESCORE_TYPE),
             {
                 'course_id': unicode(self.course.id),
......
@@ -100,6 +100,17 @@ def _get_xmodule_instance_args(request, task_id):
     return xmodule_instance_args


+def _supports_rescore(descriptor):
+    """
+    Helper method to determine whether a given item supports rescoring.
+    In order to accommodate both XModules and XBlocks, we have to check
+    the descriptor itself then fall back on its module class.
+    """
+    return hasattr(descriptor, 'rescore') or (
+        hasattr(descriptor, 'module_class') and hasattr(descriptor.module_class, 'rescore')
+    )
+
+
 def _update_instructor_task(instructor_task, task_result):
     """
     Updates and possibly saves a InstructorTask entry based on a task Result.
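The helper's docstring explains the dual XModule/XBlock check; a self-contained sketch of the idea follows (the toy classes are hypothetical, and the helper body is copied from the hunk above so the example runs on its own):

def _supports_rescore(descriptor):
    # Copied from the hunk above so the example is self-contained.
    return hasattr(descriptor, 'rescore') or (
        hasattr(descriptor, 'module_class') and hasattr(descriptor.module_class, 'rescore')
    )


class XBlockStyle(object):
    def rescore(self, only_if_higher=False):
        return None


class XModuleStyle(object):
    module_class = XBlockStyle  # rescore lives on the module class, not the descriptor


assert _supports_rescore(XBlockStyle())     # XBlock-style: method on the object itself
assert _supports_rescore(XModuleStyle())    # XModule-style: method on module_class
assert not _supports_rescore(object())      # anything else does not support rescoring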
@@ -246,10 +257,7 @@ def check_arguments_for_rescoring(usage_key):
     corresponding module doesn't support rescoring calls.
     """
     descriptor = modulestore().get_item(usage_key)
-    # TODO: Clean this up as part of TNL-6594 when CAPA uses the ScorableXBlockMixin
-    if (
-        not hasattr(descriptor, 'module_class') or not hasattr(descriptor.module_class, 'rescore_problem')
-    ) and not hasattr(descriptor, 'rescore'):
+    if not _supports_rescore(descriptor):
         msg = "Specified module does not support rescoring."
         raise NotImplementedError(msg)
@@ -264,8 +272,7 @@ def check_entrance_exam_problems_for_rescoring(exam_key):  # pylint: disable=inv
     any of the problem in entrance exam doesn't support re-scoring calls.
     """
     problems = get_problems_in_section(exam_key).values()
-    if any(not hasattr(problem, 'module_class') or not hasattr(problem.module_class, 'rescore_problem')
-           for problem in problems):
+    if any(not _supports_rescore(problem) for problem in problems):
         msg = _("Not all problems in entrance exam support re-scoring.")
         raise NotImplementedError(msg)
......
@@ -10,6 +10,7 @@ from time import time
 from eventtracking import tracker
 from opaque_keys.edx.keys import UsageKey
 from xmodule.modulestore.django import modulestore
+from capa.responsetypes import StudentInputError, ResponseError, LoncapaProblemError
 from courseware.courses import get_course_by_id, get_problems_in_section
 from courseware.models import StudentModule
 from courseware.model_data import DjangoKeyValueStore, FieldDataCache
@@ -174,77 +175,30 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
         TASK_LOG.warning(msg)
         return UPDATE_STATUS_FAILED

-    # TODO: (TNL-6594) Remove this switch once rescore_problem support
-    # once CAPA uses ScorableXBlockMixin.
-    for method in ['rescore', 'rescore_problem']:
-        rescore_method = getattr(instance, method, None)
-        if rescore_method is not None:
-            break
-    else:  # for-else: Neither method exists on the block.
+    if not hasattr(instance, 'rescore'):
         # This should not happen, since it should be already checked in the
         # caller, but check here to be sure.
         msg = "Specified problem does not support rescoring."
         raise UpdateProblemModuleStateError(msg)

-    # TODO: Remove the first part of this if-else with TNL-6594
     # We check here to see if the problem has any submissions. If it does not, we don't want to rescore it
-    if hasattr(instance, "done"):
-        if not instance.done:
-            return UPDATE_STATUS_SKIPPED
-    elif not instance.has_submitted_answer():
+    if not instance.has_submitted_answer():
         return UPDATE_STATUS_SKIPPED

     # Set the tracking info before this call, because it makes downstream
     # calls that create events. We retrieve and store the id here because
     # the request cache will be erased during downstream calls.
-    event_transaction_id = create_new_event_transaction_id()
+    create_new_event_transaction_id()
     set_event_transaction_type(GRADES_RESCORE_EVENT_TYPE)

-    result = rescore_method(only_if_higher=task_input['only_if_higher'])
-    instance.save()
-
-    if result is None or result.get(u'success') in {u'correct', u'incorrect'}:
-        TASK_LOG.debug(
-            u"successfully processed rescore call for course %(course)s, problem %(loc)s "
-            u"and student %(student)s",
-            dict(
-                course=course_id,
-                loc=usage_key,
-                student=student
-            )
-        )
-
-        if result is not None:  # Only for CAPA. This will get moved to the grade handler.
-            new_weighted_earned, new_weighted_possible = weighted_score(
-                result['new_raw_earned'] if result else None,
-                result['new_raw_possible'] if result else None,
-                module_descriptor.weight,
-            )
-
-            # TODO: remove this context manager after completion of AN-6134
-            context = course_context_from_course_id(course_id)
-            with tracker.get_tracker().context(GRADES_RESCORE_EVENT_TYPE, context):
-                tracker.emit(
-                    unicode(GRADES_RESCORE_EVENT_TYPE),
-                    {
-                        'course_id': unicode(course_id),
-                        'user_id': unicode(student.id),
-                        'problem_id': unicode(usage_key),
-                        'new_weighted_earned': new_weighted_earned,
-                        'new_weighted_possible': new_weighted_possible,
-                        'only_if_higher': task_input['only_if_higher'],
-                        'instructor_id': unicode(xmodule_instance_args['request_info']['user_id']),
-                        'event_transaction_id': unicode(event_transaction_id),
-                        'event_transaction_type': unicode(GRADES_RESCORE_EVENT_TYPE),
-                    }
-                )
-        return UPDATE_STATUS_SUCCEEDED
-    else:
+    # specific events from CAPA are not propagated up the stack. Do we want this?
+    try:
+        instance.rescore(only_if_higher=task_input['only_if_higher'])
+    except (LoncapaProblemError, StudentInputError, ResponseError):
         TASK_LOG.warning(
             u"error processing rescore call for course %(course)s, problem %(loc)s "
-            u"and student %(student)s: %(msg)s",
+            u"and student %(student)s",
             dict(
-                msg=result.get('success', result),
                 course=course_id,
                 loc=usage_key,
                 student=student
@@ -252,6 +206,19 @@ def rescore_problem_module_state(xmodule_instance_args, module_descriptor, stude
            )
        )
        return UPDATE_STATUS_FAILED
instance.save()
TASK_LOG.debug(
u"successfully processed rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s",
dict(
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_SUCCEEDED
@outer_atomic
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module, _task_input):
......
@@ -18,6 +18,7 @@ from django.contrib.auth.models import User
 from django.core.urlresolvers import reverse
 from openedx.core.djangoapps.util.testing import TestConditionalContent
+from openedx.core.djangolib.testing.utils import get_mock_request
 from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
                                              CustomResponseXMLFactory)
 from xmodule.modulestore.tests.factories import ItemFactory
@@ -275,7 +276,7 @@ class TestRescoringTask(TestIntegrationTask):
         self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

         expected_message = "bad things happened"
-        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
             mock_rescore.side_effect = ZeroDivisionError(expected_message)
             instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
         self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)
@@ -293,7 +294,7 @@ class TestRescoringTask(TestIntegrationTask):
         # return an input error as if it were a numerical response, with an embedded unicode character:
         expected_message = u"Could not interpret '2/3\u03a9' as a number"
-        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+        with patch('capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
             mock_rescore.side_effect = StudentInputError(expected_message)
             instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
......
@@ -307,30 +307,21 @@ class TestRescoreInstructorTask(TestInstructorTasks):
             action_name='rescored'
         )

-    @ddt.data(
-        ('rescore', None),
-        ('rescore_problem', {'success': 'correct', 'new_raw_earned': 1, 'new_raw_possible': 1})
-    )
-    @ddt.unpack
-    def test_rescoring_success(self, rescore_method, rescore_result):
+    def test_rescoring_success(self):
         """
         Tests rescores a problem in a course, for all students succeeds.
         """
         mock_instance = MagicMock()
-        other_method = ({'rescore', 'rescore_problem'} - {rescore_method}).pop()
-        getattr(mock_instance, rescore_method).return_value = rescore_result
-        delattr(mock_instance, other_method)
-        if rescore_method == 'rescore':
-            del mock_instance.done
-            mock_instance.has_submitted_answer.return_value = True
-        else:
-            mock_instance.done = True
+        getattr(mock_instance, 'rescore').return_value = None
+        mock_instance.has_submitted_answer.return_value = True
+        del mock_instance.done  # old CAPA code used to use this value so we delete it here to be sure

         num_students = 10
         self._create_students_with_state(num_students)
         task_entry = self._create_input_entry()
-        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
+        with patch(
+            'lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal'
+        ) as mock_get_module:
             mock_get_module.return_value = mock_instance
             self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
@@ -344,56 +335,6 @@ class TestRescoreInstructorTask(TestInstructorTasks):
            action_name='rescored'
        )
def test_rescoring_bad_result(self):
"""
Tests and confirm that rescoring does not succeed if "success" key is not an expected value.
"""
input_state = json.dumps({'done': True})
num_students = 10
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry()
mock_instance = Mock()
mock_instance.rescore_problem = Mock(return_value={'success': 'bogus'})
del mock_instance.rescore
with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
mock_get_module.return_value = mock_instance
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
self.assert_task_output(
output=self.get_task_output(task_entry.id),
total=num_students,
attempted=num_students,
succeeded=0,
skipped=0,
failed=num_students,
action_name='rescored'
)
def test_rescoring_missing_result(self):
"""
Tests and confirm that rescoring does not succeed if "success" key is not returned.
"""
input_state = json.dumps({'done': True})
num_students = 10
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry()
mock_instance = Mock()
mock_instance.rescore_problem = Mock(return_value={'bogus': 'value'})
del mock_instance.rescore
with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
mock_get_module.return_value = mock_instance
self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
self.assert_task_output(
output=self.get_task_output(task_entry.id),
total=num_students,
attempted=num_students,
succeeded=0,
skipped=0,
failed=num_students,
action_name='rescored'
)
@attr(shard=3)
class TestResetAttemptsInstructorTask(TestInstructorTasks):
......