Commit c676cfd6 by Brian Wilson

Rename fields in CourseTaskLog, including a task_key column for indexing. Use 'rescore' instead of 'regrade'. Clean up task submission.

Rename fields in CourseTaskLog, including a task_key column for indexing.  Use 'rescore' instead of 'regrade'.  Clean up task submission.
parent 76773c5b
......@@ -269,22 +269,22 @@ class LoncapaProblem(object):
self.student_answers = convert_files_to_filenames(answers)
return self._grade_answers(answers)
def supports_regrading(self):
def supports_rescoring(self):
"""
Checks that the current problem definition permits regrading.
Checks that the current problem definition permits rescoring.
More precisely, it checks that there are no response types in
the current problem that are not fully supported (yet) for regrading.
the current problem that are not fully supported (yet) for rescoring.
This includes responsetypes for which the student's answer
is not properly stored in state, i.e. file submissions. At present,
we have no way to know if an existing response was actually a real
answer or merely the filename of a file submitted as an answer.
It turns out that because regrading is a background task, limiting
It turns out that because rescoring is a background task, limiting
it to responsetypes that don't support file submissions also means
that the responsetypes are synchronous. This is convenient as it
permits regrading to be complete when the regrading call returns.
permits rescoring to be complete when the rescoring call returns.
"""
# We check for synchronous grading and no file submissions by
# screening out all problems with a CodeResponse type.
......@@ -294,16 +294,16 @@ class LoncapaProblem(object):
return True
def regrade_existing_answers(self):
def rescore_existing_answers(self):
'''
Regrade student responses. Called by capa_module.regrade_problem.
Rescore student responses. Called by capa_module.rescore_problem.
'''
return self._grade_answers(None)
def _grade_answers(self, answers):
'''
Internal grading call used for checking new student answers and also
regrading existing student answers.
rescoring existing student answers.
answers is a dict of all the entries from request.POST, but with the first part
of each key removed (the string before the first "_").
......@@ -324,9 +324,9 @@ class LoncapaProblem(object):
# for file submissions. But we have no way of knowing if
# student_answers contains a proper answer or the filename of
# an earlier submission, so for now skip these entirely.
# TODO: figure out where to get file submissions when regrading.
# TODO: figure out where to get file submissions when rescoring.
if 'filesubmission' in responder.allowed_inputfields and answers is None:
raise Exception("Cannot regrade problems with possible file submissions")
raise Exception("Cannot rescore problems with possible file submissions")
# use 'answers' if it is provided, otherwise use the saved student_answers.
if answers is not None:
......
......@@ -812,7 +812,7 @@ class CapaModule(CapaFields, XModule):
'contents': html,
}
def regrade_problem(self):
def rescore_problem(self):
"""
Checks whether the existing answers to a problem are correct.
......@@ -823,23 +823,23 @@ class CapaModule(CapaFields, XModule):
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
Raises NotFoundError if called on a problem that has not yet been
answered, or NotImplementedError if it's a problem that cannot be regraded.
answered, or NotImplementedError if it's a problem that cannot be rescored.
Returns the error messages for exceptions occurring while performing
the regrading, rather than throwing them.
the rescoring, rather than throwing them.
"""
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.url()
if not self.lcp.supports_regrading():
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.system.track_function('problem_regrade_fail', event_info)
raise NotImplementedError("Problem's definition does not support regrading")
self.system.track_function('problem_rescore_fail', event_info)
raise NotImplementedError("Problem's definition does not support rescoring")
if not self.done:
event_info['failure'] = 'unanswered'
self.system.track_function('problem_regrade_fail', event_info)
self.system.track_function('problem_rescore_fail', event_info)
raise NotFoundError('Problem must be answered before it can be graded again')
# get old score, for comparison:
......@@ -848,20 +848,20 @@ class CapaModule(CapaFields, XModule):
event_info['orig_max_score'] = orig_score['total']
try:
correct_map = self.lcp.regrade_existing_answers()
# regrading should have no effect on attempts, so don't
correct_map = self.lcp.rescore_existing_answers()
# rescoring should have no effect on attempts, so don't
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("StudentInputError in capa_module:problem_regrade", exc_info=True)
log.warning("StudentInputError in capa_module:problem_rescore", exc_info=True)
event_info['failure'] = 'student_input_error'
self.system.track_function('problem_regrade_fail', event_info)
self.system.track_function('problem_rescore_fail', event_info)
return {'success': "Error: {0}".format(inst.message)}
except Exception, err:
event_info['failure'] = 'unexpected'
self.system.track_function('problem_regrade_fail', event_info)
self.system.track_function('problem_rescore_fail', event_info)
if self.system.DEBUG:
msg = "Error checking problem: " + str(err)
msg += '\nTraceback:\n' + traceback.format_exc()
......@@ -885,9 +885,9 @@ class CapaModule(CapaFields, XModule):
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.system.track_function('problem_regrade', event_info)
self.system.track_function('problem_rescore', event_info)
# psychometrics should be called on regrading requests in the same way as check-problem
# psychometrics should be called on rescoring requests in the same way as check-problem
if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback
self.system.psychometrics_handler(self.get_state_for_lcp())
......
......@@ -598,7 +598,7 @@ class CapaModuleTest(unittest.TestCase):
# Expect that the problem was NOT reset
self.assertTrue('success' in result and not result['success'])
def test_regrade_problem_correct(self):
def test_rescore_problem_correct(self):
module = CapaFactory.create(attempts=1, done=True)
......@@ -606,7 +606,7 @@ class CapaModuleTest(unittest.TestCase):
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
result = module.regrade_problem()
result = module.rescore_problem()
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
......@@ -617,7 +617,7 @@ class CapaModuleTest(unittest.TestCase):
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 1)
def test_regrade_problem_incorrect(self):
def test_rescore_problem_incorrect(self):
module = CapaFactory.create(attempts=0, done=True)
......@@ -625,7 +625,7 @@ class CapaModuleTest(unittest.TestCase):
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
result = module.regrade_problem()
result = module.rescore_problem()
# Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
......@@ -633,24 +633,24 @@ class CapaModuleTest(unittest.TestCase):
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 0)
def test_regrade_problem_not_done(self):
def test_rescore_problem_not_done(self):
# Simulate that the problem is NOT done
module = CapaFactory.create(done=False)
# Try to regrade the problem, and get exception
# Try to rescore the problem, and get exception
with self.assertRaises(xmodule.exceptions.NotFoundError):
module.regrade_problem()
module.rescore_problem()
def test_regrade_problem_not_supported(self):
def test_rescore_problem_not_supported(self):
module = CapaFactory.create(done=True)
# Try to regrade the problem, and get exception
with patch('capa.capa_problem.LoncapaProblem.supports_regrading') as mock_supports_regrading:
mock_supports_regrading.return_value = False
# Try to rescore the problem, and get exception
with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
mock_supports_rescoring.return_value = False
with self.assertRaises(NotImplementedError):
module.regrade_problem()
module.rescore_problem()
def test_regrade_problem_error(self):
def test_rescore_problem_error(self):
# Try each exception that capa_module should handle
for exception_class in [StudentInputError,
......@@ -661,9 +661,9 @@ class CapaModuleTest(unittest.TestCase):
module = CapaFactory.create(attempts=1, done=True)
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.regrade_existing_answers') as mock_regrade:
mock_regrade.side_effect = exception_class('test error')
result = module.regrade_problem()
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = exception_class('test error')
result = module.rescore_problem()
# Expect an AJAX alert message in 'success'
expected_msg = 'Error: test error'
......
......@@ -11,14 +11,14 @@ class Migration(SchemaMigration):
# Adding model 'CourseTaskLog'
db.create_table('courseware_coursetasklog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task_name', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('task_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('student', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['auth.User'])),
('task_args', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_input', self.gf('django.db.models.fields.CharField')(max_length=255)),
('task_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_state', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, db_index=True)),
('task_progress', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, db_index=True)),
('requester', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
('task_output', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
))
......@@ -72,13 +72,13 @@ class Migration(SchemaMigration):
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'task_args': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'task_progress': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'db_index': 'True'}),
'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'})
},
'courseware.offlinecomputedgrade': {
......
......@@ -4,9 +4,9 @@ WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the mitx dir
1. Go to the edx-platform dir
2. ./manage.py schemamigration courseware --auto description_of_your_change
3. Add the migration file created in mitx/courseware/migrations/
3. Add the migration file created in edx-platform/lms/djangoapps/courseware/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
......@@ -269,28 +269,43 @@ class CourseTaskLog(models.Model):
"""
Stores information about background tasks that have been submitted to
perform course-specific work.
Examples include grading and regrading.
Examples include grading and rescoring.
`task_type` identifies the kind of task being performed, e.g. rescoring.
`course_id` uses the course run's unique id to identify the course.
`task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
Examples include url of problem being rescored, id of student if only one student being rescored.
`task_key` stores relevant input arguments encoded into key value for testing to see
if the task is already running (together with task_type and course_id).
`task_id` stores the id used by celery for the background task.
`task_state` stores the last known state of the celery task
`task_output` stores the output of the celery task.
Format is a JSON-serialized dict. Content varies by task_type and task_state.
`requester` stores id of user who submitted the task
`created` stores date that entry was first created
`updated` stores date that entry was last modified
"""
task_name = models.CharField(max_length=50, db_index=True)
task_type = models.CharField(max_length=50, db_index=True)
course_id = models.CharField(max_length=255, db_index=True)
student = models.ForeignKey(User, null=True, db_index=True, related_name='+') # optional: None = task applies to all students
task_args = models.CharField(max_length=255, db_index=True)
task_key = models.CharField(max_length=255, db_index=True)
task_input = models.CharField(max_length=255)
task_id = models.CharField(max_length=255, db_index=True) # max_length from celery_taskmeta
task_state = models.CharField(max_length=50, null=True, db_index=True) # max_length from celery_taskmeta
task_progress = models.CharField(max_length=1024, null=True, db_index=True)
requester = models.ForeignKey(User, db_index=True, related_name='+')
task_output = models.CharField(max_length=1024, null=True)
requester = models.ForeignKey(User, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
def __repr__(self):
return 'CourseTaskLog<%r>' % ({
'task_name': self.task_name,
'task_type': self.task_type,
'course_id': self.course_id,
'student': self.student.username,
'task_args': self.task_args,
'task_input': self.task_input,
'task_id': self.task_id,
'task_state': self.task_state,
'task_progress': self.task_progress,
'task_output': self.task_output,
},)
def __unicode__(self):
......
......@@ -8,8 +8,8 @@ from celery.states import READY_STATES
from courseware.models import CourseTaskLog
from courseware.module_render import get_xqueue_callback_url_prefix
from courseware.tasks import (regrade_problem_for_all_students, regrade_problem_for_student,
reset_problem_attempts_for_all_students, delete_problem_state_for_all_students)
from courseware.tasks import (rescore_problem,
reset_problem_attempts, delete_problem_state)
from xmodule.modulestore.django import modulestore
......@@ -32,6 +32,17 @@ def get_running_course_tasks(course_id):
return course_tasks
def get_course_task_history(course_id, problem_url, student=None):
"""
Returns a query of CourseTaskLog objects of historical tasks for a given course,
that match a particular problem and optionally a student.
"""
_, task_key = _encode_problem_and_student_input(problem_url, student)
course_tasks = CourseTaskLog.objects.filter(course_id=course_id, task_key=task_key)
return course_tasks.order_by('-id')
def course_task_log_status(request, task_id=None):
"""
This returns the status of a course-related task as a JSON-serialized dict.
......@@ -68,18 +79,16 @@ def course_task_log_status(request, task_id=None):
return HttpResponse(json.dumps(output, indent=4))
def _task_is_running(course_id, task_name, task_args, student=None):
def _task_is_running(course_id, task_type, task_key):
"""Checks if a particular task is already running"""
runningTasks = CourseTaskLog.objects.filter(course_id=course_id, task_name=task_name, task_args=task_args)
if student is not None:
runningTasks = runningTasks.filter(student=student)
runningTasks = CourseTaskLog.objects.filter(course_id=course_id, task_type=task_type, task_key=task_key)
for state in READY_STATES:
runningTasks = runningTasks.exclude(task_state=state)
return len(runningTasks) > 0
@transaction.autocommit
def _reserve_task(course_id, task_name, task_args, requester, student=None):
def _reserve_task(course_id, task_type, task_key, task_input, requester):
"""
Creates a database entry to indicate that a task is in progress.
......@@ -88,17 +97,16 @@ def _reserve_task(course_id, task_name, task_args, requester, student=None):
Autocommit annotation makes sure the database entry is committed.
"""
if _task_is_running(course_id, task_name, task_args, student):
if _task_is_running(course_id, task_type, task_key):
raise AlreadyRunningError("requested task is already running")
# Create log entry now, so that future requests won't
# Create log entry now, so that future requests won't: no task_id yet....
tasklog_args = {'course_id': course_id,
'task_name': task_name,
'task_args': task_args,
'task_type': task_type,
'task_key': task_key,
'task_input': json.dumps(task_input),
'task_state': 'QUEUING',
'requester': requester}
if student is not None:
tasklog_args['student'] = student
course_task_log = CourseTaskLog.objects.create(**tasklog_args)
return course_task_log
......@@ -176,7 +184,7 @@ def _update_course_task_log(course_task_log_entry, task_result):
elif result_state == 'SUCCESS':
# save progress into the entry, even if it's not being saved here -- for EAGER,
# it needs to go back with the entry passed in.
course_task_log_entry.task_progress = json.dumps(returned_result)
course_task_log_entry.task_output = json.dumps(returned_result)
output['task_progress'] = returned_result
log.info("task succeeded: %s", returned_result)
......@@ -192,7 +200,7 @@ def _update_course_task_log(course_task_log_entry, task_result):
task_progress['traceback'] = result_traceback
# save progress into the entry, even if it's not being saved -- for EAGER,
# it needs to go back with the entry passed in.
course_task_log_entry.task_progress = json.dumps(task_progress)
course_task_log_entry.task_output = json.dumps(task_progress)
output['task_progress'] = task_progress
elif result_state == 'REVOKED':
......@@ -204,7 +212,7 @@ def _update_course_task_log(course_task_log_entry, task_result):
output['message'] = message
log.warning("background task (%s) revoked.", task_id)
task_progress = {'message': message}
course_task_log_entry.task_progress = json.dumps(task_progress)
course_task_log_entry.task_output = json.dumps(task_progress)
output['task_progress'] = task_progress
# always update the entry if the state has changed:
......@@ -249,28 +257,28 @@ def _get_course_task_log_status(task_id):
return None
# define ajax return value:
output = {}
status = {}
# if the task is not already known to be done, then we need to query
# the underlying task's result object:
if course_task_log_entry.task_state not in READY_STATES:
result = AsyncResult(task_id)
output.update(_update_course_task_log(course_task_log_entry, result))
elif course_task_log_entry.task_progress is not None:
status.update(_update_course_task_log(course_task_log_entry, result))
elif course_task_log_entry.task_output is not None:
# task is already known to have finished, but report on its status:
output['task_progress'] = json.loads(course_task_log_entry.task_progress)
status['task_progress'] = json.loads(course_task_log_entry.task_output)
# output basic information matching what's stored in CourseTaskLog:
output['task_id'] = course_task_log_entry.task_id
output['task_state'] = course_task_log_entry.task_state
output['in_progress'] = course_task_log_entry.task_state not in READY_STATES
# status basic information matching what's stored in CourseTaskLog:
status['task_id'] = course_task_log_entry.task_id
status['task_state'] = course_task_log_entry.task_state
status['in_progress'] = course_task_log_entry.task_state not in READY_STATES
if course_task_log_entry.task_state in READY_STATES:
succeeded, message = get_task_completion_message(course_task_log_entry)
output['message'] = message
output['succeeded'] = succeeded
status['message'] = message
status['succeeded'] = succeeded
return output
return status
def get_task_completion_message(course_task_log_entry):
......@@ -284,19 +292,26 @@ def get_task_completion_message(course_task_log_entry):
"""
succeeded = False
if course_task_log_entry.task_progress is None:
log.warning("No task_progress information found for course_task {0}".format(course_task_log_entry.task_id))
if course_task_log_entry.task_output is None:
log.warning("No task_output information found for course_task {0}".format(course_task_log_entry.task_id))
return (succeeded, "No status information available")
task_progress = json.loads(course_task_log_entry.task_progress)
task_output = json.loads(course_task_log_entry.task_output)
if course_task_log_entry.task_state in ['FAILURE', 'REVOKED']:
return(succeeded, task_progress['message'])
return(succeeded, task_output['message'])
action_name = task_progress['action_name']
num_attempted = task_progress['attempted']
num_updated = task_progress['updated']
num_total = task_progress['total']
if course_task_log_entry.student is not None:
action_name = task_output['action_name']
num_attempted = task_output['attempted']
num_updated = task_output['updated']
num_total = task_output['total']
if course_task_log_entry.task_input is None:
log.warning("No task_input information found for course_task {0}".format(course_task_log_entry.task_id))
return (succeeded, "No status information available")
task_input = json.loads(course_task_log_entry.task_input)
problem_url = task_input.get('problem_url', None)
student = task_input.get('student', None)
if student is not None:
if num_attempted == 0:
msg = "Unable to find submission to be {action} for student '{student}'"
elif num_updated == 0:
......@@ -314,60 +329,64 @@ def get_task_completion_message(course_task_log_entry):
elif num_updated < num_attempted:
msg = "Problem {action} for {updated} of {attempted} students"
if course_task_log_entry.student is not None and num_attempted != num_total:
if student is not None and num_attempted != num_total:
msg += " (out of {total})"
# Update status in task result object itself:
message = msg.format(action=action_name, updated=num_updated, attempted=num_attempted, total=num_total,
student=course_task_log_entry.student, problem=course_task_log_entry.task_args)
student=student, problem=problem_url)
return (succeeded, message)
########### Add task-submission methods here:
def _check_arguments_for_regrading(course_id, problem_url):
def _check_arguments_for_rescoring(course_id, problem_url):
"""
Do simple checks on the descriptor to confirm that it supports regrading.
Do simple checks on the descriptor to confirm that it supports rescoring.
Confirms first that the problem_url is defined (since that's currently typed
in). An ItemNotFoundException is raised if the corresponding module
descriptor doesn't exist. NotImplementedError is returned if the
corresponding module doesn't support regrading calls.
corresponding module doesn't support rescoring calls.
"""
descriptor = modulestore().get_instance(course_id, problem_url)
supports_regrade = False
supports_rescore = False
if hasattr(descriptor, 'module_class'):
module_class = descriptor.module_class
if hasattr(module_class, 'regrade_problem'):
supports_regrade = True
if hasattr(module_class, 'rescore_problem'):
supports_rescore = True
if not supports_regrade:
msg = "Specified module does not support regrading."
if not supports_rescore:
msg = "Specified module does not support rescoring."
raise NotImplementedError(msg)
def submit_regrade_problem_for_student(request, course_id, problem_url, student):
def _encode_problem_and_student_input(problem_url, student=None):
"""
Request a problem to be regraded as a background task.
Encode problem_url and optional student into task_key and task_input values.
The problem will be regraded for the specified student only. Parameters are the `course_id`,
the `problem_url`, and the `student` as a User object.
The url must specify the location of the problem, using i4x-type notation.
An exception is thrown if the problem doesn't exist, or if the particular
problem is already being regraded for this student.
`problem_url` is full URL of the problem.
`student` is the user object of the student
"""
# check arguments: let exceptions return up to the caller.
_check_arguments_for_regrading(course_id, problem_url)
if student is not None:
task_input = {'problem_url': problem_url, 'student': student.username}
task_key = "{student}_{problem}".format(student=student.id, problem=problem_url)
else:
task_input = {'problem_url': problem_url}
task_key = "{student}_{problem}".format(student="", problem=problem_url)
task_name = 'regrade_problem'
return task_input, task_key
# check to see if task is already running, and reserve it otherwise
course_task_log = _reserve_task(course_id, task_name, problem_url, request.user, student)
# Submit task:
task_args = [course_task_log.id, course_id, problem_url, student.username, _get_xmodule_instance_args(request)]
task_result = regrade_problem_for_student.apply_async(task_args)
def _submit_task(request, task_type, task_class, course_id, task_input, task_key):
"""
"""
# check to see if task is already running, and reserve it otherwise:
course_task_log = _reserve_task(course_id, task_type, task_key, task_input, request.user)
# submit task:
task_args = [course_task_log.id, course_id, task_input, _get_xmodule_instance_args(request)]
task_result = task_class.apply_async(task_args)
# Update info in table with the resulting task_id (and state).
_update_task(course_task_log, task_result)
......@@ -375,33 +394,46 @@ def submit_regrade_problem_for_student(request, course_id, problem_url, student)
return course_task_log
def submit_regrade_problem_for_all_students(request, course_id, problem_url):
def submit_rescore_problem_for_student(request, course_id, problem_url, student):
"""
Request a problem to be regraded as a background task.
Request a problem to be rescored as a background task.
The problem will be regraded for all students who have accessed the
particular problem in a course and have provided and checked an answer.
Parameters are the `course_id` and the `problem_url`.
The problem will be rescored for the specified student only. Parameters are the `course_id`,
the `problem_url`, and the `student` as a User object.
The url must specify the location of the problem, using i4x-type notation.
An exception is thrown if the problem doesn't exist, or if the particular
problem is already being regraded.
problem is already being rescored for this student.
"""
# check arguments: let exceptions return up to the caller.
_check_arguments_for_regrading(course_id, problem_url)
_check_arguments_for_rescoring(course_id, problem_url)
# check to see if task is already running, and reserve it otherwise
task_name = 'regrade_problem'
course_task_log = _reserve_task(course_id, task_name, problem_url, request.user)
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = _encode_problem_and_student_input(problem_url, student)
return _submit_task(request, task_type, task_class, course_id, task_input, task_key)
# Submit task:
task_args = [course_task_log.id, course_id, problem_url, _get_xmodule_instance_args(request)]
task_result = regrade_problem_for_all_students.apply_async(task_args)
# Update info in table with the resulting task_id (and state).
_update_task(course_task_log, task_result)
def submit_rescore_problem_for_all_students(request, course_id, problem_url):
"""
Request a problem to be rescored as a background task.
return course_task_log
The problem will be rescored for all students who have accessed the
particular problem in a course and have provided and checked an answer.
Parameters are the `course_id` and the `problem_url`.
The url must specify the location of the problem, using i4x-type notation.
An exception is thrown if the problem doesn't exist, or if the particular
problem is already being rescored.
"""
# check arguments: let exceptions return up to the caller.
_check_arguments_for_rescoring(course_id, problem_url)
# check to see if task is already running, and reserve it otherwise
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = _encode_problem_and_student_input(problem_url)
return _submit_task(request, task_type, task_class, course_id, task_input, task_key)
def submit_reset_problem_attempts_for_all_students(request, course_id, problem_url):
......@@ -421,19 +453,10 @@ def submit_reset_problem_attempts_for_all_students(request, course_id, problem_u
# an exception will be raised. Let it pass up to the caller.
modulestore().get_instance(course_id, problem_url)
task_name = 'reset_problem_attempts'
# check to see if task is already running, and reserve it otherwise
course_task_log = _reserve_task(course_id, task_name, problem_url, request.user)
# Submit task:
task_args = [course_task_log.id, course_id, problem_url, _get_xmodule_instance_args(request)]
task_result = reset_problem_attempts_for_all_students.apply_async(task_args)
# Update info in table with the resulting task_id (and state).
_update_task(course_task_log, task_result)
return course_task_log
task_type = 'reset_problem_attempts'
task_class = reset_problem_attempts
task_input, task_key = _encode_problem_and_student_input(problem_url)
return _submit_task(request, task_type, task_class, course_id, task_input, task_key)
def submit_delete_problem_state_for_all_students(request, course_id, problem_url):
    """
    Request that problem state be deleted entirely as a background task.

    State is deleted for all students who have accessed the particular
    problem in the course specified by `course_id`.  The `problem_url` must
    specify the location of the problem, using i4x-type notation.

    Returns the resulting CourseTaskLog entry for the submitted task.
    An exception is thrown if the problem doesn't exist, or if an identical
    task is already running for this problem.
    """
    # Check here that the problem and the course are defined in the
    # modulestore; if not, an exception (e.g. ItemNotFoundError) will be
    # raised.  Let it pass up to the caller.
    modulestore().get_instance(course_id, problem_url)

    # Encode the task's input and its key; the key is used by the submission
    # machinery to check whether an identical task is already running.
    task_type = 'delete_problem_state'
    task_class = delete_problem_state
    task_input, task_key = _encode_problem_and_student_input(problem_url)
    return _submit_task(request, task_type, task_class, course_id, task_input, task_key)
......@@ -33,14 +33,14 @@ class UpdateProblemModuleStateError(Exception):
pass
def _update_problem_module_state_internal(course_id, module_state_key, student, update_fcn, action_name, filter_fcn,
def _update_problem_module_state_internal(course_id, module_state_key, student_identifier, update_fcn, action_name, filter_fcn,
xmodule_instance_args):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student` is None, performs update on modules for all students on the specified problem.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered.
......@@ -75,8 +75,17 @@ def _update_problem_module_state_internal(course_id, module_state_key, student,
modules_to_update = StudentModule.objects.filter(course_id=course_id,
module_state_key=module_state_key)
# give the option of regrading an individual student. If not specified,
# then regrades all students who have responded to a problem so far
# give the option of rescoring an individual student. If not specified,
# then rescores all students who have responded to a problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in student_identifier:
student = User.objects.get(email=student_identifier)
elif student_identifier is not None:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
......@@ -109,9 +118,6 @@ def _update_problem_module_state_internal(course_id, module_state_key, student,
num_updated += 1
# update task status:
# TODO: decide on the frequency for updating this:
# -- it may not make sense to do so every time through the loop
# -- may depend on each iteration's duration
current_task.update_state(state='PROGRESS', meta=get_task_progress())
task_progress = get_task_progress()
......@@ -126,7 +132,7 @@ def _save_course_task_log_entry(entry):
entry.save()
def _update_problem_module_state(entry_id, course_id, module_state_key, student, update_fcn, action_name, filter_fcn,
def _update_problem_module_state(entry_id, course_id, module_state_key, student_ident, update_fcn, action_name, filter_fcn,
xmodule_instance_args):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
......@@ -147,16 +153,16 @@ def _update_problem_module_state(entry_id, course_id, module_state_key, student,
# get the CourseTaskLog to be updated. If this fails, then let the exception return to Celery.
# There's no point in catching it here.
entry = CourseTaskLog.objects.get(pk=entry_id)
entry.task_id = task_id
_save_course_task_log_entry(entry)
# add task_id to xmodule_instance_args, so that it can be output with tracking info:
xmodule_instance_args['task_id'] = task_id
entry.task_id = task_id
_save_course_task_log_entry(entry)
# now that we have an entry we can try to catch failures:
task_progress = None
try:
task_progress = _update_problem_module_state_internal(course_id, module_state_key, student, update_fcn,
task_progress = _update_problem_module_state_internal(course_id, module_state_key, student_ident, update_fcn,
action_name, filter_fcn, xmodule_instance_args)
except Exception:
# try to write out the failure to the entry before failing
......@@ -166,13 +172,13 @@ def _update_problem_module_state(entry_id, course_id, module_state_key, student,
task_log.warning("background task (%s) failed: %s %s", task_id, exception, traceback_string)
if traceback is not None:
task_progress['traceback'] = traceback_string
entry.task_progress = json.dumps(task_progress)
entry.task_output = json.dumps(task_progress)
entry.task_state = 'FAILURE'
_save_course_task_log_entry(entry)
raise
# if we get here, we assume we've succeeded, so update the CourseTaskLog entry in anticipation:
entry.task_progress = json.dumps(task_progress)
entry.task_output = json.dumps(task_progress)
entry.task_state = 'SUCCESS'
_save_course_task_log_entry(entry)
......@@ -203,13 +209,6 @@ def _update_problem_module_state_for_student(entry_id, course_id, problem_url, s
return (success, msg)
def _update_problem_module_state_for_all_students(entry_id, course_id, problem_url, update_fcn, action_name, filter_fcn=None, xmodule_instance_args=None):
    """
    Update the StudentModule for all students. See _update_problem_module_state().

    Passes None as the student identifier, which _update_problem_module_state
    treats as a request to update modules for all students on the problem.
    """
    return _update_problem_module_state(entry_id, course_id, problem_url, None, update_fcn, action_name, filter_fcn, xmodule_instance_args)
def _get_module_instance_for_task(course_id, student, module_descriptor, module_state_key, xmodule_instance_args=None,
grade_bucket_type=None):
"""
......@@ -245,19 +244,19 @@ def _get_module_instance_for_task(course_id, student, module_descriptor, module_
@transaction.autocommit
def _regrade_problem_module_state(module_descriptor, student_module, xmodule_instance_args=None):
def _rescore_problem_module_state(module_descriptor, student_module, xmodule_instance_args=None):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs regrading on the student's problem submission.
performs rescoring on the student's problem submission.
Throws exceptions if the regrading is fatal and should be aborted if in a loop.
Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
'''
# unpack the StudentModule:
course_id = student_module.course_id
student = student_module.student
module_state_key = student_module.module_state_key
instance = _get_module_instance_for_task(course_id, student, module_descriptor, module_state_key, xmodule_instance_args, grade_bucket_type='regrade')
instance = _get_module_instance_for_task(course_id, student, module_descriptor, module_state_key, xmodule_instance_args, grade_bucket_type='rescore')
if instance is None:
# Either permissions just changed, or someone is trying to be clever
......@@ -267,51 +266,46 @@ def _regrade_problem_module_state(module_descriptor, student_module, xmodule_ins
task_log.debug(msg)
raise UpdateProblemModuleStateError(msg)
if not hasattr(instance, 'regrade_problem'):
# if the first instance doesn't have a regrade method, we should
if not hasattr(instance, 'rescore_problem'):
# if the first instance doesn't have a rescore method, we should
# probably assume that no other instances will either.
msg = "Specified problem does not support regrading."
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
result = instance.regrade_problem()
result = instance.rescore_problem()
if 'success' not in result:
# don't consider these fatal, but false means that the individual call didn't complete:
task_log.warning("error processing regrade call for problem {loc} and student {student}: "
task_log.warning("error processing rescore call for problem {loc} and student {student}: "
"unexpected response {msg}".format(msg=result, loc=module_state_key, student=student))
return False
elif result['success'] != 'correct' and result['success'] != 'incorrect':
task_log.warning("error processing regrade call for problem {loc} and student {student}: "
task_log.warning("error processing rescore call for problem {loc} and student {student}: "
"{msg}".format(msg=result['success'], loc=module_state_key, student=student))
return False
else:
task_log.debug("successfully processed regrade call for problem {loc} and student {student}: "
task_log.debug("successfully processed rescore call for problem {loc} and student {student}: "
"{msg}".format(msg=result['success'], loc=module_state_key, student=student))
return True
def filter_problem_module_state_for_done(modules_to_update):
    """Filter to apply for rescoring: restrict module instances to those marked as done."""
    # The module's serialized state records completion as '"done": true'.
    done_criteria = {'state__contains': '"done": true'}
    return modules_to_update.filter(**done_criteria)
@task
def regrade_problem_for_student(entry_id, course_id, problem_url, student_identifier, xmodule_instance_args):
    """
    Regrades problem `problem_url` in `course_id` for the specified student.

    `entry_id` is the primary key of the CourseTaskLog row tracking this task.
    Only modules whose state is marked as done are regraded (see
    filter_problem_module_state_for_done).
    """
    action_name = 'regraded'
    update_fcn = _regrade_problem_module_state
    filter_fcn = filter_problem_module_state_for_done
    return _update_problem_module_state_for_student(entry_id, course_id, problem_url, student_identifier,
                                                    update_fcn, action_name, filter_fcn, xmodule_instance_args)
@task
def rescore_problem(entry_id, course_id, task_input, xmodule_instance_args):
    """
    Rescores problem `problem_url` in `course_id`, for all students or one student.

    `task_input` is a dict containing the 'problem_url' of the problem to
    rescore, and optionally a 'student' identifier that restricts the rescore
    to that one student.  `entry_id` is the primary key of the CourseTaskLog
    row tracking this task.  Only modules whose state is marked as done are
    rescored (see filter_problem_module_state_for_done).
    """
    action_name = 'rescored'
    update_fcn = _rescore_problem_module_state
    filter_fcn = filter_problem_module_state_for_done
    problem_url = task_input.get('problem_url')
    # 'student' is optional: when absent, rescore for all students.
    student_ident = task_input.get('student')
    return _update_problem_module_state(entry_id, course_id, problem_url, student_ident,
                                        update_fcn, action_name, filter_fcn=filter_fcn,
                                        xmodule_instance_args=xmodule_instance_args)
@transaction.autocommit
......@@ -342,22 +336,16 @@ def _reset_problem_attempts_module_state(module_descriptor, student_module, xmod
@task
def reset_problem_attempts_for_student(entry_id, course_id, problem_url, student_identifier, xmodule_instance_args):
    """
    Resets problem attempts to zero for `problem_url` in `course_id` for the specified student.

    `entry_id` is the primary key of the CourseTaskLog row tracking this task.
    """
    action_name = 'reset'
    update_fcn = _reset_problem_attempts_module_state
    return _update_problem_module_state_for_student(entry_id, course_id, problem_url, student_identifier,
                                                    update_fcn, action_name,
                                                    xmodule_instance_args=xmodule_instance_args)
@task
def reset_problem_attempts(entry_id, course_id, task_input, xmodule_instance_args):
    """
    Resets problem attempts to zero for a problem, for all students or one student.

    `task_input` is a dict containing the 'problem_url' of the problem whose
    attempts are reset, and optionally a 'student' identifier that restricts
    the reset to that one student.  `entry_id` is the primary key of the
    CourseTaskLog row tracking this task.
    """
    action_name = 'reset'
    update_fcn = _reset_problem_attempts_module_state
    problem_url = task_input.get('problem_url')
    # 'student' is optional: when absent, reset attempts for all students.
    student_ident = task_input.get('student')
    return _update_problem_module_state(entry_id, course_id, problem_url, student_ident,
                                        update_fcn, action_name, filter_fcn=None,
                                        xmodule_instance_args=xmodule_instance_args)
......@@ -375,37 +363,14 @@ def _delete_problem_module_state(module_descriptor, student_module, xmodule_inst
@task
def delete_problem_state_for_student(entry_id, course_id, problem_url, student_ident, xmodule_instance_args):
    """
    Deletes problem state entirely for `problem_url` in `course_id` for the specified student.

    `entry_id` is the primary key of the CourseTaskLog row tracking this task.
    """
    action_name = 'deleted'
    update_fcn = _delete_problem_module_state
    return _update_problem_module_state_for_student(entry_id, course_id, problem_url, student_ident,
                                                    update_fcn, action_name,
                                                    xmodule_instance_args=xmodule_instance_args)
@task
def delete_problem_state(entry_id, course_id, task_input, xmodule_instance_args):
    """
    Deletes problem state entirely for a problem, for all students or one student.

    `task_input` is a dict containing the 'problem_url' of the problem whose
    state is deleted, and optionally a 'student' identifier that restricts
    the deletion to that one student.  `entry_id` is the primary key of the
    CourseTaskLog row tracking this task.
    """
    action_name = 'deleted'
    update_fcn = _delete_problem_module_state
    problem_url = task_input.get('problem_url')
    # 'student' is optional: when absent, delete state for all students.
    student_ident = task_input.get('student')
    return _update_problem_module_state(entry_id, course_id, problem_url, student_ident,
                                        update_fcn, action_name, filter_fcn=None,
                                        xmodule_instance_args=xmodule_instance_args)
# Using @worker_ready.connect was an effort to call middleware initialization
# only once, when the worker was coming up. However, the actual worker task
# was not getting initialized, so it was likely running in a separate process
# from the worker server.
#@worker_ready.connect
#def initialize_middleware(**kwargs):
# # Initialize Django middleware - some middleware components
# # are initialized lazily when the first request is served. Since
# # the celery workers do not serve requests, the components never
# # get initialized, causing errors in some dependencies.
# # In particular, the Mako template middleware is used by some xmodules
# task_log.info("Initializing all middleware from worker_ready.connect hook")
#
# from django.core.handlers.base import BaseHandler
# BaseHandler().load_middleware()
......@@ -91,11 +91,11 @@ class StudentInfoFactory(DjangoModelFactory):
class CourseTaskLogFactory(DjangoModelFactory):
    """Factory for CourseTaskLog rows, using the renamed task_* field names."""
    FACTORY_FOR = CourseTaskLog

    # Renamed fields: task_name->task_type, task_args->task_input (JSON),
    # task_progress->task_output; task_key added for indexed lookup.
    task_type = 'rescore_problem'
    course_id = "MITx/999/Robot_Super_Course"
    task_input = json.dumps({})
    task_key = None
    task_id = None
    task_state = "QUEUED"
    task_output = None
    requester = SubFactory(UserFactory)
......@@ -14,9 +14,10 @@ from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware.tests.factories import UserFactory, CourseTaskLogFactory
from courseware.task_queue import (get_running_course_tasks,
course_task_log_status,
_encode_problem_and_student_input,
AlreadyRunningError,
submit_regrade_problem_for_all_students,
submit_regrade_problem_for_student,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students)
......@@ -52,15 +53,17 @@ class TaskQueueTestCase(TestCase):
number='1.23x',
problem_url_name=problem_url_name)
def _create_entry(self, task_state="QUEUED", task_progress=None, student=None):
def _create_entry(self, task_state="QUEUED", task_output=None, student=None):
task_id = str(uuid4())
progress_json = json.dumps(task_progress)
course_task_log = CourseTaskLogFactory.create(student=student,
requester=self.instructor,
task_args=self.problem_url,
progress_json = json.dumps(task_output)
task_input, task_key = _encode_problem_and_student_input(self.problem_url, student)
course_task_log = CourseTaskLogFactory.create(requester=self.instructor,
task_input=json.dumps(task_input),
task_key=task_key,
task_id=task_id,
task_state=task_state,
task_progress=progress_json)
task_output=progress_json)
return course_task_log
def _create_failure_entry(self):
......@@ -68,7 +71,7 @@ class TaskQueueTestCase(TestCase):
progress = {'message': TEST_FAILURE_MESSAGE,
'exception': 'RandomCauseError',
}
return self._create_entry(task_state="FAILURE", task_progress=progress)
return self._create_entry(task_state="FAILURE", task_output=progress)
def _create_success_entry(self, student=None):
return self._create_progress_entry(student=None, task_state="SUCCESS")
......@@ -78,10 +81,10 @@ class TaskQueueTestCase(TestCase):
progress = {'attempted': 3,
'updated': 2,
'total': 10,
'action_name': 'regraded',
'action_name': 'rescored',
'message': 'some random string that should summarize the other info',
}
return self._create_entry(task_state=task_state, task_progress=progress, student=student)
return self._create_entry(task_state=task_state, task_output=progress, student=student)
def test_fetch_running_tasks(self):
# when fetching running tasks, we get all running tasks, and only running tasks
......@@ -152,7 +155,7 @@ class TaskQueueTestCase(TestCase):
mock_result.result = {'attempted': 5,
'updated': 4,
'total': 10,
'action_name': 'regraded'}
'action_name': 'rescored'}
with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
mock_result_ctor.return_value = mock_result
response = course_task_log_status(Mock(), task_id=task_id)
......@@ -206,7 +209,7 @@ class TaskQueueTestCase(TestCase):
mock_result.result = {'attempted': attempted,
'updated': updated,
'total': total,
'action_name': 'regraded'}
'action_name': 'rescored'}
with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
mock_result_ctor.return_value = mock_result
response = course_task_log_status(Mock(), task_id=task_id)
......@@ -221,44 +224,44 @@ class TaskQueueTestCase(TestCase):
def test_success_messages(self):
_, output = self._get_output_for_task_success(0, 0, 10)
self.assertTrue("Unable to find any students with submissions to be regraded" in output['message'])
self.assertTrue("Unable to find any students with submissions to be rescored" in output['message'])
self.assertFalse(output['succeeded'])
_, output = self._get_output_for_task_success(10, 0, 10)
self.assertTrue("Problem failed to be regraded for any of 10 students" in output['message'])
self.assertTrue("Problem failed to be rescored for any of 10 students" in output['message'])
self.assertFalse(output['succeeded'])
_, output = self._get_output_for_task_success(10, 8, 10)
self.assertTrue("Problem regraded for 8 of 10 students" in output['message'])
self.assertTrue("Problem rescored for 8 of 10 students" in output['message'])
self.assertFalse(output['succeeded'])
_, output = self._get_output_for_task_success(10, 10, 10)
self.assertTrue("Problem successfully regraded for 10 students" in output['message'])
self.assertTrue("Problem successfully rescored for 10 students" in output['message'])
self.assertTrue(output['succeeded'])
_, output = self._get_output_for_task_success(0, 0, 1, student=self.student)
self.assertTrue("Unable to find submission to be regraded for student" in output['message'])
self.assertTrue("Unable to find submission to be rescored for student" in output['message'])
self.assertFalse(output['succeeded'])
_, output = self._get_output_for_task_success(1, 0, 1, student=self.student)
self.assertTrue("Problem failed to be regraded for student" in output['message'])
self.assertTrue("Problem failed to be rescored for student" in output['message'])
self.assertFalse(output['succeeded'])
_, output = self._get_output_for_task_success(1, 1, 1, student=self.student)
self.assertTrue("Problem successfully regraded for student" in output['message'])
self.assertTrue("Problem successfully rescored for student" in output['message'])
self.assertTrue(output['succeeded'])
def test_submit_nonexistent_modules(self):
# confirm that a regrade of a non-existent module returns an exception
# (Note that it is easier to test a non-regradable module in test_tasks,
# confirm that a rescore of a non-existent module returns an exception
# (Note that it is easier to test a non-rescorable module in test_tasks,
# where we are creating real modules.
problem_url = self.problem_url
course_id = "something else"
request = None
with self.assertRaises(ItemNotFoundError):
submit_regrade_problem_for_student(request, course_id, problem_url, self.student)
submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
with self.assertRaises(ItemNotFoundError):
submit_regrade_problem_for_all_students(request, course_id, problem_url)
submit_rescore_problem_for_all_students(request, course_id, problem_url)
with self.assertRaises(ItemNotFoundError):
submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
with self.assertRaises(ItemNotFoundError):
......@@ -267,12 +270,12 @@ class TaskQueueTestCase(TestCase):
def test_submit_when_running(self):
# get exception when trying to submit a task that is already running
course_task_log = self._create_progress_entry()
problem_url = course_task_log.task_args
problem_url = json.loads(course_task_log.task_input).get('problem_url')
course_id = course_task_log.course_id
# requester doesn't have to be the same when determining if a task is already running
request = Mock()
request.user = self.student
with self.assertRaises(AlreadyRunningError):
# just skip making the argument check, so we don't have to fake it deeper down
with patch('courseware.task_queue._check_arguments_for_regrading'):
submit_regrade_problem_for_all_students(request, course_id, problem_url)
with patch('courseware.task_queue._check_arguments_for_rescoring'):
submit_rescore_problem_for_all_students(request, course_id, problem_url)
......@@ -20,8 +20,8 @@ from xmodule.modulestore.exceptions import ItemNotFoundError
from student.tests.factories import CourseEnrollmentFactory, UserFactory, AdminFactory
from courseware.model_data import StudentModule
from courseware.task_queue import (submit_regrade_problem_for_all_students,
submit_regrade_problem_for_student,
from courseware.task_queue import (submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
course_task_log_status,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students)
......@@ -38,9 +38,9 @@ TEST_SECTION_NAME = "Problem"
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class TestRegradingBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Test that all students' answers to a problem can be regraded after the
Test that all students' answers to a problem can be rescored after the
definition of the problem has been redefined.
"""
course = None
......@@ -69,11 +69,11 @@ class TestRegradingBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
return '{0}@test.com'.format(username)
def login_username(self, username):
self.login(TestRegradingBase.get_user_email(username), "test")
self.login(TestRescoringBase.get_user_email(username), "test")
self.current_user = username
def _create_user(self, username, is_staff=False):
email = TestRegradingBase.get_user_email(username)
email = TestRescoringBase.get_user_email(username)
if (is_staff):
AdminFactory.create(username=username, email=email)
else:
......@@ -121,7 +121,7 @@ class TestRegradingBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
'correct_option': 'Option 2',
'num_responses': 2}
problem_xml = factory.build_xml(**factory_args)
location = TestRegrading.problem_location(problem_url_name)
location = TestRescoring.problem_location(problem_url_name)
self.module_store.update_item(location, problem_xml)
def render_problem(self, username, problem_url_name):
......@@ -135,7 +135,7 @@ class TestRegradingBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
# make ajax call:
modx_url = reverse('modx_dispatch',
kwargs={'course_id': self.course.id,
'location': TestRegrading.problem_location(problem_url_name),
'location': TestRescoring.problem_location(problem_url_name),
'dispatch': 'problem_get', })
resp = self.client.post(modx_url, {})
return resp
......@@ -158,7 +158,7 @@ class TestRegradingBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
# make ajax call:
modx_url = reverse('modx_dispatch',
kwargs={'course_id': self.course.id,
'location': TestRegrading.problem_location(problem_url_name),
'location': TestRescoring.problem_location(problem_url_name),
'dispatch': 'problem_check', })
resp = self.client.post(modx_url, {
......@@ -176,21 +176,21 @@ class TestRegradingBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
request.is_secure = Mock(return_value=False)
return request
def regrade_all_student_answers(self, instructor, problem_url_name):
"""Submits the current problem for regrading"""
return submit_regrade_problem_for_all_students(self.create_task_request(instructor), self.course.id,
TestRegradingBase.problem_location(problem_url_name))
def rescore_all_student_answers(self, instructor, problem_url_name):
"""Submits the current problem for rescoring"""
return submit_rescore_problem_for_all_students(self.create_task_request(instructor), self.course.id,
TestRescoringBase.problem_location(problem_url_name))
def regrade_one_student_answer(self, instructor, problem_url_name, student):
"""Submits the current problem for regrading for a particular student"""
return submit_regrade_problem_for_student(self.create_task_request(instructor), self.course.id,
TestRegradingBase.problem_location(problem_url_name),
def rescore_one_student_answer(self, instructor, problem_url_name, student):
"""Submits the current problem for rescoring for a particular student"""
return submit_rescore_problem_for_student(self.create_task_request(instructor), self.course.id,
TestRescoringBase.problem_location(problem_url_name),
student)
def show_correct_answer(self, problem_url_name):
modx_url = reverse('modx_dispatch',
kwargs={'course_id': self.course.id,
'location': TestRegradingBase.problem_location(problem_url_name),
'location': TestRescoringBase.problem_location(problem_url_name),
'dispatch': 'problem_show', })
return self.client.post(modx_url, {})
......@@ -215,8 +215,8 @@ class TestRegradingBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
self.assertGreater(len(state['student_answers']), 0)
class TestRegrading(TestRegradingBase):
"""Test regrading problems in a background task."""
class TestRescoring(TestRescoringBase):
"""Test rescoring problems in a background task."""
def setUp(self):
self.initialize_course()
......@@ -227,12 +227,12 @@ class TestRegrading(TestRegradingBase):
self.create_student('u4')
self.logout()
def test_regrading_option_problem(self):
'''Run regrade scenario on option problem'''
def test_rescoring_option_problem(self):
'''Run rescore scenario on option problem'''
# get descriptor:
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = TestRegrading.problem_location(problem_url_name)
location = TestRescoring.problem_location(problem_url_name)
descriptor = self.module_store.get_instance(self.course.id, location)
# first store answers for each of the separate users:
......@@ -253,38 +253,39 @@ class TestRegrading(TestRegradingBase):
self.render_problem('u1', problem_url_name)
self.check_state('u1', descriptor, 2, 2, 1)
# regrade the problem for only one student -- only that student's grade should change:
self.regrade_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
# rescore the problem for only one student -- only that student's grade should change:
self.rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
self.check_state('u1', descriptor, 0, 2, 1)
self.check_state('u2', descriptor, 1, 2, 1)
self.check_state('u3', descriptor, 1, 2, 1)
self.check_state('u4', descriptor, 0, 2, 1)
# regrade the problem for all students
self.regrade_all_student_answers('instructor', problem_url_name)
# rescore the problem for all students
self.rescore_all_student_answers('instructor', problem_url_name)
self.check_state('u1', descriptor, 0, 2, 1)
self.check_state('u2', descriptor, 1, 2, 1)
self.check_state('u3', descriptor, 1, 2, 1)
self.check_state('u4', descriptor, 2, 2, 1)
def test_regrading_failure(self):
"""Simulate a failure in regrading a problem"""
def test_rescoring_failure(self):
"""Simulate a failure in rescoring a problem"""
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
self.submit_student_answer('u1', problem_url_name, ['Option 1', 'Option 1'])
expected_message = "bad things happened"
with patch('capa.capa_problem.LoncapaProblem.regrade_existing_answers') as mock_regrade:
mock_regrade.side_effect = ZeroDivisionError(expected_message)
course_task_log = self.regrade_all_student_answers('instructor', problem_url_name)
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = ZeroDivisionError(expected_message)
course_task_log = self.rescore_all_student_answers('instructor', problem_url_name)
# check task_log returned
self.assertEqual(course_task_log.task_state, 'FAILURE')
self.assertEqual(course_task_log.student, None)
self.assertEqual(course_task_log.requester.username, 'instructor')
self.assertEqual(course_task_log.task_name, 'regrade_problem')
self.assertEqual(course_task_log.task_args, TestRegrading.problem_location(problem_url_name))
status = json.loads(course_task_log.task_progress)
self.assertEqual(course_task_log.task_type, 'rescore_problem')
task_input = json.loads(course_task_log.task_input)
self.assertFalse('student' in task_input)
self.assertEqual(task_input['problem_url'], TestRescoring.problem_location(problem_url_name))
status = json.loads(course_task_log.task_output)
self.assertEqual(status['exception'], 'ZeroDivisionError')
self.assertEqual(status['message'], expected_message)
......@@ -294,17 +295,17 @@ class TestRegrading(TestRegradingBase):
status = json.loads(response.content)
self.assertEqual(status['message'], expected_message)
def test_regrading_non_problem(self):
def test_rescoring_non_problem(self):
"""confirm that a non-problem will not submit"""
problem_url_name = self.problem_section.location.url()
with self.assertRaises(NotImplementedError):
self.regrade_all_student_answers('instructor', problem_url_name)
self.rescore_all_student_answers('instructor', problem_url_name)
def test_regrading_nonexistent_problem(self):
def test_rescoring_nonexistent_problem(self):
"""confirm that a non-existent problem will not submit"""
problem_url_name = 'NonexistentProblem'
with self.assertRaises(ItemNotFoundError):
self.regrade_all_student_answers('instructor', problem_url_name)
self.rescore_all_student_answers('instructor', problem_url_name)
def define_code_response_problem(self, problem_url_name):
"""Define an arbitrary code-response problem.
......@@ -322,8 +323,8 @@ class TestRegrading(TestRegradingBase):
display_name=str(problem_url_name),
data=problem_xml)
def test_regrading_code_problem(self):
"""Run regrade scenario on problem with code submission"""
def test_rescoring_code_problem(self):
"""Run rescore scenario on problem with code submission"""
problem_url_name = 'H1P2'
self.define_code_response_problem(problem_url_name)
# we fully create the CodeResponse problem, but just pretend that we're queuing it:
......@@ -331,16 +332,16 @@ class TestRegrading(TestRegradingBase):
mock_send_to_queue.return_value = (0, "Successfully queued")
self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])
course_task_log = self.regrade_all_student_answers('instructor', problem_url_name)
course_task_log = self.rescore_all_student_answers('instructor', problem_url_name)
self.assertEqual(course_task_log.task_state, 'FAILURE')
status = json.loads(course_task_log.task_progress)
status = json.loads(course_task_log.task_output)
self.assertEqual(status['exception'], 'NotImplementedError')
self.assertEqual(status['message'], "Problem's definition does not support regrading")
self.assertEqual(status['message'], "Problem's definition does not support rescoring")
mock_request = Mock()
response = course_task_log_status(mock_request, task_id=course_task_log.task_id)
status = json.loads(response.content)
self.assertEqual(status['message'], "Problem's definition does not support regrading")
self.assertEqual(status['message'], "Problem's definition does not support rescoring")
def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
"""
......@@ -367,7 +368,7 @@ class TestRegrading(TestRegradingBase):
""")
problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
if redefine:
self.module_store.update_item(TestRegradingBase.problem_location(problem_url_name), problem_xml)
self.module_store.update_item(TestRescoringBase.problem_location(problem_url_name), problem_xml)
else:
# Use "per-student" rerandomization so that check-problem can be called more than once.
# Using "always" means we cannot check a problem twice, but we want to call once to get the
......@@ -380,12 +381,12 @@ class TestRegrading(TestRegradingBase):
data=problem_xml,
metadata={"rerandomize": "per_student"})
def test_regrading_randomized_problem(self):
"""Run regrade scenario on custom problem that uses randomize"""
def test_rescoring_randomized_problem(self):
"""Run rescore scenario on custom problem that uses randomize"""
# First define the custom response problem:
problem_url_name = 'H1P1'
self.define_randomized_custom_response_problem(problem_url_name)
location = TestRegrading.problem_location(problem_url_name)
location = TestRescoring.problem_location(problem_url_name)
descriptor = self.module_store.get_instance(self.course.id, location)
# run with more than one user
userlist = ['u1', 'u2', 'u3', 'u4']
......@@ -415,23 +416,23 @@ class TestRegrading(TestRegradingBase):
self.render_problem('u1', problem_url_name)
self.check_state('u1', descriptor, 1, 1, 2)
# regrade the problem for only one student -- only that student's grade should change
# rescore the problem for only one student -- only that student's grade should change
# (and none of the attempts):
self.regrade_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
self.rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
self.check_state('u1', descriptor, 0, 1, 2)
self.check_state('u2', descriptor, 1, 1, 2)
self.check_state('u3', descriptor, 1, 1, 2)
self.check_state('u4', descriptor, 1, 1, 2)
# regrade the problem for all students
self.regrade_all_student_answers('instructor', problem_url_name)
# rescore the problem for all students
self.rescore_all_student_answers('instructor', problem_url_name)
# all grades should change to being wrong (with no change in attempts)
for username in userlist:
self.check_state(username, descriptor, 0, 1, 2)
class TestResetAttempts(TestRegradingBase):
class TestResetAttempts(TestRescoringBase):
"""Test resetting problem attempts in a background task."""
userlist = ['u1', 'u2', 'u3', 'u4']
......@@ -450,14 +451,14 @@ class TestResetAttempts(TestRegradingBase):
def reset_problem_attempts(self, instructor, problem_url_name):
    """Submits the current problem for resetting attempts for all students."""
    # Create a request on behalf of `instructor` and submit the reset as a
    # background task against this course and problem location.
    return submit_reset_problem_attempts_for_all_students(
        self.create_task_request(instructor),
        self.course.id,
        TestRescoringBase.problem_location(problem_url_name),
    )
def test_reset_attempts_on_problem(self):
'''Run reset-attempts scenario on option problem'''
# get descriptor:
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = TestRegradingBase.problem_location(problem_url_name)
location = TestRescoringBase.problem_location(problem_url_name)
descriptor = self.module_store.get_instance(self.course.id, location)
num_attempts = 3
# first store answers for each of the separate users:
......@@ -486,11 +487,12 @@ class TestResetAttempts(TestRegradingBase):
# check task_log returned
self.assertEqual(course_task_log.task_state, 'FAILURE')
self.assertEqual(course_task_log.student, None)
self.assertEqual(course_task_log.requester.username, 'instructor')
self.assertEqual(course_task_log.task_name, 'reset_problem_attempts')
self.assertEqual(course_task_log.task_args, TestRegrading.problem_location(problem_url_name))
status = json.loads(course_task_log.task_progress)
self.assertEqual(course_task_log.task_type, 'reset_problem_attempts')
task_input = json.loads(course_task_log.task_input)
self.assertFalse('student' in task_input)
self.assertEqual(task_input['problem_url'], TestRescoring.problem_location(problem_url_name))
status = json.loads(course_task_log.task_output)
self.assertEqual(status['exception'], 'ZeroDivisionError')
self.assertEqual(status['message'], expected_message)
......@@ -513,7 +515,7 @@ class TestResetAttempts(TestRegradingBase):
self.reset_problem_attempts('instructor', problem_url_name)
class TestDeleteProblem(TestRegradingBase):
class TestDeleteProblem(TestRescoringBase):
"""Test deleting problem state in a background task."""
userlist = ['u1', 'u2', 'u3', 'u4']
......@@ -527,14 +529,14 @@ class TestDeleteProblem(TestRegradingBase):
def delete_problem_state(self, instructor, problem_url_name):
    """Submits the current problem's state for deletion for all students."""
    # Create a request on behalf of `instructor` and submit the deletion as a
    # background task against this course and problem location.
    return submit_delete_problem_state_for_all_students(
        self.create_task_request(instructor),
        self.course.id,
        TestRescoringBase.problem_location(problem_url_name),
    )
def test_delete_problem_state(self):
'''Run delete-state scenario on option problem'''
# get descriptor:
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = TestRegradingBase.problem_location(problem_url_name)
location = TestRescoringBase.problem_location(problem_url_name)
descriptor = self.module_store.get_instance(self.course.id, location)
# first store answers for each of the separate users:
for username in self.userlist:
......@@ -562,11 +564,12 @@ class TestDeleteProblem(TestRegradingBase):
# check task_log returned
self.assertEqual(course_task_log.task_state, 'FAILURE')
self.assertEqual(course_task_log.student, None)
self.assertEqual(course_task_log.requester.username, 'instructor')
self.assertEqual(course_task_log.task_name, 'delete_problem_state')
self.assertEqual(course_task_log.task_args, TestRegrading.problem_location(problem_url_name))
status = json.loads(course_task_log.task_progress)
self.assertEqual(course_task_log.task_type, 'delete_problem_state')
task_input = json.loads(course_task_log.task_input)
self.assertFalse('student' in task_input)
self.assertEqual(task_input['problem_url'], TestRescoring.problem_location(problem_url_name))
status = json.loads(course_task_log.task_output)
self.assertEqual(status['exception'], 'ZeroDivisionError')
self.assertEqual(status['message'], expected_message)
......
......@@ -239,22 +239,22 @@ def instructor_dashboard(request, course_id):
track.views.server_track(request, action, {}, page='idashboard')
msg += dump_grading_context(course)
elif "Regrade ALL students' problem submissions" in action:
elif "Rescore ALL students' problem submissions" in action:
problem_urlname = request.POST.get('problem_for_all_students', '')
problem_url = get_module_url(problem_urlname)
try:
course_task_log_entry = task_queue.submit_regrade_problem_for_all_students(request, course_id, problem_url)
course_task_log_entry = task_queue.submit_rescore_problem_for_all_students(request, course_id, problem_url)
if course_task_log_entry is None:
msg += '<font color="red">Failed to create a background task for regrading "{0}".</font>'.format(problem_url)
msg += '<font color="red">Failed to create a background task for rescoring "{0}".</font>'.format(problem_url)
else:
track_msg = 'regrade problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
track_msg = 'rescore problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except ItemNotFoundError as e:
log.error('Failure to regrade: unknown problem "{0}"'.format(e))
msg += '<font color="red">Failed to create a background task for regrading "{0}": problem not found.</font>'.format(problem_url)
log.error('Failure to rescore: unknown problem "{0}"'.format(e))
msg += '<font color="red">Failed to create a background task for rescoring "{0}": problem not found.</font>'.format(problem_url)
except Exception as e:
log.error("Encountered exception from regrade: {0}".format(e))
msg += '<font color="red">Failed to create a background task for regrading "{0}": {1}.</font>'.format(problem_url, e.message)
log.error("Encountered exception from rescore: {0}".format(e))
msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(problem_url, e.message)
elif "Reset ALL students' attempts" in action:
problem_urlname = request.POST.get('problem_for_all_students', '')
......@@ -301,7 +301,7 @@ def instructor_dashboard(request, course_id):
elif "Reset student's attempts" in action \
or "Delete student state for module" in action \
or "Regrade student's problem submission" in action:
or "Rescore student's problem submission" in action:
# get the form data
unique_student_identifier = request.POST.get('unique_student_identifier', '')
problem_urlname = request.POST.get('problem_for_student', '')
......@@ -356,15 +356,15 @@ def instructor_dashboard(request, course_id):
msg += "<font color='red'>Couldn't reset module state. </font>"
else:
try:
course_task_log_entry = task_queue.submit_regrade_problem_for_student(request, course_id, module_state_key, student)
course_task_log_entry = task_queue.submit_rescore_problem_for_student(request, course_id, module_state_key, student)
if course_task_log_entry is None:
msg += '<font color="red">Failed to create a background task for regrading "{0}" for student {1}.</font>'.format(module_state_key, unique_student_identifier)
msg += '<font color="red">Failed to create a background task for rescoring "{0}" for student {1}.</font>'.format(module_state_key, unique_student_identifier)
else:
track_msg = 'regrade problem {problem} for student {student} in {course}'.format(problem=module_state_key, student=unique_student_identifier, course=course_id)
track_msg = 'rescore problem {problem} for student {student} in {course}'.format(problem=module_state_key, student=unique_student_identifier, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except Exception as e:
log.error("Encountered exception from regrade: {0}".format(e))
msg += '<font color="red">Failed to create a background task for regrading "{0}": {1}.</font>'.format(module_state_key, e.message)
log.error("Encountered exception from rescore: {0}".format(e))
msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(module_state_key, e.message)
elif "Get link to student's progress page" in action:
unique_student_identifier = request.POST.get('unique_student_identifier', '')
......@@ -1294,11 +1294,7 @@ def get_background_task_table(course_id, problem_url, student=None):
Returns a tuple of (msg, datatable), where the msg is a possible error message,
and the datatable is the datatable to be used for display.
"""
course_tasks = CourseTaskLog.objects.filter(course_id=course_id, task_args=problem_url)
if student is not None:
course_tasks = course_tasks.filter(student=student)
history_entries = course_tasks.order_by('-id')
history_entries = task_queue.get_course_task_history(course_id, problem_url, student)
datatable = None
msg = ""
# first check to see if there is any history at all
......@@ -1315,24 +1311,23 @@ def get_background_task_table(course_id, problem_url, student=None):
else:
datatable = {}
datatable['header'] = ["Order",
"Task Name",
"Student",
"Task Type",
"Task Id",
"Requester",
"Submitted",
"Duration",
"Duration (ms)",
"Task State",
"Task Status",
"Message"]
"Task Output"]
datatable['data'] = []
for i, course_task in enumerate(history_entries):
    # Extract the task duration from the recorded task output, when present.
    duration_ms = 'unknown'
    # Guard on 'task_output' (singular) -- the attribute read below.  The
    # previous guard checked 'task_outputs' (plural), which never matched
    # and so always left duration_ms as 'unknown'.
    if hasattr(course_task, 'task_output'):
        task_output = json.loads(course_task.task_output)
        if 'duration_ms' in task_output:
            duration_ms = task_output['duration_ms']
    # get progress status message:
    success, message = task_queue.get_task_completion_message(course_task)
if success:
......@@ -1341,17 +1336,14 @@ def get_background_task_table(course_id, problem_url, student=None):
status = "Incomplete"
# generate row for this task:
row = ["#{0}".format(len(history_entries) - i),
str(course_task.task_name),
str(course_task.student),
str(course_task.task_type),
str(course_task.task_id),
str(course_task.requester),
course_task.created.strftime("%Y/%m/%d %H:%M:%S"),
duration_ms,
#course_task.updated.strftime("%Y/%m/%d %H:%M:%S"),
str(course_task.task_state),
status,
message]
datatable['data'].append(row)
return msg, datatable
......@@ -312,7 +312,7 @@ function goto( mode)
<p>
Then select an action:
<input type="submit" name="action" value="Reset ALL students' attempts">
<input type="submit" name="action" value="Regrade ALL students' problem submissions">
<input type="submit" name="action" value="Rescore ALL students' problem submissions">
</p>
<p>
<p>These actions run in the background, and status for active tasks will appear in a table below.
......@@ -349,7 +349,7 @@ function goto( mode)
Then select an action:
<input type="submit" name="action" value="Reset student's attempts">
%if settings.MITX_FEATURES.get('ENABLE_COURSE_BACKGROUND_TASKS'):
<input type="submit" name="action" value="Regrade student's problem submission">
<input type="submit" name="action" value="Rescore student's problem submission">
%endif
</p>
......@@ -360,7 +360,7 @@ function goto( mode)
</p>
%endif
%if settings.MITX_FEATURES.get('ENABLE_COURSE_BACKGROUND_TASKS'):
<p>Regrading runs in the background, and status for active tasks will appear in a table below.
<p>Rescoring runs in the background, and status for active tasks will appear in a table below.
To see status for all tasks submitted for this course and student, click on this button:
</p>
<p>
......@@ -708,9 +708,8 @@ function goto( mode)
<div id="task-progress-wrapper">
<table class="stat_table">
<tr>
<th>Task Name</th>
<th>Task Arg</th>
<th>Student</th>
<th>Task Type</th>
<th>Task Input</th>
<th>Task Id</th>
<th>Requester</th>
<th>Submitted</th>
......@@ -722,9 +721,8 @@ function goto( mode)
<tr id="task-progress-entry-${tasknum}" class="task-progress-entry"
data-task-id="${course_task.task_id}"
data-in-progress="true">
<td>${course_task.task_name}</td>
<td>${course_task.task_args}</td>
<td>${course_task.student}</td>
<td>${course_task.task_type}</td>
<td>${course_task.task_input}</td>
<td><div class="task-id">${course_task.task_id}</div></td>
<td>${course_task.requester}</td>
<td>${course_task.created}</td>
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment