Commit c676cfd6 by Brian Wilson


Rename fields in CourseTaskLog and add an indexed task_key column.  Use 'rescore' instead of 'regrade'.  Clean up task submission.
parent 76773c5b
@@ -269,22 +269,22 @@ class LoncapaProblem(object):
         self.student_answers = convert_files_to_filenames(answers)
         return self._grade_answers(answers)

-    def supports_regrading(self):
+    def supports_rescoring(self):
         """
-        Checks that the current problem definition permits regrading.
+        Checks that the current problem definition permits rescoring.

         More precisely, it checks that there are no response types in
-        the current problem that are not fully supported (yet) for regrading.
+        the current problem that are not fully supported (yet) for rescoring.

         This includes responsetypes for which the student's answer
         is not properly stored in state, i.e. file submissions. At present,
         we have no way to know if an existing response was actually a real
         answer or merely the filename of a file submitted as an answer.

-        It turns out that because regrading is a background task, limiting
+        It turns out that because rescoring is a background task, limiting
         it to responsetypes that don't support file submissions also means
         that the responsetypes are synchronous. This is convenient as it
-        permits regrading to be complete when the regrading call returns.
+        permits rescoring to be complete when the rescoring call returns.
         """
         # We check for synchronous grading and no file submissions by
         # screening out all problems with a CodeResponse type.
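Note: the screening loop itself is collapsed out of this hunk. A minimal sketch of what it might look like, based on the `allowed_inputfields` check that appears in `_grade_answers` below; the iteration over `self.responders` is an assumption, not taken from this diff:

    def supports_rescoring(self):
        # Sketch only: assumes self.responders maps each response xml element
        # to its responder instance, and that file-submission (and therefore
        # asynchronous) response types advertise 'filesubmission' among their
        # allowed_inputfields, as the check in _grade_answers does.
        return all('filesubmission' not in responder.allowed_inputfields
                   for responder in self.responders.values())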
@@ -294,16 +294,16 @@ class LoncapaProblem(object):
         return True

-    def regrade_existing_answers(self):
+    def rescore_existing_answers(self):
         '''
-        Regrade student responses. Called by capa_module.regrade_problem.
+        Rescore student responses. Called by capa_module.rescore_problem.
         '''
         return self._grade_answers(None)

     def _grade_answers(self, answers):
         '''
         Internal grading call used for checking new student answers and also
-        regrading existing student answers.
+        rescoring existing student answers.

         answers is a dict of all the entries from request.POST, but with the first part
         of each key removed (the string before the first "_").
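A small illustration of the key transformation described in that docstring (the input id here is made up):

    # request.POST contains entries keyed like 'input_<id>'; _grade_answers
    # sees them with the prefix before the first '_' stripped off:
    post_entries = {'input_i4x-MITx-6_002x-problem-ex_2_1': '3.14'}
    answers = dict((key.split('_', 1)[1], value)
                   for key, value in post_entries.items())
    # answers == {'i4x-MITx-6_002x-problem-ex_2_1': '3.14'}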
@@ -324,9 +324,9 @@ class LoncapaProblem(object):
             # for file submissions. But we have no way of knowing if
             # student_answers contains a proper answer or the filename of
             # an earlier submission, so for now skip these entirely.
-            # TODO: figure out where to get file submissions when regrading.
+            # TODO: figure out where to get file submissions when rescoring.
             if 'filesubmission' in responder.allowed_inputfields and answers is None:
-                raise Exception("Cannot regrade problems with possible file submissions")
+                raise Exception("Cannot rescore problems with possible file submissions")

             # use 'answers' if it is provided, otherwise use the saved student_answers.
             if answers is not None:
@@ -812,7 +812,7 @@ class CapaModule(CapaFields, XModule):
             'contents': html,
         }

-    def regrade_problem(self):
+    def rescore_problem(self):
         """
         Checks whether the existing answers to a problem are correct.
@@ -823,23 +823,23 @@ class CapaModule(CapaFields, XModule):
         {'success' : 'correct' | 'incorrect' | AJAX alert msg string }

         Raises NotFoundError if called on a problem that has not yet been
-        answered, or NotImplementedError if it's a problem that cannot be regraded.
+        answered, or NotImplementedError if it's a problem that cannot be rescored.

         Returns the error messages for exceptions occurring while performing
-        the regrading, rather than throwing them.
+        the rescoring, rather than throwing them.
         """
         event_info = dict()
         event_info['state'] = self.lcp.get_state()
         event_info['problem_id'] = self.location.url()

-        if not self.lcp.supports_regrading():
+        if not self.lcp.supports_rescoring():
             event_info['failure'] = 'unsupported'
-            self.system.track_function('problem_regrade_fail', event_info)
-            raise NotImplementedError("Problem's definition does not support regrading")
+            self.system.track_function('problem_rescore_fail', event_info)
+            raise NotImplementedError("Problem's definition does not support rescoring")

         if not self.done:
             event_info['failure'] = 'unanswered'
-            self.system.track_function('problem_regrade_fail', event_info)
+            self.system.track_function('problem_rescore_fail', event_info)
             raise NotFoundError('Problem must be answered before it can be graded again')

         # get old score, for comparison:
@@ -848,20 +848,20 @@ class CapaModule(CapaFields, XModule):
         event_info['orig_max_score'] = orig_score['total']

         try:
-            correct_map = self.lcp.regrade_existing_answers()
-            # regrading should have no effect on attempts, so don't
+            correct_map = self.lcp.rescore_existing_answers()
+            # rescoring should have no effect on attempts, so don't
             # need to increment here, or mark done. Just save.
             self.set_state_from_lcp()
         except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
-            log.warning("StudentInputError in capa_module:problem_regrade", exc_info=True)
+            log.warning("StudentInputError in capa_module:problem_rescore", exc_info=True)
             event_info['failure'] = 'student_input_error'
-            self.system.track_function('problem_regrade_fail', event_info)
+            self.system.track_function('problem_rescore_fail', event_info)
             return {'success': "Error: {0}".format(inst.message)}
         except Exception, err:
             event_info['failure'] = 'unexpected'
-            self.system.track_function('problem_regrade_fail', event_info)
+            self.system.track_function('problem_rescore_fail', event_info)
             if self.system.DEBUG:
                 msg = "Error checking problem: " + str(err)
                 msg += '\nTraceback:\n' + traceback.format_exc()
@@ -885,9 +885,9 @@ class CapaModule(CapaFields, XModule):
         event_info['correct_map'] = correct_map.get_dict()
         event_info['success'] = success
         event_info['attempts'] = self.attempts
-        self.system.track_function('problem_regrade', event_info)
+        self.system.track_function('problem_rescore', event_info)

-        # psychometrics should be called on regrading requests in the same way as check-problem
+        # psychometrics should be called on rescoring requests in the same way as check-problem
         if hasattr(self.system, 'psychometrics_handler'):  # update PsychometricsData using callback
             self.system.psychometrics_handler(self.get_state_for_lcp())
@@ -598,7 +598,7 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the problem was NOT reset
         self.assertTrue('success' in result and not result['success'])

-    def test_regrade_problem_correct(self):
+    def test_rescore_problem_correct(self):

         module = CapaFactory.create(attempts=1, done=True)
@@ -606,7 +606,7 @@ class CapaModuleTest(unittest.TestCase):
         # what the input is, by patching LoncapaResponse.evaluate_answers()
         with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
             mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
-            result = module.regrade_problem()
+            result = module.rescore_problem()

         # Expect that the problem is marked correct
         self.assertEqual(result['success'], 'correct')
@@ -617,7 +617,7 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the number of attempts is not incremented
         self.assertEqual(module.attempts, 1)

-    def test_regrade_problem_incorrect(self):
+    def test_rescore_problem_incorrect(self):

         module = CapaFactory.create(attempts=0, done=True)
@@ -625,7 +625,7 @@ class CapaModuleTest(unittest.TestCase):
         # what the input is, by patching LoncapaResponse.evaluate_answers()
         with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
             mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
-            result = module.regrade_problem()
+            result = module.rescore_problem()

         # Expect that the problem is marked incorrect
         self.assertEqual(result['success'], 'incorrect')
@@ -633,24 +633,24 @@ class CapaModuleTest(unittest.TestCase):
         # Expect that the number of attempts is not incremented
         self.assertEqual(module.attempts, 0)

-    def test_regrade_problem_not_done(self):
+    def test_rescore_problem_not_done(self):

         # Simulate that the problem is NOT done
         module = CapaFactory.create(done=False)

-        # Try to regrade the problem, and get exception
+        # Try to rescore the problem, and get exception
         with self.assertRaises(xmodule.exceptions.NotFoundError):
-            module.regrade_problem()
+            module.rescore_problem()

-    def test_regrade_problem_not_supported(self):
+    def test_rescore_problem_not_supported(self):
         module = CapaFactory.create(done=True)

-        # Try to regrade the problem, and get exception
-        with patch('capa.capa_problem.LoncapaProblem.supports_regrading') as mock_supports_regrading:
-            mock_supports_regrading.return_value = False
+        # Try to rescore the problem, and get exception
+        with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
+            mock_supports_rescoring.return_value = False
             with self.assertRaises(NotImplementedError):
-                module.regrade_problem()
+                module.rescore_problem()

-    def test_regrade_problem_error(self):
+    def test_rescore_problem_error(self):

         # Try each exception that capa_module should handle
         for exception_class in [StudentInputError,
@@ -661,9 +661,9 @@ class CapaModuleTest(unittest.TestCase):
             module = CapaFactory.create(attempts=1, done=True)

             # Simulate answering a problem that raises the exception
-            with patch('capa.capa_problem.LoncapaProblem.regrade_existing_answers') as mock_regrade:
-                mock_regrade.side_effect = exception_class('test error')
-                result = module.regrade_problem()
+            with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+                mock_rescore.side_effect = exception_class('test error')
+                result = module.rescore_problem()

             # Expect an AJAX alert message in 'success'
             expected_msg = 'Error: test error'
@@ -11,14 +11,14 @@ class Migration(SchemaMigration):
         # Adding model 'CourseTaskLog'
         db.create_table('courseware_coursetasklog', (
             ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
-            ('task_name', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
+            ('task_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
             ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
-            ('student', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['auth.User'])),
-            ('task_args', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
+            ('task_key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
+            ('task_input', self.gf('django.db.models.fields.CharField')(max_length=255)),
             ('task_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
             ('task_state', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, db_index=True)),
-            ('task_progress', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, db_index=True)),
-            ('requester', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
+            ('task_output', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
+            ('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
             ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)),
             ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
         ))
@@ -72,13 +72,13 @@ class Migration(SchemaMigration):
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
-           'requester': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
-           'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
-           'task_args': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
+           'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
-           'task_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
-           'task_progress': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'db_index': 'True'}),
+           'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
+           'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
+           'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
            'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
+           'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'})
        },
        'courseware.offlinecomputedgrade': {
@@ -4,9 +4,9 @@ WE'RE USING MIGRATIONS!
 If you make changes to this model, be sure to create an appropriate migration
 file and check it in at the same time as your model changes. To do that,

-1. Go to the mitx dir
+1. Go to the edx-platform dir
 2. ./manage.py schemamigration courseware --auto description_of_your_change
-3. Add the migration file created in mitx/courseware/migrations/
+3. Add the migration file created in edx-platform/lms/djangoapps/courseware/migrations/

 ASSUMPTIONS: modules have unique IDs, even across different module_types
@@ -269,28 +269,43 @@ class CourseTaskLog(models.Model):
     """
     Stores information about background tasks that have been submitted to
     perform course-specific work.

-    Examples include grading and regrading.
+    Examples include grading and rescoring.
+
+    `task_type` identifies the kind of task being performed, e.g. rescoring.
+    `course_id` uses the course run's unique id to identify the course.
+    `task_input` stores the input arguments as a JSON-serialized dict, for reporting purposes.
+        Examples include the url of the problem being rescored, or the id of the student
+        if only one student is being rescored.
+    `task_key` stores the relevant input arguments encoded into a key value, used
+        (together with task_type and course_id) to test whether the task is already running.
+    `task_id` stores the id used by celery for the background task.
+    `task_state` stores the last known state of the celery task.
+    `task_output` stores the output of the celery task.
+        Format is a JSON-serialized dict.  Content varies by task_type and task_state.
+    `requester` stores the id of the user who submitted the task.
+    `created` stores the date that the entry was first created.
+    `updated` stores the date that the entry was last modified.
     """
-    task_name = models.CharField(max_length=50, db_index=True)
+    task_type = models.CharField(max_length=50, db_index=True)
     course_id = models.CharField(max_length=255, db_index=True)
-    student = models.ForeignKey(User, null=True, db_index=True, related_name='+')  # optional: None = task applies to all students
-    task_args = models.CharField(max_length=255, db_index=True)
+    task_key = models.CharField(max_length=255, db_index=True)
+    task_input = models.CharField(max_length=255)
     task_id = models.CharField(max_length=255, db_index=True)  # max_length from celery_taskmeta
     task_state = models.CharField(max_length=50, null=True, db_index=True)  # max_length from celery_taskmeta
-    task_progress = models.CharField(max_length=1024, null=True, db_index=True)
-    requester = models.ForeignKey(User, db_index=True, related_name='+')
+    task_output = models.CharField(max_length=1024, null=True)
+    requester = models.ForeignKey(User, db_index=True)
     created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
     updated = models.DateTimeField(auto_now=True, db_index=True)

     def __repr__(self):
         return 'CourseTaskLog<%r>' % ({
-            'task_name': self.task_name,
+            'task_type': self.task_type,
             'course_id': self.course_id,
-            'student': self.student.username,
-            'task_args': self.task_args,
+            'task_input': self.task_input,
             'task_id': self.task_id,
             'task_state': self.task_state,
-            'task_progress': self.task_progress,
+            'task_output': self.task_output,
         },)

     def __unicode__(self):
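The `task_input`/`task_key` pair described above is produced by `_encode_problem_and_student_input` in courseware.task_queue, which the updated tests below import and call, but whose body is not part of this diff. A plausible sketch consistent with that usage; the hashing scheme is an assumption:

    import hashlib

    def _encode_problem_and_student_input(problem_url, student=None):
        # Sketch only.  Returns the dict stored (JSON-serialized) in
        # task_input for reporting, and the string stored in task_key for
        # checking whether an equivalent task is already running.
        task_input = {'problem_url': problem_url}
        if student is not None:
            task_input['student'] = student.username
        task_key = hashlib.md5(repr(task_input)).hexdigest()
        return task_input, task_key

The test helper `_create_entry` below then stores `json.dumps(task_input)` in the model's `task_input` column and the key in `task_key`.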
@@ -91,11 +91,11 @@ class StudentInfoFactory(DjangoModelFactory):

 class CourseTaskLogFactory(DjangoModelFactory):
     FACTORY_FOR = CourseTaskLog

-    task_name = 'regrade_problem'
+    task_type = 'rescore_problem'
     course_id = "MITx/999/Robot_Super_Course"
-    student = SubFactory(UserFactory)
-    task_args = None
+    task_input = json.dumps({})
+    task_key = None
     task_id = None
     task_state = "QUEUED"
-    task_progress = None
+    task_output = None
     requester = SubFactory(UserFactory)
@@ -14,9 +14,10 @@ from xmodule.modulestore.exceptions import ItemNotFoundError
 from courseware.tests.factories import UserFactory, CourseTaskLogFactory
 from courseware.task_queue import (get_running_course_tasks,
                                    course_task_log_status,
+                                   _encode_problem_and_student_input,
                                    AlreadyRunningError,
-                                   submit_regrade_problem_for_all_students,
-                                   submit_regrade_problem_for_student,
+                                   submit_rescore_problem_for_all_students,
+                                   submit_rescore_problem_for_student,
                                    submit_reset_problem_attempts_for_all_students,
                                    submit_delete_problem_state_for_all_students)
@@ -52,15 +53,17 @@ class TaskQueueTestCase(TestCase):
                                                number='1.23x',
                                                problem_url_name=problem_url_name)

-    def _create_entry(self, task_state="QUEUED", task_progress=None, student=None):
+    def _create_entry(self, task_state="QUEUED", task_output=None, student=None):
         task_id = str(uuid4())
-        progress_json = json.dumps(task_progress)
-        course_task_log = CourseTaskLogFactory.create(student=student,
-                                                      requester=self.instructor,
-                                                      task_args=self.problem_url,
+        progress_json = json.dumps(task_output)
+        task_input, task_key = _encode_problem_and_student_input(self.problem_url, student)
+        course_task_log = CourseTaskLogFactory.create(requester=self.instructor,
+                                                      task_input=json.dumps(task_input),
+                                                      task_key=task_key,
                                                       task_id=task_id,
                                                       task_state=task_state,
-                                                      task_progress=progress_json)
+                                                      task_output=progress_json)
         return course_task_log

     def _create_failure_entry(self):
@@ -68,7 +71,7 @@ class TaskQueueTestCase(TestCase):
         progress = {'message': TEST_FAILURE_MESSAGE,
                     'exception': 'RandomCauseError',
                     }
-        return self._create_entry(task_state="FAILURE", task_progress=progress)
+        return self._create_entry(task_state="FAILURE", task_output=progress)

     def _create_success_entry(self, student=None):
         return self._create_progress_entry(student=None, task_state="SUCCESS")
@@ -78,10 +81,10 @@ class TaskQueueTestCase(TestCase):
         progress = {'attempted': 3,
                     'updated': 2,
                     'total': 10,
-                    'action_name': 'regraded',
+                    'action_name': 'rescored',
                     'message': 'some random string that should summarize the other info',
                     }
-        return self._create_entry(task_state=task_state, task_progress=progress, student=student)
+        return self._create_entry(task_state=task_state, task_output=progress, student=student)

     def test_fetch_running_tasks(self):
         # when fetching running tasks, we get all running tasks, and only running tasks
@@ -152,7 +155,7 @@ class TaskQueueTestCase(TestCase):
         mock_result.result = {'attempted': 5,
                               'updated': 4,
                               'total': 10,
-                              'action_name': 'regraded'}
+                              'action_name': 'rescored'}
         with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
             mock_result_ctor.return_value = mock_result
             response = course_task_log_status(Mock(), task_id=task_id)
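These tests drive `course_task_log_status` by mocking celery's AsyncResult; the view itself is not shown in this diff. A rough sketch of what it plausibly does, with everything beyond the mocked interface treated as an assumption:

    import json
    from celery.result import AsyncResult
    from django.http import HttpResponse

    def course_task_log_status_sketch(request, task_id=None):
        # Sketch only: refresh the CourseTaskLog row from celery's view of
        # the task, then report state and progress back to the dashboard.
        entry = CourseTaskLog.objects.get(task_id=task_id)
        async_result = AsyncResult(task_id)
        if async_result.state != entry.task_state:
            entry.task_state = async_result.state
            entry.task_output = json.dumps(async_result.result)
            entry.save()
        status = {'task_id': task_id,
                  'task_state': entry.task_state,
                  'task_progress': async_result.result}
        return HttpResponse(json.dumps(status), mimetype='application/json')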
@@ -206,7 +209,7 @@ class TaskQueueTestCase(TestCase):
         mock_result.result = {'attempted': attempted,
                               'updated': updated,
                               'total': total,
-                              'action_name': 'regraded'}
+                              'action_name': 'rescored'}
         with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
             mock_result_ctor.return_value = mock_result
             response = course_task_log_status(Mock(), task_id=task_id)
@@ -221,44 +224,44 @@ class TaskQueueTestCase(TestCase):

     def test_success_messages(self):
         _, output = self._get_output_for_task_success(0, 0, 10)
-        self.assertTrue("Unable to find any students with submissions to be regraded" in output['message'])
+        self.assertTrue("Unable to find any students with submissions to be rescored" in output['message'])
         self.assertFalse(output['succeeded'])

         _, output = self._get_output_for_task_success(10, 0, 10)
-        self.assertTrue("Problem failed to be regraded for any of 10 students" in output['message'])
+        self.assertTrue("Problem failed to be rescored for any of 10 students" in output['message'])
         self.assertFalse(output['succeeded'])

         _, output = self._get_output_for_task_success(10, 8, 10)
-        self.assertTrue("Problem regraded for 8 of 10 students" in output['message'])
+        self.assertTrue("Problem rescored for 8 of 10 students" in output['message'])
         self.assertFalse(output['succeeded'])

         _, output = self._get_output_for_task_success(10, 10, 10)
-        self.assertTrue("Problem successfully regraded for 10 students" in output['message'])
+        self.assertTrue("Problem successfully rescored for 10 students" in output['message'])
         self.assertTrue(output['succeeded'])

         _, output = self._get_output_for_task_success(0, 0, 1, student=self.student)
-        self.assertTrue("Unable to find submission to be regraded for student" in output['message'])
+        self.assertTrue("Unable to find submission to be rescored for student" in output['message'])
         self.assertFalse(output['succeeded'])

         _, output = self._get_output_for_task_success(1, 0, 1, student=self.student)
-        self.assertTrue("Problem failed to be regraded for student" in output['message'])
+        self.assertTrue("Problem failed to be rescored for student" in output['message'])
         self.assertFalse(output['succeeded'])

         _, output = self._get_output_for_task_success(1, 1, 1, student=self.student)
-        self.assertTrue("Problem successfully regraded for student" in output['message'])
+        self.assertTrue("Problem successfully rescored for student" in output['message'])
         self.assertTrue(output['succeeded'])

     def test_submit_nonexistent_modules(self):
-        # confirm that a regrade of a non-existent module returns an exception
-        # (Note that it is easier to test a non-regradable module in test_tasks,
+        # confirm that a rescore of a non-existent module returns an exception
+        # (Note that it is easier to test a non-rescorable module in test_tasks,
         # where we are creating real modules.
         problem_url = self.problem_url
         course_id = "something else"
         request = None
         with self.assertRaises(ItemNotFoundError):
-            submit_regrade_problem_for_student(request, course_id, problem_url, self.student)
+            submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
         with self.assertRaises(ItemNotFoundError):
-            submit_regrade_problem_for_all_students(request, course_id, problem_url)
+            submit_rescore_problem_for_all_students(request, course_id, problem_url)
         with self.assertRaises(ItemNotFoundError):
             submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
         with self.assertRaises(ItemNotFoundError):
@@ -267,12 +270,12 @@ class TaskQueueTestCase(TestCase):

     def test_submit_when_running(self):
         # get exception when trying to submit a task that is already running
         course_task_log = self._create_progress_entry()
-        problem_url = course_task_log.task_args
+        problem_url = json.loads(course_task_log.task_input).get('problem_url')
         course_id = course_task_log.course_id
         # requester doesn't have to be the same when determining if a task is already running
         request = Mock()
         request.user = self.student
         with self.assertRaises(AlreadyRunningError):
             # just skip making the argument check, so we don't have to fake it deeper down
-            with patch('courseware.task_queue._check_arguments_for_regrading'):
-                submit_regrade_problem_for_all_students(request, course_id, problem_url)
+            with patch('courseware.task_queue._check_arguments_for_rescoring'):
+                submit_rescore_problem_for_all_students(request, course_id, problem_url)
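The AlreadyRunningError exercised here presumably comes from a guard inside the submit helpers; a sketch of that guard under the scheme the model docstring describes (the state names and exact filter are assumptions):

    READY_STATES = ['SUCCESS', 'FAILURE', 'REVOKED']  # assumed terminal states

    def _check_not_already_running(course_id, task_type, task_key):
        # Sketch only: task_key is checked together with task_type and
        # course_id, per the CourseTaskLog docstring; the requester is
        # deliberately not part of the check, as this test verifies.
        unfinished = CourseTaskLog.objects.filter(course_id=course_id,
                                                  task_type=task_type,
                                                  task_key=task_key)
        unfinished = unfinished.exclude(task_state__in=READY_STATES)
        if unfinished.exists():
            raise AlreadyRunningError("A task with these arguments is already running")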
@@ -239,22 +239,22 @@ def instructor_dashboard(request, course_id):
             track.views.server_track(request, action, {}, page='idashboard')
             msg += dump_grading_context(course)

-        elif "Regrade ALL students' problem submissions" in action:
+        elif "Rescore ALL students' problem submissions" in action:
             problem_urlname = request.POST.get('problem_for_all_students', '')
             problem_url = get_module_url(problem_urlname)
             try:
-                course_task_log_entry = task_queue.submit_regrade_problem_for_all_students(request, course_id, problem_url)
+                course_task_log_entry = task_queue.submit_rescore_problem_for_all_students(request, course_id, problem_url)
                 if course_task_log_entry is None:
-                    msg += '<font color="red">Failed to create a background task for regrading "{0}".</font>'.format(problem_url)
+                    msg += '<font color="red">Failed to create a background task for rescoring "{0}".</font>'.format(problem_url)
                 else:
-                    track_msg = 'regrade problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
+                    track_msg = 'rescore problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
                     track.views.server_track(request, track_msg, {}, page='idashboard')
             except ItemNotFoundError as e:
-                log.error('Failure to regrade: unknown problem "{0}"'.format(e))
-                msg += '<font color="red">Failed to create a background task for regrading "{0}": problem not found.</font>'.format(problem_url)
+                log.error('Failure to rescore: unknown problem "{0}"'.format(e))
+                msg += '<font color="red">Failed to create a background task for rescoring "{0}": problem not found.</font>'.format(problem_url)
             except Exception as e:
-                log.error("Encountered exception from regrade: {0}".format(e))
-                msg += '<font color="red">Failed to create a background task for regrading "{0}": {1}.</font>'.format(problem_url, e.message)
+                log.error("Encountered exception from rescore: {0}".format(e))
+                msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(problem_url, e.message)

         elif "Reset ALL students' attempts" in action:
             problem_urlname = request.POST.get('problem_for_all_students', '')
@@ -301,7 +301,7 @@ def instructor_dashboard(request, course_id):
         elif "Reset student's attempts" in action \
                 or "Delete student state for module" in action \
-                or "Regrade student's problem submission" in action:
+                or "Rescore student's problem submission" in action:
             # get the form data
             unique_student_identifier = request.POST.get('unique_student_identifier', '')
             problem_urlname = request.POST.get('problem_for_student', '')
@@ -356,15 +356,15 @@ def instructor_dashboard(request, course_id):
                     msg += "<font color='red'>Couldn't reset module state. </font>"
             else:
                 try:
-                    course_task_log_entry = task_queue.submit_regrade_problem_for_student(request, course_id, module_state_key, student)
+                    course_task_log_entry = task_queue.submit_rescore_problem_for_student(request, course_id, module_state_key, student)
                     if course_task_log_entry is None:
-                        msg += '<font color="red">Failed to create a background task for regrading "{0}" for student {1}.</font>'.format(module_state_key, unique_student_identifier)
+                        msg += '<font color="red">Failed to create a background task for rescoring "{0}" for student {1}.</font>'.format(module_state_key, unique_student_identifier)
                     else:
-                        track_msg = 'regrade problem {problem} for student {student} in {course}'.format(problem=module_state_key, student=unique_student_identifier, course=course_id)
+                        track_msg = 'rescore problem {problem} for student {student} in {course}'.format(problem=module_state_key, student=unique_student_identifier, course=course_id)
                         track.views.server_track(request, track_msg, {}, page='idashboard')
                 except Exception as e:
-                    log.error("Encountered exception from regrade: {0}".format(e))
-                    msg += '<font color="red">Failed to create a background task for regrading "{0}": {1}.</font>'.format(module_state_key, e.message)
+                    log.error("Encountered exception from rescore: {0}".format(e))
+                    msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(module_state_key, e.message)

         elif "Get link to student's progress page" in action:
             unique_student_identifier = request.POST.get('unique_student_identifier', '')
@@ -1288,17 +1288,13 @@ def get_background_task_table(course_id, problem_url, student=None):
     Construct the "datatable" structure to represent background task history.

     Filters the background task history to the specified course and problem.
     If a student is provided, filters to only those tasks for which that student
     was specified.

     Returns a tuple of (msg, datatable), where the msg is a possible error message,
     and the datatable is the datatable to be used for display.
     """
-    course_tasks = CourseTaskLog.objects.filter(course_id=course_id, task_args=problem_url)
-    if student is not None:
-        course_tasks = course_tasks.filter(student=student)
-    history_entries = course_tasks.order_by('-id')
+    history_entries = task_queue.get_course_task_history(course_id, problem_url, student)
     datatable = None
     msg = ""
     # first check to see if there is any history at all
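The inline query is replaced by `task_queue.get_course_task_history`, whose body is not shown here. A sketch that reproduces the old filtering through the new task_key scheme; only the signature is taken from the call above, the body is an assumption:

    def get_course_task_history(course_id, problem_url, student=None):
        # Sketch only: recompute the key the submit path stored, so history
        # lookups stay consistent with what was actually submitted.
        _, task_key = _encode_problem_and_student_input(problem_url, student)
        return CourseTaskLog.objects.filter(course_id=course_id,
                                            task_key=task_key).order_by('-id')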
@@ -1315,24 +1311,23 @@ def get_background_task_table(course_id, problem_url, student=None):
     else:
         datatable = {}
         datatable['header'] = ["Order",
-                               "Task Name",
-                               "Student",
+                               "Task Type",
                                "Task Id",
                                "Requester",
                                "Submitted",
-                               "Duration",
+                               "Duration (ms)",
                                "Task State",
                                "Task Status",
-                               "Message"]
+                               "Task Output"]
         datatable['data'] = []
         for i, course_task in enumerate(history_entries):
             # get duration info, if known:
             duration_ms = 'unknown'
-            if hasattr(course_task, 'task_progress'):
-                task_progress = json.loads(course_task.task_progress)
-                if 'duration_ms' in task_progress:
-                    duration_ms = task_progress['duration_ms']
+            if hasattr(course_task, 'task_output'):
+                task_output = json.loads(course_task.task_output)
+                if 'duration_ms' in task_output:
+                    duration_ms = task_output['duration_ms']
             # get progress status message:
             success, message = task_queue.get_task_completion_message(course_task)
             if success:
@@ -1341,17 +1336,14 @@ def get_background_task_table(course_id, problem_url, student=None):
             else:
                 status = "Incomplete"
             # generate row for this task:
             row = ["#{0}".format(len(history_entries) - i),
-                   str(course_task.task_name),
-                   str(course_task.student),
+                   str(course_task.task_type),
                    str(course_task.task_id),
                    str(course_task.requester),
                    course_task.created.strftime("%Y/%m/%d %H:%M:%S"),
                    duration_ms,
-                   #course_task.updated.strftime("%Y/%m/%d %H:%M:%S"),
                    str(course_task.task_state),
                    status,
                    message]
             datatable['data'].append(row)
     return msg, datatable
@@ -312,7 +312,7 @@ function goto( mode)
   <p>
     Then select an action:
     <input type="submit" name="action" value="Reset ALL students' attempts">
-    <input type="submit" name="action" value="Regrade ALL students' problem submissions">
+    <input type="submit" name="action" value="Rescore ALL students' problem submissions">
   </p>
   <p>
   <p>These actions run in the background, and status for active tasks will appear in a table below.
@@ -349,7 +349,7 @@ function goto( mode)
     Then select an action:
     <input type="submit" name="action" value="Reset student's attempts">
     %if settings.MITX_FEATURES.get('ENABLE_COURSE_BACKGROUND_TASKS'):
-    <input type="submit" name="action" value="Regrade student's problem submission">
+    <input type="submit" name="action" value="Rescore student's problem submission">
     %endif
   </p>
@@ -360,7 +360,7 @@ function goto( mode)
   </p>
   %endif
   %if settings.MITX_FEATURES.get('ENABLE_COURSE_BACKGROUND_TASKS'):
-  <p>Regrading runs in the background, and status for active tasks will appear in a table below.
+  <p>Rescoring runs in the background, and status for active tasks will appear in a table below.
     To see status for all tasks submitted for this course and student, click on this button:
   </p>
   <p>
@@ -708,9 +708,8 @@ function goto( mode)
   <div id="task-progress-wrapper">
     <table class="stat_table">
       <tr>
-        <th>Task Name</th>
-        <th>Task Arg</th>
-        <th>Student</th>
+        <th>Task Type</th>
+        <th>Task inputs</th>
         <th>Task Id</th>
         <th>Requester</th>
         <th>Submitted</th>
@@ -722,9 +721,8 @@ function goto( mode)
       <tr id="task-progress-entry-${tasknum}" class="task-progress-entry"
           data-task-id="${course_task.task_id}"
           data-in-progress="true">
-        <td>${course_task.task_name}</td>
-        <td>${course_task.task_args}</td>
-        <td>${course_task.student}</td>
+        <td>${course_task.task_type}</td>
+        <td>${course_task.task_input}</td>
         <td><div class="task-id">${course_task.task_id}</div></td>
         <td>${course_task.requester}</td>
         <td>${course_task.created}</td>