Commit d2b3977f by Brian Wilson

Add dogstat logging to background tasks.

parent 9e11a565
@@ -20,6 +20,7 @@ LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', '
 def log_event(event):
+    """Write tracking event to log file, and optionally to TrackingLog model."""
     event_str = json.dumps(event)
     log.info(event_str[:settings.TRACK_MAX_EVENT])
     if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
@@ -32,6 +33,11 @@ def log_event(event):
 def user_track(request):
+    """
+    Log when GET call to "event" URL is made by a user.
+
+    GET call should provide "event_type", "event", and "page" arguments.
+    """
     try:    # TODO: Do the same for many of the optional META parameters
         username = request.user.username
     except:
@@ -48,7 +54,6 @@ def user_track(request):
     except:
         agent = ''

-    # TODO: Move a bunch of this into log_event
     event = {
         "username": username,
         "session": scookie,
@@ -66,6 +71,7 @@ def user_track(request):
 def server_track(request, event_type, event, page=None):
+    """Log events related to server requests."""
     try:
         username = request.user.username
     except:
@@ -95,7 +101,7 @@ def server_track(request, event_type, event, page=None):
 def task_track(request_info, task_info, event_type, event, page=None):
     """
-    Outputs tracking information for events occuring within celery tasks.
+    Logs tracking information for events occuring within celery tasks.

     The `event_type` is a string naming the particular event being logged,
     while `event` is a dict containing whatever additional contextual information
@@ -103,9 +109,11 @@ def task_track(request_info, task_info, event_type, event, page=None):
     The `request_info` is a dict containing information about the original
     task request.  Relevant keys are `username`, `ip`, `agent`, and `host`.
+    While the dict is required, the values in it are not, so that {} can be
+    passed in.

-    In addition, a `task_info` dict provides more information to be stored with
-    the `event` dict.
+    In addition, a `task_info` dict provides more information about the current
+    task, to be stored with the `event` dict.  This may also be an empty dict.

     The `page` parameter is optional, and allows the name of the page to
     be provided.
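
Note: per the docstring above, `request_info` and `task_info` are required arguments but may both be empty. A minimal call from inside a celery task might therefore look like this (event values invented for illustration; `page` is optional and omitted):

    task_track({}, {}, 'problem_rescore', {'problem': 'some_problem_url'})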
@@ -136,6 +144,7 @@ def task_track(request_info, task_info, event_type, event, page=None):
 @login_required
 @ensure_csrf_cookie
 def view_tracking_log(request, args=''):
+    """View to output contents of TrackingLog model.  For staff use only."""
     if not request.user.is_staff:
         return redirect('/')
     nlen = 100
...
@@ -15,25 +15,22 @@ This is used by capa_module.
 from datetime import datetime
 import logging
-import math
-import numpy
 import os.path
 import re
-import sys

 from lxml import etree
 from xml.sax.saxutils import unescape
 from copy import deepcopy

 from .correctmap import CorrectMap
-import inputtypes
-import customrender
+import capa.inputtypes as inputtypes
+import capa.customrender as customrender
 from .util import contextualize_text, convert_files_to_filenames
-import xqueue_interface
+import capa.xqueue_interface as xqueue_interface

 # to be replaced with auto-registering
-import responsetypes
-import safe_exec
+import capa.responsetypes as responsetypes
+from capa.safe_exec import safe_exec

 # dict of tagname, Response Class -- this should come from auto-registering
 response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
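
Note: `response_tag_dict` maps each response class's XML tag to the class itself, so the problem parser can dispatch on tag names. The `dict([...])` construction predates dict comprehensions; a sketch of the equivalent modern form, with a stand-in class:

    class NumericalResponse(object):
        response_tag = 'numericalresponse'

    registry = [NumericalResponse]  # stand-in for responsetypes.__all__
    response_tag_dict = {cls.response_tag: cls for cls in registry}
    assert response_tag_dict['numericalresponse'] is NumericalResponse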
@@ -134,7 +131,6 @@ class LoncapaProblem(object):
         self.extracted_tree = self._extract_html(self.tree)

-
     def do_reset(self):
         '''
         Reset internal state to unfinished, with no answers
@@ -175,7 +171,7 @@ class LoncapaProblem(object):
         Return the maximum score for this problem.
         '''
         maxscore = 0
-        for response, responder in self.responders.iteritems():
+        for responder in self.responders.values():
             maxscore += responder.get_max_score()
         return maxscore
@@ -220,7 +216,7 @@ class LoncapaProblem(object):
     def ungraded_response(self, xqueue_msg, queuekey):
         '''
         Handle any responses from the xqueue that do not contain grades
         Will try to pass the queue message to all inputtypes that can handle ungraded responses
         Does not return any value
         '''
@@ -257,7 +253,8 @@ class LoncapaProblem(object):
     def grade_answers(self, answers):
         '''
         Grade student responses.  Called by capa_module.check_problem.
-        answers is a dict of all the entries from request.POST, but with the first part
+
+        `answers` is a dict of all the entries from request.POST, but with the first part
         of each key removed (the string before the first "_").

         Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123
@@ -286,13 +283,12 @@ class LoncapaProblem(object):
         that the responsetypes are synchronous.  This is convenient as it
         permits rescoring to be complete when the rescoring call returns.
         """
-        # We check for synchronous grading and no file submissions by
-        # screening out all problems with a CodeResponse type.
-        for responder in self.responders.values():
-            if 'filesubmission' in responder.allowed_inputfields:
-                return False
-
-        return True
+        return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())
+        # for responder in self.responders.values():
+        #     if 'filesubmission' in responder.allowed_inputfields:
+        #         return False
+        #
+        # return True

     def rescore_existing_answers(self):
         '''
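
Note: the rewritten check above collapses the early-return loop into a single `all(...)` over a generator expression; the two forms are equivalent, including the True result for an empty collection. A standalone sketch of the same pattern:

    responders = [{'inputfields': ['textline']}, {'inputfields': ['filesubmission']}]

    def no_files_loop(items):
        for item in items:
            if 'filesubmission' in item['inputfields']:
                return False
        return True

    assert no_files_loop(responders) == all(
        'filesubmission' not in item['inputfields'] for item in responders)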
@@ -300,15 +296,17 @@ class LoncapaProblem(object):
         '''
         return self._grade_answers(None)

-    def _grade_answers(self, answers):
+    def _grade_answers(self, student_answers):
         '''
-        Internal grading call used for checking new student answers and also
-        rescoring existing student answers.
+        Internal grading call used for checking new 'student_answers' and also
+        rescoring existing student_answers.

-        answers is a dict of all the entries from request.POST, but with the first part
-        of each key removed (the string before the first "_").
-
-        Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123
+        For new student_answers being graded, `student_answers` is a dict of all the
+        entries from request.POST, but with the first part of each key removed
+        (the string before the first "_").  Thus, for example,
+        input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123.
+
+        For rescoring, `student_answers` is None.

         Calls the Response for each question in this problem, to do the actual grading.
         '''
@@ -325,18 +323,19 @@ class LoncapaProblem(object):
             # student_answers contains a proper answer or the filename of
             # an earlier submission, so for now skip these entirely.
             # TODO: figure out where to get file submissions when rescoring.
-            if 'filesubmission' in responder.allowed_inputfields and answers is None:
+            if 'filesubmission' in responder.allowed_inputfields and student_answers is None:
                 raise Exception("Cannot rescore problems with possible file submissions")

-            # use 'answers' if it is provided, otherwise use the saved student_answers.
-            if answers is not None:
-                results = responder.evaluate_answers(answers, oldcmap)
+            # use 'student_answers' only if it is provided, and if it might contain a file
+            # submission that would not exist in the persisted "student_answers".
+            if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
+                results = responder.evaluate_answers(student_answers, oldcmap)
             else:
                 results = responder.evaluate_answers(self.student_answers, oldcmap)
             newcmap.update(results)

         self.correct_map = newcmap
-        # log.debug('%s: in grade_answers, answers=%s, cmap=%s' % (self,answers,newcmap))
+        # log.debug('%s: in grade_answers, student_answers=%s, cmap=%s' % (self,student_answers,newcmap))
         return newcmap

     def get_question_answers(self):
@@ -380,7 +379,6 @@ class LoncapaProblem(object):
         html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
         return html

-
     def handle_input_ajax(self, get):
         '''
         InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data
@@ -397,8 +395,6 @@ class LoncapaProblem(object):
             log.warning("Could not find matching input for id: %s" % input_id)
             return {}

-
-
     # ======= Private Methods Below ========

     def _process_includes(self):
@@ -408,16 +404,16 @@ class LoncapaProblem(object):
         '''
         includes = self.tree.findall('.//include')
         for inc in includes:
-            file = inc.get('file')
-            if file is not None:
+            filename = inc.get('file')
+            if filename is not None:
                 try:
                     # open using ModuleSystem OSFS filestore
-                    ifp = self.system.filestore.open(file)
+                    ifp = self.system.filestore.open(filename)
                 except Exception as err:
                     log.warning('Error %s in problem xml include: %s' % (
                         err, etree.tostring(inc, pretty_print=True)))
                     log.warning('Cannot find file %s in %s' % (
-                        file, self.system.filestore))
+                        filename, self.system.filestore))
                     # if debugging, don't fail - just log error
                     # TODO (vshnayder): need real error handling, display to users
                     if not self.system.get('DEBUG'):
@@ -430,7 +426,7 @@ class LoncapaProblem(object):
                 except Exception as err:
                     log.warning('Error %s in problem xml include: %s' % (
                         err, etree.tostring(inc, pretty_print=True)))
-                    log.warning('Cannot parse XML in %s' % (file))
+                    log.warning('Cannot parse XML in %s' % (filename))
                     # if debugging, don't fail - just log error
                     # TODO (vshnayder): same as above
                     if not self.system.get('DEBUG'):
@@ -438,11 +434,11 @@ class LoncapaProblem(object):
                 else:
                     continue

-            # insert new XML into tree in place of inlcude
+            # insert new XML into tree in place of include
             parent = inc.getparent()
             parent.insert(parent.index(inc), incxml)
             parent.remove(inc)
-            log.debug('Included %s into %s' % (file, self.problem_id))
+            log.debug('Included %s into %s' % (filename, self.problem_id))

     def _extract_system_path(self, script):
         """
@@ -512,7 +508,7 @@ class LoncapaProblem(object):
         if all_code:
             try:
-                safe_exec.safe_exec(
+                safe_exec(
                     all_code,
                     context,
                     random_seed=self.seed,
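
Note: `safe_exec` is now imported as a function instead of being reached through the module. A minimal standalone call, sketched from the invocation above (the remaining keyword arguments that capa passes are omitted, and the availability of a seeded `random` module inside the sandbox is assumed from the `random_seed` parameter):

    context = {}
    safe_exec("x = random.randint(0, 10)", context, random_seed=42)  # seeding assumed
    print(context['x'])  # deterministic for a fixed seed, if the assumption holds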
@@ -568,18 +564,18 @@ class LoncapaProblem(object):
             value = ""
             if self.student_answers and problemid in self.student_answers:
                 value = self.student_answers[problemid]

             if input_id not in self.input_state:
                 self.input_state[input_id] = {}

             # do the rendering
             state = {'value': value,
                      'status': status,
                      'id': input_id,
                      'input_state': self.input_state[input_id],
                      'feedback': {'message': msg,
                                   'hint': hint,
                                   'hintmode': hintmode, }}

             input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag)
             # save the input type so that we can make ajax calls on it if we need to
@@ -603,7 +599,7 @@ class LoncapaProblem(object):
         for item in problemtree:
             item_xhtml = self._extract_html(item)
             if item_xhtml is not None:
                 tree.append(item_xhtml)

         if tree.tag in html_transforms:
             tree.tag = html_transforms[problemtree.tag]['tag']
...
@@ -4,7 +4,6 @@ Tests of responsetypes
 from datetime import datetime
 import json
-from nose.plugins.skip import SkipTest
 import os
 import random
 import unittest
@@ -56,9 +55,18 @@ class ResponseTest(unittest.TestCase):
             self.assertEqual(result, 'incorrect',
                              msg="%s should be marked incorrect" % str(input_str))

+    def _get_random_number_code(self):
+        """Returns code to be used to generate a random result."""
+        return "str(random.randint(0, 1e9))"
+
+    def _get_random_number_result(self, seed_value):
+        """Returns a result that should be generated using the random_number_code."""
+        rand = random.Random(seed_value)
+        return str(rand.randint(0, 1e9))
+

 class MultiChoiceResponseTest(ResponseTest):
-    from response_xml_factory import MultipleChoiceResponseXMLFactory
+    from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
     xml_factory_class = MultipleChoiceResponseXMLFactory

     def test_multiple_choice_grade(self):
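
Note: the two helpers added above are a matched pair: `_get_random_number_code()` returns the source snippet that gets embedded into problem scripts, and `_get_random_number_result(seed)` computes the value that snippet should produce when run against a `random` module seeded with the problem's seed. Keeping both in one place keeps the generator and the expectation from drifting apart:

    code = self._get_random_number_code()                    # "str(random.randint(0, 1e9))"
    expected = self._get_random_number_result(problem.seed)  # same value, computed directly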
@@ -80,7 +88,7 @@ class MultiChoiceResponseTest(ResponseTest):
 class TrueFalseResponseTest(ResponseTest):
-    from response_xml_factory import TrueFalseResponseXMLFactory
+    from capa.tests.response_xml_factory import TrueFalseResponseXMLFactory
     xml_factory_class = TrueFalseResponseXMLFactory

     def test_true_false_grade(self):
@@ -120,7 +128,7 @@ class TrueFalseResponseTest(ResponseTest):
 class ImageResponseTest(ResponseTest):
-    from response_xml_factory import ImageResponseXMLFactory
+    from capa.tests.response_xml_factory import ImageResponseXMLFactory
     xml_factory_class = ImageResponseXMLFactory

     def test_rectangle_grade(self):
@@ -184,7 +192,7 @@ class ImageResponseTest(ResponseTest):
 class SymbolicResponseTest(ResponseTest):
-    from response_xml_factory import SymbolicResponseXMLFactory
+    from capa.tests.response_xml_factory import SymbolicResponseXMLFactory
     xml_factory_class = SymbolicResponseXMLFactory

     def test_grade_single_input(self):
@@ -224,8 +232,8 @@ class SymbolicResponseTest(ResponseTest):
     def test_complex_number_grade(self):
         problem = self.build_problem(math_display=True,
                                      expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
                                      options=["matrix", "imaginary"])

         # For LaTeX-style inputs, symmath_check() will try to contact
         # a server to convert the input to MathML.
@@ -312,16 +320,16 @@ class SymbolicResponseTest(ResponseTest):
         # Should not allow multiple inputs, since we specify
         # only one "expect" value
         with self.assertRaises(Exception):
-            problem = self.build_problem(math_display=True,
-                                         expect="2*x+3*y",
-                                         num_inputs=3)
+            self.build_problem(math_display=True,
+                               expect="2*x+3*y",
+                               num_inputs=3)

     def _assert_symbolic_grade(self, problem,
                                student_input,
                                dynamath_input,
                                expected_correctness):
         input_dict = {'1_2_1': str(student_input),
                       '1_2_1_dynamath': str(dynamath_input)}

         correct_map = problem.grade_answers(input_dict)
@@ -330,7 +338,7 @@ class SymbolicResponseTest(ResponseTest):
 class OptionResponseTest(ResponseTest):
-    from response_xml_factory import OptionResponseXMLFactory
+    from capa.tests.response_xml_factory import OptionResponseXMLFactory
     xml_factory_class = OptionResponseXMLFactory

     def test_grade(self):
@@ -350,7 +358,7 @@ class FormulaResponseTest(ResponseTest):
     """
     Test the FormulaResponse class
     """
-    from response_xml_factory import FormulaResponseXMLFactory
+    from capa.tests.response_xml_factory import FormulaResponseXMLFactory
     xml_factory_class = FormulaResponseXMLFactory

     def test_grade(self):
@@ -570,7 +578,7 @@ class FormulaResponseTest(ResponseTest):
 class StringResponseTest(ResponseTest):
-    from response_xml_factory import StringResponseXMLFactory
+    from capa.tests.response_xml_factory import StringResponseXMLFactory
     xml_factory_class = StringResponseXMLFactory

     def test_case_sensitive(self):
@@ -647,19 +655,19 @@ class StringResponseTest(ResponseTest):
             hintfn="gimme_a_random_hint",
             script=textwrap.dedent("""
                 def gimme_a_random_hint(answer_ids, student_answers, new_cmap, old_cmap):
-                    answer = str(random.randint(0, 1e9))
+                    answer = {code}
                     new_cmap.set_hint_and_mode(answer_ids[0], answer, "always")
-            """)
+            """.format(code=self._get_random_number_code()))
         )
         correct_map = problem.grade_answers({'1_2_1': '2'})
         hint = correct_map.get_hint('1_2_1')
-        r = random.Random(problem.seed)
-        self.assertEqual(hint, str(r.randint(0, 1e9)))
+        # rand = random.Random(problem.seed)
+        self.assertEqual(hint, self._get_random_number_result(problem.seed))

 class CodeResponseTest(ResponseTest):
-    from response_xml_factory import CodeResponseXMLFactory
+    from capa.tests.response_xml_factory import CodeResponseXMLFactory
     xml_factory_class = CodeResponseXMLFactory

     def setUp(self):
@@ -673,6 +681,7 @@ class CodeResponseTest(ResponseTest):
     @staticmethod
     def make_queuestate(key, time):
+        """Create queuestate dict"""
         timestr = datetime.strftime(time, dateformat)
         return {'key': key, 'time': timestr}
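
Note: a queuestate produced by this helper is a plain dict; for example (the timestamp format depends on the module-level `dateformat`, which is not shown in this hunk):

    {'key': 1000, 'time': '20130101120000'}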
@@ -710,7 +719,7 @@ class CodeResponseTest(ResponseTest):
         old_cmap = CorrectMap()
         for i, answer_id in enumerate(answer_ids):
             queuekey = 1000 + i
-            queuestate = CodeResponseTest.make_queuestate(1000 + i, datetime.now())
+            queuestate = CodeResponseTest.make_queuestate(queuekey, datetime.now())
             old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))

         # Message format common to external graders
@@ -771,7 +780,7 @@ class CodeResponseTest(ResponseTest):
         for i, answer_id in enumerate(answer_ids):
             queuekey = 1000 + i
             latest_timestamp = datetime.now()
-            queuestate = CodeResponseTest.make_queuestate(1000 + i, latest_timestamp)
+            queuestate = CodeResponseTest.make_queuestate(queuekey, latest_timestamp)
             cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
         self.problem.correct_map.update(cmap)
@@ -796,7 +805,7 @@ class CodeResponseTest(ResponseTest):
 class ChoiceResponseTest(ResponseTest):
-    from response_xml_factory import ChoiceResponseXMLFactory
+    from capa.tests.response_xml_factory import ChoiceResponseXMLFactory
     xml_factory_class = ChoiceResponseXMLFactory

     def test_radio_group_grade(self):
@@ -828,7 +837,7 @@ class ChoiceResponseTest(ResponseTest):
 class JavascriptResponseTest(ResponseTest):
-    from response_xml_factory import JavascriptResponseXMLFactory
+    from capa.tests.response_xml_factory import JavascriptResponseXMLFactory
     xml_factory_class = JavascriptResponseXMLFactory

     def test_grade(self):
@@ -858,7 +867,7 @@ class JavascriptResponseTest(ResponseTest):
         system.can_execute_unsafe_code = lambda: False
         with self.assertRaises(LoncapaProblemError):
-            problem = self.build_problem(
+            self.build_problem(
                 system=system,
                 generator_src="test_problem_generator.js",
                 grader_src="test_problem_grader.js",
@@ -869,7 +878,7 @@ class JavascriptResponseTest(ResponseTest):
 class NumericalResponseTest(ResponseTest):
-    from response_xml_factory import NumericalResponseXMLFactory
+    from capa.tests.response_xml_factory import NumericalResponseXMLFactory
     xml_factory_class = NumericalResponseXMLFactory

     def test_grade_exact(self):
@@ -961,7 +970,7 @@ class NumericalResponseTest(ResponseTest):
 class CustomResponseTest(ResponseTest):
-    from response_xml_factory import CustomResponseXMLFactory
+    from capa.tests.response_xml_factory import CustomResponseXMLFactory
     xml_factory_class = CustomResponseXMLFactory

     def test_inline_code(self):
@@ -1000,15 +1009,14 @@ class CustomResponseTest(ResponseTest):
     def test_inline_randomization(self):
         # Make sure the seed from the problem gets fed into the script execution.
-        inline_script = """messages[0] = str(random.randint(0, 1e9))"""
+        inline_script = "messages[0] = {code}".format(code=self._get_random_number_code())
         problem = self.build_problem(answer=inline_script)

         input_dict = {'1_2_1': '0'}
         correctmap = problem.grade_answers(input_dict)

         input_msg = correctmap.get_msg('1_2_1')
-        r = random.Random(problem.seed)
-        self.assertEqual(input_msg, str(r.randint(0, 1e9)))
+        self.assertEqual(input_msg, self._get_random_number_result(problem.seed))

     def test_function_code_single_input(self):
         # For function code, we pass in these arguments:
@@ -1241,25 +1249,23 @@ class CustomResponseTest(ResponseTest):
     def test_setup_randomization(self):
         # Ensure that the problem setup script gets the random seed from the problem.
         script = textwrap.dedent("""
-            num = random.randint(0, 1e9)
-            """)
+            num = {code}
+            """.format(code=self._get_random_number_code()))
         problem = self.build_problem(script=script)
-        r = random.Random(problem.seed)
-        self.assertEqual(r.randint(0, 1e9), problem.context['num'])
+        self.assertEqual(problem.context['num'], self._get_random_number_result(problem.seed))

     def test_check_function_randomization(self):
         # The check function should get random-seeded from the problem.
         script = textwrap.dedent("""
             def check_func(expect, answer_given):
-                return {'ok': True, 'msg': str(random.randint(0, 1e9))}
-            """)
+                return {{'ok': True, 'msg': {code} }}
+            """.format(code=self._get_random_number_code()))
         problem = self.build_problem(script=script, cfn="check_func", expect="42")

         input_dict = {'1_2_1': '42'}
         correct_map = problem.grade_answers(input_dict)
         msg = correct_map.get_msg('1_2_1')
-        r = random.Random(problem.seed)
-        self.assertEqual(msg, str(r.randint(0, 1e9)))
+        self.assertEqual(msg, self._get_random_number_result(problem.seed))

     def test_module_imports_inline(self):
         '''
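
Note: every randomization test above relies on the same property: `random.Random(seed)` is deterministic, so two generators built from the same seed yield identical sequences. A self-contained sketch:

    import random

    first = random.Random(1234)
    second = random.Random(1234)
    assert first.randint(0, 10**9) == second.randint(0, 10**9)

The tests assume the problem seeds its script-execution environment's `random` with `problem.seed`, which is what lets `_get_random_number_result(problem.seed)` predict the embedded code's output.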
@@ -1320,7 +1326,7 @@ class CustomResponseTest(ResponseTest):
 class SchematicResponseTest(ResponseTest):
-    from response_xml_factory import SchematicResponseXMLFactory
+    from capa.tests.response_xml_factory import SchematicResponseXMLFactory
     xml_factory_class = SchematicResponseXMLFactory

     def test_grade(self):
@@ -1349,11 +1355,10 @@ class SchematicResponseTest(ResponseTest):
     def test_check_function_randomization(self):
         # The check function should get a random seed from the problem.
-        script = "correct = ['correct' if (submission[0]['num'] == random.randint(0, 1e9)) else 'incorrect']"
+        script = "correct = ['correct' if (submission[0]['num'] == {code}) else 'incorrect']".format(code=self._get_random_number_code())
         problem = self.build_problem(answer=script)

-        r = random.Random(problem.seed)
-        submission_dict = {'num': r.randint(0, 1e9)}
+        submission_dict = {'num': self._get_random_number_result(problem.seed)}
         input_dict = {'1_2_1': json.dumps(submission_dict)}
         correct_map = problem.grade_answers(input_dict)
@@ -1372,7 +1377,7 @@ class SchematicResponseTest(ResponseTest):
 class AnnotationResponseTest(ResponseTest):
-    from response_xml_factory import AnnotationResponseXMLFactory
+    from capa.tests.response_xml_factory import AnnotationResponseXMLFactory
     xml_factory_class = AnnotationResponseXMLFactory

     def test_grade(self):
@@ -1393,7 +1398,7 @@ class AnnotationResponseTest(ResponseTest):
             {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
         ]

-        for (index, test) in enumerate(tests):
+        for test in tests:
             expected_correctness = test['correctness']
             expected_points = test['points']
             answers = test['answers']
...
@@ -424,7 +424,7 @@ class CapaModule(CapaFields, XModule):
         # If we cannot construct the problem HTML,
         # then generate an error message instead.
-        except Exception, err:
+        except Exception as err:
             html = self.handle_problem_html_error(err)

         # The convention is to pass the name of the check button
@@ -780,7 +780,7 @@ class CapaModule(CapaFields, XModule):
                 return {'success': msg}

-        except Exception, err:
+        except Exception as err:
             if self.system.DEBUG:
                 msg = "Error checking problem: " + str(err)
                 msg += '\nTraceback:\n' + traceback.format_exc()
@@ -845,13 +845,10 @@ class CapaModule(CapaFields, XModule):
         # get old score, for comparison:
         orig_score = self.lcp.get_score()
         event_info['orig_score'] = orig_score['score']
-        event_info['orig_max_score'] = orig_score['total']
+        event_info['orig_total'] = orig_score['total']

         try:
             correct_map = self.lcp.rescore_existing_answers()
-            # rescoring should have no effect on attempts, so don't
-            # need to increment here, or mark done.  Just save.
-            self.set_state_from_lcp()
         except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
             log.warning("StudentInputError in capa_module:problem_rescore", exc_info=True)
@@ -859,7 +856,7 @@ class CapaModule(CapaFields, XModule):
             self.system.track_function('problem_rescore_fail', event_info)
             return {'success': "Error: {0}".format(inst.message)}

-        except Exception, err:
+        except Exception as err:
             event_info['failure'] = 'unexpected'
             self.system.track_function('problem_rescore_fail', event_info)
             if self.system.DEBUG:
@@ -868,11 +865,15 @@ class CapaModule(CapaFields, XModule):
                 return {'success': msg}
             raise

+        # rescoring should have no effect on attempts, so don't
+        # need to increment here, or mark done.  Just save.
+        self.set_state_from_lcp()
+
         self.publish_grade()

         new_score = self.lcp.get_score()
         event_info['new_score'] = new_score['score']
-        event_info['new_max_score'] = new_score['total']
+        event_info['new_total'] = new_score['total']

         # success = correct if ALL questions in this problem are correct
         success = 'correct'
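
Note: moving `self.set_state_from_lcp()` below the `except` blocks means state is persisted only after rescoring has succeeded; previously the save sat inside the `try` alongside the call that can raise. The same control flow can be expressed with an `else:` clause, sketched generically:

    try:
        correct_map = rescore()      # may raise
    except KnownError:
        return report_failure()
    else:
        save_state()                 # runs only when no exception was raised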
...
@@ -618,10 +618,11 @@ class CapaModuleTest(unittest.TestCase):
         self.assertEqual(module.attempts, 1)

     def test_rescore_problem_incorrect(self):
+        # make sure it also works when attempts have been reset,
+        # so add this to the test:
         module = CapaFactory.create(attempts=0, done=True)

-        # Simulate that all answers are marked correct, no matter
+        # Simulate that all answers are marked incorrect, no matter
         # what the input is, by patching LoncapaResponse.evaluate_answers()
         with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
             mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
@@ -650,27 +651,31 @@ class CapaModuleTest(unittest.TestCase):
         with self.assertRaises(NotImplementedError):
             module.rescore_problem()

-    def test_rescore_problem_error(self):
-
-        # Try each exception that capa_module should handle
-        for exception_class in [StudentInputError,
-                                LoncapaProblemError,
-                                ResponseError]:
-
-            # Create the module
-            module = CapaFactory.create(attempts=1, done=True)
-
-            # Simulate answering a problem that raises the exception
-            with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
-                mock_rescore.side_effect = exception_class('test error')
-                result = module.rescore_problem()
-
-            # Expect an AJAX alert message in 'success'
-            expected_msg = 'Error: test error'
-            self.assertEqual(result['success'], expected_msg)
-
-            # Expect that the number of attempts is NOT incremented
-            self.assertEqual(module.attempts, 1)
+    def _rescore_problem_error_helper(self, exception_class):
+        """Helper to allow testing all errors that rescoring might return."""
+        # Create the module
+        module = CapaFactory.create(attempts=1, done=True)
+
+        # Simulate answering a problem that raises the exception
+        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
+            mock_rescore.side_effect = exception_class('test error')
+            result = module.rescore_problem()
+
+        # Expect an AJAX alert message in 'success'
+        expected_msg = 'Error: test error'
+        self.assertEqual(result['success'], expected_msg)
+
+        # Expect that the number of attempts is NOT incremented
+        self.assertEqual(module.attempts, 1)
+
+    def test_rescore_problem_student_input_error(self):
+        self._rescore_problem_error_helper(StudentInputError)
+
+    def test_rescore_problem_problem_error(self):
+        self._rescore_problem_error_helper(LoncapaProblemError)
+
+    def test_rescore_problem_response_error(self):
+        self._rescore_problem_error_helper(ResponseError)
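
Note: replacing the in-test loop with one named test per exception class is a standard unittest refactor: each case now passes or fails independently and is reported by name, rather than the loop stopping at the first failure. With a parameterization helper such as the ddt package (hypothetical here, not used in this diff), the three wrappers could be generated:

    # @ddt.data(StudentInputError, LoncapaProblemError, ResponseError)
    # def test_rescore_problem_error(self, exception_class):
    #     self._rescore_problem_error_helper(exception_class)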

     def test_save_problem(self):
         module = CapaFactory.create(done=False)
...
@@ -8,8 +8,8 @@ from django.db import models
 class Migration(SchemaMigration):

     def forwards(self, orm):
-        # Adding model 'CourseTaskLog'
-        db.create_table('courseware_coursetasklog', (
+        # Adding model 'CourseTask'
+        db.create_table('courseware_coursetask', (
             ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
             ('task_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
             ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
@@ -19,15 +19,15 @@ class Migration(SchemaMigration):
             ('task_state', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, db_index=True)),
             ('task_output', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
             ('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
-            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)),
-            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
+            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
+            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
         ))
-        db.send_create_signal('courseware', ['CourseTaskLog'])
+        db.send_create_signal('courseware', ['CourseTask'])

     def backwards(self, orm):
-        # Deleting model 'CourseTaskLog'
-        db.delete_table('courseware_coursetasklog')
+        # Deleting model 'CourseTask'
+        db.delete_table('courseware_coursetask')

     models = {
@@ -67,10 +67,10 @@ class Migration(SchemaMigration):
             'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
             'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
         },
-        'courseware.coursetasklog': {
-            'Meta': {'object_name': 'CourseTaskLog'},
+        'courseware.coursetask': {
+            'Meta': {'object_name': 'CourseTask'},
             'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
-            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
+            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
             'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
             'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
             'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
@@ -79,7 +79,7 @@ class Migration(SchemaMigration):
             'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
             'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
             'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
-            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'})
+            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
         },
         'courseware.offlinecomputedgrade': {
             'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'OfflineComputedGrade'},
...
@@ -265,7 +265,7 @@ class OfflineComputedGradeLog(models.Model):
         return "[OCGLog] %s: %s" % (self.course_id, self.created)

-class CourseTaskLog(models.Model):
+class CourseTask(models.Model):
     """
     Stores information about background tasks that have been submitted to
     perform course-specific work.
@@ -295,11 +295,11 @@ class CourseTaskLog(models.Model):
     task_state = models.CharField(max_length=50, null=True, db_index=True)  # max_length from celery_taskmeta
     task_output = models.CharField(max_length=1024, null=True)
     requester = models.ForeignKey(User, db_index=True)
-    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
-    updated = models.DateTimeField(auto_now=True, db_index=True)
+    created = models.DateTimeField(auto_now_add=True, null=True)
+    updated = models.DateTimeField(auto_now=True)

     def __repr__(self):
-        return 'CourseTaskLog<%r>' % ({
+        return 'CourseTask<%r>' % ({
             'task_type': self.task_type,
             'course_id': self.course_id,
             'task_input': self.task_input,
...
@@ -165,19 +165,19 @@ def get_xqueue_callback_url_prefix(request):
     """
     Calculates default prefix based on request, but allows override via settings

-    This is separated so that it can be called by the LMS before submitting
-    background tasks to run.  The xqueue callbacks should go back to the LMS,
-    not to the worker.
+    This is separated from get_module_for_descriptor so that it can be called
+    by the LMS before submitting background tasks to run.  The xqueue callbacks
+    should go back to the LMS, not to the worker.
     """
-    default_xqueue_callback_url_prefix = '{proto}://{host}'.format(
+    prefix = '{proto}://{host}'.format(
         proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http'),
         host=request.get_host()
     )
-    return settings.XQUEUE_INTERFACE.get('callback_url', default_xqueue_callback_url_prefix)
+    return settings.XQUEUE_INTERFACE.get('callback_url', prefix)
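
Note: per the code above, the request-derived prefix is only a fallback; a deployment can pin the callback host so that URLs handed to xqueue point at the LMS rather than at a celery worker. A sketch of the settings override (only the key read above is shown; real configurations carry the xqueue connection settings as well):

    XQUEUE_INTERFACE = {
        'callback_url': 'https://lms.example.com',  # example host
        # ... existing xqueue connection settings ...
    }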

 def get_module_for_descriptor(user, request, descriptor, model_data_cache, course_id,
                               position=None, wrap_xmodule_display=True, grade_bucket_type=None):
     """
     Implements get_module, extracting out the request-specific functionality.
@@ -192,14 +192,12 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
     return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
                                               track_function, xqueue_callback_url_prefix,
-                                              position=position,
-                                              wrap_xmodule_display=wrap_xmodule_display,
-                                              grade_bucket_type=grade_bucket_type)
+                                              position, wrap_xmodule_display, grade_bucket_type)

 def get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
                                        track_function, xqueue_callback_url_prefix,
                                        position=None, wrap_xmodule_display=True, grade_bucket_type=None):
     """
     Actually implement get_module, without requiring a request.
@@ -267,15 +265,15 @@ def get_module_for_descriptor_internal(user, descriptor, model_data_cache, cours
     def inner_get_module(descriptor):
         """
-        Delegate to get_module.  It does an access check, so may return None
+        Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set.
+
+        Because it does an access check, it may return None.
         """
         # TODO: fix this so that make_xqueue_callback uses the descriptor passed into
         # inner_get_module, not the parent's callback.  Add it as an argument....
         return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
                                                   track_function, make_xqueue_callback,
-                                                  position=position,
-                                                  wrap_xmodule_display=wrap_xmodule_display,
-                                                  grade_bucket_type=grade_bucket_type)
+                                                  position, wrap_xmodule_display, grade_bucket_type)

     def xblock_model_data(descriptor):
         return DbModel(
...
+import hashlib
 import json
 import logging

 from django.http import HttpResponse
 from django.db import transaction

 from celery.result import AsyncResult
-from celery.states import READY_STATES
+from celery.states import READY_STATES, SUCCESS, FAILURE, REVOKED

-from courseware.models import CourseTaskLog
+from courseware.models import CourseTask
 from courseware.module_render import get_xqueue_callback_url_prefix
-from courseware.tasks import (rescore_problem,
+from courseware.tasks import (PROGRESS, rescore_problem,
                               reset_problem_attempts, delete_problem_state)
 from xmodule.modulestore.django import modulestore

 log = logging.getLogger(__name__)

+# define a "state" used in CourseTask
+QUEUING = 'QUEUING'
+

 class AlreadyRunningError(Exception):
     pass
@@ -22,11 +25,12 @@ class AlreadyRunningError(Exception):
 def get_running_course_tasks(course_id):
     """
-    Returns a query of CourseTaskLog objects of running tasks for a given course.
+    Returns a query of CourseTask objects of running tasks for a given course.

     Used to generate a list of tasks to display on the instructor dashboard.
     """
-    course_tasks = CourseTaskLog.objects.filter(course_id=course_id)
+    course_tasks = CourseTask.objects.filter(course_id=course_id)
+    # exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
     for state in READY_STATES:
         course_tasks = course_tasks.exclude(task_state=state)
     return course_tasks
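
Note: chaining one `.exclude()` per state builds the same query as a single `__in` exclusion, since Django querysets are lazy and compose. A more compact equivalent (assuming READY_STATES is an iterable of state strings):

    course_tasks = CourseTask.objects.filter(course_id=course_id).exclude(
        task_state__in=list(READY_STATES))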
...@@ -34,28 +38,27 @@ def get_running_course_tasks(course_id): ...@@ -34,28 +38,27 @@ def get_running_course_tasks(course_id):
def get_course_task_history(course_id, problem_url, student=None): def get_course_task_history(course_id, problem_url, student=None):
""" """
Returns a query of CourseTaskLog objects of historical tasks for a given course, Returns a query of CourseTask objects of historical tasks for a given course,
that match a particular problem and optionally a student. that match a particular problem and optionally a student.
""" """
_, task_key = _encode_problem_and_student_input(problem_url, student) _, task_key = _encode_problem_and_student_input(problem_url, student)
course_tasks = CourseTaskLog.objects.filter(course_id=course_id, task_key=task_key) course_tasks = CourseTask.objects.filter(course_id=course_id, task_key=task_key)
return course_tasks.order_by('-id') return course_tasks.order_by('-id')
def course_task_log_status(request, task_id=None): def course_task_status(request):
""" """
This returns the status of a course-related task as a JSON-serialized dict. View method that returns the status of a course-related task or tasks.
The task_id can be specified in one of three ways: Status is returned as a JSON-serialized dict, wrapped as the content of a HTTPResponse.
* explicitly as an argument to the method (by specifying in the url) The task_id can be specified to this view in one of three ways:
Returns a dict containing status information for the specified task_id
* by making a post request containing 'task_id' as a parameter with a single value * by making a request containing 'task_id' as a parameter with a single value
Returns a dict containing status information for the specified task_id Returns a dict containing status information for the specified task_id
* by making a post request containing 'task_ids' as a parameter, * by making a request containing 'task_ids' as a parameter,
with a list of task_id values. with a list of task_id values.
Returns a dict of dicts, with the task_id as key, and the corresponding Returns a dict of dicts, with the task_id as key, and the corresponding
dict containing status information for the specified task_id dict containing status information for the specified task_id
...@@ -64,15 +67,13 @@ def course_task_log_status(request, task_id=None): ...@@ -64,15 +67,13 @@ def course_task_log_status(request, task_id=None):
""" """
output = {} output = {}
if task_id is not None: if 'task_id' in request.REQUEST:
output = _get_course_task_log_status(task_id) task_id = request.REQUEST['task_id']
elif 'task_id' in request.POST: output = _get_course_task_status(task_id)
task_id = request.POST['task_id'] elif 'task_ids[]' in request.REQUEST:
output = _get_course_task_log_status(task_id) tasks = request.REQUEST.getlist('task_ids[]')
elif 'task_ids[]' in request.POST:
tasks = request.POST.getlist('task_ids[]')
for task_id in tasks: for task_id in tasks:
task_output = _get_course_task_log_status(task_id) task_output = _get_course_task_status(task_id)
if task_output is not None: if task_output is not None:
output[task_id] = task_output output[task_id] = task_output
...@@ -81,7 +82,8 @@ def course_task_log_status(request, task_id=None): ...@@ -81,7 +82,8 @@ def course_task_log_status(request, task_id=None):
def _task_is_running(course_id, task_type, task_key): def _task_is_running(course_id, task_type, task_key):
"""Checks if a particular task is already running""" """Checks if a particular task is already running"""
runningTasks = CourseTaskLog.objects.filter(course_id=course_id, task_type=task_type, task_key=task_key) runningTasks = CourseTask.objects.filter(course_id=course_id, task_type=task_type, task_key=task_key)
# exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
for state in READY_STATES: for state in READY_STATES:
runningTasks = runningTasks.exclude(task_state=state) runningTasks = runningTasks.exclude(task_state=state)
return len(runningTasks) > 0 return len(runningTasks) > 0
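READY_STATES comes from celery.states and contains the terminal states (SUCCESS, FAILURE, REVOKED); an equivalent single-exclusion sketch of the loop above:

    from celery.states import READY_STATES

    running_tasks = CourseTask.objects.filter(course_id=course_id,
                                              task_type=task_type,
                                              task_key=task_key)
    # one exclusion instead of a loop over READY_STATES:
    is_running = running_tasks.exclude(task_state__in=READY_STATES).exists()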
...@@ -92,7 +94,7 @@ def _reserve_task(course_id, task_type, task_key, task_input, requester): ...@@ -92,7 +94,7 @@ def _reserve_task(course_id, task_type, task_key, task_input, requester):
""" """
Creates a database entry to indicate that a task is in progress. Creates a database entry to indicate that a task is in progress.
An exception is thrown if the task is already in progress. Throws AlreadyRunningError if the task is already in progress.
Autocommit annotation makes sure the database entry is committed. Autocommit annotation makes sure the database entry is committed.
""" """
...@@ -108,23 +110,23 @@ def _reserve_task(course_id, task_type, task_key, task_input, requester): ...@@ -108,23 +110,23 @@ def _reserve_task(course_id, task_type, task_key, task_input, requester):
'task_state': 'QUEUING', 'task_state': 'QUEUING',
'requester': requester} 'requester': requester}
course_task_log = CourseTaskLog.objects.create(**tasklog_args) course_task = CourseTask.objects.create(**tasklog_args)
return course_task_log return course_task
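Callers are expected to catch AlreadyRunningError rather than treat it as fatal; a minimal sketch of the calling pattern (the HTTP handling shown is illustrative, not from this commit):

    from django.http import HttpResponse

    try:
        course_task = _reserve_task(course_id, 'rescore_problem', task_key,
                                    task_input, request.user)
    except AlreadyRunningError:
        # hypothetical handling: report the conflict back to the requester
        return HttpResponse("This task is already running.", status=409)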
@transaction.autocommit @transaction.autocommit
def _update_task(course_task_log, task_result): def _update_task(course_task, task_result):
""" """
Updates a database entry with information about the submitted task. Updates a database entry with information about the submitted task.
Autocommit annotation makes sure the database entry is committed. Autocommit annotation makes sure the database entry is committed.
""" """
# we at least update the entry with the task_id, and for EAGER mode, # we at least update the entry with the task_id, and for ALWAYS_EAGER mode,
# we update other status as well. (For non-EAGER modes, the entry # we update other status as well. (For non-ALWAYS_EAGER modes, the entry
# should not have changed except for setting PENDING state and the # should not have changed except for setting PENDING state and the
# addition of the task_id.) # addition of the task_id.)
_update_course_task_log(course_task_log, task_result) _update_course_task(course_task, task_result)
course_task_log.save() course_task.save()
def _get_xmodule_instance_args(request): def _get_xmodule_instance_args(request):
...@@ -132,7 +134,7 @@ def _get_xmodule_instance_args(request): ...@@ -132,7 +134,7 @@ def _get_xmodule_instance_args(request):
Calculate parameters needed for instantiating xmodule instances. Calculate parameters needed for instantiating xmodule instances.
The `request_info` will be passed to a tracking log function, to provide information The `request_info` will be passed to a tracking log function, to provide information
about the source of the task request. The `xqueue_callback_urul_prefix` is used to about the source of the task request. The `xqueue_callback_url_prefix` is used to
permit old-style xqueue callbacks directly to the appropriate module in the LMS. permit old-style xqueue callbacks directly to the appropriate module in the LMS.
""" """
request_info = {'username': request.user.username, request_info = {'username': request.user.username,
...@@ -147,48 +149,61 @@ def _get_xmodule_instance_args(request): ...@@ -147,48 +149,61 @@ def _get_xmodule_instance_args(request):
return xmodule_instance_args return xmodule_instance_args
def _update_course_task_log(course_task_log_entry, task_result): def _update_course_task(course_task, task_result):
""" """
Updates and possibly saves a CourseTaskLog entry based on a task Result. Updates and possibly saves a CourseTask entry based on a task Result.
Used when a task initially returns, as well as when updated status is Used when a task initially returns, as well as when updated status is
requested. requested.
Calculates json to store in task_progress field. The `course_task` that is passed in is updated in-place, but
is usually not saved. In general, tasks that have finished (either with
success or failure) should have their entries updated by the task itself,
so are not updated here. Tasks that are still running are not updated
while they run. So the one exception to the no-save rule is tasks that
are in a "revoked" state. This may mean that the task never had the
opportunity to update the CourseTask entry.
Calculates json to store in "task_output" field of the `course_task`,
as well as updating the task_state and task_id (which may not yet be set
if this is the first call after the task is submitted).
Returns a dict, with the following keys:
'message': status message reporting on progress, or providing exception message if failed.
'task_progress': dict containing progress information. This includes:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
'duration_ms': how long the task has (or had) been running.
'task_traceback': optional, returned if task failed and produced a traceback.
'succeeded': on complete tasks, indicates if the task outcome was successful:
did it achieve what it set out to do.
This is in contrast with a successful task_state, which indicates that the
task merely completed.
""" """
# Just pull values out of the result object once. If we check them later, # Pull values out of the result object as close to each other as possible.
# the state and result may have changed. # If we wait and check the values later, the values for the state and result
# are more likely to have changed. Pull the state out first, and then
# write the code assuming that the result may not exactly match the state.
task_id = task_result.task_id task_id = task_result.task_id
result_state = task_result.state result_state = task_result.state
returned_result = task_result.result returned_result = task_result.result
result_traceback = task_result.traceback result_traceback = task_result.traceback
# Assume we don't always update the CourseTaskLog entry if we don't have to: # Assume we don't always update the CourseTask entry if we don't have to:
entry_needs_saving = False entry_needs_saving = False
output = {} output = {}
if result_state == 'PROGRESS': if result_state == PROGRESS:
# construct a status message directly from the task result's result: # construct a status message directly from the task result's result:
if hasattr(task_result, 'result') and 'attempted' in returned_result:
fmt = "Attempted {attempted} of {total}, {action_name} {updated}"
message = fmt.format(attempted=returned_result['attempted'],
updated=returned_result['updated'],
total=returned_result['total'],
action_name=returned_result['action_name'])
output['message'] = message
log.info("task progress: %s", message)
else:
log.info("still making progress... ")
output['task_progress'] = returned_result
elif result_state == 'SUCCESS':
# save progress into the entry, even if it's not being saved here -- for EAGER,
# it needs to go back with the entry passed in. # it needs to go back with the entry passed in.
course_task_log_entry.task_output = json.dumps(returned_result) course_task.task_output = json.dumps(returned_result)
output['task_progress'] = returned_result output['task_progress'] = returned_result
log.info("task succeeded: %s", returned_result) log.info("background task (%s), succeeded: %s", task_id, returned_result)
elif result_state == 'FAILURE': elif result_state == FAILURE:
# on failure, the result's result contains the exception that caused the failure # on failure, the result's result contains the exception that caused the failure
exception = returned_result exception = returned_result
traceback = result_traceback if result_traceback is not None else '' traceback = result_traceback if result_traceback is not None else ''
...@@ -197,13 +212,15 @@ def _update_course_task_log(course_task_log_entry, task_result): ...@@ -197,13 +212,15 @@ def _update_course_task_log(course_task_log_entry, task_result):
log.warning("background task (%s) failed: %s %s", task_id, returned_result, traceback) log.warning("background task (%s) failed: %s %s", task_id, returned_result, traceback)
if result_traceback is not None: if result_traceback is not None:
output['task_traceback'] = result_traceback output['task_traceback'] = result_traceback
task_progress['traceback'] = result_traceback # truncate any traceback that goes into the CourseTask model:
# save progress into the entry, even if it's not being saved -- for EAGER, task_progress['traceback'] = result_traceback[:700]
# it needs to go back with the entry passed in. # save progress into the entry, even if it's not being saved:
course_task_log_entry.task_output = json.dumps(task_progress) # when celery is run in "ALWAYS_EAGER" mode, progress needs to go back
# with the entry passed in.
course_task.task_output = json.dumps(task_progress)
output['task_progress'] = task_progress output['task_progress'] = task_progress
elif result_state == 'REVOKED': elif result_state == REVOKED:
# on revocation, the result's result doesn't contain anything # on revocation, the result's result doesn't contain anything
# but we cannot rely on the worker thread to set this status, # but we cannot rely on the worker thread to set this status,
# so we set it here. # so we set it here.
...@@ -212,21 +229,24 @@ def _update_course_task_log(course_task_log_entry, task_result): ...@@ -212,21 +229,24 @@ def _update_course_task_log(course_task_log_entry, task_result):
output['message'] = message output['message'] = message
log.warning("background task (%s) revoked.", task_id) log.warning("background task (%s) revoked.", task_id)
task_progress = {'message': message} task_progress = {'message': message}
course_task_log_entry.task_output = json.dumps(task_progress) course_task.task_output = json.dumps(task_progress)
output['task_progress'] = task_progress output['task_progress'] = task_progress
# always update the entry if the state has changed: # Always update the local version of the entry if the state has changed.
if result_state != course_task_log_entry.task_state: # This is important for getting the task_id into the initial version
course_task_log_entry.task_state = result_state # of the course_task, and also for development environments
course_task_log_entry.task_id = task_id # when this code is executed when celery is run in "ALWAYS_EAGER" mode.
if result_state != course_task.task_state:
course_task.task_state = result_state
course_task.task_id = task_id
if entry_needs_saving: if entry_needs_saving:
course_task_log_entry.save() course_task.save()
return output return output
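For reference, on a failed task the dict returned by _update_course_task takes roughly this shape (all values illustrative):

    output = {
        'message': "TypeError: unsupported operand type(s)",
        'task_traceback': "Traceback (most recent call last): ...",
        'task_progress': {
            'exception': 'TypeError',
            'message': "TypeError: unsupported operand type(s)",
            'traceback': "Traceback (most recent call last): ...",  # truncated to 700 chars
        },
    }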
def _get_course_task_log_status(task_id): def _get_course_task_status(task_id):
""" """
Get the status for a given task_id. Get the status for a given task_id.
...@@ -248,56 +268,61 @@ def _get_course_task_log_status(task_id): ...@@ -248,56 +268,61 @@ def _get_course_task_log_status(task_id):
task merely completed. task merely completed.
If task doesn't exist, returns None. If task doesn't exist, returns None.
If the task has been REVOKED, the CourseTask entry will be updated.
""" """
# First check if the task_id is known # First check if the task_id is known
try: try:
course_task_log_entry = CourseTaskLog.objects.get(task_id=task_id) course_task = CourseTask.objects.get(task_id=task_id)
except CourseTaskLog.DoesNotExist: except CourseTask.DoesNotExist:
# TODO: log a message here log.warning("query for CourseTask status failed: task_id=(%s) not found", task_id)
return None return None
# define ajax return value:
status = {} status = {}
# if the task is not already known to be done, then we need to query # if the task is not already known to be done, then we need to query
# the underlying task's result object: # the underlying task's result object:
if course_task_log_entry.task_state not in READY_STATES: if course_task.task_state not in READY_STATES:
result = AsyncResult(task_id) result = AsyncResult(task_id)
status.update(_update_course_task_log(course_task_log_entry, result)) status.update(_update_course_task(course_task, result))
elif course_task_log_entry.task_output is not None: elif course_task.task_output is not None:
# task is already known to have finished, but report on its status: # task is already known to have finished, but report on its status:
status['task_progress'] = json.loads(course_task_log_entry.task_output) status['task_progress'] = json.loads(course_task.task_output)
# status basic information matching what's stored in CourseTaskLog: # status basic information matching what's stored in CourseTask:
status['task_id'] = course_task_log_entry.task_id status['task_id'] = course_task.task_id
status['task_state'] = course_task_log_entry.task_state status['task_state'] = course_task.task_state
status['in_progress'] = course_task_log_entry.task_state not in READY_STATES status['in_progress'] = course_task.task_state not in READY_STATES
if course_task_log_entry.task_state in READY_STATES: if course_task.task_state in READY_STATES:
succeeded, message = get_task_completion_message(course_task_log_entry) succeeded, message = get_task_completion_info(course_task)
status['message'] = message status['message'] = message
status['succeeded'] = succeeded status['succeeded'] = succeeded
return status return status
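Serialized by course_task_status() above, the payload for a completed task would look roughly like this, shown as the Python dict before JSON serialization (values illustrative):

    status = {
        'task_id': 'some-task-id',
        'task_state': 'SUCCESS',
        'in_progress': False,
        'task_progress': {'attempted': 3, 'updated': 3, 'total': 3,
                          'action_name': 'rescored', 'duration_ms': 1500},
        'message': "Problem successfully rescored for 3 students",
        'succeeded': True,
    }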
def get_task_completion_message(course_task_log_entry): def get_task_completion_info(course_task):
""" """
Construct progress message from progress information in CourseTaskLog entry. Construct progress message from progress information in CourseTask entry.
Returns (boolean, message string) duple. Returns a (boolean, message string) tuple, where the boolean indicates
whether the task completed without incident. (It is possible for a
task to attempt many sub-tasks, such as rescoring many students' problem
responses, and while the task runs to completion, some of the students'
responses could not be rescored.)
Used for providing messages to course_task_log_status(), as well as Used for providing messages to course_task_status(), as well as
external calls for providing course task submission history information. external calls for providing course task submission history information.
""" """
succeeded = False succeeded = False
if course_task_log_entry.task_output is None: if course_task.task_output is None:
log.warning("No task_output information found for course_task {0}".format(course_task_log_entry.task_id)) log.warning("No task_output information found for course_task {0}".format(course_task.task_id))
return (succeeded, "No status information available") return (succeeded, "No status information available")
task_output = json.loads(course_task_log_entry.task_output) task_output = json.loads(course_task.task_output)
if course_task_log_entry.task_state in ['FAILURE', 'REVOKED']: if course_task.task_state in [FAILURE, REVOKED]:
return (succeeded, task_output['message']) return (succeeded, task_output['message'])
action_name = task_output['action_name'] action_name = task_output['action_name']
...@@ -305,58 +330,50 @@ def get_task_completion_message(course_task_log_entry): ...@@ -305,58 +330,50 @@ def get_task_completion_message(course_task_log_entry):
num_updated = task_output['updated'] num_updated = task_output['updated']
num_total = task_output['total'] num_total = task_output['total']
if course_task_log_entry.task_input is None: if course_task.task_input is None:
log.warning("No task_input information found for course_task {0}".format(course_task_log_entry.task_id)) log.warning("No task_input information found for course_task {0}".format(course_task.task_id))
return (succeeded, "No status information available") return (succeeded, "No status information available")
task_input = json.loads(course_task_log_entry.task_input) task_input = json.loads(course_task.task_input)
problem_url = task_input.get('problem_url', None) problem_url = task_input.get('problem_url')
student = task_input.get('student', None) student = task_input.get('student')
if student is not None: if student is not None:
if num_attempted == 0: if num_attempted == 0:
msg = "Unable to find submission to be {action} for student '{student}'" msg_format = "Unable to find submission to be {action} for student '{student}'"
elif num_updated == 0: elif num_updated == 0:
msg = "Problem failed to be {action} for student '{student}'" msg_format = "Problem failed to be {action} for student '{student}'"
else: else:
succeeded = True succeeded = True
msg = "Problem successfully {action} for student '{student}'" msg_format = "Problem successfully {action} for student '{student}'"
elif num_attempted == 0: elif num_attempted == 0:
msg = "Unable to find any students with submissions to be {action}" msg_format = "Unable to find any students with submissions to be {action}"
elif num_updated == 0: elif num_updated == 0:
msg = "Problem failed to be {action} for any of {attempted} students" msg_format = "Problem failed to be {action} for any of {attempted} students"
elif num_updated == num_attempted: elif num_updated == num_attempted:
succeeded = True succeeded = True
msg = "Problem successfully {action} for {attempted} students" msg_format = "Problem successfully {action} for {attempted} students"
elif num_updated < num_attempted: else: # num_updated < num_attempted
msg = "Problem {action} for {updated} of {attempted} students" msg_format = "Problem {action} for {updated} of {attempted} students"
if student is not None and num_attempted != num_total: if student is not None and num_attempted != num_total:
msg += " (out of {total})" msg_format += " (out of {total})"
# Update status in task result object itself: # Update status in task result object itself:
message = msg.format(action=action_name, updated=num_updated, attempted=num_attempted, total=num_total, message = msg_format.format(action=action_name, updated=num_updated, attempted=num_attempted, total=num_total,
student=student, problem=problem_url) student=student, problem=problem_url)
return (succeeded, message) return (succeeded, message)
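A worked example of the formatting above, using illustrative counts from a whole-class rescore where two submissions could not be updated:

    # student is None, attempted=10, updated=8, so the else branch applies:
    msg_format = "Problem {action} for {updated} of {attempted} students"
    print msg_format.format(action='rescored', updated=8, attempted=10)
    # -> "Problem rescored for 8 of 10 students"; succeeded remains False.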
########### Add task-submission methods here:
def _check_arguments_for_rescoring(course_id, problem_url): def _check_arguments_for_rescoring(course_id, problem_url):
""" """
Do simple checks on the descriptor to confirm that it supports rescoring. Do simple checks on the descriptor to confirm that it supports rescoring.
Confirms first that the problem_url is defined (since that's currently typed Confirms first that the problem_url is defined (since that's currently typed
in). An ItemNotFoundException is raised if the corresponding module in). An ItemNotFoundException is raised if the corresponding module
descriptor doesn't exist. NotImplementedError is returned if the descriptor doesn't exist. NotImplementedError is raised if the
corresponding module doesn't support rescoring calls. corresponding module doesn't support rescoring calls.
""" """
descriptor = modulestore().get_instance(course_id, problem_url) descriptor = modulestore().get_instance(course_id, problem_url)
supports_rescore = False if not hasattr(descriptor, 'module_class') or not hasattr(descriptor.module_class, 'rescore_problem'):
if hasattr(descriptor, 'module_class'):
module_class = descriptor.module_class
if hasattr(module_class, 'rescore_problem'):
supports_rescore = True
if not supports_rescore:
msg = "Specified module does not support rescoring." msg = "Specified module does not support rescoring."
raise NotImplementedError(msg) raise NotImplementedError(msg)
...@@ -370,28 +387,41 @@ def _encode_problem_and_student_input(problem_url, student=None): ...@@ -370,28 +387,41 @@ def _encode_problem_and_student_input(problem_url, student=None):
""" """
if student is not None: if student is not None:
task_input = {'problem_url': problem_url, 'student': student.username} task_input = {'problem_url': problem_url, 'student': student.username}
task_key = "{student}_{problem}".format(student=student.id, problem=problem_url) task_key_stub = "{student}_{problem}".format(student=student.id, problem=problem_url)
else: else:
task_input = {'problem_url': problem_url} task_input = {'problem_url': problem_url}
task_key = "{student}_{problem}".format(student="", problem=problem_url) task_key_stub = "{student}_{problem}".format(student="", problem=problem_url)
# create the key value by using MD5 hash:
task_key = hashlib.md5(task_key_stub).hexdigest()
return task_input, task_key return task_input, task_key
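The md5 step gives task_key a fixed 32-character length regardless of how long the problem URL is; for example (illustrative URL):

    import hashlib

    stub = "{student}_{problem}".format(
        student="", problem='i4x://MITx/999/problem/Problem1')
    task_key = hashlib.md5(stub).hexdigest()  # 32 hex characters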
def _submit_task(request, task_type, task_class, course_id, task_input, task_key): def _submit_task(request, task_type, task_class, course_id, task_input, task_key):
""" """
Helper method to submit a task.
Reserves the requested task, based on the `course_id`, `task_type`, and `task_key`,
checking to see if the task is already running. The `task_input` is also passed so that
it can be stored in the resulting CourseTask entry. Arguments for the task are extracted
from the originating server `request`. Then the task is submitted to run
asynchronously, using the specified `task_class`. Finally the CourseTask entry is
updated in order to store the task_id.
`AlreadyRunningError` is raised if the task is already running.
""" """
# check to see if task is already running, and reserve it otherwise: # check to see if task is already running, and reserve it otherwise:
course_task_log = _reserve_task(course_id, task_type, task_key, task_input, request.user) course_task = _reserve_task(course_id, task_type, task_key, task_input, request.user)
# submit task: # submit task:
task_args = [course_task_log.id, course_id, task_input, _get_xmodule_instance_args(request)] task_args = [course_task.id, course_id, task_input, _get_xmodule_instance_args(request)]
task_result = task_class.apply_async(task_args) task_result = task_class.apply_async(task_args)
# Update info in table with the resulting task_id (and state). # Update info in table with the resulting task_id (and state).
_update_task(course_task_log, task_result) _update_task(course_task, task_result)
return course_task_log return course_task
def submit_rescore_problem_for_student(request, course_id, problem_url, student): def submit_rescore_problem_for_student(request, course_id, problem_url, student):
...@@ -402,8 +432,9 @@ def submit_rescore_problem_for_student(request, course_id, problem_url, student) ...@@ -402,8 +432,9 @@ def submit_rescore_problem_for_student(request, course_id, problem_url, student)
the `problem_url`, and the `student` as a User object. the `problem_url`, and the `student` as a User object.
The url must specify the location of the problem, using i4x-type notation. The url must specify the location of the problem, using i4x-type notation.
An exception is thrown if the problem doesn't exist, or if the particular ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
problem is already being rescored for this student. if the problem is already being rescored for this student, or NotImplementedError if
the problem doesn't support rescoring.
""" """
# check arguments: let exceptions return up to the caller. # check arguments: let exceptions return up to the caller.
_check_arguments_for_rescoring(course_id, problem_url) _check_arguments_for_rescoring(course_id, problem_url)
...@@ -423,8 +454,9 @@ def submit_rescore_problem_for_all_students(request, course_id, problem_url): ...@@ -423,8 +454,9 @@ def submit_rescore_problem_for_all_students(request, course_id, problem_url):
Parameters are the `course_id` and the `problem_url`. Parameters are the `course_id` and the `problem_url`.
The url must specify the location of the problem, using i4x-type notation. The url must specify the location of the problem, using i4x-type notation.
An exception is thrown if the problem doesn't exist, or if the particular ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
problem is already being rescored. if the problem is already being rescored, or NotImplementedError if the problem doesn't
support rescoring.
""" """
# check arguments: let exceptions return up to the caller. # check arguments: let exceptions return up to the caller.
_check_arguments_for_rescoring(course_id, problem_url) _check_arguments_for_rescoring(course_id, problem_url)
...@@ -445,8 +477,8 @@ def submit_reset_problem_attempts_for_all_students(request, course_id, problem_u ...@@ -445,8 +477,8 @@ def submit_reset_problem_attempts_for_all_students(request, course_id, problem_u
the `problem_url`. The url must specify the location of the problem, the `problem_url`. The url must specify the location of the problem,
using i4x-type notation. using i4x-type notation.
An exception is thrown if the problem doesn't exist, or if the particular ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
problem is already being reset. if the problem is already being reset.
""" """
# check arguments: make sure that the problem_url is defined # check arguments: make sure that the problem_url is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist, # (since that's currently typed in). If the corresponding module descriptor doesn't exist,
...@@ -468,8 +500,8 @@ def submit_delete_problem_state_for_all_students(request, course_id, problem_url ...@@ -468,8 +500,8 @@ def submit_delete_problem_state_for_all_students(request, course_id, problem_url
the `problem_url`. The url must specify the location of the problem, the `problem_url`. The url must specify the location of the problem,
using i4x-type notation. using i4x-type notation.
An exception is thrown if the problem doesn't exist, or if the particular ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
problem is already being deleted. if the particular problem is already being deleted.
""" """
# check arguments: make sure that the problem_url is defined # check arguments: make sure that the problem_url is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist, # (since that's currently typed in). If the corresponding module descriptor doesn't exist,
......
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json import json
from time import time from time import time
from sys import exc_info from sys import exc_info
from traceback import format_exc from traceback import format_exc
from django.contrib.auth.models import User
from django.db import transaction
from celery import task, current_task from celery import task, current_task
from celery.utils.log import get_task_logger from celery.utils.log import get_task_logger
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.db import transaction
from dogapi import dog_stats_api
from xmodule.modulestore.django import modulestore from xmodule.modulestore.django import modulestore
import mitxmako.middleware as middleware import mitxmako.middleware as middleware
from track.views import task_track from track.views import task_track
from courseware.models import StudentModule, CourseTaskLog from courseware.models import StudentModule, CourseTask
from courseware.model_data import ModelDataCache from courseware.model_data import ModelDataCache
from courseware.module_render import get_module_for_descriptor_internal from courseware.module_render import get_module_for_descriptor_internal
# define different loggers for use within tasks and on client side # define different loggers for use within tasks and on client side
task_log = get_task_logger(__name__) TASK_LOG = get_task_logger(__name__)
# define custom task state:
PROGRESS = 'PROGRESS'
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
class UpdateProblemModuleStateError(Exception): class UpdateProblemModuleStateError(Exception):
""" """
Error signaling a fatal condition while updating problem modules. Error signaling a fatal condition while updating problem modules.
Used when the current module cannot be processed and that no more Used when the current module cannot be processed and no more
modules should be attempted. modules should be attempted.
""" """
pass pass
def _update_problem_module_state_internal(course_id, module_state_key, student_identifier, update_fcn, action_name, filter_fcn, def _perform_module_state_update(course_id, module_state_key, student_identifier, update_fcn, action_name, filter_fcn,
xmodule_instance_args): xmodule_instance_args):
""" """
Performs generic update by visiting StudentModule instances with the update_fcn provided. Performs generic update by visiting StudentModule instances with the update_fcn provided.
...@@ -43,16 +59,28 @@ def _update_problem_module_state_internal(course_id, module_state_key, student_i ...@@ -43,16 +59,28 @@ def _update_problem_module_state_internal(course_id, module_state_key, student_i
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem. to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered. argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering. The `update_fcn` is called on each StudentModule that passes the resulting filtering.
It is passed three arguments: the module_descriptor for the module pointed to by the It is passed three arguments: the module_descriptor for the module pointed to by the
module_state_key, the particular StudentModule to update, and the xmodule_instance_args being module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
passed through. passed through. If the value returned by the update function evaluates to a boolean True,
the update is successful; False indicates the update on the particular student module failed.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
If no exceptions are raised, a dict containing the task's result is returned, with the following keys:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'start_ms': the time the task started, in milliseconds since the epoch.
'duration_ms': how long the task has (or had) been running.
Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the CourseTaskLog and the next level, so that it can set the failure modes and capture the error trace in the CourseTask and the
result object. result object.
""" """
# get start time for task: # get start time for task:
start_time = time() start_time = time()
...@@ -65,7 +93,7 @@ def _update_problem_module_state_internal(course_id, module_state_key, student_i ...@@ -65,7 +93,7 @@ def _update_problem_module_state_internal(course_id, module_state_key, student_i
# So we look for the result: the defining of the lookup paths # So we look for the result: the defining of the lookup paths
# for templates. # for templates.
if 'main' not in middleware.lookup: if 'main' not in middleware.lookup:
task_log.info("Initializing Mako middleware explicitly") TASK_LOG.debug("Initializing Mako middleware explicitly")
middleware.MakoMiddleware() middleware.MakoMiddleware()
# find the problem descriptor: # find the problem descriptor:
...@@ -104,32 +132,33 @@ def _update_problem_module_state_internal(course_id, module_state_key, student_i ...@@ -104,32 +132,33 @@ def _update_problem_module_state_internal(course_id, module_state_key, student_i
'attempted': num_attempted, 'attempted': num_attempted,
'updated': num_updated, 'updated': num_updated,
'total': num_total, 'total': num_total,
'start_ms': int(start_time * 1000),
'duration_ms': int((current_time - start_time) * 1000), 'duration_ms': int((current_time - start_time) * 1000),
} }
return progress return progress
task_progress = get_task_progress()
current_task.update_state(state=PROGRESS, meta=task_progress)
for module_to_update in modules_to_update: for module_to_update in modules_to_update:
num_attempted += 1 num_attempted += 1
# There is no try here: if there's an error, we let it throw, and the task will # There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace. # be marked as FAILED, with a stack trace.
if update_fcn(module_descriptor, module_to_update, xmodule_instance_args): with dog_stats_api.timer('courseware.tasks.module.{0}.time'.format(action_name)):
# If the update_fcn returns true, then it performed some kind of work. if update_fcn(module_descriptor, module_to_update, xmodule_instance_args):
num_updated += 1 # If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
num_updated += 1
# update task status: # update task status:
current_task.update_state(state='PROGRESS', meta=get_task_progress()) task_progress = get_task_progress()
current_task.update_state(state=PROGRESS, meta=task_progress)
task_progress = get_task_progress()
# update progress without updating the state
current_task.update_state(state='PROGRESS', meta=task_progress)
return task_progress return task_progress
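PROGRESS is the custom Celery state defined at the top of this file; a minimal, self-contained sketch of the reporting pattern used above (the task itself is hypothetical):

    from celery import task, current_task

    PROGRESS = 'PROGRESS'

    @task
    def example_progress_task(items):
        """Hypothetical task showing incremental state reporting."""
        done = 0
        for _item in items:
            done += 1
            # meta is what AsyncResult(task_id).result returns while running:
            current_task.update_state(state=PROGRESS,
                                      meta={'attempted': done, 'total': len(items)})
        return {'attempted': done, 'total': len(items)}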
@transaction.autocommit @transaction.autocommit
def _save_course_task_log_entry(entry): def _save_course_task(course_task):
"""Writes CourseTaskLog entry immediately.""" """Writes CourseTask course_task immediately, ensuring the transaction is committed."""
entry.save() course_task.save()
def _update_problem_module_state(entry_id, course_id, module_state_key, student_ident, update_fcn, action_name, filter_fcn, def _update_problem_module_state(entry_id, course_id, module_state_key, student_ident, update_fcn, action_name, filter_fcn,
...@@ -137,85 +166,92 @@ def _update_problem_module_state(entry_id, course_id, module_state_key, student_ ...@@ -137,85 +166,92 @@ def _update_problem_module_state(entry_id, course_id, module_state_key, student_
""" """
Performs generic update by visiting StudentModule instances with the update_fcn provided. Performs generic update by visiting StudentModule instances with the update_fcn provided.
See _update_problem_module_state_internal function for more details on arguments. The `entry_id` is the primary key for the CourseTask entry representing the task. This function
updates the entry on success and failure of the _perform_module_state_update function it
wraps. It sets the entry's task_state to the value Celery itself would set once
the task returns: FAILURE if an exception is encountered, and SUCCESS if it returns normally.
Other arguments are pass-throughs to _perform_module_state_update, and documented there.
If no exceptions are raised, a dict containing the task's result is returned, with the following keys:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Before returning, this is also JSON-serialized and stored in the task_output column of the CourseTask entry.
The `entry_id` is the primary key for the CourseTaskLog entry representing the task. This function If exceptions were raised internally, they are caught and recorded in the CourseTask entry.
updates the entry on SUCCESS and FAILURE of the _update_problem_module_state_internal function it This is also a JSON-serialized dict, stored in the task_output column, containing the following keys:
wraps.
'exception': type of exception object
'message': error message from exception object
'traceback': traceback information (truncated if necessary)
Once the exception is caught, it is raised again and allowed to pass up to the
task-running level, so that it can also set the failure modes and capture the error trace in the
result object that Celery creates.
Once exceptions are caught and recorded in the CourseTaskLog entry, they are allowed to pass up to the
task-running level, so that it can also set the failure modes and capture the error trace in the result object.
""" """
task_id = current_task.request.id task_id = current_task.request.id
fmt = 'Starting to update problem modules as task "{task_id}": course "{course_id}" problem "{state_key}": nothing {action} yet' fmt = 'Starting to update problem modules as task "{task_id}": course "{course_id}" problem "{state_key}": nothing {action} yet'
task_log.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, action=action_name)) TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, action=action_name))
# get the CourseTaskLog to be updated. If this fails, then let the exception return to Celery. # get the CourseTask to be updated. If this fails, then let the exception return to Celery.
# There's no point in catching it here. # There's no point in catching it here.
entry = CourseTaskLog.objects.get(pk=entry_id) entry = CourseTask.objects.get(pk=entry_id)
entry.task_id = task_id entry.task_id = task_id
_save_course_task_log_entry(entry) _save_course_task(entry)
# add task_id to xmodule_instance_args, so that it can be output with tracking info: # add task_id to xmodule_instance_args, so that it can be output with tracking info:
xmodule_instance_args['task_id'] = task_id if xmodule_instance_args is not None:
xmodule_instance_args['task_id'] = task_id
# now that we have an entry we can try to catch failures: # now that we have an entry we can try to catch failures:
task_progress = None task_progress = None
try: try:
task_progress = _update_problem_module_state_internal(course_id, module_state_key, student_ident, update_fcn, with dog_stats_api.timer('courseware.tasks.module.{0}.overall_time'.format(action_name)):
action_name, filter_fcn, xmodule_instance_args) task_progress = _perform_module_state_update(course_id, module_state_key, student_ident, update_fcn,
action_name, filter_fcn, xmodule_instance_args)
except Exception: except Exception:
# try to write out the failure to the entry before failing # try to write out the failure to the entry before failing
exception_type, exception, traceback = exc_info() exception_type, exception, traceback = exc_info()
traceback_string = format_exc() if traceback is not None else '' traceback_string = format_exc() if traceback is not None else ''
task_progress = {'exception': exception_type.__name__, 'message': str(exception.message)} task_progress = {'exception': exception_type.__name__, 'message': str(exception.message)}
task_log.warning("background task (%s) failed: %s %s", task_id, exception, traceback_string) TASK_LOG.warning("background task (%s) failed: %s %s", task_id, exception, traceback_string)
if traceback is not None: if traceback is not None:
task_progress['traceback'] = traceback_string task_progress['traceback'] = traceback_string[:700]
entry.task_output = json.dumps(task_progress) entry.task_output = json.dumps(task_progress)
entry.task_state = 'FAILURE' entry.task_state = FAILURE
_save_course_task_log_entry(entry) _save_course_task(entry)
raise raise
# if we get here, we assume we've succeeded, so update the CourseTaskLog entry in anticipation: # if we get here, we assume we've succeeded, so update the CourseTask entry in anticipation:
entry.task_output = json.dumps(task_progress) entry.task_output = json.dumps(task_progress)
entry.task_state = 'SUCCESS' entry.task_state = SUCCESS
_save_course_task_log_entry(entry) _save_course_task(entry)
# log and exit, returning task_progress info as task result: # log and exit, returning task_progress info as task result:
fmt = 'Finishing task "{task_id}": course "{course_id}" problem "{state_key}": final: {progress}' fmt = 'Finishing task "{task_id}": course "{course_id}" problem "{state_key}": final: {progress}'
task_log.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, progress=task_progress)) TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, progress=task_progress))
return task_progress return task_progress
def _update_problem_module_state_for_student(entry_id, course_id, problem_url, student_identifier, def _get_task_id_from_xmodule_args(xmodule_instance_args):
update_fcn, action_name, filter_fcn=None, xmodule_instance_args=None): """Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
""" return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID
Update the StudentModule for a given student. See _update_problem_module_state().
"""
msg = ''
success = False
# try to uniquely id student by email address or username
try:
if "@" in student_identifier:
student_to_update = User.objects.get(email=student_identifier)
elif student_identifier is not None:
student_to_update = User.objects.get(username=student_identifier)
return _update_problem_module_state(entry_id, course_id, problem_url, student_to_update, update_fcn,
action_name, filter_fcn, xmodule_instance_args)
except User.DoesNotExist:
msg = "Couldn't find student with that email or username."
return (success, msg)
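Behavior of the new helper, for reference:

    assert _get_task_id_from_xmodule_args({'task_id': '1234'}) == '1234'
    assert _get_task_id_from_xmodule_args({}) == UNKNOWN_TASK_ID
    assert _get_task_id_from_xmodule_args(None) == UNKNOWN_TASK_ID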
def _get_module_instance_for_task(course_id, student, module_descriptor, module_state_key, xmodule_instance_args=None, def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
grade_bucket_type=None): grade_bucket_type=None):
""" """
Fetches a StudentModule instance for a given course_id, student, and module_state_key. Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.
Includes providing information for creating a track function and an XQueue callback, `xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
but does not require passing in a Request object. These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
the need for a Request object when instantiating an xmodule instance.
""" """
# reconstitute the problem's corresponding XModule: # reconstitute the problem's corresponding XModule:
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor) model_data_cache = ModelDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
...@@ -223,20 +259,20 @@ def _get_module_instance_for_task(course_id, student, module_descriptor, module_ ...@@ -223,20 +259,20 @@ def _get_module_instance_for_task(course_id, student, module_descriptor, module_
# get request-related tracking information from args passthrough, and supplement with task-specific # get request-related tracking information from args passthrough, and supplement with task-specific
# information: # information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {} request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student.username, "task_id": xmodule_instance_args['task_id']} task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
def make_track_function(): def make_track_function():
''' '''
Make a tracking function that logs what happened. Make a tracking function that logs what happened.
For insertion into ModuleSystem, and use by CapaModule.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
''' '''
def f(event_type, event): return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')
return task_track(request_info, task_info, event_type, event, page='x_module_task')
return f
xqueue_callback_url_prefix = '' xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
if xmodule_instance_args is not None: if xmodule_instance_args is not None else ''
xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix')
return get_module_for_descriptor_internal(student, module_descriptor, model_data_cache, course_id, return get_module_for_descriptor_internal(student, module_descriptor, model_data_cache, course_id,
make_track_function(), xqueue_callback_url_prefix, make_track_function(), xqueue_callback_url_prefix,
...@@ -250,55 +286,73 @@ def _rescore_problem_module_state(module_descriptor, student_module, xmodule_ins ...@@ -250,55 +286,73 @@ def _rescore_problem_module_state(module_descriptor, student_module, xmodule_ins
performs rescoring on the student's problem submission. performs rescoring on the student's problem submission.
Throws exceptions if the rescoring is fatal and should be aborted if in a loop. Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
In particular, raises UpdateProblemModuleStateError if the module fails to instantiate,
or if the module doesn't support rescoring.
Returns True if problem was successfully rescored for the given student, and False
if problem encountered some kind of error in rescoring.
''' '''
# unpack the StudentModule: # unpack the StudentModule:
course_id = student_module.course_id course_id = student_module.course_id
student = student_module.student student = student_module.student
module_state_key = student_module.module_state_key module_state_key = student_module.module_state_key
instance = _get_module_instance_for_task(course_id, student, module_descriptor, module_state_key, xmodule_instance_args, grade_bucket_type='rescore') instance = _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args, grade_bucket_type='rescore')
if instance is None: if instance is None:
# Either permissions just changed, or someone is trying to be clever # Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to. # and load something they shouldn't have access to.
msg = "No module {loc} for student {student}--access denied?".format(loc=module_state_key, msg = "No module {loc} for student {student}--access denied?".format(loc=module_state_key,
student=student) student=student)
task_log.debug(msg) TASK_LOG.debug(msg)
raise UpdateProblemModuleStateError(msg) raise UpdateProblemModuleStateError(msg)
if not hasattr(instance, 'rescore_problem'): if not hasattr(instance, 'rescore_problem'):
# if the first instance doesn't have a rescore method, we should # This should also not happen, since it should already be checked in the caller,
# probably assume that no other instances will either. # but check here to be sure.
msg = "Specified problem does not support rescoring." msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg) raise UpdateProblemModuleStateError(msg)
result = instance.rescore_problem() result = instance.rescore_problem()
if 'success' not in result: if 'success' not in result:
# don't consider these fatal, but false means that the individual call didn't complete: # don't consider these fatal, but false means that the individual call didn't complete:
task_log.warning("error processing rescore call for problem {loc} and student {student}: " TASK_LOG.warning("error processing rescore call for course {course}, problem {loc} and student {student}: "
"unexpected response {msg}".format(msg=result, loc=module_state_key, student=student)) "unexpected response {msg}".format(msg=result, course=course_id, loc=module_state_key, student=student))
return False return False
elif result['success'] != 'correct' and result['success'] != 'incorrect': elif result['success'] not in ['correct', 'incorrect']:
task_log.warning("error processing rescore call for problem {loc} and student {student}: " TASK_LOG.warning("error processing rescore call for course {course}, problem {loc} and student {student}: "
"{msg}".format(msg=result['success'], loc=module_state_key, student=student)) "{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
return False return False
else: else:
task_log.debug("successfully processed rescore call for problem {loc} and student {student}: " TASK_LOG.debug("successfully processed rescore call for course {course}, problem {loc} and student {student}: "
"{msg}".format(msg=result['success'], loc=module_state_key, student=student)) "{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
return True return True
def filter_problem_module_state_for_done(modules_to_update): def _filter_module_state_for_done(modules_to_update):
"""Filter to apply for rescoring, to limit module instances to those marked as done""" """Filter to apply for rescoring, to limit module instances to those marked as done"""
return modules_to_update.filter(state__contains='"done": true') return modules_to_update.filter(state__contains='"done": true')
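The substring match relies on StudentModule.state holding JSON with json.dumps's default spacing; e.g. (illustrative state values):

    # '{"attempts": 2, "done": true}'   -> matched, kept for rescoring
    # '{"attempts": 0, "done": false}'  -> excluded
    done_modules = _filter_module_state_for_done(StudentModule.objects.all())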
@task @task
def rescore_problem(entry_id, course_id, task_input, xmodule_instance_args): def rescore_problem(entry_id, course_id, task_input, xmodule_instance_args):
"""Rescores problem `problem_url` in `course_id` for all students.""" """Rescores problem in `course_id`.
`entry_id` is the id value of the CourseTask entry that corresponds to this task.
`course_id` identifies the course.
`task_input` should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
'student': the identifier (username or email) of a particular user whose
problem submission should be rescored. If not specified, all problem
submissions will be rescored.
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
action_name = 'rescored' action_name = 'rescored'
update_fcn = _rescore_problem_module_state update_fcn = _rescore_problem_module_state
filter_fcn = filter_problem_module_state_for_done filter_fcn = _filter_module_state_for_done
problem_url = task_input.get('problem_url') problem_url = task_input.get('problem_url')
student_ident = None student_ident = None
if 'student' in task_input: if 'student' in task_input:
...@@ -309,11 +363,11 @@ def rescore_problem(entry_id, course_id, task_input, xmodule_instance_args): ...@@ -309,11 +363,11 @@ def rescore_problem(entry_id, course_id, task_input, xmodule_instance_args):
@transaction.autocommit @transaction.autocommit
def _reset_problem_attempts_module_state(module_descriptor, student_module, xmodule_instance_args=None): def _reset_problem_attempts_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
""" """
Resets problem attempts to zero for specified `student_module`. Resets problem attempts to zero for specified `student_module`.
Always returns true, if it doesn't throw an exception. Always returns true, indicating success, if it doesn't raise an exception due to database error.
""" """
problem_state = json.loads(student_module.state) problem_state = json.loads(student_module.state)
if 'attempts' in problem_state: if 'attempts' in problem_state:
...@@ -326,8 +380,7 @@ def _reset_problem_attempts_module_state(module_descriptor, student_module, xmod ...@@ -326,8 +380,7 @@ def _reset_problem_attempts_module_state(module_descriptor, student_module, xmod
# get request-related tracking information from args passthrough, # get request-related tracking information from args passthrough,
# and supplement with task-specific information: # and supplement with task-specific information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {} request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_id = xmodule_instance_args['task_id'] if xmodule_instance_args is not None else "unknown-task_id" task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
task_info = {"student": student_module.student.username, "task_id": task_id}
event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0} event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
task_track(request_info, task_info, 'problem_reset_attempts', event_info, page='x_module_task') task_track(request_info, task_info, 'problem_reset_attempts', event_info, page='x_module_task')
...@@ -337,40 +390,57 @@ def _reset_problem_attempts_module_state(module_descriptor, student_module, xmod ...@@ -337,40 +390,57 @@ def _reset_problem_attempts_module_state(module_descriptor, student_module, xmod
@task @task
def reset_problem_attempts(entry_id, course_id, task_input, xmodule_instance_args): def reset_problem_attempts(entry_id, course_id, task_input, xmodule_instance_args):
"""Resets problem attempts to zero for `problem_url` in `course_id` for all students.""" """Resets problem attempts to zero for `problem_url` in `course_id` for all students.
`entry_id` is the id value of the CourseTask entry that corresponds to this task.
`course_id` identifies the course.
`task_input` should be a dict with the following entries:
'problem_url': the full URL to the problem whose attempts are to be reset. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
action_name = 'reset' action_name = 'reset'
update_fcn = _reset_problem_attempts_module_state update_fcn = _reset_problem_attempts_module_state
problem_url = task_input.get('problem_url') problem_url = task_input.get('problem_url')
student_ident = None return _update_problem_module_state(entry_id, course_id, problem_url, None,
if 'student' in task_input:
student_ident = task_input['student']
return _update_problem_module_state(entry_id, course_id, problem_url, student_ident,
update_fcn, action_name, filter_fcn=None, update_fcn, action_name, filter_fcn=None,
xmodule_instance_args=xmodule_instance_args) xmodule_instance_args=xmodule_instance_args)
@transaction.autocommit @transaction.autocommit
def _delete_problem_module_state(module_descriptor, student_module, xmodule_instance_args=None): def _delete_problem_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
"""Delete the StudentModule entry.""" """
Delete the StudentModule entry.
Always returns true, indicating success, if it doesn't raise an exception due to database error.
"""
student_module.delete() student_module.delete()
# get request-related tracking information from args passthrough, # get request-related tracking information from args passthrough,
# and supplement with task-specific information: # and supplement with task-specific information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {} request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_id = xmodule_instance_args['task_id'] if xmodule_instance_args is not None else "unknown-task_id" task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
task_info = {"student": student_module.student.username, "task_id": task_id}
task_track(request_info, task_info, 'problem_delete_state', {}, page='x_module_task') task_track(request_info, task_info, 'problem_delete_state', {}, page='x_module_task')
return True return True
@task @task
def delete_problem_state(entry_id, course_id, task_input, xmodule_instance_args): def delete_problem_state(entry_id, course_id, task_input, xmodule_instance_args):
"""Deletes problem state entirely for `problem_url` in `course_id` for all students.""" """Deletes problem state entirely for `problem_url` in `course_id` for all students.
`entry_id` is the id value of the CourseTask entry that corresponds to this task.
`course_id` identifies the course.
`task_input` should be a dict with the following entries:
'problem_url': the full URL of the problem whose state is to be deleted. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
action_name = 'deleted' action_name = 'deleted'
update_fcn = _delete_problem_module_state update_fcn = _delete_problem_module_state
problem_url = task_input.get('problem_url') problem_url = task_input.get('problem_url')
student_ident = None return _update_problem_module_state(entry_id, course_id, problem_url, None,
if 'student' in task_input:
student_ident = task_input['student']
return _update_problem_module_state(entry_id, course_id, problem_url, student_ident,
update_fcn, action_name, filter_fcn=None, update_fcn, action_name, filter_fcn=None,
xmodule_instance_args=xmodule_instance_args) xmodule_instance_args=xmodule_instance_args)
...@@ -10,8 +10,8 @@ from student.tests.factories import CourseEnrollmentAllowedFactory as StudentCou ...@@ -10,8 +10,8 @@ from student.tests.factories import CourseEnrollmentAllowedFactory as StudentCou
from student.tests.factories import RegistrationFactory as StudentRegistrationFactory from student.tests.factories import RegistrationFactory as StudentRegistrationFactory
from courseware.models import StudentModule, XModuleContentField, XModuleSettingsField from courseware.models import StudentModule, XModuleContentField, XModuleSettingsField
from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField
from courseware.models import CourseTaskLog from courseware.models import CourseTask
from celery.states import PENDING
from xmodule.modulestore import Location from xmodule.modulestore import Location
from pytz import UTC from pytz import UTC
...@@ -88,14 +88,14 @@ class StudentInfoFactory(DjangoModelFactory): ...@@ -88,14 +88,14 @@ class StudentInfoFactory(DjangoModelFactory):
student = SubFactory(UserFactory) student = SubFactory(UserFactory)
class CourseTaskLogFactory(DjangoModelFactory): class CourseTaskFactory(DjangoModelFactory):
FACTORY_FOR = CourseTaskLog FACTORY_FOR = CourseTask
task_type = 'rescore_problem' task_type = 'rescore_problem'
course_id = "MITx/999/Robot_Super_Course" course_id = "MITx/999/Robot_Super_Course"
task_input = json.dumps({}) task_input = json.dumps({})
task_key = None task_key = None
task_id = None task_id = None
task_state = "QUEUED" task_state = PENDING
task_output = None task_output = None
requester = SubFactory(UserFactory) requester = SubFactory(UserFactory)
...@@ -3,6 +3,8 @@ Test for LMS courseware background task queue management ...@@ -3,6 +3,8 @@ Test for LMS courseware background task queue management
""" """
import logging import logging
import json import json
from celery.states import SUCCESS, FAILURE, REVOKED
from mock import Mock, patch from mock import Mock, patch
from uuid import uuid4 from uuid import uuid4
...@@ -11,9 +13,11 @@ from django.test.testcases import TestCase ...@@ -11,9 +13,11 @@ from django.test.testcases import TestCase
from xmodule.modulestore.exceptions import ItemNotFoundError from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware.tests.factories import UserFactory, CourseTaskLogFactory from courseware.tests.factories import UserFactory, CourseTaskFactory
from courseware.task_submit import (get_running_course_tasks, from courseware.tasks import PROGRESS
course_task_log_status, from courseware.task_submit import (QUEUING,
get_running_course_tasks,
course_task_status,
_encode_problem_and_student_input, _encode_problem_and_student_input,
AlreadyRunningError, AlreadyRunningError,
submit_rescore_problem_for_all_students, submit_rescore_problem_for_all_students,
...@@ -22,62 +26,60 @@ from courseware.task_submit import (get_running_course_tasks, ...@@ -22,62 +26,60 @@ from courseware.task_submit import (get_running_course_tasks,
submit_delete_problem_state_for_all_students) submit_delete_problem_state_for_all_students)
log = logging.getLogger("mitx." + __name__) log = logging.getLogger(__name__)
TEST_COURSE_ID = 'edx/1.23x/test_course'
TEST_FAILURE_MESSAGE = 'task failed horribly' TEST_FAILURE_MESSAGE = 'task failed horribly'
class TaskQueueTestCase(TestCase): class TaskSubmitTestCase(TestCase):
""" """
Check that background tasks are properly queued and report status. Check that background tasks are properly queued and report status.
""" """
student = None
instructor = None
problem_url = None
def setUp(self): def setUp(self):
self.student = UserFactory.create(username="student", email="student@edx.org") self.student = UserFactory.create(username="student", email="student@edx.org")
self.instructor = UserFactory.create(username="instructor", email="student@edx.org") self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
self.problem_url = TaskQueueTestCase.problem_location("test_urlname") self.problem_url = TaskSubmitTestCase.problem_location("test_urlname")
@staticmethod @staticmethod
def problem_location(problem_url_name): def problem_location(problem_url_name):
""" """
Create an internal location for a test problem. Create an internal location for a test problem.
""" """
if "i4x:" in problem_url_name: return "i4x://{org}/{number}/problem/{problem_url_name}".format(org='edx',
return problem_url_name number='1.23x',
else: problem_url_name=problem_url_name)
return "i4x://{org}/{number}/problem/{problem_url_name}".format(org='edx',
number='1.23x', def _create_entry(self, task_state=QUEUING, task_output=None, student=None):
problem_url_name=problem_url_name) """Creates a CourseTask entry for testing."""
def _create_entry(self, task_state="QUEUED", task_output=None, student=None):
task_id = str(uuid4()) task_id = str(uuid4())
progress_json = json.dumps(task_output) progress_json = json.dumps(task_output)
task_input, task_key = _encode_problem_and_student_input(self.problem_url, student) task_input, task_key = _encode_problem_and_student_input(self.problem_url, student)
course_task_log = CourseTaskLogFactory.create(requester=self.instructor, course_task = CourseTaskFactory.create(course_id=TEST_COURSE_ID,
task_input=json.dumps(task_input), requester=self.instructor,
task_key=task_key, task_input=json.dumps(task_input),
task_id=task_id, task_key=task_key,
task_state=task_state, task_id=task_id,
task_output=progress_json) task_state=task_state,
return course_task_log task_output=progress_json)
return course_task
def _create_failure_entry(self): def _create_failure_entry(self):
"""Creates a CourseTask entry representing a failed task."""
# view task entry for task failure # view task entry for task failure
progress = {'message': TEST_FAILURE_MESSAGE, progress = {'message': TEST_FAILURE_MESSAGE,
'exception': 'RandomCauseError', 'exception': 'RandomCauseError',
} }
return self._create_entry(task_state="FAILURE", task_output=progress) return self._create_entry(task_state=FAILURE, task_output=progress)
def _create_success_entry(self, student=None): def _create_success_entry(self, student=None):
return self._create_progress_entry(student=None, task_state="SUCCESS") """Creates a CourseTask entry representing a successful task."""
return self._create_progress_entry(student, task_state=SUCCESS)
def _create_progress_entry(self, student=None, task_state="PROGRESS"): def _create_progress_entry(self, student=None, task_state=PROGRESS):
# view task entry for task failure """Creates a CourseTask entry representing a task in progress."""
progress = {'attempted': 3, progress = {'attempted': 3,
'updated': 2, 'updated': 2,
'total': 10, 'total': 10,
...@@ -88,141 +90,138 @@ class TaskQueueTestCase(TestCase): ...@@ -88,141 +90,138 @@ class TaskQueueTestCase(TestCase):
def test_fetch_running_tasks(self): def test_fetch_running_tasks(self):
# when fetching running tasks, we get all running tasks, and only running tasks # when fetching running tasks, we get all running tasks, and only running tasks
failure_task_ids = [(self._create_failure_entry()).task_id for _ in range(1, 4)] for _ in range(1, 5):
entry = self._create_failure_entry() self._create_failure_entry()
failure_task_ids.append(entry.task_id) self._create_success_entry()
course_id = entry.course_id # get course_id used by the factory progress_task_ids = [self._create_progress_entry().task_id for _ in range(1, 5)]
success_task_ids = [(self._create_success_entry()).task_id for _ in range(1, 5)] task_ids = [course_task.task_id for course_task in get_running_course_tasks(TEST_COURSE_ID)]
progress_task_ids = [(self._create_progress_entry()).task_id for _ in range(1, 5)] self.assertEquals(set(task_ids), set(progress_task_ids))
task_ids = [course_task_log.task_id for course_task_log in get_running_course_tasks(course_id)]
self.assertEquals(len(task_ids), len(progress_task_ids)) def _get_course_task_status(self, task_id):
for task_id in task_ids: request = Mock()
self.assertTrue(task_id in progress_task_ids) request.REQUEST = {'task_id': task_id}
self.assertFalse(task_id in success_task_ids) return course_task_status(request)
self.assertFalse(task_id in failure_task_ids)
def test_course_task_status(self):
def test_course_task_log_status_by_post(self): course_task = self._create_failure_entry()
# fetch status for existing tasks: by arg is tested elsewhere, task_id = course_task.task_id
# so test by POST arg
course_task_log = self._create_failure_entry()
task_id = course_task_log.task_id
request = Mock() request = Mock()
request.POST = {} request.REQUEST = {'task_id': task_id}
request.POST['task_id'] = task_id response = course_task_status(request)
response = course_task_log_status(request)
output = json.loads(response.content) output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id) self.assertEquals(output['task_id'], task_id)
def test_course_task_log_status_list_by_post(self): def test_course_task_status_list(self):
# Fetch status for existing tasks: by arg is tested elsewhere, # Fetch status for existing tasks by arg list, as if called from ajax.
# so test here by POST arg list, as if called from ajax.
# Note that ajax does something funny with the marshalling of # Note that ajax does something funny with the marshalling of
# list data, so the key value has "[]" appended to it. # list data, so the key value has "[]" appended to it.
task_ids = [(self._create_failure_entry()).task_id for _ in range(1, 5)] task_ids = [(self._create_failure_entry()).task_id for _ in range(1, 5)]
request = Mock() request = Mock()
request.POST = MultiValueDict({'task_ids[]': task_ids}) request.REQUEST = MultiValueDict({'task_ids[]': task_ids})
response = course_task_log_status(request) response = course_task_status(request)
output = json.loads(response.content) output = json.loads(response.content)
self.assertEquals(len(output), len(task_ids))
for task_id in task_ids: for task_id in task_ids:
self.assertEquals(output[task_id]['task_id'], task_id) self.assertEquals(output[task_id]['task_id'], task_id)
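For context, Django's MultiValueDict models the repeated keys that jQuery's array serialization produces; a minimal sketch of the behavior the test relies on:

    from django.utils.datastructures import MultiValueDict

    params = MultiValueDict({'task_ids[]': ['id-1', 'id-2']})
    params.getlist('task_ids[]')  # ['id-1', 'id-2']
    params['task_ids[]']          # 'id-2': plain indexing returns the last value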
def test_initial_failure(self): def test_get_status_from_failure(self):
course_task_log = self._create_failure_entry() course_task = self._create_failure_entry()
task_id = course_task_log.task_id task_id = course_task.task_id
response = course_task_log_status(Mock(), task_id=task_id) response = self._get_course_task_status(task_id)
output = json.loads(response.content) output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id) self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], "FAILURE") self.assertEquals(output['task_state'], FAILURE)
self.assertFalse(output['in_progress']) self.assertFalse(output['in_progress'])
self.assertEquals(output['message'], TEST_FAILURE_MESSAGE) self.assertEquals(output['message'], TEST_FAILURE_MESSAGE)
def test_initial_success(self): def test_get_status_from_success(self):
course_task_log = self._create_success_entry() course_task = self._create_success_entry()
task_id = course_task_log.task_id task_id = course_task.task_id
response = course_task_log_status(Mock(), task_id=task_id) response = self._get_course_task_status(task_id)
output = json.loads(response.content) output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id) self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], "SUCCESS") self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress']) self.assertFalse(output['in_progress'])
def test_update_progress_to_progress(self): def test_update_progress_to_progress(self):
# view task entry for task in progress # view task entry for task in progress
course_task_log = self._create_progress_entry() course_task = self._create_progress_entry()
task_id = course_task_log.task_id task_id = course_task.task_id
mock_result = Mock() mock_result = Mock()
mock_result.task_id = task_id mock_result.task_id = task_id
mock_result.state = "PROGRESS" mock_result.state = PROGRESS
mock_result.result = {'attempted': 5, mock_result.result = {'attempted': 5,
'updated': 4, 'updated': 4,
'total': 10, 'total': 10,
'action_name': 'rescored'} 'action_name': 'rescored'}
with patch('celery.result.AsyncResult.__new__') as mock_result_ctor: with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
mock_result_ctor.return_value = mock_result mock_result_ctor.return_value = mock_result
response = course_task_log_status(Mock(), task_id=task_id) response = self._get_course_task_status(task_id)
output = json.loads(response.content) output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id) self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], "PROGRESS") self.assertEquals(output['task_state'], PROGRESS)
self.assertTrue(output['in_progress']) self.assertTrue(output['in_progress'])
# self.assertEquals(output['message'], ) # self.assertEquals(output['message'], )
def test_update_progress_to_failure(self): def test_update_progress_to_failure(self):
# view task entry for task in progress that later fails # view task entry for task in progress that later fails
course_task_log = self._create_progress_entry() course_task = self._create_progress_entry()
task_id = course_task_log.task_id task_id = course_task.task_id
mock_result = Mock() mock_result = Mock()
mock_result.task_id = task_id mock_result.task_id = task_id
mock_result.state = "FAILURE" mock_result.state = FAILURE
mock_result.result = NotImplementedError("This task later failed.") mock_result.result = NotImplementedError("This task later failed.")
mock_result.traceback = "random traceback" mock_result.traceback = "random traceback"
with patch('celery.result.AsyncResult.__new__') as mock_result_ctor: with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
mock_result_ctor.return_value = mock_result mock_result_ctor.return_value = mock_result
response = course_task_log_status(Mock(), task_id=task_id) response = self._get_course_task_status(task_id)
output = json.loads(response.content) output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id) self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], "FAILURE") self.assertEquals(output['task_state'], FAILURE)
self.assertFalse(output['in_progress']) self.assertFalse(output['in_progress'])
self.assertEquals(output['message'], "This task later failed.") self.assertEquals(output['message'], "This task later failed.")
def test_update_progress_to_revoked(self): def test_update_progress_to_revoked(self):
# view task entry for task in progress that is later revoked # view task entry for task in progress that is later revoked
course_task_log = self._create_progress_entry() course_task = self._create_progress_entry()
task_id = course_task_log.task_id task_id = course_task.task_id
mock_result = Mock() mock_result = Mock()
mock_result.task_id = task_id mock_result.task_id = task_id
mock_result.state = "REVOKED" mock_result.state = REVOKED
with patch('celery.result.AsyncResult.__new__') as mock_result_ctor: with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
mock_result_ctor.return_value = mock_result mock_result_ctor.return_value = mock_result
response = course_task_log_status(Mock(), task_id=task_id) response = self._get_course_task_status(task_id)
output = json.loads(response.content) output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id) self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], "REVOKED") self.assertEquals(output['task_state'], REVOKED)
self.assertFalse(output['in_progress']) self.assertFalse(output['in_progress'])
self.assertEquals(output['message'], "Task revoked before running") self.assertEquals(output['message'], "Task revoked before running")
def _get_output_for_task_success(self, attempted, updated, total, student=None): def _get_output_for_task_success(self, attempted, updated, total, student=None):
"""returns the task_id and the result returned by course_task_status()."""
# view task entry for task in progress # view task entry for task in progress
course_task_log = self._create_progress_entry(student) course_task = self._create_progress_entry(student)
task_id = course_task_log.task_id task_id = course_task.task_id
mock_result = Mock() mock_result = Mock()
mock_result.task_id = task_id mock_result.task_id = task_id
mock_result.state = "SUCCESS" mock_result.state = SUCCESS
mock_result.result = {'attempted': attempted, mock_result.result = {'attempted': attempted,
'updated': updated, 'updated': updated,
'total': total, 'total': total,
'action_name': 'rescored'} 'action_name': 'rescored'}
with patch('celery.result.AsyncResult.__new__') as mock_result_ctor: with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
mock_result_ctor.return_value = mock_result mock_result_ctor.return_value = mock_result
response = course_task_log_status(Mock(), task_id=task_id) response = self._get_course_task_status(task_id)
output = json.loads(response.content) output = json.loads(response.content)
return task_id, output return task_id, output
def test_update_progress_to_success(self): def test_update_progress_to_success(self):
task_id, output = self._get_output_for_task_success(10, 8, 10) task_id, output = self._get_output_for_task_success(10, 8, 10)
self.assertEquals(output['task_id'], task_id) self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], "SUCCESS") self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress']) self.assertFalse(output['in_progress'])
def test_success_messages(self): def test_success_messages(self):
_, output = self._get_output_for_task_success(0, 0, 10) _, output = self._get_output_for_task_success(0, 0, 10)
self.assertTrue("Unable to find any students with submissions to be rescored" in output['message']) self.assertTrue("Unable to find any students with submissions to be rescored" in output['message'])
self.assertFalse(output['succeeded']) self.assertFalse(output['succeeded'])
...@@ -269,9 +268,9 @@ class TaskQueueTestCase(TestCase): ...@@ -269,9 +268,9 @@ class TaskQueueTestCase(TestCase):
def test_submit_when_running(self): def test_submit_when_running(self):
# get exception when trying to submit a task that is already running # get exception when trying to submit a task that is already running
course_task_log = self._create_progress_entry() course_task = self._create_progress_entry()
problem_url = json.loads(course_task_log.task_input).get('problem_url') problem_url = json.loads(course_task.task_input).get('problem_url')
course_id = course_task_log.course_id course_id = course_task.course_id
# requester doesn't have to be the same when determining if a task is already running # requester doesn't have to be the same when determining if a task is already running
request = Mock() request = Mock()
request.user = self.student request.user = self.student
......
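The `_encode_problem_and_student_input` helper imported by these tests lives in courseware.task_submit and is not shown in this commit; a hedged sketch of the behavior the tests assume (the real key format is not part of this diff):

    def _encode_problem_and_student_input(problem_url, student=None):
        """Assumed behavior: build (task_input, task_key) for a problem and optional student."""
        task_input = {'problem_url': problem_url}
        if student is not None:
            task_input['student'] = student.username
        # Any stable encoding of the pair would serve for duplicate detection.
        task_key = "{0}|{1}".format(problem_url, student.username if student else '')
        return task_input, task_key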
...@@ -5,7 +5,9 @@ import logging ...@@ -5,7 +5,9 @@ import logging
import json import json
from mock import Mock, patch from mock import Mock, patch
import textwrap import textwrap
from uuid import uuid4
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User from django.contrib.auth.models import User
from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse
from django.test.utils import override_settings from django.test.utils import override_settings
...@@ -21,14 +23,15 @@ from student.tests.factories import CourseEnrollmentFactory, UserFactory, AdminF ...@@ -21,14 +23,15 @@ from student.tests.factories import CourseEnrollmentFactory, UserFactory, AdminF
from courseware.model_data import StudentModule from courseware.model_data import StudentModule
from courseware.task_submit import (submit_rescore_problem_for_all_students, from courseware.task_submit import (submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student, submit_rescore_problem_for_student,
course_task_log_status, course_task_status,
submit_reset_problem_attempts_for_all_students, submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students) submit_delete_problem_state_for_all_students)
from courseware.tests.tests import LoginEnrollmentTestCase, TEST_DATA_MONGO_MODULESTORE from courseware.tests.tests import LoginEnrollmentTestCase, TEST_DATA_MONGO_MODULESTORE
from courseware.tests.factories import CourseTaskFactory
log = logging.getLogger("mitx." + __name__) log = logging.getLogger(__name__)
TEST_COURSE_ORG = 'edx' TEST_COURSE_ORG = 'edx'
...@@ -66,13 +69,16 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase): ...@@ -66,13 +69,16 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
@staticmethod @staticmethod
def get_user_email(username): def get_user_email(username):
"""Generate email address based on username"""
return '{0}@test.com'.format(username) return '{0}@test.com'.format(username)
def login_username(self, username): def login_username(self, username):
"""Login the user, given the `username`."""
self.login(TestRescoringBase.get_user_email(username), "test") self.login(TestRescoringBase.get_user_email(username), "test")
self.current_user = username self.current_user = username
def _create_user(self, username, is_staff=False): def _create_user(self, username, is_staff=False):
"""Creates a user and enrolls them in the test course."""
email = TestRescoringBase.get_user_email(username) email = TestRescoringBase.get_user_email(username)
if (is_staff): if (is_staff):
AdminFactory.create(username=username, email=email) AdminFactory.create(username=username, email=email)
...@@ -83,9 +89,11 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase): ...@@ -83,9 +89,11 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
return thisuser return thisuser
def create_instructor(self, username): def create_instructor(self, username):
"""Creates an instructor for the test course."""
return self._create_user(username, is_staff=True) return self._create_user(username, is_staff=True)
def create_student(self, username): def create_student(self, username):
"""Creates a student for the test course."""
return self._create_user(username, is_staff=False) return self._create_user(username, is_staff=False)
@staticmethod @staticmethod
...@@ -147,6 +155,7 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase): ...@@ -147,6 +155,7 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
Assumes the input list of responses has two values. Assumes the input list of responses has two values.
""" """
def get_input_id(response_id): def get_input_id(response_id):
"""Creates input id using information about the test course and the current problem."""
return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(), return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(),
TEST_COURSE_NUMBER.replace('.', '_'), TEST_COURSE_NUMBER.replace('.', '_'),
problem_url_name, response_id) problem_url_name, response_id)
...@@ -176,25 +185,38 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase): ...@@ -176,25 +185,38 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
request.is_secure = Mock(return_value=False) request.is_secure = Mock(return_value=False)
return request return request
def rescore_all_student_answers(self, instructor, problem_url_name): def submit_rescore_all_student_answers(self, instructor, problem_url_name):
"""Submits the current problem for rescoring""" """Submits the particular problem for rescoring"""
return submit_rescore_problem_for_all_students(self.create_task_request(instructor), self.course.id, return submit_rescore_problem_for_all_students(self.create_task_request(instructor), self.course.id,
TestRescoringBase.problem_location(problem_url_name)) TestRescoringBase.problem_location(problem_url_name))
def rescore_one_student_answer(self, instructor, problem_url_name, student): def submit_rescore_one_student_answer(self, instructor, problem_url_name, student):
"""Submits the current problem for rescoring for a particular student""" """Submits the particular problem for rescoring for a particular student"""
return submit_rescore_problem_for_student(self.create_task_request(instructor), self.course.id, return submit_rescore_problem_for_student(self.create_task_request(instructor), self.course.id,
TestRescoringBase.problem_location(problem_url_name), TestRescoringBase.problem_location(problem_url_name),
student) student)
def show_correct_answer(self, problem_url_name): def _create_course_task(self, task_state="QUEUED", task_input=None, student=None):
modx_url = reverse('modx_dispatch', """Creates a CourseTask entry for testing."""
kwargs={'course_id': self.course.id, task_id = str(uuid4())
'location': TestRescoringBase.problem_location(problem_url_name), task_key = "dummy value"
'dispatch': 'problem_show', }) course_task = CourseTaskFactory.create(requester=self.instructor,
return self.client.post(modx_url, {}) task_input=json.dumps(task_input),
task_key=task_key,
task_id=task_id,
task_state=task_state)
return course_task
def rescore_all_student_answers(self, instructor, problem_url_name):
"""Runs the task to rescore the current problem"""
# TODO: fix this...
# task_input = {'problem_url': TestRescoringBase.problem_location(problem_url_name)}
# rescore_problem(entry_id, self.course_id, task_input, xmodule_instance_args)
return submit_rescore_problem_for_all_students(self.create_task_request(instructor), self.course.id,
TestRescoringBase.problem_location(problem_url_name))
def get_student_module(self, username, descriptor): def get_student_module(self, username, descriptor):
"""Get StudentModule object for test course, given the `username` and the problem's `descriptor`."""
return StudentModule.objects.get(course_id=self.course.id, return StudentModule.objects.get(course_id=self.course.id,
student=User.objects.get(username=username), student=User.objects.get(username=username),
module_type=descriptor.location.category, module_type=descriptor.location.category,
...@@ -202,6 +224,13 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase): ...@@ -202,6 +224,13 @@ class TestRescoringBase(LoginEnrollmentTestCase, ModuleStoreTestCase):
) )
def check_state(self, username, descriptor, expected_score, expected_max_score, expected_attempts): def check_state(self, username, descriptor, expected_score, expected_max_score, expected_attempts):
"""
Check that the StudentModule state contains the expected values.
The student module is found for the test course, given the `username` and problem `descriptor`.
Values checked include the number of attempts, the score, and the max score for a problem.
"""
module = self.get_student_module(username, descriptor) module = self.get_student_module(username, descriptor)
self.assertEqual(module.grade, expected_score, "Scores were not equal") self.assertEqual(module.grade, expected_score, "Scores were not equal")
self.assertEqual(module.max_grade, expected_max_score, "Max scores were not equal") self.assertEqual(module.max_grade, expected_max_score, "Max scores were not equal")
...@@ -254,14 +283,14 @@ class TestRescoring(TestRescoringBase): ...@@ -254,14 +283,14 @@ class TestRescoring(TestRescoringBase):
self.check_state('u1', descriptor, 2, 2, 1) self.check_state('u1', descriptor, 2, 2, 1)
# rescore the problem for only one student -- only that student's grade should change: # rescore the problem for only one student -- only that student's grade should change:
self.rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1')) self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
self.check_state('u1', descriptor, 0, 2, 1) self.check_state('u1', descriptor, 0, 2, 1)
self.check_state('u2', descriptor, 1, 2, 1) self.check_state('u2', descriptor, 1, 2, 1)
self.check_state('u3', descriptor, 1, 2, 1) self.check_state('u3', descriptor, 1, 2, 1)
self.check_state('u4', descriptor, 0, 2, 1) self.check_state('u4', descriptor, 0, 2, 1)
# rescore the problem for all students # rescore the problem for all students
self.rescore_all_student_answers('instructor', problem_url_name) self.submit_rescore_all_student_answers('instructor', problem_url_name)
self.check_state('u1', descriptor, 0, 2, 1) self.check_state('u1', descriptor, 0, 2, 1)
self.check_state('u2', descriptor, 1, 2, 1) self.check_state('u2', descriptor, 1, 2, 1)
self.check_state('u3', descriptor, 1, 2, 1) self.check_state('u3', descriptor, 1, 2, 1)
...@@ -276,22 +305,23 @@ class TestRescoring(TestRescoringBase): ...@@ -276,22 +305,23 @@ class TestRescoring(TestRescoringBase):
expected_message = "bad things happened" expected_message = "bad things happened"
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore: with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = ZeroDivisionError(expected_message) mock_rescore.side_effect = ZeroDivisionError(expected_message)
course_task_log = self.rescore_all_student_answers('instructor', problem_url_name) course_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
# check task_log returned # check task_log returned
self.assertEqual(course_task_log.task_state, 'FAILURE') self.assertEqual(course_task.task_state, FAILURE)
self.assertEqual(course_task_log.requester.username, 'instructor') self.assertEqual(course_task.requester.username, 'instructor')
self.assertEqual(course_task_log.task_type, 'rescore_problem') self.assertEqual(course_task.task_type, 'rescore_problem')
task_input = json.loads(course_task_log.task_input) task_input = json.loads(course_task.task_input)
self.assertFalse('student' in task_input) self.assertFalse('student' in task_input)
self.assertEqual(task_input['problem_url'], TestRescoring.problem_location(problem_url_name)) self.assertEqual(task_input['problem_url'], TestRescoring.problem_location(problem_url_name))
status = json.loads(course_task_log.task_output) status = json.loads(course_task.task_output)
self.assertEqual(status['exception'], 'ZeroDivisionError') self.assertEqual(status['exception'], 'ZeroDivisionError')
self.assertEqual(status['message'], expected_message) self.assertEqual(status['message'], expected_message)
# check status returned: # check status returned:
mock_request = Mock() mock_request = Mock()
response = course_task_log_status(mock_request, task_id=course_task_log.task_id) mock_request.REQUEST = {'task_id': course_task.task_id}
response = course_task_status(mock_request)
status = json.loads(response.content) status = json.loads(response.content)
self.assertEqual(status['message'], expected_message) self.assertEqual(status['message'], expected_message)
...@@ -299,16 +329,17 @@ class TestRescoring(TestRescoringBase): ...@@ -299,16 +329,17 @@ class TestRescoring(TestRescoringBase):
"""confirm that a non-problem will not submit""" """confirm that a non-problem will not submit"""
problem_url_name = self.problem_section.location.url() problem_url_name = self.problem_section.location.url()
with self.assertRaises(NotImplementedError): with self.assertRaises(NotImplementedError):
self.rescore_all_student_answers('instructor', problem_url_name) self.submit_rescore_all_student_answers('instructor', problem_url_name)
def test_rescoring_nonexistent_problem(self): def test_rescoring_nonexistent_problem(self):
"""confirm that a non-existent problem will not submit""" """confirm that a non-existent problem will not submit"""
problem_url_name = 'NonexistentProblem' problem_url_name = 'NonexistentProblem'
with self.assertRaises(ItemNotFoundError): with self.assertRaises(ItemNotFoundError):
self.rescore_all_student_answers('instructor', problem_url_name) self.submit_rescore_all_student_answers('instructor', problem_url_name)
def define_code_response_problem(self, problem_url_name): def define_code_response_problem(self, problem_url_name):
"""Define an arbitrary code-response problem. """
Define an arbitrary code-response problem.
We'll end up mocking its evaluation later. We'll end up mocking its evaluation later.
""" """
...@@ -332,14 +363,15 @@ class TestRescoring(TestRescoringBase): ...@@ -332,14 +363,15 @@ class TestRescoring(TestRescoringBase):
mock_send_to_queue.return_value = (0, "Successfully queued") mock_send_to_queue.return_value = (0, "Successfully queued")
self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"]) self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])
course_task_log = self.rescore_all_student_answers('instructor', problem_url_name) course_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
self.assertEqual(course_task_log.task_state, 'FAILURE') self.assertEqual(course_task.task_state, FAILURE)
status = json.loads(course_task_log.task_output) status = json.loads(course_task.task_output)
self.assertEqual(status['exception'], 'NotImplementedError') self.assertEqual(status['exception'], 'NotImplementedError')
self.assertEqual(status['message'], "Problem's definition does not support rescoring") self.assertEqual(status['message'], "Problem's definition does not support rescoring")
mock_request = Mock() mock_request = Mock()
response = course_task_log_status(mock_request, task_id=course_task_log.task_id) mock_request.REQUEST = {'task_id': course_task.task_id}
response = course_task_status(mock_request)
status = json.loads(response.content) status = json.loads(response.content)
self.assertEqual(status['message'], "Problem's definition does not support rescoring") self.assertEqual(status['message'], "Problem's definition does not support rescoring")
...@@ -418,14 +450,14 @@ class TestRescoring(TestRescoringBase): ...@@ -418,14 +450,14 @@ class TestRescoring(TestRescoringBase):
# rescore the problem for only one student -- only that student's grade should change # rescore the problem for only one student -- only that student's grade should change
# (and none of the attempts): # (and none of the attempts):
self.rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1')) self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
self.check_state('u1', descriptor, 0, 1, 2) self.check_state('u1', descriptor, 0, 1, 2)
self.check_state('u2', descriptor, 1, 1, 2) self.check_state('u2', descriptor, 1, 1, 2)
self.check_state('u3', descriptor, 1, 1, 2) self.check_state('u3', descriptor, 1, 1, 2)
self.check_state('u4', descriptor, 1, 1, 2) self.check_state('u4', descriptor, 1, 1, 2)
# rescore the problem for all students # rescore the problem for all students
self.rescore_all_student_answers('instructor', problem_url_name) self.submit_rescore_all_student_answers('instructor', problem_url_name)
# all grades should change to being wrong (with no change in attempts) # all grades should change to being wrong (with no change in attempts)
for username in userlist: for username in userlist:
...@@ -444,6 +476,7 @@ class TestResetAttempts(TestRescoringBase): ...@@ -444,6 +476,7 @@ class TestResetAttempts(TestRescoringBase):
self.logout() self.logout()
def get_num_attempts(self, username, descriptor): def get_num_attempts(self, username, descriptor):
"""returns number of attempts stored for `username` on problem `descriptor` for test course"""
module = self.get_student_module(username, descriptor) module = self.get_student_module(username, descriptor)
state = json.loads(module.state) state = json.loads(module.state)
return state['attempts'] return state['attempts']
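A minimal sketch of the StudentModule.state payload this helper parses; 'attempts' is the only key it relies on, and the other key here is made up:

    import json

    module_state = json.loads('{"attempts": 2, "done": true}')
    module_state['attempts']  # -> 2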
...@@ -483,30 +516,31 @@ class TestResetAttempts(TestRescoringBase): ...@@ -483,30 +516,31 @@ class TestResetAttempts(TestRescoringBase):
expected_message = "bad things happened" expected_message = "bad things happened"
with patch('courseware.models.StudentModule.save') as mock_save: with patch('courseware.models.StudentModule.save') as mock_save:
mock_save.side_effect = ZeroDivisionError(expected_message) mock_save.side_effect = ZeroDivisionError(expected_message)
course_task_log = self.reset_problem_attempts('instructor', problem_url_name) course_task = self.reset_problem_attempts('instructor', problem_url_name)
# check task_log returned # check task_log returned
self.assertEqual(course_task_log.task_state, 'FAILURE') self.assertEqual(course_task.task_state, FAILURE)
self.assertEqual(course_task_log.requester.username, 'instructor') self.assertEqual(course_task.requester.username, 'instructor')
self.assertEqual(course_task_log.task_type, 'reset_problem_attempts') self.assertEqual(course_task.task_type, 'reset_problem_attempts')
task_input = json.loads(course_task_log.task_input) task_input = json.loads(course_task.task_input)
self.assertFalse('student' in task_input) self.assertFalse('student' in task_input)
self.assertEqual(task_input['problem_url'], TestRescoring.problem_location(problem_url_name)) self.assertEqual(task_input['problem_url'], TestRescoring.problem_location(problem_url_name))
status = json.loads(course_task_log.task_output) status = json.loads(course_task.task_output)
self.assertEqual(status['exception'], 'ZeroDivisionError') self.assertEqual(status['exception'], 'ZeroDivisionError')
self.assertEqual(status['message'], expected_message) self.assertEqual(status['message'], expected_message)
# check status returned: # check status returned:
mock_request = Mock() mock_request = Mock()
response = course_task_log_status(mock_request, task_id=course_task_log.task_id) mock_request.REQUEST = {'task_id': course_task.task_id}
response = course_task_status(mock_request)
status = json.loads(response.content) status = json.loads(response.content)
self.assertEqual(status['message'], expected_message) self.assertEqual(status['message'], expected_message)
def test_reset_non_problem(self): def test_reset_non_problem(self):
"""confirm that a non-problem can still be successfully reset""" """confirm that a non-problem can still be successfully reset"""
problem_url_name = self.problem_section.location.url() problem_url_name = self.problem_section.location.url()
course_task_log = self.reset_problem_attempts('instructor', problem_url_name) course_task = self.reset_problem_attempts('instructor', problem_url_name)
self.assertEqual(course_task_log.task_state, 'SUCCESS') self.assertEqual(course_task.task_state, SUCCESS)
def test_reset_nonexistent_problem(self): def test_reset_nonexistent_problem(self):
"""confirm that a non-existent problem will not submit""" """confirm that a non-existent problem will not submit"""
...@@ -560,30 +594,31 @@ class TestDeleteProblem(TestRescoringBase): ...@@ -560,30 +594,31 @@ class TestDeleteProblem(TestRescoringBase):
expected_message = "bad things happened" expected_message = "bad things happened"
with patch('courseware.models.StudentModule.delete') as mock_delete: with patch('courseware.models.StudentModule.delete') as mock_delete:
mock_delete.side_effect = ZeroDivisionError(expected_message) mock_delete.side_effect = ZeroDivisionError(expected_message)
course_task_log = self.delete_problem_state('instructor', problem_url_name) course_task = self.delete_problem_state('instructor', problem_url_name)
# check task_log returned # check task_log returned
self.assertEqual(course_task_log.task_state, 'FAILURE') self.assertEqual(course_task.task_state, FAILURE)
self.assertEqual(course_task_log.requester.username, 'instructor') self.assertEqual(course_task.requester.username, 'instructor')
self.assertEqual(course_task_log.task_type, 'delete_problem_state') self.assertEqual(course_task.task_type, 'delete_problem_state')
task_input = json.loads(course_task_log.task_input) task_input = json.loads(course_task.task_input)
self.assertFalse('student' in task_input) self.assertFalse('student' in task_input)
self.assertEqual(task_input['problem_url'], TestRescoring.problem_location(problem_url_name)) self.assertEqual(task_input['problem_url'], TestRescoring.problem_location(problem_url_name))
status = json.loads(course_task_log.task_output) status = json.loads(course_task.task_output)
self.assertEqual(status['exception'], 'ZeroDivisionError') self.assertEqual(status['exception'], 'ZeroDivisionError')
self.assertEqual(status['message'], expected_message) self.assertEqual(status['message'], expected_message)
# check status returned: # check status returned:
mock_request = Mock() mock_request = Mock()
response = course_task_log_status(mock_request, task_id=course_task_log.task_id) mock_request.REQUEST = {'task_id': course_task.task_id}
response = course_task_status(mock_request)
status = json.loads(response.content) status = json.loads(response.content)
self.assertEqual(status['message'], expected_message) self.assertEqual(status['message'], expected_message)
def test_delete_non_problem(self): def test_delete_non_problem(self):
"""confirm that a non-problem can still be successfully deleted""" """confirm that a non-problem can still be successfully deleted"""
problem_url_name = self.problem_section.location.url() problem_url_name = self.problem_section.location.url()
course_task_log = self.delete_problem_state('instructor', problem_url_name) course_task = self.delete_problem_state('instructor', problem_url_name)
self.assertEqual(course_task_log.task_state, 'SUCCESS') self.assertEqual(course_task.task_state, SUCCESS)
def test_delete_nonexistent_module(self): def test_delete_nonexistent_module(self):
"""confirm that a non-existent module will not submit""" """confirm that a non-existent module will not submit"""
......
...@@ -29,7 +29,7 @@ from courseware import task_submit ...@@ -29,7 +29,7 @@ from courseware import task_submit
from courseware.access import (has_access, get_access_group_name, from courseware.access import (has_access, get_access_group_name,
course_beta_test_group_name) course_beta_test_group_name)
from courseware.courses import get_course_with_access from courseware.courses import get_course_with_access
from courseware.models import StudentModule, CourseTaskLog from courseware.models import StudentModule
from django_comment_common.models import (Role, from django_comment_common.models import (Role,
FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR, FORUM_ROLE_MODERATOR,
...@@ -69,7 +69,8 @@ def instructor_dashboard(request, course_id): ...@@ -69,7 +69,8 @@ def instructor_dashboard(request, course_id):
msg = '' msg = ''
problems = [] problems = []
plots = [] plots = []
datatable = None
# the instructor dashboard page is modal: grades, psychometrics, admin # the instructor dashboard page is modal: grades, psychometrics, admin
# keep that state in request.session (defaults to grades mode) # keep that state in request.session (defaults to grades mode)
idash_mode = request.POST.get('idash_mode', '') idash_mode = request.POST.get('idash_mode', '')
...@@ -79,26 +80,29 @@ def instructor_dashboard(request, course_id): ...@@ -79,26 +80,29 @@ def instructor_dashboard(request, course_id):
idash_mode = request.session.get('idash_mode', 'Grades') idash_mode = request.session.get('idash_mode', 'Grades')
# assemble some course statistics for output to instructor # assemble some course statistics for output to instructor
datatable = {'header': ['Statistic', 'Value'], def get_course_stats_table():
'title': 'Course Statistics At A Glance', datatable = {'header': ['Statistic', 'Value'],
} 'title': 'Course Statistics At A Glance',
data = [['# Enrolled', CourseEnrollment.objects.filter(course_id=course_id).count()]] }
data += compute_course_stats(course).items() data = [['# Enrolled', CourseEnrollment.objects.filter(course_id=course_id).count()]]
if request.user.is_staff: data += compute_course_stats(course).items()
for field in course.fields: if request.user.is_staff:
if getattr(field.scope, 'user', False): for field in course.fields:
continue
data.append([field.name, json.dumps(field.read_json(course))])
for namespace in course.namespaces:
for field in getattr(course, namespace).fields:
if getattr(field.scope, 'user', False): if getattr(field.scope, 'user', False):
continue continue
data.append(["{}.{}".format(namespace, field.name), json.dumps(field.read_json(course))]) data.append([field.name, json.dumps(field.read_json(course))])
datatable['data'] = data for namespace in course.namespaces:
for field in getattr(course, namespace).fields:
if getattr(field.scope, 'user', False):
continue
data.append(["{}.{}".format(namespace, field.name), json.dumps(field.read_json(course))])
datatable['data'] = data
return datatable
def return_csv(fn, datatable, fp=None): def return_csv(fn, datatable, fp=None):
"""Outputs a CSV file from the contents of a datatable."""
if fp is None: if fp is None:
response = HttpResponse(mimetype='text/csv') response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename={0}'.format(fn) response['Content-Disposition'] = 'attachment; filename={0}'.format(fn)
...@@ -112,12 +116,15 @@ def instructor_dashboard(request, course_id): ...@@ -112,12 +116,15 @@ def instructor_dashboard(request, course_id):
return response return response
def get_staff_group(course): def get_staff_group(course):
"""Get or create the staff access group"""
return get_group(course, 'staff') return get_group(course, 'staff')
def get_instructor_group(course): def get_instructor_group(course):
"""Get or create the instructor access group"""
return get_group(course, 'instructor') return get_group(course, 'instructor')
def get_group(course, groupname): def get_group(course, groupname):
"""Get or create an access group"""
grpname = get_access_group_name(course, groupname) grpname = get_access_group_name(course, groupname)
try: try:
group = Group.objects.get(name=grpname) group = Group.objects.get(name=grpname)
...@@ -157,7 +164,7 @@ def instructor_dashboard(request, course_id): ...@@ -157,7 +164,7 @@ def instructor_dashboard(request, course_id):
return "i4x://" + org + "/" + course_name + "/" + urlname return "i4x://" + org + "/" + course_name + "/" + urlname
def get_student_from_identifier(unique_student_identifier): def get_student_from_identifier(unique_student_identifier):
# try to uniquely id student by email address or username """Gets a student object using either an email address or username"""
msg = "" msg = ""
try: try:
if "@" in unique_student_identifier: if "@" in unique_student_identifier:
...@@ -243,14 +250,13 @@ def instructor_dashboard(request, course_id): ...@@ -243,14 +250,13 @@ def instructor_dashboard(request, course_id):
problem_urlname = request.POST.get('problem_for_all_students', '') problem_urlname = request.POST.get('problem_for_all_students', '')
problem_url = get_module_url(problem_urlname) problem_url = get_module_url(problem_urlname)
try: try:
course_task_log_entry = task_submit.submit_rescore_problem_for_all_students(request, course_id, problem_url) course_task = task_submit.submit_rescore_problem_for_all_students(request, course_id, problem_url)
if course_task_log_entry is None: if course_task is None:
msg += '<font color="red">Failed to create a background task for rescoring "{0}".</font>'.format(problem_url) msg += '<font color="red">Failed to create a background task for rescoring "{0}".</font>'.format(problem_url)
else: else:
track_msg = 'rescore problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id) track_msg = 'rescore problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard') track.views.server_track(request, track_msg, {}, page='idashboard')
except ItemNotFoundError as e: except ItemNotFoundError as e:
log.error('Failure to rescore: unknown problem "{0}"'.format(e))
msg += '<font color="red">Failed to create a background task for rescoring "{0}": problem not found.</font>'.format(problem_url) msg += '<font color="red">Failed to create a background task for rescoring "{0}": problem not found.</font>'.format(problem_url)
except Exception as e: except Exception as e:
log.error("Encountered exception from rescore: {0}".format(e)) log.error("Encountered exception from rescore: {0}".format(e))
...@@ -260,8 +266,8 @@ def instructor_dashboard(request, course_id): ...@@ -260,8 +266,8 @@ def instructor_dashboard(request, course_id):
problem_urlname = request.POST.get('problem_for_all_students', '') problem_urlname = request.POST.get('problem_for_all_students', '')
problem_url = get_module_url(problem_urlname) problem_url = get_module_url(problem_urlname)
try: try:
course_task_log_entry = task_submit.submit_reset_problem_attempts_for_all_students(request, course_id, problem_url) course_task = task_submit.submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
if course_task_log_entry is None: if course_task is None:
msg += '<font color="red">Failed to create a background task for resetting "{0}".</font>'.format(problem_url) msg += '<font color="red">Failed to create a background task for resetting "{0}".</font>'.format(problem_url)
else: else:
track_msg = 'reset problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id) track_msg = 'reset problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
...@@ -286,9 +292,6 @@ def instructor_dashboard(request, course_id): ...@@ -286,9 +292,6 @@ def instructor_dashboard(request, course_id):
msg += message msg += message
if task_datatable is not None: if task_datatable is not None:
datatable = task_datatable datatable = task_datatable
datatable['title'] = "{course_id} > {location} > {student}".format(course_id=course_id,
location=problem_url,
student=student.username)
elif "Show Background Task History" in action: elif "Show Background Task History" in action:
problem_urlname = request.POST.get('problem_for_all_students', '') problem_urlname = request.POST.get('problem_for_all_students', '')
...@@ -297,11 +300,10 @@ def instructor_dashboard(request, course_id): ...@@ -297,11 +300,10 @@ def instructor_dashboard(request, course_id):
msg += message msg += message
if task_datatable is not None: if task_datatable is not None:
datatable = task_datatable datatable = task_datatable
datatable['title'] = "{course_id} > {location}".format(course_id=course_id, location=problem_url)
elif "Reset student's attempts" in action \ elif ("Reset student's attempts" in action or
or "Delete student state for module" in action \ "Delete student state for module" in action or
or "Rescore student's problem submission" in action: "Rescore student's problem submission" in action):
# get the form data # get the form data
unique_student_identifier = request.POST.get('unique_student_identifier', '') unique_student_identifier = request.POST.get('unique_student_identifier', '')
problem_urlname = request.POST.get('problem_for_student', '') problem_urlname = request.POST.get('problem_for_student', '')
...@@ -326,8 +328,8 @@ def instructor_dashboard(request, course_id): ...@@ -326,8 +328,8 @@ def instructor_dashboard(request, course_id):
try: try:
student_module.delete() student_module.delete()
msg += "<font color='red'>Deleted student module state for %s!</font>" % module_state_key msg += "<font color='red'>Deleted student module state for %s!</font>" % module_state_key
track_msg = 'delete student module state for problem {problem} for student {student} in {course}' track_format = 'delete student module state for problem {problem} for student {student} in {course}'
track_msg = track_msg.format(problem=problem_url, student=unique_student_identifier, course=course_id) track_msg = track_format.format(problem=problem_url, student=unique_student_identifier, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard') track.views.server_track(request, track_msg, {}, page='idashboard')
except: except:
msg += "Failed to delete module state for %s/%s" % (unique_student_identifier, problem_urlname) msg += "Failed to delete module state for %s/%s" % (unique_student_identifier, problem_urlname)
...@@ -342,28 +344,27 @@ def instructor_dashboard(request, course_id): ...@@ -342,28 +344,27 @@ def instructor_dashboard(request, course_id):
# save # save
student_module.state = json.dumps(problem_state) student_module.state = json.dumps(problem_state)
student_module.save() student_module.save()
track.views.server_track(request, track_format = '{instructor} reset attempts from {old_attempts} to 0 for {student} on problem {problem} in {course}'
'{instructor} reset attempts from {old_attempts} to 0 for {student} on problem {problem} in {course}'.format( track_msg = track_format.format(old_attempts=old_number_of_attempts,
old_attempts=old_number_of_attempts, student=student,
student=student, problem=student_module.module_state_key,
problem=student_module.module_state_key, instructor=request.user,
instructor=request.user, course=course_id)
course=course_id), track.views.server_track(request, track_msg, {}, page='idashboard')
{},
page='idashboard')
msg += "<font color='green'>Module state successfully reset!</font>" msg += "<font color='green'>Module state successfully reset!</font>"
except: except:
msg += "<font color='red'>Couldn't reset module state. </font>" msg += "<font color='red'>Couldn't reset module state. </font>"
else: else:
# "Rescore student's problem submission" case
try: try:
course_task_log_entry = task_submit.submit_rescore_problem_for_student(request, course_id, module_state_key, student) course_task = task_submit.submit_rescore_problem_for_student(request, course_id, module_state_key, student)
if course_task_log_entry is None: if course_task is None:
msg += '<font color="red">Failed to create a background task for rescoring "{0}" for student {1}.</font>'.format(module_state_key, unique_student_identifier) msg += '<font color="red">Failed to create a background task for rescoring "{0}" for student {1}.</font>'.format(module_state_key, unique_student_identifier)
else: else:
track_msg = 'rescore problem {problem} for student {student} in {course}'.format(problem=module_state_key, student=unique_student_identifier, course=course_id) track_msg = 'rescore problem {problem} for student {student} in {course}'.format(problem=module_state_key, student=unique_student_identifier, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard') track.views.server_track(request, track_msg, {}, page='idashboard')
except Exception as e: except Exception as e:
log.error("Encountered exception from rescore: {0}".format(e)) log.exception("Encountered exception from rescore: {0}")
msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(module_state_key, e.message) msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(module_state_key, e.message)
elif "Get link to student's progress page" in action: elif "Get link to student's progress page" in action:
...@@ -725,6 +726,9 @@ def instructor_dashboard(request, course_id): ...@@ -725,6 +726,9 @@ def instructor_dashboard(request, course_id):
else: else:
course_tasks = None course_tasks = None
course_stats = None
if datatable is None:
course_stats = get_course_stats_table()
#---------------------------------------- #----------------------------------------
# context for rendering # context for rendering
...@@ -734,6 +738,7 @@ def instructor_dashboard(request, course_id): ...@@ -734,6 +738,7 @@ def instructor_dashboard(request, course_id):
'instructor_access': instructor_access, 'instructor_access': instructor_access,
'forum_admin_access': forum_admin_access, 'forum_admin_access': forum_admin_access,
'datatable': datatable, 'datatable': datatable,
'course_stats': course_stats,
'msg': msg,
'modeflag': {idash_mode: 'selectedmode'},
'problems': problems,  # psychometrics
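A note on `get_course_stats_table()`, which the new dashboard code above calls but this diff never defines: the template section added below reads `course_stats['title']`, `course_stats['header']`, and `course_stats['data']`, so the function presumably returns the same dict shape as the `datatable` built elsewhere in this view. A sketch under that assumption, with placeholder values only:

# Sketch only: get_course_stats_table() is referenced but not defined in this
# diff. The template below consumes 'title', 'header', and 'data' keys, so a
# compatible return value would look like this (all values are placeholders):
def get_course_stats_table():
    return {
        'title': 'Course statistics',
        'header': ['Statistic', 'Value'],
        'data': [['Enrolled students', 1234],
                 ['Problem modules', 56]],
    }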
...@@ -1302,48 +1307,48 @@ def get_background_task_table(course_id, problem_url, student=None):
# just won't find any entries.)
if (history_entries.count()) == 0:
    if student is not None:
        log.debug("Found no background tasks for request: {course}, {problem}, and student {student}".format(course=course_id, problem=problem_url, student=student.username))
        template = '<font color="red">Failed to find any background tasks for course "{course}", module "{problem}" and student "{student}".</font>'
        msg += template.format(course=course_id, problem=problem_url, student=student.username)
    else:
        log.debug("Found no background tasks for request: {course}, {problem}".format(course=course_id, problem=problem_url))
        msg += '<font color="red">Failed to find any background tasks for course "{course}" and module "{problem}".</font>'.format(course=course_id, problem=problem_url)
else:
    datatable = {}
    datatable['header'] = ["Task Type",
                           "Task Id",
                           "Requester",
                           "Submitted",
                           "Duration (ms)",
                           "Task State",
                           "Task Status",
                           "Task Output"]
    datatable['data'] = []
    for course_task in history_entries:
        # get duration info, if known:
        duration_ms = 'unknown'
        if hasattr(course_task, 'task_output'):
            task_output = json.loads(course_task.task_output)
            if 'duration_ms' in task_output:
                duration_ms = task_output['duration_ms']
        # get progress status message:
        success, task_message = task_submit.get_task_completion_info(course_task)
        status = "Complete" if success else "Incomplete"
        # generate row for this task:
        row = [str(course_task.task_type),
               str(course_task.task_id),
               str(course_task.requester),
               course_task.created.isoformat(' '),
               duration_ms,
               str(course_task.task_state),
               status,
               task_message]
        datatable['data'].append(row)
    if student is not None:
        datatable['title'] = "{course_id} > {location} > {student}".format(course_id=course_id,
                                                                           location=problem_url,
                                                                           student=student.username)
    else:
        datatable['title'] = "{course_id} > {location}".format(course_id=course_id, location=problem_url)
return msg, datatable
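The loop above shows two conventions only from the caller's side: `task_output` is a JSON string that may carry a `duration_ms` key, and `get_task_completion_info` returns a `(success, message)` pair. A sketch of that contract follows; the terminal-state name and the example output keys are assumptions, not taken from this commit:

# Hypothetical sketch; the real courseware.task_submit.get_task_completion_info
# is not part of this diff -- only its (success, message) return shape is.
def get_task_completion_info(course_task):
    if course_task.task_state != 'SUCCESS':  # terminal-state name assumed
        return False, "Task is still being processed."
    # task_output is JSON, e.g. '{"attempted": 10, "updated": 9, "duration_ms": 542}';
    # the table code above reads only its 'duration_ms' key.
    task_output = json.loads(course_task.task_output)
    message = "Processed {0} of {1} problem submissions.".format(
        task_output.get('updated', 0), task_output.get('attempted', 0))
    return True, message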
...@@ -12,6 +12,13 @@
%if course_tasks is not None:
<script type="text/javascript">
// Define a CourseTaskProgress object for updating a table on the instructor
// dashboard that shows the background tasks currently running for the
// instructor's course. Any tasks that are running when the page is first
// displayed are passed in as course_tasks and populate the "Pending Course
// Task" table. The CourseTaskProgress is bound to this table, and periodically
// polls the LMS to see if any of the tasks has completed. Once a task is
// complete, it is not included in any further polling.
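// Illustrative sketch only, not code from this commit: the polling described
// above could look roughly like this, assuming jQuery is available and
// assuming the course_task_status endpoint (routed in urls.py below) accepts
// a "task_id" parameter and returns "task_state" and "in_progress" fields --
// those names are guesses, not taken from this diff.
var EXAMPLE_REFRESH_INTERVAL = 5000;  // mirrors refresh_interval below
function examplePollPendingTasks() {
    $('tr.task-progress-entry[data-in-progress="true"]').each(function () {
        var $row = $(this);
        $.getJSON('/course_task_status/', {task_id: $row.attr('data-task-id')},
            function (info) {
                $row.find('.task-state').text(info.task_state);
                if (!info.in_progress) {
                    // completed tasks drop out of all future polls
                    $row.attr('data-in-progress', 'false');
                }
            });
    });
    if ($('tr.task-progress-entry[data-in-progress="true"]').length > 0) {
        setTimeout(examplePollPendingTasks, EXAMPLE_REFRESH_INTERVAL);
    }
}
setTimeout(examplePollPendingTasks, 2000);  // mirrors initial_refresh_delay below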
(function() {
...@@ -24,7 +31,7 @@
// then don't set the timeout at all.)
var refresh_interval = 5000;
// Hardcode the initial delay before the first refresh to two seconds:
var initial_refresh_delay = 2000;
function CourseTaskProgress(element) {
...@@ -328,7 +335,7 @@ function goto( mode)
<h2>Student-specific grade inspection and adjustment</h2>
<p>
Specify the edX email address or username of a student here:
<input type="text" name="unique_student_identifier">
</p>
<p>
Click this, and a link to the student's progress page will appear below:
...@@ -336,7 +343,7 @@ function goto( mode)
</p>
<p>
Specify a particular problem in the course here by its URL:
<input type="text" name="problem_for_student" size="60">
</p>
<p>
You may use just the "urlname" if it is a problem, or "modulename/urlname" if not.
...@@ -491,10 +498,6 @@ function goto( mode)
##-----------------------------------------------------------------------------
%if modeflag.get('Data'):
<p>
<input type="submit" name="action" value="Test Celery">
<p>
<hr width="40%" style="align:left"> <hr width="40%" style="align:left">
<p> <p>
<input type="submit" name="action" value="Download CSV of all student profile data"> <input type="submit" name="action" value="Download CSV of all student profile data">
...@@ -700,6 +703,30 @@ function goto( mode) ...@@ -700,6 +703,30 @@ function goto( mode)
##----------------------------------------------------------------------------- ##-----------------------------------------------------------------------------
%if datatable and modeflag.get('Psychometrics') is None:
<br/>
<br/>
<p>
<hr width="100%">
<h2>${datatable['title'] | h}</h2>
<table class="stat_table">
<tr>
%for hname in datatable['header']:
<th>${hname}</th>
%endfor
</tr>
%for row in datatable['data']:
<tr>
%for value in row:
<td>${value}</td>
%endfor
</tr>
%endfor
</table>
</p>
%endif
## Output tasks in progress
%if course_tasks is not None and len(course_tasks) > 0:
...@@ -718,17 +745,17 @@
<th>Task Progress</th>
</tr>
%for tasknum, course_task in enumerate(course_tasks):
<tr id="task-progress-entry-${tasknum}" class="task-progress-entry"
    data-task-id="${course_task.task_id}"
    data-in-progress="true">
<td>${course_task.task_type}</td>
<td>${course_task.task_input}</td>
<td class="task-id">${course_task.task_id}</td>
<td>${course_task.requester}</td>
<td>${course_task.created}</td>
<td class="task-state">${course_task.task_state}</td>
<td class="task-duration">unknown</td>
<td class="task-progress">unknown</td>
</tr>
%endfor
</table>
...@@ -739,20 +766,20 @@ function goto( mode)
##-----------------------------------------------------------------------------
%if course_stats and modeflag.get('Psychometrics') is None:
<br/>
<br/>
<p>
<hr width="100%">
<h2>${course_stats['title']}</h2>
<table class="stat_table">
<tr>
%for hname in course_stats['header']:
<th>${hname}</th>
%endfor
</tr>
%for row in course_stats['data']:
<tr>
%for value in row:
<td>${value | h}</td>
...
...@@ -58,7 +58,7 @@ urlpatterns = ('', # nopep8
name='auth_password_reset_done'),
url(r'^heartbeat$', include('heartbeat.urls')),
url(r'^course_task_status/$', 'courseware.task_submit.course_task_status', name='course_task_status'),
)
# University profiles only make sense in the default edX context
...
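The renamed `course_task_status` endpoint above is the URL the dashboard's polling script queries. Its implementation is not included in this diff; the following is a minimal sketch of such a status view, in which the import path, model name, query parameter, and response fields are all assumptions:

# Hypothetical sketch; the real courseware.task_submit.course_task_status view
# is not part of this diff. Names below are assumed, not confirmed.
import json

from django.http import HttpResponse

from courseware.models import CourseTaskLog  # import path and model name assumed

def course_task_status(request):
    """Report the state of one background task as JSON for the dashboard poller."""
    task_id = request.GET.get('task_id')  # parameter name assumed
    course_task = CourseTaskLog.objects.get(task_id=task_id)
    response = {
        'task_id': course_task.task_id,
        'task_state': course_task.task_state,
        'in_progress': course_task.task_state not in ('SUCCESS', 'FAILURE'),
    }
    return HttpResponse(json.dumps(response), content_type='application/json')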