Commit 54a3bc62 by brianhw

Merge pull request #40 from edx/feature/brian/regrade-celery-2

Feature/brian/regrade celery 2
parents c5dc404b fd206a41
......@@ -5,6 +5,13 @@ These are notable changes in edx-platform. This is a rolling list of changes,
in roughly chronological order, most recent first. Add your entries at or near
the top. Include a label indicating the component affected.
LMS: Problem rescoring. Added options on the Grades tab of the
Instructor Dashboard to allow all students' submissions for a
particular problem to be rescored. Also supports resetting all
students' number of attempts to zero. Provides a list of background
tasks that are currently running for the course, and an option to
see a history of background tasks for a given problem.
LMS: Forums. Added handling for the case where the discussion module can get `None` as
the value of lms.start in `lms/djangoapps/django_comment_client/utils.py`.
......
import json
import logging
import os
import pytz
import datetime
import dateutil.parser
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import redirect
from django.conf import settings
from mitxmako.shortcuts import render_to_response
......@@ -22,6 +20,7 @@ LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', '
def log_event(event):
"""Write tracking event to log file, and optionally to TrackingLog model."""
event_str = json.dumps(event)
log.info(event_str[:settings.TRACK_MAX_EVENT])
if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
......@@ -34,6 +33,11 @@ def log_event(event):
def user_track(request):
"""
Log when GET call to "event" URL is made by a user.
GET call should provide "event_type", "event", and "page" arguments.
"""
try: # TODO: Do the same for many of the optional META parameters
username = request.user.username
except:
......@@ -50,7 +54,6 @@ def user_track(request):
except:
agent = ''
# TODO: Move a bunch of this into log_event
event = {
"username": username,
"session": scookie,
......@@ -68,6 +71,7 @@ def user_track(request):
def server_track(request, event_type, event, page=None):
"""Log events related to server requests."""
try:
username = request.user.username
except:
......@@ -95,9 +99,52 @@ def server_track(request, event_type, event, page=None):
log_event(event)
def task_track(request_info, task_info, event_type, event, page=None):
"""
Logs tracking information for events occurring within celery tasks.
The `event_type` is a string naming the particular event being logged,
while `event` is a dict containing whatever additional contextual information
is desired.
The `request_info` is a dict containing information about the original
task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
While the dict is required, the values in it are not, so that {} can be
passed in.
In addition, a `task_info` dict provides more information about the current
task, to be stored with the `event` dict. This may also be an empty dict.
The `page` parameter is optional, and allows the name of the page to
be provided.
"""
# supplement event information with additional information
# about the task in which it is running.
full_event = dict(event, **task_info)
# All fields must be specified, in case the tracking information is
# also saved to the TrackingLog model. Get values from the task-level
# information, or just add placeholder values.
event = {
"username": request_info.get('username', 'unknown'),
"ip": request_info.get('ip', 'unknown'),
"event_source": "task",
"event_type": event_type,
"event": full_event,
"agent": request_info.get('agent', 'unknown'),
"page": page,
"time": datetime.datetime.utcnow().isoformat(),
"host": request_info.get('host', 'unknown')
}
log_event(event)
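A minimal sketch of a call site, assuming the caller captured the request data in the LMS view before queuing the task (all values below are illustrative):

# hypothetical invocation from inside a celery task
request_info = {'username': 'staff', 'ip': '127.0.0.1', 'agent': 'python-celery', 'host': 'lms'}
task_info = {'task_id': '5a3e-...', 'entry_id': 42}
task_track(request_info, task_info, 'problem_rescore', {'attempted': 3, 'updated': 2})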
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
"""View to output contents of TrackingLog model. For staff use only."""
if not request.user.is_staff:
return redirect('/')
nlen = 100
......
......@@ -424,7 +424,7 @@ class CapaModule(CapaFields, XModule):
# If we cannot construct the problem HTML,
# then generate an error message instead.
except Exception, err:
except Exception as err:
html = self.handle_problem_html_error(err)
# The convention is to pass the name of the check button
......@@ -655,7 +655,7 @@ class CapaModule(CapaFields, XModule):
@staticmethod
def make_dict_of_responses(get):
'''Make dictionary of student responses (aka "answers")
get is POST dictionary (Djano QueryDict).
get is POST dictionary (Django QueryDict).
The *get* dict has keys of the form 'x_y', which are mapped
to key 'y' in the returned dict. For example,
......@@ -739,13 +739,13 @@ class CapaModule(CapaFields, XModule):
# Too late. Cannot submit
if self.closed():
event_info['failure'] = 'closed'
self.system.track_function('save_problem_check_fail', event_info)
self.system.track_function('problem_check_fail', event_info)
raise NotFoundError('Problem is closed')
# Problem submitted. Student should reset before checking again
if self.done and self.rerandomize == "always":
event_info['failure'] = 'unreset'
self.system.track_function('save_problem_check_fail', event_info)
self.system.track_function('problem_check_fail', event_info)
raise NotFoundError('Problem must be reset before it can be checked again')
# Problem queued. Students must wait a specified waittime before they are allowed to submit
......@@ -759,6 +759,8 @@ class CapaModule(CapaFields, XModule):
try:
correct_map = self.lcp.grade_answers(answers)
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
......@@ -778,17 +780,13 @@ class CapaModule(CapaFields, XModule):
return {'success': msg}
except Exception, err:
except Exception as err:
if self.system.DEBUG:
msg = "Error checking problem: " + str(err)
msg += '\nTraceback:\n' + traceback.format_exc()
return {'success': msg}
raise
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
self.publish_grade()
# success = correct if ALL questions in this problem are correct
......@@ -802,7 +800,7 @@ class CapaModule(CapaFields, XModule):
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.system.track_function('save_problem_check', event_info)
self.system.track_function('problem_check', event_info)
if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback
self.system.psychometrics_handler(self.get_state_for_lcp())
......@@ -814,12 +812,92 @@ class CapaModule(CapaFields, XModule):
'contents': html,
}
def rescore_problem(self):
"""
Checks whether the existing answers to a problem are correct.
This is called when the correct answer to a problem has been changed,
and the grade should be re-evaluated.
Returns a dict with one key:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
Raises NotFoundError if called on a problem that has not yet been
answered, or NotImplementedError if it's a problem that cannot be rescored.
Returns error messages for exceptions occurring while performing
the rescoring, rather than raising them.
"""
event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.url()}
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.system.track_function('problem_rescore_fail', event_info)
raise NotImplementedError("Problem's definition does not support rescoring")
if not self.done:
event_info['failure'] = 'unanswered'
self.system.track_function('problem_rescore_fail', event_info)
raise NotFoundError('Problem must be answered before it can be graded again')
# get old score, for comparison:
orig_score = self.lcp.get_score()
event_info['orig_score'] = orig_score['score']
event_info['orig_total'] = orig_score['total']
try:
correct_map = self.lcp.rescore_existing_answers()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("Input error in capa_module:problem_rescore", exc_info=True)
event_info['failure'] = 'input_error'
self.system.track_function('problem_rescore_fail', event_info)
return {'success': u"Error: {0}".format(inst.message)}
except Exception as err:
event_info['failure'] = 'unexpected'
self.system.track_function('problem_rescore_fail', event_info)
if self.system.DEBUG:
msg = u"Error checking problem: {0}".format(err.message)
msg += u'\nTraceback:\n' + traceback.format_exc()
return {'success': msg}
raise
# rescoring should have no effect on attempts, so don't
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
self.publish_grade()
new_score = self.lcp.get_score()
event_info['new_score'] = new_score['score']
event_info['new_total'] = new_score['total']
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.system.track_function('problem_rescore', event_info)
# psychometrics should be called on rescoring requests in the same way as check-problem
if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback
self.system.psychometrics_handler(self.get_state_for_lcp())
return {'success': success}
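Per the docstring above, 'success' may hold either a grading status or an alert message, so a caller needs to distinguish the two; a sketch (the caller wiring is hypothetical):

result = module.rescore_problem()
if result['success'] not in ('correct', 'incorrect'):
    # an error message string came back instead of a grading status
    log.warning('rescore returned an alert: %s', result['success'])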
def save_problem(self, get):
'''
"""
Save the passed in answers.
Returns a dict { 'success' : bool, ['error' : error-msg]},
with the error key only present if success is False.
'''
Returns a dict { 'success' : bool, 'msg' : message }
The message is informative on success, and an error message on failure.
"""
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.url()
......
......@@ -19,6 +19,7 @@ from django.http import QueryDict
from . import test_system
from pytz import UTC
from capa.correctmap import CorrectMap
class CapaFactory(object):
......@@ -597,6 +598,85 @@ class CapaModuleTest(unittest.TestCase):
# Expect that the problem was NOT reset
self.assertTrue('success' in result and not result['success'])
def test_rescore_problem_correct(self):
module = CapaFactory.create(attempts=1, done=True)
# Simulate that all answers are marked correct, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
result = module.rescore_problem()
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
# Expect that we get no HTML
self.assertFalse('contents' in result)
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_incorrect(self):
# make sure rescoring also works when attempts have been reset,
# so create the module with attempts=0:
module = CapaFactory.create(attempts=0, done=True)
# Simulate that all answers are marked incorrect, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
result = module.rescore_problem()
# Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 0)
def test_rescore_problem_not_done(self):
# Simulate that the problem is NOT done
module = CapaFactory.create(done=False)
# Try to rescore the problem, and get exception
with self.assertRaises(xmodule.exceptions.NotFoundError):
module.rescore_problem()
def test_rescore_problem_not_supported(self):
module = CapaFactory.create(done=True)
# Try to rescore the problem, and get exception
with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
mock_supports_rescoring.return_value = False
with self.assertRaises(NotImplementedError):
module.rescore_problem()
def _rescore_problem_error_helper(self, exception_class):
"""Helper to allow testing all errors that rescoring might return."""
# Create the module
module = CapaFactory.create(attempts=1, done=True)
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = exception_class(u'test error \u03a9')
result = module.rescore_problem()
# Expect an AJAX alert message in 'success'
expected_msg = u'Error: test error \u03a9'
self.assertEqual(result['success'], expected_msg)
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_student_input_error(self):
self._rescore_problem_error_helper(StudentInputError)
def test_rescore_problem_problem_error(self):
self._rescore_problem_error_helper(LoncapaProblemError)
def test_rescore_problem_response_error(self):
self._rescore_problem_error_helper(ResponseError)
def test_save_problem(self):
module = CapaFactory.create(done=False)
......
......@@ -20,7 +20,7 @@ from . import test_system
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS())
@patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", course_dirs=[], load_error_modules=load_error_modules)
......@@ -41,7 +41,8 @@ class DummySystem(ImportSystem):
)
def render_template(self, template, context):
raise Exception("Shouldn't be called")
raise Exception("Shouldn't be called")
class ConditionalFactory(object):
"""
......@@ -93,7 +94,7 @@ class ConditionalFactory(object):
# return dict:
return {'cond_module': cond_module,
'source_module': source_module,
'child_module': child_module }
'child_module': child_module}
class ConditionalModuleBasicTest(unittest.TestCase):
......@@ -109,12 +110,11 @@ class ConditionalModuleBasicTest(unittest.TestCase):
'''verify that get_icon_class works independent of condition satisfaction'''
modules = ConditionalFactory.create(self.test_system)
for attempted in ["false", "true"]:
for icon_class in [ 'other', 'problem', 'video']:
for icon_class in ['other', 'problem', 'video']:
modules['source_module'].is_attempted = attempted
modules['child_module'].get_icon_class = lambda: icon_class
self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)
def test_get_html(self):
modules = ConditionalFactory.create(self.test_system)
# because test_system returns the repr of the context dict passed to render_template,
......@@ -224,4 +224,3 @@ class ConditionalModuleXmlTest(unittest.TestCase):
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
......@@ -4,9 +4,9 @@ WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the mitx dir
1. Go to the edx-platform dir
2. ./manage.py schemamigration courseware --auto description_of_your_change
3. Add the migration file created in mitx/courseware/migrations/
3. Add the migration file created in edx-platform/lms/djangoapps/courseware/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
......@@ -17,6 +17,7 @@ from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class StudentModule(models.Model):
"""
Keeps student state for a particular module in a particular course.
......
......@@ -121,7 +121,7 @@ def toc_for_course(user, request, course, active_chapter, active_section, model_
def get_module(user, request, location, model_data_cache, course_id,
position=None, not_found_ok = False, wrap_xmodule_display=True,
position=None, not_found_ok=False, wrap_xmodule_display=True,
grade_bucket_type=None, depth=0):
"""
Get an instance of the xmodule class identified by location,
......@@ -161,16 +161,49 @@ def get_module(user, request, location, model_data_cache, course_id,
return None
def get_module_for_descriptor(user, request, descriptor, model_data_cache, course_id,
position=None, wrap_xmodule_display=True, grade_bucket_type=None):
def get_xqueue_callback_url_prefix(request):
"""
Actually implement get_module. See docstring there for details.
Calculates default prefix based on request, but allows override via settings
This is separated from get_module_for_descriptor so that it can be called
by the LMS before submitting background tasks to run. The xqueue callbacks
should go back to the LMS, not to the worker.
"""
prefix = '{proto}://{host}'.format(
proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http'),
host=request.get_host()
)
return settings.XQUEUE_INTERFACE.get('callback_url', prefix)
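To illustrate (hostnames hypothetical): a request forwarded over HTTPS produces the computed prefix unless a callback_url override is configured.

# request.META['HTTP_X_FORWARDED_PROTO'] == 'https'
# request.get_host() == 'lms.example.com'
prefix = get_xqueue_callback_url_prefix(request)
# => 'https://lms.example.com', unless settings.XQUEUE_INTERFACE
#    defines 'callback_url', in which case that value is returned instead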
def get_module_for_descriptor(user, request, descriptor, model_data_cache, course_id,
position=None, wrap_xmodule_display=True, grade_bucket_type=None):
"""
Implements get_module, extracting out the request-specific functionality.
See get_module() docstring for further details.
"""
# allow course staff to masquerade as student
if has_access(user, descriptor, 'staff', course_id):
setup_masquerade(request, True)
track_function = make_track_function(request)
xqueue_callback_url_prefix = get_xqueue_callback_url_prefix(request)
return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
track_function, xqueue_callback_url_prefix,
position, wrap_xmodule_display, grade_bucket_type)
def get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
track_function, xqueue_callback_url_prefix,
position=None, wrap_xmodule_display=True, grade_bucket_type=None):
"""
Actually implement get_module, without requiring a request.
See get_module() docstring for further details.
"""
# Short circuit--if the user shouldn't have access, bail without doing any work
if not has_access(user, descriptor, 'load', course_id):
return None
......@@ -186,19 +219,13 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
def make_xqueue_callback(dispatch='score_update'):
# Fully qualified callback URL for external queueing system
xqueue_callback_url = '{proto}://{host}'.format(
host=request.get_host(),
proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http')
)
xqueue_callback_url = settings.XQUEUE_INTERFACE.get('callback_url',xqueue_callback_url) # allow override
xqueue_callback_url += reverse('xqueue_callback',
kwargs=dict(course_id=course_id,
userid=str(user.id),
id=descriptor.location.url(),
dispatch=dispatch),
)
return xqueue_callback_url
relative_xqueue_callback_url = reverse('xqueue_callback',
kwargs=dict(course_id=course_id,
userid=str(user.id),
id=descriptor.location.url(),
dispatch=dispatch),
)
return xqueue_callback_url_prefix + relative_xqueue_callback_url
# Default queuename is course-specific and is derived from the course that
# contains the current module.
......@@ -211,20 +238,20 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
}
#This is a hacky way to pass settings to the combined open ended xmodule
#It needs an S3 interface to upload images to S3
#It needs the open ended grading interface in order to get peer grading to be done
#this first checks to see if the descriptor is the correct one, and only sends settings if it is
# This is a hacky way to pass settings to the combined open ended xmodule
# It needs an S3 interface to upload images to S3
# It needs the open ended grading interface in order to get peer grading to be done
# this first checks to see if the descriptor is the correct one, and only sends settings if it is
#Get descriptor metadata fields indicating needs for various settings
# Get descriptor metadata fields indicating needs for various settings
needs_open_ended_interface = getattr(descriptor, "needs_open_ended_interface", False)
needs_s3_interface = getattr(descriptor, "needs_s3_interface", False)
#Initialize interfaces to None
# Initialize interfaces to None
open_ended_grading_interface = None
s3_interface = None
#Create interfaces if needed
# Create interfaces if needed
if needs_open_ended_interface:
open_ended_grading_interface = settings.OPEN_ENDED_GRADING_INTERFACE
open_ended_grading_interface['mock_peer_grading'] = settings.MOCK_PEER_GRADING
......@@ -238,10 +265,15 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
def inner_get_module(descriptor):
"""
Delegate to get_module. It does an access check, so may return None
Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set.
Because it does an access check, it may return None.
"""
return get_module_for_descriptor(user, request, descriptor,
model_data_cache, course_id, position)
# TODO: fix this so that make_xqueue_callback uses the descriptor passed into
# inner_get_module, not the parent's callback. Add it as an argument....
return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
track_function, make_xqueue_callback,
position, wrap_xmodule_display, grade_bucket_type)
def xblock_model_data(descriptor):
return DbModel(
......@@ -291,7 +323,7 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
# TODO (cpennington): When modules are shared between courses, the static
# prefix is going to have to be specific to the module, not the directory
# that the xml was loaded from
system = ModuleSystem(track_function=make_track_function(request),
system = ModuleSystem(track_function=track_function,
render_template=render_to_string,
ajax_url=ajax_url,
xqueue=xqueue,
......
......@@ -11,21 +11,22 @@ from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class ProgressTestCase(TestCase):
def setUp(self):
def setUp(self):
self.mockuser1 = MagicMock()
self.mockuser0 = MagicMock()
self.course = MagicMock()
self.mockuser1.is_authenticated.return_value = True
self.mockuser0.is_authenticated.return_value = False
self.course.id = 'edX/full/6.002_Spring_2012'
self.tab = {'name': 'same'}
self.active_page1 = 'progress'
self.active_page0 = 'stagnation'
self.mockuser1 = MagicMock()
self.mockuser0 = MagicMock()
self.course = MagicMock()
self.mockuser1.is_authenticated.return_value = True
self.mockuser0.is_authenticated.return_value = False
self.course.id = 'edX/full/6.002_Spring_2012'
self.tab = {'name': 'same'}
self.active_page1 = 'progress'
self.active_page0 = 'stagnation'
def test_progress(self):
def test_progress(self):
self.assertEqual(tabs._progress(self.tab, self.mockuser0, self.course,
self.active_page0), [])
......@@ -34,8 +35,8 @@ class ProgressTestCase(TestCase):
self.active_page1)[0].name, 'same')
self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course,
self.active_page1)[0].link,
reverse('progress', args = [self.course.id]))
self.active_page1)[0].link,
reverse('progress', args=[self.course.id]))
self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course,
self.active_page0)[0].is_active, False)
......@@ -63,15 +64,15 @@ class WikiTestCase(TestCase):
'same')
self.assertEqual(tabs._wiki(self.tab, self.user,
self.course, self.active_page1)[0].link,
self.course, self.active_page1)[0].link,
reverse('course_wiki', args=[self.course.id]))
self.assertEqual(tabs._wiki(self.tab, self.user,
self.course, self.active_page1)[0].is_active,
self.course, self.active_page1)[0].is_active,
True)
self.assertEqual(tabs._wiki(self.tab, self.user,
self.course, self.active_page0)[0].is_active,
self.course, self.active_page0)[0].is_active,
False)
@override_settings(WIKI_ENABLED=False)
......@@ -129,14 +130,13 @@ class StaticTabTestCase(TestCase):
self.assertEqual(tabs._static_tab(self.tabby, self.user,
self.course, self.active_page1)[0].link,
reverse('static_tab', args = [self.course.id,
self.tabby['url_slug']]))
reverse('static_tab', args=[self.course.id,
self.tabby['url_slug']]))
self.assertEqual(tabs._static_tab(self.tabby, self.user,
self.course, self.active_page1)[0].is_active,
True)
self.assertEqual(tabs._static_tab(self.tabby, self.user,
self.course, self.active_page0)[0].is_active,
False)
......@@ -183,7 +183,7 @@ class TextbooksTestCase(TestCase):
self.assertEqual(tabs._textbooks(self.tab, self.mockuser1,
self.course, self.active_page1)[1].name,
'Topology')
'Topology')
self.assertEqual(tabs._textbooks(self.tab, self.mockuser1,
self.course, self.active_page1)[1].link,
......@@ -206,6 +206,7 @@ class TextbooksTestCase(TestCase):
self.assertEqual(tabs._textbooks(self.tab, self.mockuser0,
self.course, self.active_pageX), [])
class KeyCheckerTestCase(TestCase):
def setUp(self):
......@@ -223,39 +224,36 @@ class KeyCheckerTestCase(TestCase):
class NullValidatorTestCase(TestCase):
def setUp(self):
self.d = {}
def setUp(self):
def test_null_validator(self):
self.dummy = {}
self.assertIsNone(tabs.null_validator(self.d))
def test_null_validator(self):
self.assertIsNone(tabs.null_validator(self.dummy))
class ValidateTabsTestCase(TestCase):
def setUp(self):
self.courses = [MagicMock() for i in range(0,5)]
self.courses = [MagicMock() for i in range(0, 5)]
self.courses[0].tabs = None
self.courses[1].tabs = [{'type':'courseware'}, {'type': 'fax'}]
self.courses[2].tabs = [{'type':'shadow'}, {'type': 'course_info'}]
self.courses[1].tabs = [{'type': 'courseware'}, {'type': 'fax'}]
self.courses[3].tabs = [{'type':'courseware'},{'type':'course_info', 'name': 'alice'},
{'type': 'wiki', 'name':'alice'}, {'type':'discussion', 'name': 'alice'},
{'type':'external_link', 'name': 'alice', 'link':'blink'},
{'type':'textbooks'}, {'type':'progress', 'name': 'alice'},
{'type':'static_tab', 'name':'alice', 'url_slug':'schlug'},
{'type': 'staff_grading'}]
self.courses[2].tabs = [{'type': 'shadow'}, {'type': 'course_info'}]
self.courses[4].tabs = [{'type':'courseware'},{'type': 'course_info'}, {'type': 'flying'}]
self.courses[3].tabs = [{'type': 'courseware'}, {'type': 'course_info', 'name': 'alice'},
{'type': 'wiki', 'name': 'alice'}, {'type': 'discussion', 'name': 'alice'},
{'type': 'external_link', 'name': 'alice', 'link': 'blink'},
{'type': 'textbooks'}, {'type': 'progress', 'name': 'alice'},
{'type': 'static_tab', 'name': 'alice', 'url_slug': 'schlug'},
{'type': 'staff_grading'}]
self.courses[4].tabs = [{'type': 'courseware'}, {'type': 'course_info'}, {'type': 'flying'}]
def test_validate_tabs(self):
self.assertIsNone(tabs.validate_tabs(self.courses[0]))
self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[1])
self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[2])
......@@ -268,15 +266,15 @@ class DiscussionLinkTestCase(ModuleStoreTestCase):
def setUp(self):
self.tabs_with_discussion = [
{'type':'courseware'},
{'type':'course_info'},
{'type':'discussion'},
{'type':'textbooks'},
{'type': 'courseware'},
{'type': 'course_info'},
{'type': 'discussion'},
{'type': 'textbooks'},
]
self.tabs_without_discussion = [
{'type':'courseware'},
{'type':'course_info'},
{'type':'textbooks'},
{'type': 'courseware'},
{'type': 'course_info'},
{'type': 'textbooks'},
]
@staticmethod
......
"""
API for submitting background tasks by an instructor for a course.
Also includes methods for getting information about tasks that have
already been submitted, filtered either by running state or input
arguments.
"""
from celery.states import READY_STATES
from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask
from instructor_task.tasks import (rescore_problem,
reset_problem_attempts,
delete_problem_state)
from instructor_task.api_helper import (check_arguments_for_rescoring,
encode_problem_and_student_input,
submit_task)
def get_running_instructor_tasks(course_id):
"""
Returns a query of InstructorTask objects of running tasks for a given course.
Used to generate a list of tasks to display on the instructor dashboard.
"""
instructor_tasks = InstructorTask.objects.filter(course_id=course_id)
# exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
for state in READY_STATES:
instructor_tasks = instructor_tasks.exclude(task_state=state)
return instructor_tasks.order_by('-id')
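A sketch of how the instructor dashboard might consume this query (the surrounding view code is hypothetical):

for instructor_task in get_running_instructor_tasks(course_id):
    print instructor_task.task_type, instructor_task.task_state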
def get_instructor_task_history(course_id, problem_url, student=None):
"""
Returns a query of InstructorTask objects of historical tasks for a given course,
that match a particular problem and optionally a student.
"""
_, task_key = encode_problem_and_student_input(problem_url, student)
instructor_tasks = InstructorTask.objects.filter(course_id=course_id, task_key=task_key)
return instructor_tasks.order_by('-id')
def submit_rescore_problem_for_student(request, course_id, problem_url, student):
"""
Request a problem to be rescored as a background task.
The problem will be rescored for the specified student only. Parameters are the `course_id`,
the `problem_url`, and the `student` as a User object.
The url must specify the location of the problem, using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being rescored for this student, or NotImplementedError if
the problem doesn't support rescoring.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: let exceptions return up to the caller.
check_arguments_for_rescoring(course_id, problem_url)
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(problem_url, student)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
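A sketch of a call site, assuming a resolved User object and an i4x-style problem location (values illustrative):

student = User.objects.get(username='a_student')
instructor_task = submit_rescore_problem_for_student(
    request, course_id, 'i4x://edX/Demo/problem/Problem_1', student)
# the returned InstructorTask's task_id can then be polled for status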
def submit_rescore_problem_for_all_students(request, course_id, problem_url):
"""
Request a problem to be rescored as a background task.
The problem will be rescored for all students who have accessed the
particular problem in a course and have provided and checked an answer.
Parameters are the `course_id` and the `problem_url`.
The url must specify the location of the problem, using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being rescored, or NotImplementedError if the problem doesn't
support rescoring.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: let exceptions return up to the caller.
check_arguments_for_rescoring(course_id, problem_url)
# check to see if task is already running, and reserve it otherwise
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(problem_url)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
def submit_reset_problem_attempts_for_all_students(request, course_id, problem_url):
"""
Request to have attempts reset for a problem as a background task.
The problem's attempts will be reset for all students who have accessed the
particular problem in a course. Parameters are the `course_id` and
the `problem_url`. The url must specify the location of the problem,
using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being reset.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: make sure that the problem_url is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist,
# an exception will be raised. Let it pass up to the caller.
modulestore().get_instance(course_id, problem_url)
task_type = 'reset_problem_attempts'
task_class = reset_problem_attempts
task_input, task_key = encode_problem_and_student_input(problem_url)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
def submit_delete_problem_state_for_all_students(request, course_id, problem_url):
"""
Request to have state deleted for a problem as a background task.
The problem's state will be deleted for all students who have accessed the
particular problem in a course. Parameters are the `course_id` and
the `problem_url`. The url must specify the location of the problem,
using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the particular problem's state is already being deleted.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: make sure that the problem_url is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist,
# an exception will be raised. Let it pass up to the caller.
modulestore().get_instance(course_id, problem_url)
task_type = 'delete_problem_state'
task_class = delete_problem_state
task_input, task_key = encode_problem_and_student_input(problem_url)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'InstructorTask'
db.create_table('instructor_task_instructortask', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_input', self.gf('django.db.models.fields.CharField')(max_length=255)),
('task_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_state', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, db_index=True)),
('task_output', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('instructor_task', ['InstructorTask'])
def backwards(self, orm):
# Deleting model 'InstructorTask'
db.delete_table('instructor_task_instructortask')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'instructor_task.instructortask': {
'Meta': {'object_name': 'InstructorTask'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['instructor_task']
\ No newline at end of file
"""
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration instructor_task --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/instructor_task/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
from uuid import uuid4
import json
from django.contrib.auth.models import User
from django.db import models, transaction
# define custom states used by InstructorTask
QUEUING = 'QUEUING'
PROGRESS = 'PROGRESS'
class InstructorTask(models.Model):
"""
Stores information about background tasks that have been submitted to
perform work by an instructor (or course staff).
Examples include grading and rescoring.
`task_type` identifies the kind of task being performed, e.g. rescoring.
`course_id` uses the course run's unique id to identify the course.
`task_key` stores relevant input arguments encoded into key value for testing to see
if the task is already running (together with task_type and course_id).
`task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
Examples include url of problem being rescored, id of student if only one student being rescored.
`task_id` stores the id used by celery for the background task.
`task_state` stores the last known state of the celery task
`task_output` stores the output of the celery task.
Format is a JSON-serialized dict. Content varies by task_type and task_state.
`requester` stores id of user who submitted the task
`created` stores date that entry was first created
`updated` stores date that entry was last modified
"""
task_type = models.CharField(max_length=50, db_index=True)
course_id = models.CharField(max_length=255, db_index=True)
task_key = models.CharField(max_length=255, db_index=True)
task_input = models.CharField(max_length=255)
task_id = models.CharField(max_length=255, db_index=True) # max_length from celery_taskmeta
task_state = models.CharField(max_length=50, null=True, db_index=True) # max_length from celery_taskmeta
task_output = models.CharField(max_length=1024, null=True)
requester = models.ForeignKey(User, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True)
def __repr__(self):
return 'InstructorTask<%r>' % ({
'task_type': self.task_type,
'course_id': self.course_id,
'task_input': self.task_input,
'task_id': self.task_id,
'task_state': self.task_state,
'task_output': self.task_output,
},)
def __unicode__(self):
return unicode(repr(self))
@classmethod
def create(cls, course_id, task_type, task_key, task_input, requester):
# create the task_id here, and pass it into celery:
task_id = str(uuid4())
json_task_input = json.dumps(task_input)
# check length of task_input, and raise an exception if it's too long:
if len(json_task_input) > 255:
fmt = 'Task input longer than 255: "{input}" for "{task}" of "{course}"'
msg = fmt.format(input=json_task_input, task=task_type, course=course_id)
raise ValueError(msg)
# create the task, then save it:
instructor_task = cls(course_id=course_id,
task_type=task_type,
task_id=task_id,
task_key=task_key,
task_input=json_task_input,
task_state=QUEUING,
requester=requester)
instructor_task.save_now()
return instructor_task
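A minimal sketch of direct use, assuming `requester` is a User and the key and input values were already encoded (illustrative):

entry = InstructorTask.create(course_id='edx/1.23x/test_course',
                              task_type='rescore_problem',
                              task_key=task_key,
                              task_input={'problem_url': problem_url},
                              requester=requester)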
@transaction.autocommit
def save_now(self):
"""Writes InstructorTask immediately, ensuring the transaction is committed."""
self.save()
@staticmethod
def create_output_for_success(returned_result):
"""
Converts successful result to output format.
Raises a ValueError exception if the output is too long.
"""
# In the future, there should be a check here that the resulting JSON
# will fit in the column. In the meantime, just raise an exception.
json_output = json.dumps(returned_result)
if len(json_output) > 1023:
raise ValueError("Length of task output is too long: {0}".format(json_output))
return json_output
@staticmethod
def create_output_for_failure(exception, traceback_string):
"""
Converts failed result information to output format.
Traceback information is truncated or not included if it would result in an output string
that would not fit in the database. If the output is still too long, then the
exception message is also truncated.
Truncation is indicated by adding "..." to the end of the value.
"""
tag = '...'
task_progress = {'exception': type(exception).__name__, 'message': str(exception.message)}
if traceback_string is not None:
# truncate any traceback that goes into the InstructorTask model:
task_progress['traceback'] = traceback_string
json_output = json.dumps(task_progress)
# if the resulting output is too long, then first shorten the
# traceback, and then the message, until it fits.
too_long = len(json_output) - 1023
if too_long > 0:
if traceback_string is not None:
if too_long >= len(traceback_string) - len(tag):
# remove the traceback entry entirely (so no key or value)
del task_progress['traceback']
too_long -= (len(traceback_string) + len('traceback'))
else:
# truncate the traceback:
task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag
too_long = 0
if too_long > 0:
# we need to shorten the message:
task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag
json_output = json.dumps(task_progress)
return json_output
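A worked example of the truncation rule (values made up): with no traceback, an oversized message is itself truncated until the JSON fits within 1023 characters.

output = InstructorTask.create_output_for_failure(ValueError('x' * 2000), None)
# json.loads(output)['message'] now ends with '...' and len(output) <= 1023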
@staticmethod
def create_output_for_revoked():
"""Creates standard message to store in output format for revoked tasks."""
return json.dumps({'message': 'Task revoked before running'})
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.
At present, these tasks all operate on StudentModule objects in one way or another,
so they share a visitor architecture. Each task defines an "update function" that
takes a module_descriptor, a particular StudentModule object, and xmodule_instance_args.
A task may optionally specify a "filter function" that takes a query for StudentModule
objects, and adds additional filter clauses.
A task also passes through "xmodule_instance_args", which are used to provide
information to our code that instantiates xmodule instances.
The task definition then calls the traversal function, passing in the three arguments
above, along with the id value for an InstructorTask object. The InstructorTask
object contains a 'task_input' field which is a JSON-encoded dict containing
a problem URL and optionally a student. These are used to set up the initial value
of the query for traversing StudentModule objects.
"""
from celery import task
from instructor_task.tasks_helper import (update_problem_module_state,
rescore_problem_module_state,
reset_attempts_module_state,
delete_problem_module_state)
@task
def rescore_problem(entry_id, xmodule_instance_args):
"""Rescores a problem in a course, for all students or one specific student.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
'student': the identifier (username or email) of a particular user whose
problem submission should be rescored. If not specified, all problem
submissions for the problem will be rescored.
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
action_name = 'rescored'
update_fcn = rescore_problem_module_state
filter_fcn = lambda(modules_to_update): modules_to_update.filter(state__contains='"done": true')
return update_problem_module_state(entry_id,
update_fcn, action_name, filter_fcn=filter_fcn,
xmodule_instance_args=xmodule_instance_args)
@task
def reset_problem_attempts(entry_id, xmodule_instance_args):
"""Resets problem attempts to zero for a particular problem for all students in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
action_name = 'reset'
update_fcn = reset_attempts_module_state
return update_problem_module_state(entry_id,
update_fcn, action_name, filter_fcn=None,
xmodule_instance_args=xmodule_instance_args)
@task
def delete_problem_state(entry_id, xmodule_instance_args):
"""Deletes problem state entirely for all students on a particular problem in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
action_name = 'deleted'
update_fcn = delete_problem_module_state
return update_problem_module_state(entry_id,
update_fcn, action_name, filter_fcn=None,
xmodule_instance_args=xmodule_instance_args)
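These are ordinary celery tasks; a direct submission sketch, assuming an InstructorTask entry already exists (reusing its task_id, as the submission helpers are expected to do):

result = rescore_problem.apply_async((entry.id, xmodule_instance_args),
                                     task_id=entry.task_id)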
import json
from factory import DjangoModelFactory, SubFactory
from student.tests.factories import UserFactory as StudentUserFactory
from instructor_task.models import InstructorTask
from celery.states import PENDING
class InstructorTaskFactory(DjangoModelFactory):
FACTORY_FOR = InstructorTask
task_type = 'rescore_problem'
course_id = "MITx/999/Robot_Super_Course"
task_input = json.dumps({})
task_key = None
task_id = None
task_state = PENDING
task_output = None
requester = SubFactory(StudentUserFactory)
"""
Test for LMS instructor background task queue management
"""
from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware.tests.factories import UserFactory
from instructor_task.api import (get_running_instructor_tasks,
get_instructor_task_history,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students)
from instructor_task.api_helper import AlreadyRunningError
from instructor_task.models import InstructorTask, PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
InstructorTaskModuleTestCase,
TEST_COURSE_ID)
class InstructorTaskReportTest(InstructorTaskTestCase):
"""
Tests API and view methods that involve the reporting of status for background tasks.
"""
def test_get_running_instructor_tasks(self):
# when fetching running tasks, we get all running tasks, and only running tasks
for _ in range(1, 5):
self._create_failure_entry()
self._create_success_entry()
progress_task_ids = [self._create_progress_entry().task_id for _ in range(1, 5)]
task_ids = [instructor_task.task_id for instructor_task in get_running_instructor_tasks(TEST_COURSE_ID)]
self.assertEquals(set(task_ids), set(progress_task_ids))
def test_get_instructor_task_history(self):
# when fetching historical tasks, we get all tasks, including running tasks
expected_ids = []
for _ in range(1, 5):
expected_ids.append(self._create_failure_entry().task_id)
expected_ids.append(self._create_success_entry().task_id)
expected_ids.append(self._create_progress_entry().task_id)
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(TEST_COURSE_ID, self.problem_url)]
self.assertEquals(set(task_ids), set(expected_ids))
class InstructorTaskSubmitTest(InstructorTaskModuleTestCase):
"""Tests API methods that involve the submission of background tasks."""
def setUp(self):
self.initialize_course()
self.student = UserFactory.create(username="student", email="student@edx.org")
self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
def test_submit_nonexistent_modules(self):
# confirm that a rescore of a non-existent module raises an exception
problem_url = InstructorTaskModuleTestCase.problem_location("NonexistentProblem")
course_id = self.course.id
request = None
with self.assertRaises(ItemNotFoundError):
submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
with self.assertRaises(ItemNotFoundError):
submit_rescore_problem_for_all_students(request, course_id, problem_url)
with self.assertRaises(ItemNotFoundError):
submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
with self.assertRaises(ItemNotFoundError):
submit_delete_problem_state_for_all_students(request, course_id, problem_url)
def test_submit_nonrescorable_modules(self):
# confirm that a rescore of an existing but unscorable module raises an exception
# (Note that it is easier to test a scoreable but non-rescorable module in test_tasks,
# where we are creating real modules.)
problem_url = self.problem_section.location.url()
course_id = self.course.id
request = None
with self.assertRaises(NotImplementedError):
submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
with self.assertRaises(NotImplementedError):
submit_rescore_problem_for_all_students(request, course_id, problem_url)
def _test_submit_with_long_url(self, task_function, student=None):
problem_url_name = 'x' * 255
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
with self.assertRaises(ValueError):
if student is not None:
task_function(self.create_task_request(self.instructor), self.course.id, location, student)
else:
task_function(self.create_task_request(self.instructor), self.course.id, location)
def test_submit_rescore_all_with_long_url(self):
self._test_submit_with_long_url(submit_rescore_problem_for_all_students)
def test_submit_rescore_student_with_long_url(self):
self._test_submit_with_long_url(submit_rescore_problem_for_student, self.student)
def test_submit_reset_all_with_long_url(self):
self._test_submit_with_long_url(submit_reset_problem_attempts_for_all_students)
def test_submit_delete_all_with_long_url(self):
self._test_submit_with_long_url(submit_delete_problem_state_for_all_students)
def _test_submit_task(self, task_function, student=None):
# tests submit, and then tests a second identical submission.
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
if student is not None:
instructor_task = task_function(self.create_task_request(self.instructor),
self.course.id, location, student)
else:
instructor_task = task_function(self.create_task_request(self.instructor),
self.course.id, location)
# test resubmitting, by updating the existing record:
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
instructor_task.task_state = PROGRESS
instructor_task.save()
with self.assertRaises(AlreadyRunningError):
if student is not None:
task_function(self.create_task_request(self.instructor), self.course.id, location, student)
else:
task_function(self.create_task_request(self.instructor), self.course.id, location)
def test_submit_rescore_all(self):
self._test_submit_task(submit_rescore_problem_for_all_students)
def test_submit_rescore_student(self):
self._test_submit_task(submit_rescore_problem_for_student, self.student)
def test_submit_reset_all(self):
self._test_submit_task(submit_reset_problem_attempts_for_all_students)
def test_submit_delete_all(self):
self._test_submit_task(submit_delete_problem_state_for_all_students)
"""
Base test classes for LMS instructor-initiated background tasks
"""
import json
from uuid import uuid4
from mock import Mock
from celery.states import SUCCESS, FAILURE
from django.test.testcases import TestCase
from django.contrib.auth.models import User
from django.test.utils import override_settings
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from courseware.model_data import StudentModule
from courseware.tests.tests import LoginEnrollmentTestCase, TEST_DATA_MONGO_MODULESTORE
from instructor_task.api_helper import encode_problem_and_student_input
from instructor_task.models import PROGRESS, QUEUING
from instructor_task.tests.factories import InstructorTaskFactory
from instructor_task.views import instructor_task_status
TEST_COURSE_ORG = 'edx'
TEST_COURSE_NAME = 'Test Course'
TEST_COURSE_NUMBER = '1.23x'
TEST_SECTION_NAME = "Problem"
TEST_COURSE_ID = 'edx/1.23x/test_course'
TEST_FAILURE_MESSAGE = 'task failed horribly'
TEST_FAILURE_EXCEPTION = 'RandomCauseError'
OPTION_1 = 'Option 1'
OPTION_2 = 'Option 2'
class InstructorTaskTestCase(TestCase):
"""
Tests API and view methods that involve the reporting of status for background tasks.
"""
def setUp(self):
self.student = UserFactory.create(username="student", email="student@edx.org")
self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
self.problem_url = InstructorTaskTestCase.problem_location("test_urlname")
@staticmethod
def problem_location(problem_url_name):
"""
Create an internal location for a test problem.
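For example, problem_location("test_urlname") returns
"i4x://edx/1.23x/problem/test_urlname".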
"""
return "i4x://{org}/{number}/problem/{problem_url_name}".format(org='edx',
number='1.23x',
problem_url_name=problem_url_name)
def _create_entry(self, task_state=QUEUING, task_output=None, student=None):
"""Creates a InstructorTask entry for testing."""
task_id = str(uuid4())
progress_json = json.dumps(task_output) if task_output is not None else None
task_input, task_key = encode_problem_and_student_input(self.problem_url, student)
instructor_task = InstructorTaskFactory.create(course_id=TEST_COURSE_ID,
requester=self.instructor,
task_input=json.dumps(task_input),
task_key=task_key,
task_id=task_id,
task_state=task_state,
task_output=progress_json)
return instructor_task
def _create_failure_entry(self):
"""Creates a InstructorTask entry representing a failed task."""
# view task entry for task failure
progress = {'message': TEST_FAILURE_MESSAGE,
'exception': TEST_FAILURE_EXCEPTION,
}
return self._create_entry(task_state=FAILURE, task_output=progress)
def _create_success_entry(self, student=None):
"""Creates a InstructorTask entry representing a successful task."""
return self._create_progress_entry(student, task_state=SUCCESS)
def _create_progress_entry(self, student=None, task_state=PROGRESS):
"""Creates a InstructorTask entry representing a task in progress."""
progress = {'attempted': 3,
'updated': 2,
'total': 5,
'action_name': 'rescored',
}
return self._create_entry(task_state=task_state, task_output=progress, student=student)
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class InstructorTaskModuleTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Base test class for InstructorTask-related tests that require
the setup of a course and problem in order to access StudentModule state.
"""
course = None
current_user = None
def initialize_course(self):
"""Create a course in the store, with a chapter and section."""
self.module_store = modulestore()
# Create the course
self.course = CourseFactory.create(org=TEST_COURSE_ORG,
number=TEST_COURSE_NUMBER,
display_name=TEST_COURSE_NAME)
# Add a chapter to the course
chapter = ItemFactory.create(parent_location=self.course.location,
display_name=TEST_SECTION_NAME)
# add a sequence to the course to which the problems can be added
self.problem_section = ItemFactory.create(parent_location=chapter.location,
template='i4x://edx/templates/sequential/Empty',
display_name=TEST_SECTION_NAME)
@staticmethod
def get_user_email(username):
"""Generate email address based on username"""
return '{0}@test.com'.format(username)
def login_username(self, username):
"""Login the user, given the `username`."""
if self.current_user != username:
self.login(InstructorTaskModuleTestCase.get_user_email(username), "test")
self.current_user = username
def _create_user(self, username, is_staff=False):
"""Creates a user and enrolls them in the test course."""
email = InstructorTaskModuleTestCase.get_user_email(username)
thisuser = UserFactory.create(username=username, email=email, is_staff=is_staff)
CourseEnrollmentFactory.create(user=thisuser, course_id=self.course.id)
return thisuser
def create_instructor(self, username):
"""Creates an instructor for the test course."""
return self._create_user(username, is_staff=True)
def create_student(self, username):
"""Creates a student for the test course."""
return self._create_user(username, is_staff=False)
@staticmethod
def problem_location(problem_url_name):
"""
Create an internal location for a test problem.
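Names that are already i4x locations pass through unchanged; otherwise,
for example, problem_location("H1P1") returns
"i4x://edx/1.23x/problem/H1P1".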
"""
if "i4x:" in problem_url_name:
return problem_url_name
else:
return "i4x://{org}/{number}/problem/{problem_url_name}".format(org=TEST_COURSE_ORG,
number=TEST_COURSE_NUMBER,
problem_url_name=problem_url_name)
def define_option_problem(self, problem_url_name):
"""Create the problem definition so the answer is Option 1"""
factory = OptionResponseXMLFactory()
factory_args = {'question_text': 'The correct answer is {0}'.format(OPTION_1),
'options': [OPTION_1, OPTION_2],
'correct_option': OPTION_1,
'num_responses': 2}
problem_xml = factory.build_xml(**factory_args)
ItemFactory.create(parent_location=self.problem_section.location,
template="i4x://edx/templates/problem/Blank_Common_Problem",
display_name=str(problem_url_name),
data=problem_xml)
def redefine_option_problem(self, problem_url_name):
"""Change the problem definition so the answer is Option 2"""
factory = OptionResponseXMLFactory()
factory_args = {'question_text': 'The correct answer is {0}'.format(OPTION_2),
'options': [OPTION_1, OPTION_2],
'correct_option': OPTION_2,
'num_responses': 2}
problem_xml = factory.build_xml(**factory_args)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
self.module_store.update_item(location, problem_xml)
def get_student_module(self, username, descriptor):
"""Get StudentModule object for test course, given the `username` and the problem's `descriptor`."""
return StudentModule.objects.get(course_id=self.course.id,
student=User.objects.get(username=username),
module_type=descriptor.location.category,
module_state_key=descriptor.location.url(),
)
@staticmethod
def get_task_status(task_id):
"""Use api method to fetch task status, using mock request."""
mock_request = Mock()
mock_request.REQUEST = {'task_id': task_id}
response = instructor_task_status(mock_request)
status = json.loads(response.content)
return status
def create_task_request(self, requester_username):
"""Generate request that can be used for submitting tasks"""
request = Mock()
request.user = User.objects.get(username=requester_username)
request.get_host = Mock(return_value="testhost")
request.META = {'REMOTE_ADDR': '0:0:0:0', 'SERVER_NAME': 'testhost'}
request.is_secure = Mock(return_value=False)
return request
import json
import logging
from django.http import HttpResponse
from celery.states import FAILURE, REVOKED, READY_STATES
from instructor_task.api_helper import (get_status_from_instructor_task,
get_updated_instructor_task)
from instructor_task.models import PROGRESS
log = logging.getLogger(__name__)
# return status for completed tasks and tasks in progress
STATES_WITH_STATUS = list(READY_STATES) + [PROGRESS]
def _get_instructor_task_status(task_id):
"""
Returns status for a specific task.
Written as an internal method here (rather than as a helper)
so that get_task_completion_info() can be called without
causing a circular dependency (since it's also called directly).
"""
instructor_task = get_updated_instructor_task(task_id)
status = get_status_from_instructor_task(instructor_task)
if instructor_task is not None and instructor_task.task_state in STATES_WITH_STATUS:
succeeded, message = get_task_completion_info(instructor_task)
status['message'] = message
status['succeeded'] = succeeded
return status
def instructor_task_status(request):
"""
View method that returns the status of a course-related task or tasks.
Status is returned as a JSON-serialized dict, wrapped as the content of an HttpResponse.
The task_id can be specified to this view in one of two ways:
* by making a request containing 'task_id' as a parameter with a single value
Returns a dict containing status information for the specified task_id
* by making a request containing 'task_ids' as a parameter,
with a list of task_id values.
Returns a dict of dicts, with the task_id as key, and the corresponding
dict containing status information for the specified task_id
Unrecognized task_id values are skipped.
The dict with status information for a task contains the following keys:
'message': on complete tasks, status message reporting on final progress,
or providing exception message if failed. For tasks in progress,
indicates the current progress.
'succeeded': on complete tasks or tasks in progress, boolean value indicates if the
task outcome was successful: did it achieve what it set out to do.
This is in contrast with a successful task_state, which indicates that the
task merely completed.
'task_id': id assigned by LMS and used by celery.
'task_state': state of task as stored in celery's result store.
'in_progress': boolean indicating if task is still running.
'task_progress': dict containing progress information. This includes:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
'duration_ms': how long the task has (or had) been running.
'exception': name of exception class raised in failed tasks.
'message': returned for failed and revoked tasks.
'traceback': optional, returned if task failed and produced a traceback.
"""
output = {}
if 'task_id' in request.REQUEST:
task_id = request.REQUEST['task_id']
output = _get_instructor_task_status(task_id)
elif 'task_ids[]' in request.REQUEST:
tasks = request.REQUEST.getlist('task_ids[]')
for task_id in tasks:
task_output = _get_instructor_task_status(task_id)
if task_output is not None:
output[task_id] = task_output
return HttpResponse(json.dumps(output, indent=4))
def get_task_completion_info(instructor_task):
"""
Construct progress message from progress information in InstructorTask entry.
Returns a (boolean, message string) pair, where the boolean indicates
whether the task completed without incident. (It is possible for a
task to attempt many sub-tasks, such as rescoring many students' problem
responses, and even though the task runs to completion, some of the
students' responses could not be rescored.)
Used for providing messages to instructor_task_status(), as well as for
external calls that provide course task submission history information.
"""
succeeded = False
if instructor_task.task_state not in STATES_WITH_STATUS:
return (succeeded, "No status information available")
# A completed task with no output is unexpected, but it only warrants a warning:
if instructor_task.task_output is None:
log.warning("No task_output information found for instructor_task {0}".format(instructor_task.task_id))
return (succeeded, "No status information available")
try:
task_output = json.loads(instructor_task.task_output)
except ValueError:
fmt = "No parsable task_output information found for instructor_task {0}: {1}"
log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
return (succeeded, "No parsable status information available")
if instructor_task.task_state in [FAILURE, REVOKED]:
return (succeeded, task_output.get('message', 'No message provided'))
if any(key not in task_output for key in ['action_name', 'attempted', 'updated', 'total']):
fmt = "Invalid task_output information found for instructor_task {0}: {1}"
log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
return (succeeded, "No progress status information available")
action_name = task_output['action_name']
num_attempted = task_output['attempted']
num_updated = task_output['updated']
num_total = task_output['total']
student = None
try:
task_input = json.loads(instructor_task.task_input)
except ValueError:
fmt = "No parsable task_input information found for instructor_task {0}: {1}"
log.warning(fmt.format(instructor_task.task_id, instructor_task.task_input))
else:
student = task_input.get('student')
if instructor_task.task_state == PROGRESS:
# special message for providing progress updates:
msg_format = "Progress: {action} {updated} of {attempted} so far"
elif student is not None:
if num_attempted == 0:
msg_format = "Unable to find submission to be {action} for student '{student}'"
elif num_updated == 0:
msg_format = "Problem failed to be {action} for student '{student}'"
else:
succeeded = True
msg_format = "Problem successfully {action} for student '{student}'"
elif num_attempted == 0:
msg_format = "Unable to find any students with submissions to be {action}"
elif num_updated == 0:
msg_format = "Problem failed to be {action} for any of {attempted} students"
elif num_updated == num_attempted:
succeeded = True
msg_format = "Problem successfully {action} for {attempted} students"
else: # num_updated < num_attempted
msg_format = "Problem {action} for {updated} of {attempted} students"
if student is None and num_attempted != num_total:
msg_format += " (out of {total})"
# Update status in task result object itself:
message = msg_format.format(action=action_name, updated=num_updated,
attempted=num_attempted, total=num_total,
student=student)
return (succeeded, message)
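# Illustrative examples of the resulting messages: with action_name='rescored',
# attempted=3, updated=2, total=5, and no student specified, a completed task
# yields (False, "Problem rescored for 2 of 3 students (out of 5)"), while a
# task still in PROGRESS yields (False, "Progress: rescored 2 of 3 so far (out of 5)").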
......@@ -122,7 +122,10 @@ MITX_FEATURES = {
'USE_CUSTOM_THEME': False,
# Do autoplay videos for students
'AUTOPLAY_VIDEOS': True
'AUTOPLAY_VIDEOS': True,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
}
# Used for A/B testing
......@@ -691,6 +694,7 @@ INSTALLED_APPS = (
'util',
'certificates',
'instructor',
'instructor_task',
'open_ended_grading',
'psychometrics',
'licenses',
......
// Define an InstructorTaskProgress object for updating a table on the instructor
// dashboard that shows the background tasks currently running for the
// instructor's course. Any tasks that were running when the page is
// first displayed are passed in as instructor_tasks, and populate the "Pending Instructor
// Task" table. The InstructorTaskProgress is bound to this table, and periodically
// polls the LMS to see if any of the tasks has completed. Once a task is complete,
// it is not included in any further polling.
(function() {
var __bind = function(fn, me){ return function(){ return fn.apply(me, arguments); }; };
this.InstructorTaskProgress = (function() {
function InstructorTaskProgress(element) {
this.update_progress = __bind(this.update_progress, this);
this.get_status = __bind(this.get_status, this);
this.element = element;
this.entries = $(element).find('.task-progress-entry');
if (window.queuePollerID) {
window.clearTimeout(window.queuePollerID);
}
// Hardcode the initial delay before the first refresh to one second:
window.queuePollerID = window.setTimeout(this.get_status, 1000);
}
InstructorTaskProgress.prototype.$ = function(selector) {
return $(selector, this.element);
};
InstructorTaskProgress.prototype.update_progress = function(response) {
var _this = this;
// Response should be a dict with an entry for each requested task_id,
// with "task_state" and "in_progress" keys, and optionally a "message"
// and a "task_progress.duration" key.
var something_in_progress = false;
for (var task_id in response) {
var task_dict = response[task_id];
// find the corresponding entry, and update it:
var entry = $(_this.element).find('[data-task-id="' + task_id + '"]');
entry.find('.task-state').text(task_dict.task_state);
var duration_value = (task_dict.task_progress && task_dict.task_progress.duration_ms
&& Math.round(task_dict.task_progress.duration_ms/1000)) || 'unknown';
entry.find('.task-duration').text(duration_value);
var progress_value = task_dict.message || '';
entry.find('.task-progress').text(progress_value);
// if the task is complete, then change the entry so it won't
// be queried again. Otherwise set a flag.
if (task_dict.in_progress === true) {
something_in_progress = true;
} else {
entry.data('inProgress', false);
}
}
// if some entries are still incomplete, then repoll:
// Hardcode the refresh interval to be every five seconds.
// TODO: allow the refresh interval to be set. (And if it is disabled,
// then don't set the timeout at all.)
if (something_in_progress) {
window.queuePollerID = window.setTimeout(_this.get_status, 5000);
} else {
delete window.queuePollerID;
}
};
InstructorTaskProgress.prototype.get_status = function() {
var _this = this;
var task_ids = [];
// Construct the array of ids to get status for, by
// including the subset of entries that are still in progress.
this.entries.each(function(idx, element) {
var task_id = $(element).data('taskId');
var in_progress = $(element).data('inProgress');
if (in_progress="True") {
task_ids.push(task_id);
}
});
// Make call to get status for these ids.
// Note that jQuery appends "[]" to the key name in the POST
// parameters received by the Django server.
// TODO: add error handler.
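// For example (illustrative ids), task_ids = ["id1", "id2"] is posted
// as task_ids[]=id1&task_ids[]=id2, which is why the Django view reads
// request.REQUEST.getlist('task_ids[]').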
var ajax_url = '/instructor_task_status/';
var data = {'task_ids': task_ids };
$.post(ajax_url, data).done(this.update_progress);
};
return InstructorTaskProgress;
})();
}).call(this);
// once the page is rendered, create the progress object
var instructorTaskProgress;
$(document).ready(function() {
instructorTaskProgress = new InstructorTaskProgress($('#task-progress-wrapper'));
});
......@@ -9,7 +9,9 @@
<script type="text/javascript" src="${static.url('js/vendor/jquery-jvectormap-1.1.1/jquery-jvectormap-1.1.1.min.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/jquery-jvectormap-1.1.1/jquery-jvectormap-world-mill-en.js')}"></script>
<script type="text/javascript" src="${static.url('js/course_groups/cohorts.js')}"></script>
%if instructor_tasks is not None:
<script type="text/javascript" src="${static.url('js/pending_tasks.js')}"></script>>
%endif
</%block>
<%include file="/courseware/course_navigation.html" args="active_page='instructor'" />
......@@ -194,19 +196,77 @@ function goto( mode)
<hr width="40%" style="align:left">
%endif
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
<h2>Course-specific grade adjustment</h2>
<p>
Specify a particular problem in the course here by its url:
<input type="text" name="problem_for_all_students" size="60">
</p>
<p>
You may use just the "urlname" if it is a problem, or "modulename/urlname" if it is not.
(For example, if the location is <tt>i4x://university/course/problem/problemname</tt>,
then just provide the <tt>problemname</tt>.
If the location is <tt>i4x://university/course/notaproblem/someothername</tt>, then
provide <tt>notaproblem/someothername</tt>.)
</p>
<p>
Then select an action:
<input type="submit" name="action" value="Reset ALL students' attempts">
<input type="submit" name="action" value="Rescore ALL students' problem submissions">
</p>
<p>
These actions run in the background, and status for active tasks will appear in a table below.
To see status for all tasks submitted for this problem, click on this button:
</p>
<p>
<input type="submit" name="action" value="Show Background Task History">
</p>
<hr width="40%" style="align:left">
%endif
<h2>Student-specific grade inspection and adjustment</h2>
<p>edX email address or their username: </p>
<p><input type="text" name="unique_student_identifier"> <input type="submit" name="action" value="Get link to student's progress page"></p>
<p>and, if you want to reset the number of attempts for a problem, the urlname of that problem
(e.g. if the location is <tt>i4x://university/course/problem/problemname</tt>, then the urlname is <tt>problemname</tt>).</p>
<p> <input type="text" name="problem_to_reset" size="60"> <input type="submit" name="action" value="Reset student's attempts"> </p>
<p>
Specify the edX email address or username of a student here:
<input type="text" name="unique_student_identifier">
</p>
<p>
Click this, and a link to the student's progress page will appear below:
<input type="submit" name="action" value="Get link to student's progress page">
</p>
<p>
Specify a particular problem in the course here by its url:
<input type="text" name="problem_for_student" size="60">
</p>
<p>
You may use just the "urlname" if it is a problem, or "modulename/urlname" if it is not.
(For example, if the location is <tt>i4x://university/course/problem/problemname</tt>,
then just provide the <tt>problemname</tt>.
If the location is <tt>i4x://university/course/notaproblem/someothername</tt>, then
provide <tt>notaproblem/someothername</tt>.)
</p>
<p>
Then select an action:
<input type="submit" name="action" value="Reset student's attempts">
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
<input type="submit" name="action" value="Rescore student's problem submission">
%endif
</p>
%if instructor_access:
<p> You may also delete the entire state of a student for a problem:
<input type="submit" name="action" value="Delete student state for problem"> </p>
<p>To delete the state of other XBlocks specify modulename/urlname, e.g.
<tt>combinedopenended/Humanities_SA_Peer</tt></p>
<p>
You may also delete the entire state of a student for the specified module:
<input type="submit" name="action" value="Delete student state for module">
</p>
%endif
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
<p>Rescoring runs in the background, and status for active tasks will appear in a table below.
To see status for all tasks submitted for this course and student, click on this button:
</p>
<p>
<input type="submit" name="action" value="Show Background Task History for Student">
</p>
%endif
%endif
......@@ -234,6 +294,7 @@ function goto( mode)
##-----------------------------------------------------------------------------
%if modeflag.get('Admin'):
%if instructor_access:
<hr width="40%" style="align:left">
<p>
......@@ -373,6 +434,7 @@ function goto( mode)
%if msg:
<p></p><p>${msg}</p>
%endif
##-----------------------------------------------------------------------------
%if modeflag.get('Analytics'):
......@@ -559,6 +621,69 @@ function goto( mode)
</p>
%endif
## Output tasks in progress
%if instructor_tasks is not None and len(instructor_tasks) > 0:
<hr width="100%">
<h2>Pending Instructor Tasks</h2>
<div id="task-progress-wrapper">
<table class="stat_table">
<tr>
<th>Task Type</th>
<th>Task Inputs</th>
<th>Task Id</th>
<th>Requester</th>
<th>Submitted</th>
<th>Task State</th>
<th>Duration (sec)</th>
<th>Task Progress</th>
</tr>
%for tasknum, instructor_task in enumerate(instructor_tasks):
<tr id="task-progress-entry-${tasknum}" class="task-progress-entry"
data-task-id="${instructor_task.task_id}"
data-in-progress="true">
<td>${instructor_task.task_type}</td>
<td>${instructor_task.task_input}</td>
<td class="task-id">${instructor_task.task_id}</td>
<td>${instructor_task.requester}</td>
<td>${instructor_task.created}</td>
<td class="task-state">${instructor_task.task_state}</td>
<td class="task-duration">unknown</td>
<td class="task-progress">unknown</td>
</tr>
%endfor
</table>
</div>
<br/>
%endif
##-----------------------------------------------------------------------------
%if course_stats and modeflag.get('Psychometrics') is None:
<br/>
<br/>
<p>
<hr width="100%">
<h2>${course_stats['title'] | h}</h2>
<table class="stat_table">
<tr>
%for hname in course_stats['header']:
<th>${hname | h}</th>
%endfor
</tr>
%for row in course_stats['data']:
<tr>
%for value in row:
<td>${value | h}</td>
%endfor
</tr>
%endfor
</table>
</p>
%endif
##-----------------------------------------------------------------------------
%if modeflag.get('Psychometrics'):
......
......@@ -394,6 +394,11 @@ if settings.MITX_FEATURES.get('ENABLE_SERVICE_STATUS'):
url(r'^status/', include('service_status.urls')),
)
if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
urlpatterns += (
url(r'^instructor_task_status/$', 'instructor_task.views.instructor_task_status', name='instructor_task_status'),
)
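# The status view is then reachable at /instructor_task_status/, the endpoint
# polled by the instructor dashboard's pending_tasks.js.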
# FoldIt views
urlpatterns += (
# The path is hardcoded into their app...
......