Commit 0524ff53 by JonahStanley

Merge branch 'master' into jonahstanley/add-courseteam-tests

Conflicts:
	cms/djangoapps/contentstore/features/common.py
parents 9dc12627 7c048fb2
@@ -5,6 +5,13 @@ These are notable changes in edx-platform. This is a rolling list of changes,
in roughly chronological order, most recent first. Add your entries at or near
the top. Include a label indicating the component affected.
+LMS: Problem rescoring. Added options on the Grades tab of the
+Instructor Dashboard to allow all students' submissions for a
+particular problem to be rescored. Also supports resetting all
+students' number of attempts to zero. Provides a list of background
+tasks that are currently running for the course, and an option to
+see a history of background tasks for a given problem.
LMS: Forums. Added handling for case where discussion module can get `None` as
value of lms.start in `lms/djangoapps/django_comment_client/utils.py`
...
@@ -39,8 +39,6 @@ def get_users_in_course_group_by_role(location, role):
'''
Create all permission groups for a new course and subscribe the caller into those roles
'''
def create_all_course_groups(creator, location):
    create_new_course_group(creator, location, INSTRUCTOR_ROLE_NAME)
    create_new_course_group(creator, location, STAFF_ROLE_NAME)
@@ -57,13 +55,11 @@ def create_new_course_group(creator, location, role):
    return
-'''
-This is to be called only by either a command line code path or through a app which has already
-asserted permissions
-'''
def _delete_course_group(location):
+    '''
+    This is to be called only by either a command line code path or through a app which has already
+    asserted permissions
+    '''
    # remove all memberships
    instructors = Group.objects.get(name=get_course_groupname_for_role(location, INSTRUCTOR_ROLE_NAME))
    for user in instructors.user_set.all():
@@ -75,13 +71,11 @@ def _delete_course_group(location):
        user.groups.remove(staff)
        user.save()
-'''
-This is to be called only by either a command line code path or through an app which has already
-asserted permissions to do this action
-'''
def _copy_course_group(source, dest):
+    '''
+    This is to be called only by either a command line code path or through an app which has already
+    asserted permissions to do this action
+    '''
    instructors = Group.objects.get(name=get_course_groupname_for_role(source, INSTRUCTOR_ROLE_NAME))
    new_instructors_group = Group.objects.get(name=get_course_groupname_for_role(dest, INSTRUCTOR_ROLE_NAME))
    for user in instructors.user_set.all():
...
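For orientation, the hunks above show how the course-group helpers are meant to be combined; a minimal sketch of a caller, assuming only the functions and role constants visible in this file (the `creator`, `source_location`, and `dest_location` names are illustrative, not code from this commit):

    # Create instructor/staff groups for a new course and subscribe its creator,
    # then mirror those memberships onto a cloned course location.
    create_all_course_groups(creator, source_location)
    _copy_course_group(source_location, dest_location)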
-#pylint: disable=C0111
+# pylint: disable=C0111
-#pylint: disable=W0621
+# pylint: disable=W0621
from lettuce import world, step
from nose.tools import assert_true
@@ -20,7 +20,7 @@ COURSE_ORG = 'MITx'
########### STEP HELPERS ##############
@step('I (?:visit|access|open) the Studio homepage$')
-def i_visit_the_studio_homepage(step):
+def i_visit_the_studio_homepage(_step):
    # To make this go to port 8001, put
    # LETTUCE_SERVER_PORT = 8001
    # in your settings.py file.
@@ -30,17 +30,17 @@ def i_visit_the_studio_homepage(step):
@step('I am logged into Studio$')
-def i_am_logged_into_studio(step):
+def i_am_logged_into_studio(_step):
    log_into_studio()
@step('I confirm the alert$')
-def i_confirm_with_ok(step):
+def i_confirm_with_ok(_step):
    world.browser.get_alert().accept()
@step(u'I press the "([^"]*)" delete icon$')
-def i_press_the_category_delete_icon(step, category):
+def i_press_the_category_delete_icon(_step, category):
    if category == 'section':
        css = 'a.delete-button.delete-section-button span.delete-icon'
    elif category == 'subsection':
@@ -51,7 +51,7 @@ def i_press_the_category_delete_icon(step, category):
@step('I have opened a new course in Studio$')
-def i_have_opened_a_new_course(step):
+def i_have_opened_a_new_course(_step):
    open_new_course()
@@ -78,7 +78,6 @@ def create_studio_user(
    registration.register(studio_user)
    registration.activate()
def fill_in_course_info(
        name=COURSE_NAME,
        org=COURSE_ORG,
@@ -149,6 +148,7 @@ def set_date_and_time(date_css, desired_date, time_css, desired_time):
    world.css_fill(date_css, desired_date)
    # hit TAB to get to the time field
    e = world.css_find(date_css).first
+    # pylint: disable=W0212
    e._element.send_keys(Keys.TAB)
    world.css_fill(time_css, desired_time)
    e = world.css_find(time_css).first
...
-#pylint: disable=C0111
+# pylint: disable=C0111
-#pylint: disable=W0621
+# pylint: disable=W0621
from lettuce import world, step
from common import *
@@ -8,7 +8,7 @@ from nose.tools import assert_equal
############### ACTIONS ####################
-@step('I click the new section link$')
+@step('I click the New Section link$')
def i_click_new_section_link(_step):
    link_css = 'a.new-courseware-section-button'
    world.css_click(link_css)
...
-#pylint: disable=C0111
+# pylint: disable=C0111
-#pylint: disable=W0621
+# pylint: disable=W0621
from lettuce import world, step
from common import *
...
@@ -6,13 +6,13 @@ from lettuce import world, step
@step('when I view the video it does not have autoplay enabled')
-def does_not_autoplay(step):
+def does_not_autoplay(_step):
    assert world.css_find('.video')[0]['data-autoplay'] == 'False'
    assert world.css_find('.video_control')[0].has_class('play')
@step('creating a video takes a single click')
-def video_takes_a_single_click(step):
+def video_takes_a_single_click(_step):
    assert(not world.is_css_present('.xmodule_VideoModule'))
    world.css_click("a[data-location='i4x://edx/templates/video/default']")
    assert(world.is_css_present('.xmodule_VideoModule'))
...
@@ -39,9 +39,6 @@ def get_module_info(store, location, parent_location=None, rewrite_static_links=
def set_module_info(store, location, post_data):
    module = None
    try:
-        if location.revision is None:
-            module = store.get_item(location)
-        else:
        module = store.get_item(location)
    except:
        pass
...
@@ -99,6 +99,7 @@ class ChecklistTestCase(CourseTestCase):
                'name': self.course.location.name,
                'checklist_index': 2})
        def get_first_item(checklist):
            return checklist['items'][0]
...
@@ -132,7 +132,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
        # just pick one vertical
        descriptor = store.get_items(Location('i4x', 'edX', 'simple', 'vertical', None, None))[0]
-        location = descriptor.location._replace(name='.' + descriptor.location.name)
+        location = descriptor.location.replace(name='.' + descriptor.location.name)
        resp = self.client.get(reverse('edit_unit', kwargs={'location': location.url()}))
        self.assertEqual(resp.status_code, 400)
@@ -224,7 +224,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
        draft_store.clone_item(html_module.location, html_module.location)
        html_module = draft_store.get_item(['i4x', 'edX', 'simple', 'html', 'test_html', None])
-        new_graceperiod = timedelta(**{'hours': 1})
+        new_graceperiod = timedelta(hours=1)
        self.assertNotIn('graceperiod', own_metadata(html_module))
        html_module.lms.graceperiod = new_graceperiod
@@ -369,7 +369,6 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
        '''
        module_store = modulestore('direct')
        import_from_xml(module_store, 'common/test/data/', ['full'])
        effort = module_store.get_item(Location(['i4x', 'edX', 'full', 'about', 'effort', None]))
        self.assertEqual(effort.data, '6 hours')
@@ -617,12 +616,12 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
        items = module_store.get_items(Location(['i4x', 'edX', 'full', 'vertical', None]))
        self.assertEqual(len(items), 0)
-    def verify_content_existence(self, modulestore, root_dir, location, dirname, category_name, filename_suffix=''):
+    def verify_content_existence(self, store, root_dir, location, dirname, category_name, filename_suffix=''):
        filesystem = OSFS(root_dir / 'test_export')
        self.assertTrue(filesystem.exists(dirname))
        query_loc = Location('i4x', location.org, location.course, category_name, None)
-        items = modulestore.get_items(query_loc)
+        items = store.get_items(query_loc)
        for item in items:
            filesystem = OSFS(root_dir / ('test_export/' + dirname))
@@ -768,7 +767,6 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
    def test_prefetch_children(self):
        module_store = modulestore('direct')
        import_from_xml(module_store, 'common/test/data/', ['full'])
        location = CourseDescriptor.id_to_location('edX/full/6.002_Spring_2012')
        wrapper = MongoCollectionFindWrapper(module_store.collection.find)
@@ -864,7 +862,7 @@ class ContentStoreTest(ModuleStoreTestCase):
    def test_create_course_duplicate_course(self):
        """Test new course creation - error path"""
-        resp = self.client.post(reverse('create_new_course'), self.course_data)
+        self.client.post(reverse('create_new_course'), self.course_data)
        resp = self.client.post(reverse('create_new_course'), self.course_data)
        data = parse_json(resp)
        self.assertEqual(resp.status_code, 200)
@@ -872,7 +870,7 @@ class ContentStoreTest(ModuleStoreTestCase):
    def test_create_course_duplicate_number(self):
        """Test new course creation - error path"""
-        resp = self.client.post(reverse('create_new_course'), self.course_data)
+        self.client.post(reverse('create_new_course'), self.course_data)
        self.course_data['display_name'] = 'Robot Super Course Two'
        resp = self.client.post(reverse('create_new_course'), self.course_data)
@@ -1090,11 +1088,9 @@ class ContentStoreTest(ModuleStoreTestCase):
                                json.dumps({'id': del_loc.url()}), "application/json")
        self.assertEqual(200, resp.status_code)
    def test_import_metadata_with_attempts_empty_string(self):
        module_store = modulestore('direct')
        import_from_xml(module_store, 'common/test/data/', ['simple'])
        did_load_item = False
        try:
            module_store.get_item(Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None]))
...
@@ -224,14 +224,14 @@ def add_extra_panel_tab(tab_type, course):
    @param course: A course object from the modulestore.
    @return: Boolean indicating whether or not a tab was added and a list of tabs for the course.
    """
-    #Copy course tabs
+    # Copy course tabs
    course_tabs = copy.copy(course.tabs)
    changed = False
-    #Check to see if open ended panel is defined in the course
+    # Check to see if open ended panel is defined in the course
    tab_panel = EXTRA_TAB_PANELS.get(tab_type)
    if tab_panel not in course_tabs:
-        #Add panel to the tabs if it is not defined
+        # Add panel to the tabs if it is not defined
        course_tabs.append(tab_panel)
        changed = True
    return changed, course_tabs
@@ -244,14 +244,14 @@ def remove_extra_panel_tab(tab_type, course):
    @param course: A course object from the modulestore.
    @return: Boolean indicating whether or not a tab was added and a list of tabs for the course.
    """
-    #Copy course tabs
+    # Copy course tabs
    course_tabs = copy.copy(course.tabs)
    changed = False
-    #Check to see if open ended panel is defined in the course
+    # Check to see if open ended panel is defined in the course
    tab_panel = EXTRA_TAB_PANELS.get(tab_type)
    if tab_panel in course_tabs:
-        #Add panel to the tabs if it is not defined
+        # Add panel to the tabs if it is not defined
        course_tabs = [ct for ct in course_tabs if ct != tab_panel]
        changed = True
    return changed, course_tabs
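The docstrings above describe a (changed, tabs) return contract rather than mutating the course in place; a minimal sketch of a caller, assuming a `course` object from the modulestore (the persistence step is illustrative only, since the exact save call depends on the store API and is not shown in this diff):

    # Add the panel tab only if it is missing, then persist the updated tab list.
    changed, new_tabs = add_extra_panel_tab(tab_type, course)
    if changed:
        course.tabs = new_tabs
        # save `course` back to the modulestore here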
@@ -12,8 +12,8 @@ from django.core.urlresolvers import reverse
from mitxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
-from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError
+from xmodule.modulestore.exceptions import ItemNotFoundError, \
+    InvalidLocationError
from xmodule.modulestore import Location
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update
@@ -33,9 +33,6 @@ from .component import OPEN_ENDED_COMPONENT_TYPES, \
from django_comment_common.utils import seed_permissions_roles
import datetime
from django.utils.timezone import UTC
-# TODO: should explicitly enumerate exports with __all__
__all__ = ['course_index', 'create_new_course', 'course_info',
           'course_info_updates', 'get_course_settings',
           'course_config_graders_page',
...
@@ -103,7 +103,7 @@ def clone_item(request):
@expect_json
def delete_item(request):
    item_location = request.POST['id']
-    item_loc = Location(item_location)
+    item_location = Location(item_location)
    # check permissions for this user within this course
    if not has_access(request.user, item_location):
@@ -124,11 +124,11 @@ def delete_item(request):
    # cdodge: we need to remove our parent's pointer to us so that it is no longer dangling
    if delete_all_versions:
-        parent_locs = modulestore('direct').get_parent_locations(item_loc, None)
+        parent_locs = modulestore('direct').get_parent_locations(item_location, None)
        for parent_loc in parent_locs:
            parent = modulestore('direct').get_item(parent_loc)
-            item_url = item_loc.url()
+            item_url = item_location.url()
            if item_url in parent.children:
                children = parent.children
                children.remove(item_url)
...
@@ -41,25 +41,25 @@ class CourseDetails(object):
        course.enrollment_start = descriptor.enrollment_start
        course.enrollment_end = descriptor.enrollment_end
-        temploc = course_location._replace(category='about', name='syllabus')
+        temploc = course_location.replace(category='about', name='syllabus')
        try:
            course.syllabus = get_modulestore(temploc).get_item(temploc).data
        except ItemNotFoundError:
            pass
-        temploc = temploc._replace(name='overview')
+        temploc = temploc.replace(name='overview')
        try:
            course.overview = get_modulestore(temploc).get_item(temploc).data
        except ItemNotFoundError:
            pass
-        temploc = temploc._replace(name='effort')
+        temploc = temploc.replace(name='effort')
        try:
            course.effort = get_modulestore(temploc).get_item(temploc).data
        except ItemNotFoundError:
            pass
-        temploc = temploc._replace(name='video')
+        temploc = temploc.replace(name='video')
        try:
            raw_video = get_modulestore(temploc).get_item(temploc).data
            course.intro_video = CourseDetails.parse_video_tag(raw_video)
@@ -126,16 +126,16 @@ class CourseDetails(object):
        # NOTE: below auto writes to the db w/o verifying that any of the fields actually changed
        # to make faster, could compare against db or could have client send over a list of which fields changed.
-        temploc = Location(course_location)._replace(category='about', name='syllabus')
+        temploc = Location(course_location).replace(category='about', name='syllabus')
        update_item(temploc, jsondict['syllabus'])
-        temploc = temploc._replace(name='overview')
+        temploc = temploc.replace(name='overview')
        update_item(temploc, jsondict['overview'])
-        temploc = temploc._replace(name='effort')
+        temploc = temploc.replace(name='effort')
        update_item(temploc, jsondict['effort'])
-        temploc = temploc._replace(name='video')
+        temploc = temploc.replace(name='video')
        recomposed_video_tag = CourseDetails.recompose_video_tag(jsondict['intro_video'])
        update_item(temploc, recomposed_video_tag)
@@ -174,10 +174,10 @@ class CourseDetails(object):
        return result
-# TODO move to a more general util? Is there a better way to do the isinstance model check?
+# TODO move to a more general util?
class CourseSettingsEncoder(json.JSONEncoder):
    def default(self, obj):
-        if isinstance(obj, CourseDetails) or isinstance(obj, course_grading.CourseGradingModel):
+        if isinstance(obj, (CourseDetails, course_grading.CourseGradingModel)):
            return obj.__dict__
        elif isinstance(obj, Location):
            return obj.dict()
...
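Because CourseSettingsEncoder only overrides default(), it drops straight into the standard-library JSON machinery; a minimal sketch of how such an encoder is typically used, where `details` stands in for any CourseDetails or CourseGradingModel instance (an illustrative variable, not code from this commit):

    import json
    # Objects json cannot handle natively fall through to default():
    # CourseDetails/CourseGradingModel serialize via __dict__, Location via dict().
    payload = json.dumps(details, cls=CourseSettingsEncoder)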
@@ -235,8 +235,7 @@ PIPELINE_JS = {
        'source_filenames': sorted(
            rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/**/*.js') +
            rooted_glob(PROJECT_ROOT / 'static/', 'coffee/src/**/*.js')
-        ) + ['js/hesitate.js', 'js/base.js',
-             'js/models/feedback.js', 'js/views/feedback.js',
+        ) + ['js/hesitate.js', 'js/base.js', 'js/views/feedback.js',
             'js/models/section.js', 'js/views/section.js',
             'js/models/metadata_model.js', 'js/views/metadata_editor_view.js',
             'js/views/assets.js'],
...
@@ -7,6 +7,7 @@
    "js/vendor/jquery.cookie.js",
    "js/vendor/json2.js",
    "js/vendor/underscore-min.js",
+    "js/vendor/underscore.string.min.js",
    "js/vendor/backbone-min.js",
    "js/vendor/jquery.leanModal.min.js",
    "js/vendor/sinon-1.7.1.js",
...
describe "CMS.Models.SystemFeedback", ->
beforeEach ->
@model = new CMS.Models.SystemFeedback()
it "should have an empty message by default", ->
expect(@model.get("message")).toEqual("")
it "should have an empty title by default", ->
expect(@model.get("title")).toEqual("")
it "should not have an intent set by default", ->
expect(@model.get("intent")).toBeNull()
describe "CMS.Models.WarningMessage", ->
beforeEach ->
@model = new CMS.Models.WarningMessage()
it "should have the correct intent", ->
expect(@model.get("intent")).toEqual("warning")
describe "CMS.Models.ErrorMessage", ->
beforeEach ->
@model = new CMS.Models.ErrorMessage()
it "should have the correct intent", ->
expect(@model.get("intent")).toEqual("error")
describe "CMS.Models.ConfirmationMessage", ->
beforeEach ->
@model = new CMS.Models.ConfirmationMessage()
it "should have the correct intent", ->
expect(@model.get("intent")).toEqual("confirmation")
...@@ -18,79 +18,105 @@ beforeEach -> ...@@ -18,79 +18,105 @@ beforeEach ->
else else
return trimmedText.indexOf(text) != -1; return trimmedText.indexOf(text) != -1;
describe "CMS.Views.Alert as base class", -> describe "CMS.Views.SystemFeedback", ->
beforeEach -> beforeEach ->
@model = new CMS.Models.ConfirmationMessage({ @options =
title: "Portal" title: "Portal"
message: "Welcome to the Aperture Science Computer-Aided Enrichment Center" message: "Welcome to the Aperture Science Computer-Aided Enrichment Center"
})
# it will be interesting to see when this.render is called, so lets spy on it # it will be interesting to see when this.render is called, so lets spy on it
spyOn(CMS.Views.Alert.prototype, 'render').andCallThrough() @renderSpy = spyOn(CMS.Views.Alert.Confirmation.prototype, 'render').andCallThrough()
@showSpy = spyOn(CMS.Views.Alert.Confirmation.prototype, 'show').andCallThrough()
it "renders on initalize", -> @hideSpy = spyOn(CMS.Views.Alert.Confirmation.prototype, 'hide').andCallThrough()
view = new CMS.Views.Alert({model: @model})
expect(view.render).toHaveBeenCalled() it "requires a type and an intent", ->
neither = =>
new CMS.Views.SystemFeedback(@options)
noType = =>
options = $.extend({}, @options)
options.intent = "confirmation"
new CMS.Views.SystemFeedback(options)
noIntent = =>
options = $.extend({}, @options)
options.type = "alert"
new CMS.Views.SystemFeedback(options)
both = =>
options = $.extend({}, @options)
options.type = "alert"
options.intent = "confirmation"
new CMS.Views.SystemFeedback(options)
expect(neither).toThrow()
expect(noType).toThrow()
expect(noIntent).toThrow()
expect(both).not.toThrow()
# for simplicity, we'll use CMS.Views.Alert.Confirmation from here on,
# which extends and proxies to CMS.Views.SystemFeedback
it "does not show on initalize", ->
view = new CMS.Views.Alert.Confirmation(@options)
expect(@renderSpy).not.toHaveBeenCalled()
expect(@showSpy).not.toHaveBeenCalled()
it "renders the template", -> it "renders the template", ->
view = new CMS.Views.Alert({model: @model}) view = new CMS.Views.Alert.Confirmation(@options)
view.show()
expect(view.$(".action-close")).toBeDefined() expect(view.$(".action-close")).toBeDefined()
expect(view.$('.wrapper')).toBeShown() expect(view.$('.wrapper')).toBeShown()
expect(view.$el).toContainText(@model.get("title")) expect(view.$el).toContainText(@options.title)
expect(view.$el).toContainText(@model.get("message")) expect(view.$el).toContainText(@options.message)
it "close button sends a .hide() message", -> it "close button sends a .hide() message", ->
spyOn(CMS.Views.Alert.prototype, 'hide').andCallThrough() view = new CMS.Views.Alert.Confirmation(@options).show()
view = new CMS.Views.Alert({model: @model})
view.$(".action-close").click() view.$(".action-close").click()
expect(CMS.Views.Alert.prototype.hide).toHaveBeenCalled() expect(@hideSpy).toHaveBeenCalled()
expect(view.$('.wrapper')).toBeHiding() expect(view.$('.wrapper')).toBeHiding()
describe "CMS.Views.Prompt", -> describe "CMS.Views.Prompt", ->
beforeEach ->
@model = new CMS.Models.ConfirmationMessage({
title: "Portal"
message: "Welcome to the Aperture Science Computer-Aided Enrichment Center"
})
# for some reason, expect($("body")) blows up the test runner, so this test # for some reason, expect($("body")) blows up the test runner, so this test
# just exercises the Prompt rather than asserting on anything. Best I can # just exercises the Prompt rather than asserting on anything. Best I can
# do for now. :( # do for now. :(
it "changes class on body", -> it "changes class on body", ->
# expect($("body")).not.toHaveClass("prompt-is-shown") # expect($("body")).not.toHaveClass("prompt-is-shown")
view = new CMS.Views.Prompt({model: @model}) view = new CMS.Views.Prompt.Confirmation({
title: "Portal"
message: "Welcome to the Aperture Science Computer-Aided Enrichment Center"
})
# expect($("body")).toHaveClass("prompt-is-shown") # expect($("body")).toHaveClass("prompt-is-shown")
view.hide() view.hide()
# expect($("body")).not.toHaveClass("prompt-is-shown") # expect($("body")).not.toHaveClass("prompt-is-shown")
describe "CMS.Views.Alert click events", -> describe "CMS.Views.SystemFeedback click events", ->
beforeEach -> beforeEach ->
@model = new CMS.Models.WarningMessage( @primaryClickSpy = jasmine.createSpy('primaryClick')
@secondaryClickSpy = jasmine.createSpy('secondaryClick')
@view = new CMS.Views.Notification.Warning(
title: "Unsaved", title: "Unsaved",
message: "Your content is currently Unsaved.", message: "Your content is currently Unsaved.",
actions: actions:
primary: primary:
text: "Save", text: "Save",
class: "save-button", class: "save-button",
click: jasmine.createSpy('primaryClick') click: @primaryClickSpy
secondary: [{ secondary: [{
text: "Revert", text: "Revert",
class: "cancel-button", class: "cancel-button",
click: jasmine.createSpy('secondaryClick') click: @secondaryClickSpy
}] }]
) )
@view.show()
@view = new CMS.Views.Alert({model: @model})
it "should trigger the primary event on a primary click", -> it "should trigger the primary event on a primary click", ->
@view.primaryClick() @view.$(".action-primary").click()
expect(@model.get('actions').primary.click).toHaveBeenCalled() expect(@primaryClickSpy).toHaveBeenCalled()
expect(@secondaryClickSpy).not.toHaveBeenCalled()
it "should trigger the secondary event on a secondary click", -> it "should trigger the secondary event on a secondary click", ->
@view.secondaryClick() @view.$(".action-secondary").click()
expect(@model.get('actions').secondary[0].click).toHaveBeenCalled() expect(@secondaryClickSpy).toHaveBeenCalled()
expect(@primaryClickSpy).not.toHaveBeenCalled()
it "should apply class to primary action", -> it "should apply class to primary action", ->
expect(@view.$(".action-primary")).toHaveClass("save-button") expect(@view.$(".action-primary")).toHaveClass("save-button")
...@@ -100,20 +126,18 @@ describe "CMS.Views.Alert click events", -> ...@@ -100,20 +126,18 @@ describe "CMS.Views.Alert click events", ->
describe "CMS.Views.Notification minShown and maxShown", -> describe "CMS.Views.Notification minShown and maxShown", ->
beforeEach -> beforeEach ->
@model = new CMS.Models.SystemFeedback( @showSpy = spyOn(CMS.Views.Notification.Saving.prototype, 'show')
intent: "saving" @showSpy.andCallThrough()
title: "Saving" @hideSpy = spyOn(CMS.Views.Notification.Saving.prototype, 'hide')
) @hideSpy.andCallThrough()
spyOn(CMS.Views.Notification.prototype, 'show').andCallThrough()
spyOn(CMS.Views.Notification.prototype, 'hide').andCallThrough()
@clock = sinon.useFakeTimers() @clock = sinon.useFakeTimers()
afterEach -> afterEach ->
@clock.restore() @clock.restore()
it "a minShown view should not hide too quickly", -> it "a minShown view should not hide too quickly", ->
view = new CMS.Views.Notification({model: @model, minShown: 1000}) view = new CMS.Views.Notification.Saving({minShown: 1000})
expect(CMS.Views.Notification.prototype.show).toHaveBeenCalled() view.show()
expect(view.$('.wrapper')).toBeShown() expect(view.$('.wrapper')).toBeShown()
# call hide() on it, but the minShown should prevent it from hiding right away # call hide() on it, but the minShown should prevent it from hiding right away
...@@ -125,8 +149,8 @@ describe "CMS.Views.Notification minShown and maxShown", -> ...@@ -125,8 +149,8 @@ describe "CMS.Views.Notification minShown and maxShown", ->
expect(view.$('.wrapper')).toBeHiding() expect(view.$('.wrapper')).toBeHiding()
it "a maxShown view should hide by itself", -> it "a maxShown view should hide by itself", ->
view = new CMS.Views.Notification({model: @model, maxShown: 1000}) view = new CMS.Views.Notification.Saving({maxShown: 1000})
expect(CMS.Views.Notification.prototype.show).toHaveBeenCalled() view.show()
expect(view.$('.wrapper')).toBeShown() expect(view.$('.wrapper')).toBeShown()
# wait for the maxShown timeout to expire, and check again # wait for the maxShown timeout to expire, and check again
...@@ -134,13 +158,13 @@ describe "CMS.Views.Notification minShown and maxShown", -> ...@@ -134,13 +158,13 @@ describe "CMS.Views.Notification minShown and maxShown", ->
expect(view.$('.wrapper')).toBeHiding() expect(view.$('.wrapper')).toBeHiding()
it "a minShown view can stay visible longer", -> it "a minShown view can stay visible longer", ->
view = new CMS.Views.Notification({model: @model, minShown: 1000}) view = new CMS.Views.Notification.Saving({minShown: 1000})
expect(CMS.Views.Notification.prototype.show).toHaveBeenCalled() view.show()
expect(view.$('.wrapper')).toBeShown() expect(view.$('.wrapper')).toBeShown()
# wait for the minShown timeout to expire, and check again # wait for the minShown timeout to expire, and check again
@clock.tick(1001) @clock.tick(1001)
expect(CMS.Views.Notification.prototype.hide).not.toHaveBeenCalled() expect(@hideSpy).not.toHaveBeenCalled()
expect(view.$('.wrapper')).toBeShown() expect(view.$('.wrapper')).toBeShown()
# can now hide immediately # can now hide immediately
...@@ -148,8 +172,8 @@ describe "CMS.Views.Notification minShown and maxShown", -> ...@@ -148,8 +172,8 @@ describe "CMS.Views.Notification minShown and maxShown", ->
expect(view.$('.wrapper')).toBeHiding() expect(view.$('.wrapper')).toBeHiding()
it "a maxShown view can hide early", -> it "a maxShown view can hide early", ->
view = new CMS.Views.Notification({model: @model, maxShown: 1000}) view = new CMS.Views.Notification.Saving({maxShown: 1000})
expect(CMS.Views.Notification.prototype.show).toHaveBeenCalled() view.show()
expect(view.$('.wrapper')).toBeShown() expect(view.$('.wrapper')).toBeShown()
# wait 50 milliseconds, and hide it early # wait 50 milliseconds, and hide it early
...@@ -162,7 +186,8 @@ describe "CMS.Views.Notification minShown and maxShown", -> ...@@ -162,7 +186,8 @@ describe "CMS.Views.Notification minShown and maxShown", ->
expect(view.$('.wrapper')).toBeHiding() expect(view.$('.wrapper')).toBeHiding()
it "a view can have both maxShown and minShown", -> it "a view can have both maxShown and minShown", ->
view = new CMS.Views.Notification({model: @model, minShown: 1000, maxShown: 2000}) view = new CMS.Views.Notification.Saving({minShown: 1000, maxShown: 2000})
view.show()
# can't hide early # can't hide early
@clock.tick(50) @clock.tick(50)
......
@@ -18,11 +18,15 @@ $ ->
    $(document).ajaxError (event, jqXHR, ajaxSettings, thrownError) ->
        if ajaxSettings.notifyOnError is false
            return
-        msg = new CMS.Models.ErrorMessage(
+        if jqXHR.responseText
+            message = _.str.truncate(jqXHR.responseText, 300)
+        else
+            message = gettext("This may be happening because of an error with our server or your internet connection. Try refreshing the page or making sure you are online.")
+        msg = new CMS.Views.Notification.Error(
            "title": gettext("Studio's having trouble saving your work")
-            "message": jqXHR.responseText || gettext("This may be happening because of an error with our server or your internet connection. Try refreshing the page or making sure you are online.")
+            "message": message
        )
-        new CMS.Views.Notification({model: msg})
+        msg.show()
window.onTouchBasedDevice = ->
    navigator.userAgent.match /iPhone|iPod|iPad/i
...
-CMS.Models.SystemFeedback = Backbone.Model.extend({
-    defaults: {
-        "intent": null,  // "warning", "confirmation", "error", "announcement", "step-required", etc
-        "title": "",
-        "message": ""
-        /* could also have an "actions" hash: here is an example demonstrating
-        the expected structure
-        "actions": {
-            "primary": {
-                "text": "Save",
-                "class": "action-save",
-                "click": function() {
-                    // do something when Save is clicked
-                    // `this` refers to the model
-                }
-            },
-            "secondary": [
-                {
-                    "text": "Cancel",
-                    "class": "action-cancel",
-                    "click": function() {}
-                }, {
-                    "text": "Discard Changes",
-                    "class": "action-discard",
-                    "click": function() {}
-                }
-            ]
-        }
-        */
-    }
-});
-CMS.Models.WarningMessage = CMS.Models.SystemFeedback.extend({
-    defaults: $.extend({}, CMS.Models.SystemFeedback.prototype.defaults, {
-        "intent": "warning"
-    })
-});
-CMS.Models.ErrorMessage = CMS.Models.SystemFeedback.extend({
-    defaults: $.extend({}, CMS.Models.SystemFeedback.prototype.defaults, {
-        "intent": "error"
-    })
-});
-CMS.Models.ConfirmAssetDeleteMessage = CMS.Models.SystemFeedback.extend({
-    defaults: $.extend({}, CMS.Models.SystemFeedback.prototype.defaults, {
-        "intent": "warning"
-    })
-});
-CMS.Models.ConfirmationMessage = CMS.Models.SystemFeedback.extend({
-    defaults: $.extend({}, CMS.Models.SystemFeedback.prototype.defaults, {
-        "intent": "confirmation"
-    })
-});
@@ -22,22 +22,16 @@ CMS.Models.Section = Backbone.Model.extend({
    },
    showNotification: function() {
        if(!this.msg) {
-            this.msg = new CMS.Models.SystemFeedback({
-                intent: "saving",
-                title: gettext("Saving…")
-            });
-        }
-        if(!this.msgView) {
-            this.msgView = new CMS.Views.Notification({
-                model: this.msg,
+            this.msg = new CMS.Views.Notification.Saving({
+                title: gettext("Saving…"),
                closeIcon: false,
                minShown: 1250
            });
        }
-        this.msgView.show();
+        this.msg.show();
    },
    hideNotification: function() {
-        if(!this.msgView) { return; }
-        this.msgView.hide();
+        if(!this.msg) { return; }
+        this.msg.hide();
    }
});
-CMS.Views.Alert = Backbone.View.extend({
+CMS.Views.SystemFeedback = Backbone.View.extend({
    options: {
-        type: "alert",
+        title: "",
+        message: "",
+        intent: null,  // "warning", "confirmation", "error", "announcement", "step-required", etc
+        type: null,  // "alert", "notification", or "prompt": set by subclass
        shown: true,  // is this view currently being shown?
        icon: true,  // should we render an icon related to the message intent?
        closeIcon: true,  // should we render a close button in the top right corner?
        minShown: 0,  // length of time after this view has been shown before it can be hidden (milliseconds)
        maxShown: Infinity  // length of time after this view has been shown before it will be automatically hidden (milliseconds)
+        /* could also have an "actions" hash: here is an example demonstrating
+        the expected structure
+        actions: {
+            primary: {
+                "text": "Save",
+                "class": "action-save",
+                "click": function(view) {
+                    // do something when Save is clicked
+                }
+            },
+            secondary: [
+                {
+                    "text": "Cancel",
+                    "class": "action-cancel",
+                    "click": function(view) {}
+                }, {
+                    "text": "Discard Changes",
+                    "class": "action-discard",
+                    "click": function(view) {}
+                }
+            ]
+        }
+        */
    },
    initialize: function() {
+        if(!this.options.type) {
+            throw "SystemFeedback: type required (given " +
+                JSON.stringify(this.options) + ")";
+        }
+        if(!this.options.intent) {
+            throw "SystemFeedback: intent required (given " +
+                JSON.stringify(this.options) + ")";
+        }
        var tpl = $("#system-feedback-tpl").text();
        if(!tpl) {
            console.error("Couldn't load system-feedback template");
        }
        this.template = _.template(tpl);
        this.setElement($("#page-"+this.options.type));
-        this.listenTo(this.model, 'change', this.render);
-        return this.show();
-    },
-    render: function() {
-        var attrs = $.extend({}, this.options, this.model.attributes);
-        this.$el.html(this.template(attrs));
        return this;
    },
-    events: {
-        "click .action-close": "hide",
-        "click .action-primary": "primaryClick",
-        "click .action-secondary": "secondaryClick"
-    },
+    // public API: show() and hide()
    show: function() {
        clearTimeout(this.hideTimeout);
        this.options.shown = true;
        this.shownAt = new Date();
        this.render();
        if($.isNumeric(this.options.maxShown)) {
-            this.hideTimeout = setTimeout($.proxy(this.hide, this),
+            this.hideTimeout = setTimeout(_.bind(this.hide, this),
                this.options.maxShown);
        }
        return this;
@@ -43,7 +68,7 @@ CMS.Views.Alert = Backbone.View.extend({
            this.options.minShown > new Date() - this.shownAt)
        {
            clearTimeout(this.hideTimeout);
-            this.hideTimeout = setTimeout($.proxy(this.hide, this),
+            this.hideTimeout = setTimeout(_.bind(this.hide, this),
                this.options.minShown - (new Date() - this.shownAt));
        } else {
            this.options.shown = false;
@@ -52,40 +77,63 @@ CMS.Views.Alert = Backbone.View.extend({
        }
        return this;
    },
-    primaryClick: function() {
-        var actions = this.model.get("actions");
+    // the rest of the API should be considered semi-private
+    events: {
+        "click .action-close": "hide",
+        "click .action-primary": "primaryClick",
+        "click .action-secondary": "secondaryClick"
+    },
+    render: function() {
+        // there can be only one active view of a given type at a time: only
+        // one alert, only one notification, only one prompt. Therefore, we'll
+        // use a singleton approach.
+        var parent = CMS.Views[_.str.capitalize(this.options.type)];
+        if(parent && parent.active && parent.active !== this) {
+            parent.active.stopListening();
+        }
+        this.$el.html(this.template(this.options));
+        parent.active = this;
+        return this;
+    },
+    primaryClick: function(event) {
+        var actions = this.options.actions;
        if(!actions) { return; }
        var primary = actions.primary;
        if(!primary) { return; }
        if(primary.click) {
-            primary.click.call(this.model, this);
+            primary.click.call(event.target, this, event);
        }
    },
-    secondaryClick: function(e) {
-        var actions = this.model.get("actions");
+    secondaryClick: function(event) {
+        var actions = this.options.actions;
        if(!actions) { return; }
        var secondaryList = actions.secondary;
        if(!secondaryList) { return; }
        // which secondary action was clicked?
        var i = 0;  // default to the first secondary action (easier for testing)
-        if(e && e.target) {
-            i = _.indexOf(this.$(".action-secondary"), e.target);
+        if(event && event.target) {
+            i = _.indexOf(this.$(".action-secondary"), event.target);
        }
-        var secondary = this.model.get("actions").secondary[i];
+        var secondary = secondaryList[i];
        if(secondary.click) {
-            secondary.click.call(this.model, this);
+            secondary.click.call(event.target, this, event);
        }
    }
});
-CMS.Views.Notification = CMS.Views.Alert.extend({
-    options: $.extend({}, CMS.Views.Alert.prototype.options, {
+CMS.Views.Alert = CMS.Views.SystemFeedback.extend({
+    options: $.extend({}, CMS.Views.SystemFeedback.prototype.options, {
+        type: "alert"
+    })
+});
+CMS.Views.Notification = CMS.Views.SystemFeedback.extend({
+    options: $.extend({}, CMS.Views.SystemFeedback.prototype.options, {
        type: "notification",
        closeIcon: false
    })
});
-CMS.Views.Prompt = CMS.Views.Alert.extend({
-    options: $.extend({}, CMS.Views.Alert.prototype.options, {
+CMS.Views.Prompt = CMS.Views.SystemFeedback.extend({
    options: $.extend({}, CMS.Views.SystemFeedback.prototype.options, {
        type: "prompt",
        closeIcon: false,
        icon: false
@@ -98,6 +146,27 @@ CMS.Views.Prompt = CMS.Views.Alert.extend({
            $body.removeClass('prompt-is-shown');
        }
        // super() in Javascript has awkward syntax :(
-        return CMS.Views.Alert.prototype.render.apply(this, arguments);
+        return CMS.Views.SystemFeedback.prototype.render.apply(this, arguments);
    }
});
+// create CMS.Views.Alert.Warning, CMS.Views.Notification.Confirmation,
+// CMS.Views.Prompt.StepRequired, etc
+var capitalCamel, types, intents;
+capitalCamel = _.compose(_.str.capitalize, _.str.camelize);
+types = ["alert", "notification", "prompt"];
+intents = ["warning", "error", "confirmation", "announcement", "step-required", "help", "saving"];
+_.each(types, function(type) {
+    _.each(intents, function(intent) {
+        // "class" is a reserved word in Javascript, so use "klass" instead
+        var klass, subklass;
+        klass = CMS.Views[capitalCamel(type)];
+        subklass = klass.extend({
+            options: $.extend({}, klass.prototype.options, {
+                type: type,
+                intent: intent
+            })
+        });
+        klass[capitalCamel(intent)] = subklass;
+    });
+});
@@ -67,7 +67,7 @@ CMS.Views.SectionEdit = Backbone.View.extend({
    showInvalidMessage: function(model, error, options) {
        model.set("name", model.previous("name"));
        var that = this;
-        var msg = new CMS.Models.ErrorMessage({
+        var prompt = new CMS.Views.Prompt.Error({
            title: gettext("Your change could not be saved"),
            message: error,
            actions: {
@@ -80,6 +80,6 @@ CMS.Views.SectionEdit = Backbone.View.extend({
                }
            }
        });
-        new CMS.Views.Prompt({model: msg});
+        prompt.show();
    }
});
@@ -8,7 +8,6 @@
<%block name="jsextra">
<script src="${static.url('js/vendor/mustache.js')}"></script>
-<script type="text/javascript" src="${static.url('js/views/assets.js')}"></script>
<script type='text/javascript'>
// we just want a singleton
...
@@ -38,6 +38,7 @@
<script type="text/javascript" src="/jsi18n/"></script>
<script type="text/javascript" src="${static.url('js/vendor/json2.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/underscore-min.js')}"></script>
+<script type="text/javascript" src="${static.url('js/vendor/underscore.string.min.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/backbone-min.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/markitup/jquery.markitup.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/markitup/sets/wiki/set.js')}"></script>
@@ -54,7 +55,6 @@
<script type="text/javascript" src="${static.url('js/vendor/CodeMirror/css.js')}"></script>
<script type="text/javascript" src="//www.youtube.com/player_api"></script>
-<script src="${static.url('js/models/feedback.js')}"></script>
<script src="${static.url('js/views/feedback.js')}"></script>
<!-- view -->
...
-#pylint: disable=C0111
+# pylint: disable=C0111
-#pylint: disable=W0621
+# pylint: disable=W0621
from lettuce import world, step
from .factories import *
...
import json
import logging
-import os
import pytz
import datetime
import dateutil.parser
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
-from django.http import Http404
from django.shortcuts import redirect
from django.conf import settings
from mitxmako.shortcuts import render_to_response
@@ -22,6 +20,7 @@ LOGFIELDS = ['username', 'ip', 'event_source', 'event_type', 'event', 'agent', '
def log_event(event):
+    """Write tracking event to log file, and optionally to TrackingLog model."""
    event_str = json.dumps(event)
    log.info(event_str[:settings.TRACK_MAX_EVENT])
    if settings.MITX_FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
@@ -34,6 +33,11 @@ def log_event(event):
def user_track(request):
+    """
+    Log when GET call to "event" URL is made by a user.
+    GET call should provide "event_type", "event", and "page" arguments.
+    """
    try:  # TODO: Do the same for many of the optional META parameters
        username = request.user.username
    except:
@@ -50,7 +54,6 @@ def user_track(request):
    except:
        agent = ''
-    # TODO: Move a bunch of this into log_event
    event = {
        "username": username,
        "session": scookie,
@@ -68,6 +71,7 @@ def user_track(request):
def server_track(request, event_type, event, page=None):
+    """Log events related to server requests."""
    try:
        username = request.user.username
    except:
@@ -95,9 +99,52 @@ def server_track(request, event_type, event, page=None):
    log_event(event)
+def task_track(request_info, task_info, event_type, event, page=None):
+    """
+    Logs tracking information for events occuring within celery tasks.
+    The `event_type` is a string naming the particular event being logged,
+    while `event` is a dict containing whatever additional contextual information
+    is desired.
+    The `request_info` is a dict containing information about the original
+    task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
+    While the dict is required, the values in it are not, so that {} can be
+    passed in.
+    In addition, a `task_info` dict provides more information about the current
+    task, to be stored with the `event` dict. This may also be an empty dict.
+    The `page` parameter is optional, and allows the name of the page to
+    be provided.
+    """
+    # supplement event information with additional information
+    # about the task in which it is running.
+    full_event = dict(event, **task_info)
+    # All fields must be specified, in case the tracking information is
+    # also saved to the TrackingLog model. Get values from the task-level
+    # information, or just add placeholder values.
+    event = {
+        "username": request_info.get('username', 'unknown'),
+        "ip": request_info.get('ip', 'unknown'),
+        "event_source": "task",
+        "event_type": event_type,
+        "event": full_event,
+        "agent": request_info.get('agent', 'unknown'),
+        "page": page,
+        "time": datetime.datetime.utcnow().isoformat(),
+        "host": request_info.get('host', 'unknown')
+    }
+    log_event(event)
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
+    """View to output contents of TrackingLog model. For staff use only."""
    if not request.user.is_staff:
        return redirect('/')
    nlen = 100
...
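The task_track docstring above pins down the expected shapes of request_info and task_info; a minimal sketch of a call from inside a background task, with every value below purely illustrative (the event name and dict contents are not taken from this commit):

    # Both dicts may be sparse or empty; missing keys fall back to 'unknown'.
    request_info = {'username': 'staff', 'ip': '127.0.0.1', 'agent': 'celery', 'host': 'localhost'}
    task_info = {'task_id': 'abc123'}  # hypothetical task metadata, merged into the event
    task_track(request_info, task_info, 'problem_rescored', {'attempted': 10}, page='instructor_dashboard')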
+import re
import json
import logging
import static_replace
...
...@@ -15,25 +15,22 @@ This is used by capa_module. ...@@ -15,25 +15,22 @@ This is used by capa_module.
from datetime import datetime from datetime import datetime
import logging import logging
import math
import numpy
import os.path import os.path
import re import re
import sys
from lxml import etree from lxml import etree
from xml.sax.saxutils import unescape from xml.sax.saxutils import unescape
from copy import deepcopy from copy import deepcopy
from .correctmap import CorrectMap from capa.correctmap import CorrectMap
import inputtypes import capa.inputtypes as inputtypes
import customrender import capa.customrender as customrender
from .util import contextualize_text, convert_files_to_filenames from capa.util import contextualize_text, convert_files_to_filenames
import xqueue_interface import capa.xqueue_interface as xqueue_interface
# to be replaced with auto-registering # to be replaced with auto-registering
import responsetypes import capa.responsetypes as responsetypes
import safe_exec from capa.safe_exec import safe_exec
# dict of tagname, Response Class -- this should come from auto-registering # dict of tagname, Response Class -- this should come from auto-registering
response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__]) response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
...@@ -46,8 +43,8 @@ response_properties = ["codeparam", "responseparam", "answer", "openendedparam"] ...@@ -46,8 +43,8 @@ response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]
# special problem tags which should be turned into innocuous HTML # special problem tags which should be turned into innocuous HTML
html_transforms = {'problem': {'tag': 'div'}, html_transforms = {'problem': {'tag': 'div'},
"text": {'tag': 'span'}, 'text': {'tag': 'span'},
"math": {'tag': 'span'}, 'math': {'tag': 'span'},
} }
# These should be removed from HTML output, including all subelements # These should be removed from HTML output, including all subelements
...@@ -134,7 +131,6 @@ class LoncapaProblem(object): ...@@ -134,7 +131,6 @@ class LoncapaProblem(object):
self.extracted_tree = self._extract_html(self.tree) self.extracted_tree = self._extract_html(self.tree)
def do_reset(self): def do_reset(self):
''' '''
Reset internal state to unfinished, with no answers Reset internal state to unfinished, with no answers
...@@ -175,7 +171,7 @@ class LoncapaProblem(object): ...@@ -175,7 +171,7 @@ class LoncapaProblem(object):
Return the maximum score for this problem. Return the maximum score for this problem.
''' '''
maxscore = 0 maxscore = 0
for response, responder in self.responders.iteritems(): for responder in self.responders.values():
maxscore += responder.get_max_score() maxscore += responder.get_max_score()
return maxscore return maxscore
...@@ -230,7 +226,6 @@ class LoncapaProblem(object): ...@@ -230,7 +226,6 @@ class LoncapaProblem(object):
if hasattr(the_input, 'ungraded_response'): if hasattr(the_input, 'ungraded_response'):
the_input.ungraded_response(xqueue_msg, queuekey) the_input.ungraded_response(xqueue_msg, queuekey)
def is_queued(self): def is_queued(self):
''' '''
Returns True if any part of the problem has been submitted to an external queue Returns True if any part of the problem has been submitted to an external queue
...@@ -238,7 +233,6 @@ class LoncapaProblem(object): ...@@ -238,7 +233,6 @@ class LoncapaProblem(object):
''' '''
return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map) return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)
def get_recentmost_queuetime(self): def get_recentmost_queuetime(self):
''' '''
Returns a DateTime object that represents the timestamp of the most recent Returns a DateTime object that represents the timestamp of the most recent
...@@ -256,11 +250,11 @@ class LoncapaProblem(object): ...@@ -256,11 +250,11 @@ class LoncapaProblem(object):
return max(queuetimes) return max(queuetimes)
def grade_answers(self, answers): def grade_answers(self, answers):
''' '''
Grade student responses. Called by capa_module.check_problem. Grade student responses. Called by capa_module.check_problem.
answers is a dict of all the entries from request.POST, but with the first part
`answers` is a dict of all the entries from request.POST, but with the first part
of each key removed (the string before the first "_"). of each key removed (the string before the first "_").
Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123 Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123
...@@ -270,24 +264,72 @@ class LoncapaProblem(object): ...@@ -270,24 +264,72 @@ class LoncapaProblem(object):
# if answers include File objects, convert them to filenames. # if answers include File objects, convert them to filenames.
self.student_answers = convert_files_to_filenames(answers) self.student_answers = convert_files_to_filenames(answers)
return self._grade_answers(answers)
def supports_rescoring(self):
"""
Checks that the current problem definition permits rescoring.
More precisely, it checks that there are no response types in
the current problem that are not fully supported (yet) for rescoring.
This includes responsetypes for which the student's answer
is not properly stored in state, i.e. file submissions. At present,
we have no way to know if an existing response was actually a real
answer or merely the filename of a file submitted as an answer.
It turns out that because rescoring is a background task, limiting
it to responsetypes that don't support file submissions also means
that the responsetypes are synchronous. This is convenient as it
permits rescoring to be complete when the rescoring call returns.
"""
return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())
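A rough sketch of the predicate above, using stand-in responder classes (hypothetical, assuming each responder exposes `allowed_inputfields` as an iterable of input type names): any responsetype that allows the `filesubmission` input makes the whole problem non-rescorable.

    class TextResponder(object):
        allowed_inputfields = ['textline']

    class FileResponder(object):
        allowed_inputfields = ['filesubmission', 'textbox']

    responders = [TextResponder(), FileResponder()]
    # One responder accepts file submissions, so the problem cannot be rescored.
    assert not all('filesubmission' not in r.allowed_inputfields for r in responders)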
def rescore_existing_answers(self):
"""
Rescore student responses. Called by capa_module.rescore_problem.
"""
return self._grade_answers(None)
def _grade_answers(self, student_answers):
"""
Internal grading call used for checking new 'student_answers' and also
rescoring existing student_answers.
For new student_answers being graded, `student_answers` is a dict of all the
entries from request.POST, but with the first part of each key removed
(the string before the first "_"). Thus, for example,
input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123.
For rescoring, `student_answers` is None.
Calls the Response for each question in this problem, to do the actual grading.
"""
# old CorrectMap # old CorrectMap
oldcmap = self.correct_map oldcmap = self.correct_map
# start new with empty CorrectMap # start new with empty CorrectMap
newcmap = CorrectMap() newcmap = CorrectMap()
# log.debug('Responders: %s' % self.responders)
# Call each responsetype instance to do actual grading # Call each responsetype instance to do actual grading
for responder in self.responders.values(): for responder in self.responders.values():
# File objects are passed only if responsetype explicitly allows for file # File objects are passed only if responsetype explicitly allows
# submissions # for file submissions. But we have no way of knowing if
if 'filesubmission' in responder.allowed_inputfields: # student_answers contains a proper answer or the filename of
results = responder.evaluate_answers(answers, oldcmap) # an earlier submission, so for now skip these entirely.
# TODO: figure out where to get file submissions when rescoring.
if 'filesubmission' in responder.allowed_inputfields and student_answers is None:
raise Exception("Cannot rescore problems with possible file submissions")
# use 'student_answers' only if it is provided, and if it might contain a file
# submission that would not exist in the persisted "student_answers".
if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
results = responder.evaluate_answers(student_answers, oldcmap)
else: else:
results = responder.evaluate_answers(convert_files_to_filenames(answers), oldcmap) results = responder.evaluate_answers(self.student_answers, oldcmap)
newcmap.update(results) newcmap.update(results)
self.correct_map = newcmap self.correct_map = newcmap
# log.debug('%s: in grade_answers, answers=%s, cmap=%s' % (self,answers,newcmap))
return newcmap return newcmap
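The two public entry points above both funnel into `_grade_answers`; a hedged sketch of how a caller might exercise each path (`problem` stands in for a LoncapaProblem instance and the answer id is hypothetical):

    # First grading pass: fresh answers keyed by stripped input ids (input_2_1 -> 2_1).
    cmap = problem.grade_answers({'2_1': 'blue'})

    # Later, after the problem definition changes, replay the stored answers;
    # rescore_existing_answers calls _grade_answers(None), which grades self.student_answers.
    if problem.supports_rescoring():
        cmap = problem.rescore_existing_answers()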
def get_question_answers(self): def get_question_answers(self):
...@@ -331,7 +373,6 @@ class LoncapaProblem(object): ...@@ -331,7 +373,6 @@ class LoncapaProblem(object):
html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context) html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
return html return html
def handle_input_ajax(self, get): def handle_input_ajax(self, get):
''' '''
InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data
...@@ -348,8 +389,6 @@ class LoncapaProblem(object): ...@@ -348,8 +389,6 @@ class LoncapaProblem(object):
log.warning("Could not find matching input for id: %s" % input_id) log.warning("Could not find matching input for id: %s" % input_id)
return {} return {}
# ======= Private Methods Below ======== # ======= Private Methods Below ========
def _process_includes(self): def _process_includes(self):
...@@ -359,16 +398,16 @@ class LoncapaProblem(object): ...@@ -359,16 +398,16 @@ class LoncapaProblem(object):
''' '''
includes = self.tree.findall('.//include') includes = self.tree.findall('.//include')
for inc in includes: for inc in includes:
file = inc.get('file') filename = inc.get('file')
if file is not None: if filename is not None:
try: try:
# open using ModuleSystem OSFS filestore # open using ModuleSystem OSFS filestore
ifp = self.system.filestore.open(file) ifp = self.system.filestore.open(filename)
except Exception as err: except Exception as err:
log.warning('Error %s in problem xml include: %s' % ( log.warning('Error %s in problem xml include: %s' % (
err, etree.tostring(inc, pretty_print=True))) err, etree.tostring(inc, pretty_print=True)))
log.warning('Cannot find file %s in %s' % ( log.warning('Cannot find file %s in %s' % (
file, self.system.filestore)) filename, self.system.filestore))
# if debugging, don't fail - just log error # if debugging, don't fail - just log error
# TODO (vshnayder): need real error handling, display to users # TODO (vshnayder): need real error handling, display to users
if not self.system.get('DEBUG'): if not self.system.get('DEBUG'):
...@@ -381,7 +420,7 @@ class LoncapaProblem(object): ...@@ -381,7 +420,7 @@ class LoncapaProblem(object):
except Exception as err: except Exception as err:
log.warning('Error %s in problem xml include: %s' % ( log.warning('Error %s in problem xml include: %s' % (
err, etree.tostring(inc, pretty_print=True))) err, etree.tostring(inc, pretty_print=True)))
log.warning('Cannot parse XML in %s' % (file)) log.warning('Cannot parse XML in %s' % (filename))
# if debugging, don't fail - just log error # if debugging, don't fail - just log error
# TODO (vshnayder): same as above # TODO (vshnayder): same as above
if not self.system.get('DEBUG'): if not self.system.get('DEBUG'):
...@@ -389,11 +428,11 @@ class LoncapaProblem(object): ...@@ -389,11 +428,11 @@ class LoncapaProblem(object):
else: else:
continue continue
# insert new XML into tree in place of inlcude # insert new XML into tree in place of include
parent = inc.getparent() parent = inc.getparent()
parent.insert(parent.index(inc), incxml) parent.insert(parent.index(inc), incxml)
parent.remove(inc) parent.remove(inc)
log.debug('Included %s into %s' % (file, self.problem_id)) log.debug('Included %s into %s' % (filename, self.problem_id))
def _extract_system_path(self, script): def _extract_system_path(self, script):
""" """
...@@ -463,7 +502,7 @@ class LoncapaProblem(object): ...@@ -463,7 +502,7 @@ class LoncapaProblem(object):
if all_code: if all_code:
try: try:
safe_exec.safe_exec( safe_exec(
all_code, all_code,
context, context,
random_seed=self.seed, random_seed=self.seed,
......
...@@ -4,7 +4,6 @@ Tests of responsetypes ...@@ -4,7 +4,6 @@ Tests of responsetypes
from datetime import datetime from datetime import datetime
import json import json
from nose.plugins.skip import SkipTest
import os import os
import random import random
import unittest import unittest
...@@ -56,9 +55,18 @@ class ResponseTest(unittest.TestCase): ...@@ -56,9 +55,18 @@ class ResponseTest(unittest.TestCase):
self.assertEqual(result, 'incorrect', self.assertEqual(result, 'incorrect',
msg="%s should be marked incorrect" % str(input_str)) msg="%s should be marked incorrect" % str(input_str))
def _get_random_number_code(self):
"""Returns code to be used to generate a random result."""
return "str(random.randint(0, 1e9))"
def _get_random_number_result(self, seed_value):
"""Returns a result that should be generated using the random_number_code."""
rand = random.Random(seed_value)
return str(rand.randint(0, 1e9))
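These two helpers pair a snippet that runs inside the problem's sandboxed script with the value the test expects; the underlying assumption (a sketch, not asserted by this diff) is that the script's `random` module is seeded from `problem.seed`, so the same seed reproduces the same draw on the test side:

    import random

    seed = 12345                                    # stands in for problem.seed
    expected = str(random.Random(seed).randint(0, 1e9))
    # A script seeded with `seed` that evaluates "str(random.randint(0, 1e9))"
    # is expected to produce exactly `expected`.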
class MultiChoiceResponseTest(ResponseTest): class MultiChoiceResponseTest(ResponseTest):
from response_xml_factory import MultipleChoiceResponseXMLFactory from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
xml_factory_class = MultipleChoiceResponseXMLFactory xml_factory_class = MultipleChoiceResponseXMLFactory
def test_multiple_choice_grade(self): def test_multiple_choice_grade(self):
...@@ -80,7 +88,7 @@ class MultiChoiceResponseTest(ResponseTest): ...@@ -80,7 +88,7 @@ class MultiChoiceResponseTest(ResponseTest):
class TrueFalseResponseTest(ResponseTest): class TrueFalseResponseTest(ResponseTest):
from response_xml_factory import TrueFalseResponseXMLFactory from capa.tests.response_xml_factory import TrueFalseResponseXMLFactory
xml_factory_class = TrueFalseResponseXMLFactory xml_factory_class = TrueFalseResponseXMLFactory
def test_true_false_grade(self): def test_true_false_grade(self):
...@@ -120,7 +128,7 @@ class TrueFalseResponseTest(ResponseTest): ...@@ -120,7 +128,7 @@ class TrueFalseResponseTest(ResponseTest):
class ImageResponseTest(ResponseTest): class ImageResponseTest(ResponseTest):
from response_xml_factory import ImageResponseXMLFactory from capa.tests.response_xml_factory import ImageResponseXMLFactory
xml_factory_class = ImageResponseXMLFactory xml_factory_class = ImageResponseXMLFactory
def test_rectangle_grade(self): def test_rectangle_grade(self):
...@@ -184,7 +192,7 @@ class ImageResponseTest(ResponseTest): ...@@ -184,7 +192,7 @@ class ImageResponseTest(ResponseTest):
class SymbolicResponseTest(ResponseTest): class SymbolicResponseTest(ResponseTest):
from response_xml_factory import SymbolicResponseXMLFactory from capa.tests.response_xml_factory import SymbolicResponseXMLFactory
xml_factory_class = SymbolicResponseXMLFactory xml_factory_class = SymbolicResponseXMLFactory
def test_grade_single_input(self): def test_grade_single_input(self):
...@@ -312,7 +320,7 @@ class SymbolicResponseTest(ResponseTest): ...@@ -312,7 +320,7 @@ class SymbolicResponseTest(ResponseTest):
# Should not allow multiple inputs, since we specify # Should not allow multiple inputs, since we specify
# only one "expect" value # only one "expect" value
with self.assertRaises(Exception): with self.assertRaises(Exception):
problem = self.build_problem(math_display=True, self.build_problem(math_display=True,
expect="2*x+3*y", expect="2*x+3*y",
num_inputs=3) num_inputs=3)
...@@ -330,7 +338,7 @@ class SymbolicResponseTest(ResponseTest): ...@@ -330,7 +338,7 @@ class SymbolicResponseTest(ResponseTest):
class OptionResponseTest(ResponseTest): class OptionResponseTest(ResponseTest):
from response_xml_factory import OptionResponseXMLFactory from capa.tests.response_xml_factory import OptionResponseXMLFactory
xml_factory_class = OptionResponseXMLFactory xml_factory_class = OptionResponseXMLFactory
def test_grade(self): def test_grade(self):
...@@ -350,7 +358,7 @@ class FormulaResponseTest(ResponseTest): ...@@ -350,7 +358,7 @@ class FormulaResponseTest(ResponseTest):
""" """
Test the FormulaResponse class Test the FormulaResponse class
""" """
from response_xml_factory import FormulaResponseXMLFactory from capa.tests.response_xml_factory import FormulaResponseXMLFactory
xml_factory_class = FormulaResponseXMLFactory xml_factory_class = FormulaResponseXMLFactory
def test_grade(self): def test_grade(self):
...@@ -570,7 +578,7 @@ class FormulaResponseTest(ResponseTest): ...@@ -570,7 +578,7 @@ class FormulaResponseTest(ResponseTest):
class StringResponseTest(ResponseTest): class StringResponseTest(ResponseTest):
from response_xml_factory import StringResponseXMLFactory from capa.tests.response_xml_factory import StringResponseXMLFactory
xml_factory_class = StringResponseXMLFactory xml_factory_class = StringResponseXMLFactory
def test_case_sensitive(self): def test_case_sensitive(self):
...@@ -647,19 +655,18 @@ class StringResponseTest(ResponseTest): ...@@ -647,19 +655,18 @@ class StringResponseTest(ResponseTest):
hintfn="gimme_a_random_hint", hintfn="gimme_a_random_hint",
script=textwrap.dedent(""" script=textwrap.dedent("""
def gimme_a_random_hint(answer_ids, student_answers, new_cmap, old_cmap): def gimme_a_random_hint(answer_ids, student_answers, new_cmap, old_cmap):
answer = str(random.randint(0, 1e9)) answer = {code}
new_cmap.set_hint_and_mode(answer_ids[0], answer, "always") new_cmap.set_hint_and_mode(answer_ids[0], answer, "always")
""") """.format(code=self._get_random_number_code()))
) )
correct_map = problem.grade_answers({'1_2_1': '2'}) correct_map = problem.grade_answers({'1_2_1': '2'})
hint = correct_map.get_hint('1_2_1') hint = correct_map.get_hint('1_2_1')
r = random.Random(problem.seed) self.assertEqual(hint, self._get_random_number_result(problem.seed))
self.assertEqual(hint, str(r.randint(0, 1e9)))
class CodeResponseTest(ResponseTest): class CodeResponseTest(ResponseTest):
from response_xml_factory import CodeResponseXMLFactory from capa.tests.response_xml_factory import CodeResponseXMLFactory
xml_factory_class = CodeResponseXMLFactory xml_factory_class = CodeResponseXMLFactory
def setUp(self): def setUp(self):
...@@ -673,6 +680,7 @@ class CodeResponseTest(ResponseTest): ...@@ -673,6 +680,7 @@ class CodeResponseTest(ResponseTest):
@staticmethod @staticmethod
def make_queuestate(key, time): def make_queuestate(key, time):
"""Create queuestate dict"""
timestr = datetime.strftime(time, dateformat) timestr = datetime.strftime(time, dateformat)
return {'key': key, 'time': timestr} return {'key': key, 'time': timestr}
...@@ -710,7 +718,7 @@ class CodeResponseTest(ResponseTest): ...@@ -710,7 +718,7 @@ class CodeResponseTest(ResponseTest):
old_cmap = CorrectMap() old_cmap = CorrectMap()
for i, answer_id in enumerate(answer_ids): for i, answer_id in enumerate(answer_ids):
queuekey = 1000 + i queuekey = 1000 + i
queuestate = CodeResponseTest.make_queuestate(1000 + i, datetime.now()) queuestate = CodeResponseTest.make_queuestate(queuekey, datetime.now())
old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))
# Message format common to external graders # Message format common to external graders
...@@ -771,7 +779,7 @@ class CodeResponseTest(ResponseTest): ...@@ -771,7 +779,7 @@ class CodeResponseTest(ResponseTest):
for i, answer_id in enumerate(answer_ids): for i, answer_id in enumerate(answer_ids):
queuekey = 1000 + i queuekey = 1000 + i
latest_timestamp = datetime.now() latest_timestamp = datetime.now()
queuestate = CodeResponseTest.make_queuestate(1000 + i, latest_timestamp) queuestate = CodeResponseTest.make_queuestate(queuekey, latest_timestamp)
cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
self.problem.correct_map.update(cmap) self.problem.correct_map.update(cmap)
...@@ -796,7 +804,7 @@ class CodeResponseTest(ResponseTest): ...@@ -796,7 +804,7 @@ class CodeResponseTest(ResponseTest):
class ChoiceResponseTest(ResponseTest): class ChoiceResponseTest(ResponseTest):
from response_xml_factory import ChoiceResponseXMLFactory from capa.tests.response_xml_factory import ChoiceResponseXMLFactory
xml_factory_class = ChoiceResponseXMLFactory xml_factory_class = ChoiceResponseXMLFactory
def test_radio_group_grade(self): def test_radio_group_grade(self):
...@@ -828,7 +836,7 @@ class ChoiceResponseTest(ResponseTest): ...@@ -828,7 +836,7 @@ class ChoiceResponseTest(ResponseTest):
class JavascriptResponseTest(ResponseTest): class JavascriptResponseTest(ResponseTest):
from response_xml_factory import JavascriptResponseXMLFactory from capa.tests.response_xml_factory import JavascriptResponseXMLFactory
xml_factory_class = JavascriptResponseXMLFactory xml_factory_class = JavascriptResponseXMLFactory
def test_grade(self): def test_grade(self):
...@@ -858,7 +866,7 @@ class JavascriptResponseTest(ResponseTest): ...@@ -858,7 +866,7 @@ class JavascriptResponseTest(ResponseTest):
system.can_execute_unsafe_code = lambda: False system.can_execute_unsafe_code = lambda: False
with self.assertRaises(LoncapaProblemError): with self.assertRaises(LoncapaProblemError):
problem = self.build_problem( self.build_problem(
system=system, system=system,
generator_src="test_problem_generator.js", generator_src="test_problem_generator.js",
grader_src="test_problem_grader.js", grader_src="test_problem_grader.js",
...@@ -869,7 +877,7 @@ class JavascriptResponseTest(ResponseTest): ...@@ -869,7 +877,7 @@ class JavascriptResponseTest(ResponseTest):
class NumericalResponseTest(ResponseTest): class NumericalResponseTest(ResponseTest):
from response_xml_factory import NumericalResponseXMLFactory from capa.tests.response_xml_factory import NumericalResponseXMLFactory
xml_factory_class = NumericalResponseXMLFactory xml_factory_class = NumericalResponseXMLFactory
def test_grade_exact(self): def test_grade_exact(self):
...@@ -961,7 +969,7 @@ class NumericalResponseTest(ResponseTest): ...@@ -961,7 +969,7 @@ class NumericalResponseTest(ResponseTest):
class CustomResponseTest(ResponseTest): class CustomResponseTest(ResponseTest):
from response_xml_factory import CustomResponseXMLFactory from capa.tests.response_xml_factory import CustomResponseXMLFactory
xml_factory_class = CustomResponseXMLFactory xml_factory_class = CustomResponseXMLFactory
def test_inline_code(self): def test_inline_code(self):
...@@ -1000,15 +1008,14 @@ class CustomResponseTest(ResponseTest): ...@@ -1000,15 +1008,14 @@ class CustomResponseTest(ResponseTest):
def test_inline_randomization(self): def test_inline_randomization(self):
# Make sure the seed from the problem gets fed into the script execution. # Make sure the seed from the problem gets fed into the script execution.
inline_script = """messages[0] = str(random.randint(0, 1e9))""" inline_script = "messages[0] = {code}".format(code=self._get_random_number_code())
problem = self.build_problem(answer=inline_script) problem = self.build_problem(answer=inline_script)
input_dict = {'1_2_1': '0'} input_dict = {'1_2_1': '0'}
correctmap = problem.grade_answers(input_dict) correctmap = problem.grade_answers(input_dict)
input_msg = correctmap.get_msg('1_2_1') input_msg = correctmap.get_msg('1_2_1')
r = random.Random(problem.seed) self.assertEqual(input_msg, self._get_random_number_result(problem.seed))
self.assertEqual(input_msg, str(r.randint(0, 1e9)))
def test_function_code_single_input(self): def test_function_code_single_input(self):
# For function code, we pass in these arguments: # For function code, we pass in these arguments:
...@@ -1241,25 +1248,23 @@ class CustomResponseTest(ResponseTest): ...@@ -1241,25 +1248,23 @@ class CustomResponseTest(ResponseTest):
def test_setup_randomization(self): def test_setup_randomization(self):
# Ensure that the problem setup script gets the random seed from the problem. # Ensure that the problem setup script gets the random seed from the problem.
script = textwrap.dedent(""" script = textwrap.dedent("""
num = random.randint(0, 1e9) num = {code}
""") """.format(code=self._get_random_number_code()))
problem = self.build_problem(script=script) problem = self.build_problem(script=script)
r = random.Random(problem.seed) self.assertEqual(problem.context['num'], self._get_random_number_result(problem.seed))
self.assertEqual(r.randint(0, 1e9), problem.context['num'])
def test_check_function_randomization(self): def test_check_function_randomization(self):
# The check function should get random-seeded from the problem. # The check function should get random-seeded from the problem.
script = textwrap.dedent(""" script = textwrap.dedent("""
def check_func(expect, answer_given): def check_func(expect, answer_given):
return {'ok': True, 'msg': str(random.randint(0, 1e9))} return {{'ok': True, 'msg': {code} }}
""") """.format(code=self._get_random_number_code()))
problem = self.build_problem(script=script, cfn="check_func", expect="42") problem = self.build_problem(script=script, cfn="check_func", expect="42")
input_dict = {'1_2_1': '42'} input_dict = {'1_2_1': '42'}
correct_map = problem.grade_answers(input_dict) correct_map = problem.grade_answers(input_dict)
msg = correct_map.get_msg('1_2_1') msg = correct_map.get_msg('1_2_1')
r = random.Random(problem.seed) self.assertEqual(msg, self._get_random_number_result(problem.seed))
self.assertEqual(msg, str(r.randint(0, 1e9)))
def test_module_imports_inline(self): def test_module_imports_inline(self):
''' '''
...@@ -1320,7 +1325,7 @@ class CustomResponseTest(ResponseTest): ...@@ -1320,7 +1325,7 @@ class CustomResponseTest(ResponseTest):
class SchematicResponseTest(ResponseTest): class SchematicResponseTest(ResponseTest):
from response_xml_factory import SchematicResponseXMLFactory from capa.tests.response_xml_factory import SchematicResponseXMLFactory
xml_factory_class = SchematicResponseXMLFactory xml_factory_class = SchematicResponseXMLFactory
def test_grade(self): def test_grade(self):
...@@ -1349,11 +1354,10 @@ class SchematicResponseTest(ResponseTest): ...@@ -1349,11 +1354,10 @@ class SchematicResponseTest(ResponseTest):
def test_check_function_randomization(self): def test_check_function_randomization(self):
# The check function should get a random seed from the problem. # The check function should get a random seed from the problem.
script = "correct = ['correct' if (submission[0]['num'] == random.randint(0, 1e9)) else 'incorrect']" script = "correct = ['correct' if (submission[0]['num'] == {code}) else 'incorrect']".format(code=self._get_random_number_code())
problem = self.build_problem(answer=script) problem = self.build_problem(answer=script)
r = random.Random(problem.seed) submission_dict = {'num': self._get_random_number_result(problem.seed)}
submission_dict = {'num': r.randint(0, 1e9)}
input_dict = {'1_2_1': json.dumps(submission_dict)} input_dict = {'1_2_1': json.dumps(submission_dict)}
correct_map = problem.grade_answers(input_dict) correct_map = problem.grade_answers(input_dict)
...@@ -1372,7 +1376,7 @@ class SchematicResponseTest(ResponseTest): ...@@ -1372,7 +1376,7 @@ class SchematicResponseTest(ResponseTest):
class AnnotationResponseTest(ResponseTest): class AnnotationResponseTest(ResponseTest):
from response_xml_factory import AnnotationResponseXMLFactory from capa.tests.response_xml_factory import AnnotationResponseXMLFactory
xml_factory_class = AnnotationResponseXMLFactory xml_factory_class = AnnotationResponseXMLFactory
def test_grade(self): def test_grade(self):
...@@ -1393,7 +1397,7 @@ class AnnotationResponseTest(ResponseTest): ...@@ -1393,7 +1397,7 @@ class AnnotationResponseTest(ResponseTest):
{'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}}, {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}},
] ]
for (index, test) in enumerate(tests): for test in tests:
expected_correctness = test['correctness'] expected_correctness = test['correctness']
expected_points = test['points'] expected_points = test['points']
answers = test['answers'] answers = test['answers']
......
...@@ -424,7 +424,7 @@ class CapaModule(CapaFields, XModule): ...@@ -424,7 +424,7 @@ class CapaModule(CapaFields, XModule):
# If we cannot construct the problem HTML, # If we cannot construct the problem HTML,
# then generate an error message instead. # then generate an error message instead.
except Exception, err: except Exception as err:
html = self.handle_problem_html_error(err) html = self.handle_problem_html_error(err)
# The convention is to pass the name of the check button # The convention is to pass the name of the check button
...@@ -655,7 +655,7 @@ class CapaModule(CapaFields, XModule): ...@@ -655,7 +655,7 @@ class CapaModule(CapaFields, XModule):
@staticmethod @staticmethod
def make_dict_of_responses(get): def make_dict_of_responses(get):
'''Make dictionary of student responses (aka "answers") '''Make dictionary of student responses (aka "answers")
get is POST dictionary (Djano QueryDict). get is POST dictionary (Django QueryDict).
The *get* dict has keys of the form 'x_y', which are mapped The *get* dict has keys of the form 'x_y', which are mapped
to key 'y' in the returned dict. For example, to key 'y' in the returned dict. For example,
...@@ -739,13 +739,13 @@ class CapaModule(CapaFields, XModule): ...@@ -739,13 +739,13 @@ class CapaModule(CapaFields, XModule):
# Too late. Cannot submit # Too late. Cannot submit
if self.closed(): if self.closed():
event_info['failure'] = 'closed' event_info['failure'] = 'closed'
self.system.track_function('save_problem_check_fail', event_info) self.system.track_function('problem_check_fail', event_info)
raise NotFoundError('Problem is closed') raise NotFoundError('Problem is closed')
# Problem submitted. Student should reset before checking again # Problem submitted. Student should reset before checking again
if self.done and self.rerandomize == "always": if self.done and self.rerandomize == "always":
event_info['failure'] = 'unreset' event_info['failure'] = 'unreset'
self.system.track_function('save_problem_check_fail', event_info) self.system.track_function('problem_check_fail', event_info)
raise NotFoundError('Problem must be reset before it can be checked again') raise NotFoundError('Problem must be reset before it can be checked again')
# Problem queued. Students must wait a specified waittime before they are allowed to submit # Problem queued. Students must wait a specified waittime before they are allowed to submit
...@@ -759,6 +759,8 @@ class CapaModule(CapaFields, XModule): ...@@ -759,6 +759,8 @@ class CapaModule(CapaFields, XModule):
try: try:
correct_map = self.lcp.grade_answers(answers) correct_map = self.lcp.grade_answers(answers)
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp() self.set_state_from_lcp()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst: except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
...@@ -778,17 +780,13 @@ class CapaModule(CapaFields, XModule): ...@@ -778,17 +780,13 @@ class CapaModule(CapaFields, XModule):
return {'success': msg} return {'success': msg}
except Exception, err: except Exception as err:
if self.system.DEBUG: if self.system.DEBUG:
msg = "Error checking problem: " + str(err) msg = "Error checking problem: " + str(err)
msg += '\nTraceback:\n' + traceback.format_exc() msg += '\nTraceback:\n' + traceback.format_exc()
return {'success': msg} return {'success': msg}
raise raise
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
self.publish_grade() self.publish_grade()
# success = correct if ALL questions in this problem are correct # success = correct if ALL questions in this problem are correct
...@@ -802,7 +800,7 @@ class CapaModule(CapaFields, XModule): ...@@ -802,7 +800,7 @@ class CapaModule(CapaFields, XModule):
event_info['correct_map'] = correct_map.get_dict() event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success event_info['success'] = success
event_info['attempts'] = self.attempts event_info['attempts'] = self.attempts
self.system.track_function('save_problem_check', event_info) self.system.track_function('problem_check', event_info)
if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback
self.system.psychometrics_handler(self.get_state_for_lcp()) self.system.psychometrics_handler(self.get_state_for_lcp())
...@@ -814,12 +812,92 @@ class CapaModule(CapaFields, XModule): ...@@ -814,12 +812,92 @@ class CapaModule(CapaFields, XModule):
'contents': html, 'contents': html,
} }
def rescore_problem(self):
"""
Checks whether the existing answers to a problem are correct.
This is called when the correct answer to a problem has been changed,
and the grade should be re-evaluated.
Returns a dict with one key:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
Raises NotFoundError if called on a problem that has not yet been
answered, or NotImplementedError if it's a problem that cannot be rescored.
Returns the error messages for exceptions occurring while performing
the rescoring, rather than throwing them.
"""
event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.url()}
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.system.track_function('problem_rescore_fail', event_info)
raise NotImplementedError("Problem's definition does not support rescoring")
if not self.done:
event_info['failure'] = 'unanswered'
self.system.track_function('problem_rescore_fail', event_info)
raise NotFoundError('Problem must be answered before it can be graded again')
# get old score, for comparison:
orig_score = self.lcp.get_score()
event_info['orig_score'] = orig_score['score']
event_info['orig_total'] = orig_score['total']
try:
correct_map = self.lcp.rescore_existing_answers()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("Input error in capa_module:problem_rescore", exc_info=True)
event_info['failure'] = 'input_error'
self.system.track_function('problem_rescore_fail', event_info)
return {'success': u"Error: {0}".format(inst.message)}
except Exception as err:
event_info['failure'] = 'unexpected'
self.system.track_function('problem_rescore_fail', event_info)
if self.system.DEBUG:
msg = u"Error checking problem: {0}".format(err.message)
msg += u'\nTraceback:\n' + traceback.format_exc()
return {'success': msg}
raise
# rescoring should have no effect on attempts, so don't
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
self.publish_grade()
new_score = self.lcp.get_score()
event_info['new_score'] = new_score['score']
event_info['new_total'] = new_score['total']
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.system.track_function('problem_rescore', event_info)
# psychometrics should be called on rescoring requests in the same way as check-problem
if hasattr(self.system, 'psychometrics_handler'): # update PsychometricsData using callback
self.system.psychometrics_handler(self.get_state_for_lcp())
return {'success': success}
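A hedged sketch of a caller (for example, a background rescoring task) driving `rescore_problem` and handling the outcomes documented in its docstring; `module` stands in for a CapaModule instance and the fallback messages are invented for illustration:

    try:
        result = module.rescore_problem()
    except NotFoundError:
        result = {'success': 'not attempted: problem has not been answered yet'}
    except NotImplementedError:
        result = {'success': 'not attempted: problem type does not support rescoring'}

    # Anything other than 'correct'/'incorrect' is an error or status message.
    if result['success'] not in ('correct', 'incorrect'):
        log.warning("Rescoring returned a message instead of a status: %s", result['success'])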
def save_problem(self, get): def save_problem(self, get):
''' """
Save the passed in answers. Save the passed in answers.
Returns a dict { 'success' : bool, ['error' : error-msg]}, Returns a dict { 'success' : bool, 'msg' : message }
with the error key only present if success is False. The message is informative on success, and an error message on failure.
''' """
event_info = dict() event_info = dict()
event_info['state'] = self.lcp.get_state() event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.url() event_info['problem_id'] = self.location.url()
......
...@@ -66,7 +66,7 @@ class CombinedOpenEndedFields(object): ...@@ -66,7 +66,7 @@ class CombinedOpenEndedFields(object):
attempts = Integer( attempts = Integer(
display_name="Maximum Attempts", display_name="Maximum Attempts",
help="The number of times the student can try to answer this problem.", default=1, help="The number of times the student can try to answer this problem.", default=1,
scope=Scope.settings, values = {"min" : 1 } scope=Scope.settings, values={"min" : 1 }
) )
is_graded = Boolean(display_name="Graded", help="Whether or not the problem is graded.", default=False, scope=Scope.settings) is_graded = Boolean(display_name="Graded", help="Whether or not the problem is graded.", default=False, scope=Scope.settings)
accept_file_upload = Boolean( accept_file_upload = Boolean(
...@@ -89,7 +89,7 @@ class CombinedOpenEndedFields(object): ...@@ -89,7 +89,7 @@ class CombinedOpenEndedFields(object):
weight = Float( weight = Float(
display_name="Problem Weight", display_name="Problem Weight",
help="Defines the number of points each problem is worth. If the value is not set, each problem is worth one point.", help="Defines the number of points each problem is worth. If the value is not set, each problem is worth one point.",
scope=Scope.settings, values = {"min" : 0 , "step": ".1"} scope=Scope.settings, values={"min" : 0 , "step": ".1"}
) )
markdown = String(help="Markdown source of this module", scope=Scope.settings) markdown = String(help="Markdown source of this module", scope=Scope.settings)
......
...@@ -77,10 +77,8 @@ class Date(ModelType): ...@@ -77,10 +77,8 @@ class Date(ModelType):
else: else:
return value.isoformat() return value.isoformat()
TIMEDELTA_REGEX = re.compile(r'^((?P<days>\d+?) day(?:s?))?(\s)?((?P<hours>\d+?) hour(?:s?))?(\s)?((?P<minutes>\d+?) minute(?:s)?)?(\s)?((?P<seconds>\d+?) second(?:s)?)?$') TIMEDELTA_REGEX = re.compile(r'^((?P<days>\d+?) day(?:s?))?(\s)?((?P<hours>\d+?) hour(?:s?))?(\s)?((?P<minutes>\d+?) minute(?:s)?)?(\s)?((?P<seconds>\d+?) second(?:s)?)?$')
class Timedelta(ModelType): class Timedelta(ModelType):
def from_json(self, time_str): def from_json(self, time_str):
""" """
......
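For context, a small sketch of the kind of conversion the `Timedelta` field's `from_json` presumably performs with `TIMEDELTA_REGEX` above (the body of `from_json` is not shown in this hunk, so this is an assumption):

    import re
    from datetime import timedelta

    TIMEDELTA_REGEX = re.compile(r'^((?P<days>\d+?) day(?:s?))?(\s)?((?P<hours>\d+?) hour(?:s?))?(\s)?((?P<minutes>\d+?) minute(?:s)?)?(\s)?((?P<seconds>\d+?) second(?:s)?)?$')

    match = TIMEDELTA_REGEX.match("1 day 12 hours 59 minutes 59 seconds")
    # Keep only the named groups that actually matched, converted to ints.
    parts = dict((name, int(value)) for name, value in match.groupdict().items() if value)
    assert timedelta(**parts) == timedelta(days=1, hours=12, minutes=59, seconds=59)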
...@@ -84,7 +84,7 @@ class GraphicalSliderToolModule(GraphicalSliderToolFields, XModule): ...@@ -84,7 +84,7 @@ class GraphicalSliderToolModule(GraphicalSliderToolFields, XModule):
xml = html.fromstring(html_string) xml = html.fromstring(html_string)
#substitute plot, if presented # substitute plot, if presented
plot_div = '<div class="{element_class}_plot" id="{element_id}_plot" \ plot_div = '<div class="{element_class}_plot" id="{element_id}_plot" \
style="{style}"></div>' style="{style}"></div>'
plot_el = xml.xpath('//plot') plot_el = xml.xpath('//plot')
...@@ -95,7 +95,7 @@ class GraphicalSliderToolModule(GraphicalSliderToolFields, XModule): ...@@ -95,7 +95,7 @@ class GraphicalSliderToolModule(GraphicalSliderToolFields, XModule):
element_id=self.html_id, element_id=self.html_id,
style=plot_el.get('style', "")))) style=plot_el.get('style', ""))))
#substitute sliders # substitute sliders
slider_div = '<div class="{element_class}_slider" \ slider_div = '<div class="{element_class}_slider" \
id="{element_id}_slider_{var}" \ id="{element_id}_slider_{var}" \
data-var="{var}" \ data-var="{var}" \
......
...@@ -100,9 +100,9 @@ class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor): ...@@ -100,9 +100,9 @@ class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor):
pointer_path = "{category}/{url_path}".format(category='html', pointer_path = "{category}/{url_path}".format(category='html',
url_path=name_to_pathname(location.name)) url_path=name_to_pathname(location.name))
base = path(pointer_path).dirname() base = path(pointer_path).dirname()
#log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename)) # log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
filepath = "{base}/{name}.html".format(base=base, name=filename) filepath = "{base}/{name}.html".format(base=base, name=filename)
#log.debug("looking for html file for {0} at {1}".format(location, filepath)) # log.debug("looking for html file for {0} at {1}".format(location, filepath))
# VS[compat] # VS[compat]
# TODO (cpennington): If the file doesn't exist at the right path, # TODO (cpennington): If the file doesn't exist at the right path,
...@@ -111,7 +111,7 @@ class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor): ...@@ -111,7 +111,7 @@ class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor):
# online and has imported all current (fall 2012) courses from xml # online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath): if not system.resources_fs.exists(filepath):
candidates = cls.backcompat_paths(filepath) candidates = cls.backcompat_paths(filepath)
#log.debug("candidates = {0}".format(candidates)) # log.debug("candidates = {0}".format(candidates))
for candidate in candidates: for candidate in candidates:
if system.resources_fs.exists(candidate): if system.resources_fs.exists(candidate):
filepath = candidate filepath = candidate
......
...@@ -196,7 +196,7 @@ class Location(_LocationBase): ...@@ -196,7 +196,7 @@ class Location(_LocationBase):
raise InvalidLocationError(location) raise InvalidLocationError(location)
if len(location) == 5: if len(location) == 5:
args = tuple(location) + (None, ) args = tuple(location) + (None,)
else: else:
args = tuple(location) args = tuple(location)
...@@ -440,7 +440,7 @@ class ModuleStoreBase(ModuleStore): ...@@ -440,7 +440,7 @@ class ModuleStoreBase(ModuleStore):
""" """
# check that item is present and raise the promised exceptions if needed # check that item is present and raise the promised exceptions if needed
# TODO (vshnayder): post-launch, make errors properties of items # TODO (vshnayder): post-launch, make errors properties of items
#self.get_item(location) # self.get_item(location)
errorlog = self._get_errorlog(location) errorlog = self._get_errorlog(location)
return errorlog.errors return errorlog.errors
......
...@@ -15,14 +15,14 @@ def as_draft(location): ...@@ -15,14 +15,14 @@ def as_draft(location):
""" """
Returns the Location that is the draft for `location` Returns the Location that is the draft for `location`
""" """
return Location(location)._replace(revision=DRAFT) return Location(location).replace(revision=DRAFT)
def as_published(location): def as_published(location):
""" """
Returns the Location that is the published version for `location` Returns the Location that is the published version for `location`
""" """
return Location(location)._replace(revision=None) return Location(location).replace(revision=None)
def wrap_draft(item): def wrap_draft(item):
...@@ -32,7 +32,7 @@ def wrap_draft(item): ...@@ -32,7 +32,7 @@ def wrap_draft(item):
non-draft location in either case non-draft location in either case
""" """
setattr(item, 'is_draft', item.location.revision == DRAFT) setattr(item, 'is_draft', item.location.revision == DRAFT)
item.location = item.location._replace(revision=None) item.location = item.location.replace(revision=None)
return item return item
...@@ -234,7 +234,7 @@ class DraftModuleStore(ModuleStoreBase): ...@@ -234,7 +234,7 @@ class DraftModuleStore(ModuleStoreBase):
# always return the draft - if available # always return the draft - if available
for draft in to_process_drafts: for draft in to_process_drafts:
draft_loc = Location(draft["_id"]) draft_loc = Location(draft["_id"])
draft_as_non_draft_loc = draft_loc._replace(revision=None) draft_as_non_draft_loc = draft_loc.replace(revision=None)
# does non-draft exist in the collection # does non-draft exist in the collection
# if so, replace it # if so, replace it
......
...@@ -307,7 +307,7 @@ class MongoModuleStore(ModuleStoreBase): ...@@ -307,7 +307,7 @@ class MongoModuleStore(ModuleStoreBase):
location = Location(result['_id']) location = Location(result['_id'])
# We need to collate between draft and non-draft # We need to collate between draft and non-draft
# i.e. draft verticals can have children which are not in non-draft versions # i.e. draft verticals can have children which are not in non-draft versions
location = location._replace(revision=None) location = location.replace(revision=None)
location_url = location.url() location_url = location.url()
if location_url in results_by_url: if location_url in results_by_url:
existing_children = results_by_url[location_url].get('definition', {}).get('children', []) existing_children = results_by_url[location_url].get('definition', {}).get('children', [])
......
...@@ -19,18 +19,18 @@ log = logging.getLogger("mitx.courseware") ...@@ -19,18 +19,18 @@ log = logging.getLogger("mitx.courseware")
# attempts specified in xml definition overrides this. # attempts specified in xml definition overrides this.
MAX_ATTEMPTS = 1 MAX_ATTEMPTS = 1
#The highest score allowed for the overall xmodule and for each rubric point # The highest score allowed for the overall xmodule and for each rubric point
MAX_SCORE_ALLOWED = 50 MAX_SCORE_ALLOWED = 50
#If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress # If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress
#Metadata overrides this. # Metadata overrides this.
IS_SCORED = False IS_SCORED = False
#If true, then default behavior is to require a file upload or pasted link from a student for this problem. # If true, then default behavior is to require a file upload or pasted link from a student for this problem.
#Metadata overrides this. # Metadata overrides this.
ACCEPT_FILE_UPLOAD = False ACCEPT_FILE_UPLOAD = False
#Contains all reasonable bool and case combinations of True # Contains all reasonable bool and case combinations of True
TRUE_DICT = ["True", True, "TRUE", "true"] TRUE_DICT = ["True", True, "TRUE", "true"]
HUMAN_TASK_TYPE = { HUMAN_TASK_TYPE = {
...@@ -38,8 +38,8 @@ HUMAN_TASK_TYPE = { ...@@ -38,8 +38,8 @@ HUMAN_TASK_TYPE = {
'openended': "edX Assessment", 'openended': "edX Assessment",
} }
#Default value that controls whether or not to skip basic spelling checks in the controller # Default value that controls whether or not to skip basic spelling checks in the controller
#Metadata overrides this # Metadata overrides this
SKIP_BASIC_CHECKS = False SKIP_BASIC_CHECKS = False
...@@ -74,7 +74,7 @@ class CombinedOpenEndedV1Module(): ...@@ -74,7 +74,7 @@ class CombinedOpenEndedV1Module():
INTERMEDIATE_DONE = 'intermediate_done' INTERMEDIATE_DONE = 'intermediate_done'
DONE = 'done' DONE = 'done'
#Where the templates live for this problem # Where the templates live for this problem
TEMPLATE_DIR = "combinedopenended" TEMPLATE_DIR = "combinedopenended"
def __init__(self, system, location, definition, descriptor, def __init__(self, system, location, definition, descriptor,
...@@ -118,21 +118,21 @@ class CombinedOpenEndedV1Module(): ...@@ -118,21 +118,21 @@ class CombinedOpenEndedV1Module():
self.instance_state = instance_state self.instance_state = instance_state
self.display_name = instance_state.get('display_name', "Open Ended") self.display_name = instance_state.get('display_name', "Open Ended")
#We need to set the location here so the child modules can use it # We need to set the location here so the child modules can use it
system.set('location', location) system.set('location', location)
self.system = system self.system = system
#Tells the system which xml definition to load # Tells the system which xml definition to load
self.current_task_number = instance_state.get('current_task_number', 0) self.current_task_number = instance_state.get('current_task_number', 0)
#This loads the states of the individual children # This loads the states of the individual children
self.task_states = instance_state.get('task_states', []) self.task_states = instance_state.get('task_states', [])
#Overall state of the combined open ended module # Overall state of the combined open ended module
self.state = instance_state.get('state', self.INITIAL) self.state = instance_state.get('state', self.INITIAL)
self.student_attempts = instance_state.get('student_attempts', 0) self.student_attempts = instance_state.get('student_attempts', 0)
self.weight = instance_state.get('weight', 1) self.weight = instance_state.get('weight', 1)
#Allow reset is true if student has failed the criteria to move to the next child task # Allow reset is true if student has failed the criteria to move to the next child task
self.ready_to_reset = instance_state.get('ready_to_reset', False) self.ready_to_reset = instance_state.get('ready_to_reset', False)
self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS) self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS)
self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT
...@@ -153,7 +153,7 @@ class CombinedOpenEndedV1Module(): ...@@ -153,7 +153,7 @@ class CombinedOpenEndedV1Module():
rubric_string = stringify_children(definition['rubric']) rubric_string = stringify_children(definition['rubric'])
self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED) self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)
#Static data is passed to the child modules to render # Static data is passed to the child modules to render
self.static_data = { self.static_data = {
'max_score': self._max_score, 'max_score': self._max_score,
'max_attempts': self.attempts, 'max_attempts': self.attempts,
...@@ -243,11 +243,11 @@ class CombinedOpenEndedV1Module(): ...@@ -243,11 +243,11 @@ class CombinedOpenEndedV1Module():
self.current_task_descriptor = children['descriptors'][current_task_type](self.system) self.current_task_descriptor = children['descriptors'][current_task_type](self.system)
#This is the xml object created from the xml definition of the current task # This is the xml object created from the xml definition of the current task
etree_xml = etree.fromstring(self.current_task_xml) etree_xml = etree.fromstring(self.current_task_xml)
#This sends the etree_xml object through the descriptor module of the current task, and # This sends the etree_xml object through the descriptor module of the current task, and
#returns the xml parsed by the descriptor # returns the xml parsed by the descriptor
self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system) self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
if current_task_state is None and self.current_task_number == 0: if current_task_state is None and self.current_task_number == 0:
self.current_task = child_task_module(self.system, self.location, self.current_task = child_task_module(self.system, self.location,
...@@ -293,6 +293,7 @@ class CombinedOpenEndedV1Module(): ...@@ -293,6 +293,7 @@ class CombinedOpenEndedV1Module():
if self.current_task_number > 0: if self.current_task_number > 0:
last_response_data = self.get_last_response(self.current_task_number - 1) last_response_data = self.get_last_response(self.current_task_number - 1)
current_response_data = self.get_current_attributes(self.current_task_number) current_response_data = self.get_current_attributes(self.current_task_number)
if (current_response_data['min_score_to_attempt'] > last_response_data['score'] if (current_response_data['min_score_to_attempt'] > last_response_data['score']
or current_response_data['max_score_to_attempt'] < last_response_data['score']): or current_response_data['max_score_to_attempt'] < last_response_data['score']):
self.state = self.DONE self.state = self.DONE
...@@ -307,7 +308,7 @@ class CombinedOpenEndedV1Module(): ...@@ -307,7 +308,7 @@ class CombinedOpenEndedV1Module():
Output: A dictionary that can be rendered into the combined open ended template. Output: A dictionary that can be rendered into the combined open ended template.
""" """
task_html = self.get_html_base() task_html = self.get_html_base()
#set context variables and render template # set context variables and render template
context = { context = {
'items': [{'content': task_html}], 'items': [{'content': task_html}],
...@@ -499,7 +500,6 @@ class CombinedOpenEndedV1Module(): ...@@ -499,7 +500,6 @@ class CombinedOpenEndedV1Module():
""" """
changed = self.update_task_states() changed = self.update_task_states()
if changed: if changed:
#return_html=self.get_html()
pass pass
return return_html return return_html
...@@ -730,15 +730,15 @@ class CombinedOpenEndedV1Module(): ...@@ -730,15 +730,15 @@ class CombinedOpenEndedV1Module():
max_score = None max_score = None
score = None score = None
if self.is_scored and self.weight is not None: if self.is_scored and self.weight is not None:
#Finds the maximum score of all student attempts and keeps it. # Finds the maximum score of all student attempts and keeps it.
score_mat = [] score_mat = []
for i in xrange(0, len(self.task_states)): for i in xrange(0, len(self.task_states)):
#For each task, extract all student scores on that task (each attempt for each task) # For each task, extract all student scores on that task (each attempt for each task)
last_response = self.get_last_response(i) last_response = self.get_last_response(i)
max_score = last_response.get('max_score', None) max_score = last_response.get('max_score', None)
score = last_response.get('all_scores', None) score = last_response.get('all_scores', None)
if score is not None: if score is not None:
#Convert none scores and weight scores properly # Convert none scores and weight scores properly
for z in xrange(0, len(score)): for z in xrange(0, len(score)):
if score[z] is None: if score[z] is None:
score[z] = 0 score[z] = 0
...@@ -746,19 +746,19 @@ class CombinedOpenEndedV1Module(): ...@@ -746,19 +746,19 @@ class CombinedOpenEndedV1Module():
score_mat.append(score) score_mat.append(score)
if len(score_mat) > 0: if len(score_mat) > 0:
#Currently, assume that the final step is the correct one, and that those are the final scores. # Currently, assume that the final step is the correct one, and that those are the final scores.
#This will change in the future, which is why the machinery above exists to extract all scores on all steps # This will change in the future, which is why the machinery above exists to extract all scores on all steps
#TODO: better final score handling. # TODO: better final score handling.
scores = score_mat[-1] scores = score_mat[-1]
score = max(scores) score = max(scores)
else: else:
score = 0 score = 0
if max_score is not None: if max_score is not None:
#Weight the max score if it is not None # Weight the max score if it is not None
max_score *= float(self.weight) max_score *= float(self.weight)
else: else:
#Without a max_score, we cannot have a score! # Without a max_score, we cannot have a score!
score = None score = None
score_dict = { score_dict = {
...@@ -833,7 +833,7 @@ class CombinedOpenEndedV1Descriptor(): ...@@ -833,7 +833,7 @@ class CombinedOpenEndedV1Descriptor():
expected_children = ['task', 'rubric', 'prompt'] expected_children = ['task', 'rubric', 'prompt']
for child in expected_children: for child in expected_children:
if len(xml_object.xpath(child)) == 0: if len(xml_object.xpath(child)) == 0:
#This is a staff_facing_error # This is a staff_facing_error
raise ValueError( raise ValueError(
"Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance. {1}".format( "Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance. {1}".format(
child, xml_object)) child, xml_object))
...@@ -848,6 +848,7 @@ class CombinedOpenEndedV1Descriptor(): ...@@ -848,6 +848,7 @@ class CombinedOpenEndedV1Descriptor():
return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')} return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')}
def definition_to_xml(self, resource_fs): def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.''' '''Return an xml element representing this definition.'''
elt = etree.Element('combinedopenended') elt = etree.Element('combinedopenended')
......
...@@ -57,13 +57,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -57,13 +57,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.queue_name = definition.get('queuename', self.DEFAULT_QUEUE) self.queue_name = definition.get('queuename', self.DEFAULT_QUEUE)
self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE) self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE)
#This is needed to attach feedback to specific responses later # This is needed to attach feedback to specific responses later
self.submission_id = None self.submission_id = None
self.grader_id = None self.grader_id = None
error_message = "No {0} found in problem xml for open ended problem. Contact the learning sciences group for assistance." error_message = "No {0} found in problem xml for open ended problem. Contact the learning sciences group for assistance."
if oeparam is None: if oeparam is None:
#This is a staff_facing_error # This is a staff_facing_error
raise ValueError(error_message.format('oeparam')) raise ValueError(error_message.format('oeparam'))
if self.child_prompt is None: if self.child_prompt is None:
raise ValueError(error_message.format('prompt')) raise ValueError(error_message.format('prompt'))
...@@ -95,14 +95,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -95,14 +95,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
grader_payload = oeparam.find('grader_payload') grader_payload = oeparam.find('grader_payload')
grader_payload = grader_payload.text if grader_payload is not None else '' grader_payload = grader_payload.text if grader_payload is not None else ''
#Update grader payload with student id. If grader payload not json, error. # Update grader payload with student id. If grader payload not json, error.
try: try:
parsed_grader_payload = json.loads(grader_payload) parsed_grader_payload = json.loads(grader_payload)
# NOTE: self.system.location is valid because the capa_module # NOTE: self.system.location is valid because the capa_module
# __init__ adds it (easiest way to get problem location into # __init__ adds it (easiest way to get problem location into
# response types) # response types)
except (TypeError, ValueError): except (TypeError, ValueError):
#This is a dev_facing_error # This is a dev_facing_error
log.exception( log.exception(
"Grader payload from external open ended grading server is not a json object! Object: {0}".format( "Grader payload from external open ended grading server is not a json object! Object: {0}".format(
grader_payload)) grader_payload))
...@@ -148,7 +148,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -148,7 +148,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
survey_responses = event_info['survey_responses'] survey_responses = event_info['survey_responses']
for tag in ['feedback', 'submission_id', 'grader_id', 'score']: for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses: if tag not in survey_responses:
#This is a student_facing_error # This is a student_facing_error
return {'success': False, return {'success': False,
'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format( 'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(
tag)} tag)}
...@@ -158,14 +158,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -158,14 +158,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
feedback = str(survey_responses['feedback'].encode('ascii', 'ignore')) feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
score = int(survey_responses['score']) score = int(survey_responses['score'])
except: except:
#This is a dev_facing_error # This is a dev_facing_error
error_message = ( error_message = (
"Could not parse submission id, grader id, " "Could not parse submission id, grader id, "
"or feedback from message_post ajax call. " "or feedback from message_post ajax call. "
"Here is the message data: {0}".format(survey_responses) "Here is the message data: {0}".format(survey_responses)
) )
log.exception(error_message) log.exception(error_message)
#This is a student_facing_error # This is a student_facing_error
return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."} return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."}
xqueue = system.get('xqueue') xqueue = system.get('xqueue')
...@@ -201,14 +201,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -201,14 +201,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
body=json.dumps(contents) body=json.dumps(contents)
) )
#Convert error to a success value # Convert error to a success value
success = True success = True
if error: if error:
success = False success = False
self.child_state = self.DONE self.child_state = self.DONE
#This is a student_facing_message # This is a student_facing_message
return {'success': success, 'msg': "Successfully submitted your feedback."} return {'success': success, 'msg': "Successfully submitted your feedback."}
def send_to_grader(self, submission, system): def send_to_grader(self, submission, system):
...@@ -249,7 +249,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -249,7 +249,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'submission_time': qtime, 'submission_time': qtime,
} }
#Update contents with student response and student info # Update contents with student response and student info
contents.update({ contents.update({
'student_info': json.dumps(student_info), 'student_info': json.dumps(student_info),
'student_response': submission, 'student_response': submission,
...@@ -369,21 +369,21 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -369,21 +369,21 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
for tag in ['success', 'feedback', 'submission_id', 'grader_id']: for tag in ['success', 'feedback', 'submission_id', 'grader_id']:
if tag not in response_items: if tag not in response_items:
#This is a student_facing_error # This is a student_facing_error
return format_feedback('errors', 'Error getting feedback from grader.') return format_feedback('errors', 'Error getting feedback from grader.')
feedback_items = response_items['feedback'] feedback_items = response_items['feedback']
try: try:
feedback = json.loads(feedback_items) feedback = json.loads(feedback_items)
except (TypeError, ValueError): except (TypeError, ValueError):
#This is a dev_facing_error # This is a dev_facing_error
log.exception("feedback_items from external open ended grader have invalid json {0}".format(feedback_items)) log.exception("feedback_items from external open ended grader have invalid json {0}".format(feedback_items))
#This is a student_facing_error # This is a student_facing_error
return format_feedback('errors', 'Error getting feedback from grader.') return format_feedback('errors', 'Error getting feedback from grader.')
if response_items['success']: if response_items['success']:
if len(feedback) == 0: if len(feedback) == 0:
#This is a student_facing_error # This is a student_facing_error
return format_feedback('errors', 'No feedback available from grader.') return format_feedback('errors', 'No feedback available from grader.')
for tag in do_not_render: for tag in do_not_render:
...@@ -393,7 +393,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -393,7 +393,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
feedback_lst = sorted(feedback.items(), key=get_priority) feedback_lst = sorted(feedback.items(), key=get_priority)
feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst) feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
else: else:
#This is a student_facing_error # This is a student_facing_error
feedback_list_part1 = format_feedback('errors', response_items['feedback']) feedback_list_part1 = format_feedback('errors', response_items['feedback'])
feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value) feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value)
...@@ -470,7 +470,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -470,7 +470,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
try: try:
score_result = json.loads(score_msg) score_result = json.loads(score_msg)
except (TypeError, ValueError): except (TypeError, ValueError):
#This is a dev_facing_error # This is a dev_facing_error
error_message = ("External open ended grader message should be a JSON-serialized dict." error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_msg = {0}".format(score_msg)) " Received score_msg = {0}".format(score_msg))
log.error(error_message) log.error(error_message)
...@@ -478,7 +478,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -478,7 +478,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return fail return fail
if not isinstance(score_result, dict): if not isinstance(score_result, dict):
#This is a dev_facing_error # This is a dev_facing_error
error_message = ("External open ended grader message should be a JSON-serialized dict." error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_result = {0}".format(score_result)) " Received score_result = {0}".format(score_result))
log.error(error_message) log.error(error_message)
...@@ -487,13 +487,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -487,13 +487,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']: for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
if tag not in score_result: if tag not in score_result:
#This is a dev_facing_error # This is a dev_facing_error
error_message = ("External open ended grader message is missing required tag: {0}" error_message = ("External open ended grader message is missing required tag: {0}"
.format(tag)) .format(tag))
log.error(error_message) log.error(error_message)
fail['feedback'] = error_message fail['feedback'] = error_message
return fail return fail
#This is to support peer grading # This is to support peer grading
if isinstance(score_result['score'], list): if isinstance(score_result['score'], list):
feedback_items = [] feedback_items = []
rubric_scores = [] rubric_scores = []
...@@ -529,7 +529,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -529,7 +529,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
feedback = feedback_items feedback = feedback_items
score = int(median(score_result['score'])) score = int(median(score_result['score']))
else: else:
#This is for instructor and ML grading # This is for instructor and ML grading
feedback, rubric_score = self._format_feedback(score_result, system) feedback, rubric_score = self._format_feedback(score_result, system)
score = score_result['score'] score = score_result['score']
rubric_scores = [rubric_score] rubric_scores = [rubric_score]
...@@ -608,9 +608,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -608,9 +608,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
} }
if dispatch not in handlers: if dispatch not in handlers:
#This is a dev_facing_error # This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error # This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False}) return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress() before = self.get_progress()
...@@ -659,10 +659,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -659,10 +659,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.send_to_grader(get['student_answer'], system) self.send_to_grader(get['student_answer'], system)
self.change_state(self.ASSESSING) self.change_state(self.ASSESSING)
else: else:
#Error message already defined # Error message already defined
success = False success = False
else: else:
#This is a student_facing_error # This is a student_facing_error
error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box." error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box."
return { return {
...@@ -679,7 +679,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -679,7 +679,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
""" """
queuekey = get['queuekey'] queuekey = get['queuekey']
score_msg = get['xqueue_body'] score_msg = get['xqueue_body']
#TODO: Remove need for cmap # TODO: Remove need for cmap
self._update_score(score_msg, queuekey, system) self._update_score(score_msg, queuekey, system)
return dict() # No AJAX return is needed return dict() # No AJAX return is needed
...@@ -690,7 +690,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild): ...@@ -690,7 +690,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
Input: Modulesystem object Input: Modulesystem object
Output: Rendered HTML Output: Rendered HTML
""" """
#set context variables and render template # set context variables and render template
eta_string = None eta_string = None
if self.child_state != self.INITIAL: if self.child_state != self.INITIAL:
latest = self.latest_answer() latest = self.latest_answer()
...@@ -749,7 +749,7 @@ class OpenEndedDescriptor(): ...@@ -749,7 +749,7 @@ class OpenEndedDescriptor():
""" """
for child in ['openendedparam']: for child in ['openendedparam']:
if len(xml_object.xpath(child)) != 1: if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error # This is a staff_facing_error
raise ValueError( raise ValueError(
"Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format( "Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child)) child))
......
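For reference, a small sketch of the grader-message validation walked through above; the REQUIRED_TAGS constant and helper name are illustrative, not part of the module:

    import json

    # Tags the external grader's response must carry, per the checks in the hunk above.
    REQUIRED_TAGS = ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']

    def validate_score_msg(score_msg):
        """Sketch: return (ok, payload_or_error) for an xqueue score message."""
        try:
            score_result = json.loads(score_msg)
        except (TypeError, ValueError):
            return False, "score_msg is not a JSON-serialized dict: {0}".format(score_msg)
        if not isinstance(score_result, dict):
            return False, "score_msg did not decode to a dict: {0}".format(score_result)
        missing = [tag for tag in REQUIRED_TAGS if tag not in score_result]
        if missing:
            return False, "missing required tags: {0}".format(missing)
        return True, score_result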
...@@ -54,7 +54,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): ...@@ -54,7 +54,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
@param system: Modulesystem @param system: Modulesystem
@return: Rendered HTML @return: Rendered HTML
""" """
#set context variables and render template # set context variables and render template
if self.child_state != self.INITIAL: if self.child_state != self.INITIAL:
latest = self.latest_answer() latest = self.latest_answer()
previous_answer = latest if latest is not None else '' previous_answer = latest if latest is not None else ''
...@@ -93,9 +93,9 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): ...@@ -93,9 +93,9 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
} }
if dispatch not in handlers: if dispatch not in handlers:
#This is a dev_facing_error # This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error # This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False}) return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress() before = self.get_progress()
...@@ -129,7 +129,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): ...@@ -129,7 +129,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
elif self.child_state in (self.POST_ASSESSMENT, self.DONE): elif self.child_state in (self.POST_ASSESSMENT, self.DONE):
context['read_only'] = True context['read_only'] = True
else: else:
#This is a dev_facing_error # This is a dev_facing_error
raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state)) raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state))
return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context) return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context)
...@@ -155,7 +155,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): ...@@ -155,7 +155,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
elif self.child_state == self.DONE: elif self.child_state == self.DONE:
context['read_only'] = True context['read_only'] = True
else: else:
#This is a dev_facing_error # This is a dev_facing_error
raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state)) raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state))
return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context) return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)
...@@ -190,10 +190,10 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): ...@@ -190,10 +190,10 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
self.new_history_entry(get['student_answer']) self.new_history_entry(get['student_answer'])
self.change_state(self.ASSESSING) self.change_state(self.ASSESSING)
else: else:
#Error message already defined # Error message already defined
success = False success = False
else: else:
#This is a student_facing_error # This is a student_facing_error
error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box." error_message = "There was a problem saving the image in your submission. Please try a different image, or try pasting a link to an image into the answer box."
return { return {
...@@ -227,12 +227,12 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): ...@@ -227,12 +227,12 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
for i in xrange(0, len(score_list)): for i in xrange(0, len(score_list)):
score_list[i] = int(score_list[i]) score_list[i] = int(score_list[i])
except ValueError: except ValueError:
#This is a dev_facing_error # This is a dev_facing_error
log.error("Non-integer score value passed to save_assessment ,or no score list present.") log.error("Non-integer score value passed to save_assessment ,or no score list present.")
#This is a student_facing_error # This is a student_facing_error
return {'success': False, 'error': "Error saving your score. Please notify course staff."} return {'success': False, 'error': "Error saving your score. Please notify course staff."}
#Record score as assessment and rubric scores as post assessment # Record score as assessment and rubric scores as post assessment
self.record_latest_score(score) self.record_latest_score(score)
self.record_latest_post_assessment(json.dumps(score_list)) self.record_latest_post_assessment(json.dumps(score_list))
...@@ -272,7 +272,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild): ...@@ -272,7 +272,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
try: try:
rubric_scores = json.loads(latest_post_assessment) rubric_scores = json.loads(latest_post_assessment)
except: except:
#This is a dev_facing_error # This is a dev_facing_error
log.error("Cannot parse rubric scores in self assessment module from {0}".format(latest_post_assessment)) log.error("Cannot parse rubric scores in self assessment module from {0}".format(latest_post_assessment))
rubric_scores = [] rubric_scores = []
return [rubric_scores] return [rubric_scores]
...@@ -306,7 +306,7 @@ class SelfAssessmentDescriptor(): ...@@ -306,7 +306,7 @@ class SelfAssessmentDescriptor():
expected_children = [] expected_children = []
for child in expected_children: for child in expected_children:
if len(xml_object.xpath(child)) != 1: if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error # This is a staff_facing_error
raise ValueError( raise ValueError(
"Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format( "Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child)) child))
......
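A tiny sketch of the score coercion that save_assessment performs above; the helper name is illustrative:

    def coerce_scores(score_list):
        """Sketch: convert submitted rubric scores to ints, as save_assessment does above."""
        try:
            return [int(value) for value in score_list]
        except ValueError:
            # A non-integer value means the assessment cannot be saved;
            # the caller returns a student-facing error instead.
            return None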
...@@ -55,7 +55,7 @@ class CustomTagDescriptor(RawDescriptor): ...@@ -55,7 +55,7 @@ class CustomTagDescriptor(RawDescriptor):
params = dict(xmltree.items()) params = dict(xmltree.items())
# cdodge: look up the template as a module # cdodge: look up the template as a module
template_loc = self.location._replace(category='custom_tag_template', name=template_name) template_loc = self.location.replace(category='custom_tag_template', name=template_name)
template_module = modulestore().get_instance(system.course_id, template_loc) template_module = modulestore().get_instance(system.course_id, template_loc)
template_module_data = template_module.data template_module_data = template_module.data
......
...@@ -19,6 +19,7 @@ from django.http import QueryDict ...@@ -19,6 +19,7 @@ from django.http import QueryDict
from . import test_system from . import test_system
from pytz import UTC from pytz import UTC
from capa.correctmap import CorrectMap
class CapaFactory(object): class CapaFactory(object):
...@@ -597,6 +598,85 @@ class CapaModuleTest(unittest.TestCase): ...@@ -597,6 +598,85 @@ class CapaModuleTest(unittest.TestCase):
# Expect that the problem was NOT reset # Expect that the problem was NOT reset
self.assertTrue('success' in result and not result['success']) self.assertTrue('success' in result and not result['success'])
def test_rescore_problem_correct(self):
module = CapaFactory.create(attempts=1, done=True)
# Simulate that all answers are marked correct, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
result = module.rescore_problem()
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
# Expect that we get no HTML
self.assertFalse('contents' in result)
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_incorrect(self):
# make sure it also works when attempts have been reset,
# so add this to the test:
module = CapaFactory.create(attempts=0, done=True)
# Simulate that all answers are marked incorrect, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
result = module.rescore_problem()
# Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 0)
def test_rescore_problem_not_done(self):
# Simulate that the problem is NOT done
module = CapaFactory.create(done=False)
# Try to rescore the problem, and get exception
with self.assertRaises(xmodule.exceptions.NotFoundError):
module.rescore_problem()
def test_rescore_problem_not_supported(self):
module = CapaFactory.create(done=True)
# Try to rescore the problem, and get exception
with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
mock_supports_rescoring.return_value = False
with self.assertRaises(NotImplementedError):
module.rescore_problem()
def _rescore_problem_error_helper(self, exception_class):
"""Helper to allow testing all errors that rescoring might return."""
# Create the module
module = CapaFactory.create(attempts=1, done=True)
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = exception_class(u'test error \u03a9')
result = module.rescore_problem()
# Expect an AJAX alert message in 'success'
expected_msg = u'Error: test error \u03a9'
self.assertEqual(result['success'], expected_msg)
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_student_input_error(self):
self._rescore_problem_error_helper(StudentInputError)
def test_rescore_problem_problem_error(self):
self._rescore_problem_error_helper(LoncapaProblemError)
def test_rescore_problem_response_error(self):
self._rescore_problem_error_helper(ResponseError)
def test_save_problem(self): def test_save_problem(self):
module = CapaFactory.create(done=False) module = CapaFactory.create(done=False)
......
...@@ -20,7 +20,7 @@ from . import test_system ...@@ -20,7 +20,7 @@ from . import test_system
class DummySystem(ImportSystem): class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS()) @patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
def __init__(self, load_error_modules): def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", course_dirs=[], load_error_modules=load_error_modules) xmlstore = XMLModuleStore("data_dir", course_dirs=[], load_error_modules=load_error_modules)
...@@ -43,6 +43,7 @@ class DummySystem(ImportSystem): ...@@ -43,6 +43,7 @@ class DummySystem(ImportSystem):
def render_template(self, template, context): def render_template(self, template, context):
raise Exception("Shouldn't be called") raise Exception("Shouldn't be called")
class ConditionalFactory(object): class ConditionalFactory(object):
""" """
A helper class to create a conditional module and associated source and child modules A helper class to create a conditional module and associated source and child modules
...@@ -93,7 +94,7 @@ class ConditionalFactory(object): ...@@ -93,7 +94,7 @@ class ConditionalFactory(object):
# return dict: # return dict:
return {'cond_module': cond_module, return {'cond_module': cond_module,
'source_module': source_module, 'source_module': source_module,
'child_module': child_module } 'child_module': child_module}
class ConditionalModuleBasicTest(unittest.TestCase): class ConditionalModuleBasicTest(unittest.TestCase):
...@@ -109,12 +110,11 @@ class ConditionalModuleBasicTest(unittest.TestCase): ...@@ -109,12 +110,11 @@ class ConditionalModuleBasicTest(unittest.TestCase):
'''verify that get_icon_class works independent of condition satisfaction''' '''verify that get_icon_class works independent of condition satisfaction'''
modules = ConditionalFactory.create(self.test_system) modules = ConditionalFactory.create(self.test_system)
for attempted in ["false", "true"]: for attempted in ["false", "true"]:
for icon_class in [ 'other', 'problem', 'video']: for icon_class in ['other', 'problem', 'video']:
modules['source_module'].is_attempted = attempted modules['source_module'].is_attempted = attempted
modules['child_module'].get_icon_class = lambda: icon_class modules['child_module'].get_icon_class = lambda: icon_class
self.assertEqual(modules['cond_module'].get_icon_class(), icon_class) self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)
def test_get_html(self): def test_get_html(self):
modules = ConditionalFactory.create(self.test_system) modules = ConditionalFactory.create(self.test_system)
# because test_system returns the repr of the context dict passed to render_template, # because test_system returns the repr of the context dict passed to render_template,
...@@ -224,4 +224,3 @@ class ConditionalModuleXmlTest(unittest.TestCase): ...@@ -224,4 +224,3 @@ class ConditionalModuleXmlTest(unittest.TestCase):
print "post-attempt ajax: ", ajax print "post-attempt ajax: ", ajax
html = ajax['html'] html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html])) self.assertTrue(any(['This is a secret' in item for item in html]))
...@@ -15,7 +15,7 @@ from xblock.core import XBlock, Scope, String, Integer, Float, ModelType ...@@ -15,7 +15,7 @@ from xblock.core import XBlock, Scope, String, Integer, Float, ModelType
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
def dummy_track(event_type, event): def dummy_track(_event_type, _event):
pass pass
...@@ -231,7 +231,7 @@ class XModule(XModuleFields, HTMLSnippet, XBlock): ...@@ -231,7 +231,7 @@ class XModule(XModuleFields, HTMLSnippet, XBlock):
''' '''
return self.icon_class return self.icon_class
### Functions used in the LMS # Functions used in the LMS
def get_score(self): def get_score(self):
""" """
...@@ -272,7 +272,7 @@ class XModule(XModuleFields, HTMLSnippet, XBlock): ...@@ -272,7 +272,7 @@ class XModule(XModuleFields, HTMLSnippet, XBlock):
''' '''
return None return None
def handle_ajax(self, dispatch, get): def handle_ajax(self, _dispatch, _get):
''' dispatch is last part of the URL. ''' dispatch is last part of the URL.
get is a dictionary-like object ''' get is a dictionary-like object '''
return "" return ""
...@@ -647,13 +647,13 @@ class XModuleDescriptor(XModuleFields, HTMLSnippet, ResourceTemplates, XBlock): ...@@ -647,13 +647,13 @@ class XModuleDescriptor(XModuleFields, HTMLSnippet, ResourceTemplates, XBlock):
# 1. A select editor for fields with a list of possible values (includes Booleans). # 1. A select editor for fields with a list of possible values (includes Booleans).
# 2. Number editors for integers and floats. # 2. Number editors for integers and floats.
# 3. A generic string editor for anything else (editing JSON representation of the value). # 3. A generic string editor for anything else (editing JSON representation of the value).
type = "Generic" editor_type = "Generic"
values = [] if field.values is None else copy.deepcopy(field.values) values = [] if field.values is None else copy.deepcopy(field.values)
if isinstance(values, tuple): if isinstance(values, tuple):
values = list(values) values = list(values)
if isinstance(values, list): if isinstance(values, list):
if len(values) > 0: if len(values) > 0:
type = "Select" editor_type = "Select"
for index, choice in enumerate(values): for index, choice in enumerate(values):
json_choice = copy.deepcopy(choice) json_choice = copy.deepcopy(choice)
if isinstance(json_choice, dict) and 'value' in json_choice: if isinstance(json_choice, dict) and 'value' in json_choice:
...@@ -662,11 +662,11 @@ class XModuleDescriptor(XModuleFields, HTMLSnippet, ResourceTemplates, XBlock): ...@@ -662,11 +662,11 @@ class XModuleDescriptor(XModuleFields, HTMLSnippet, ResourceTemplates, XBlock):
json_choice = field.to_json(json_choice) json_choice = field.to_json(json_choice)
values[index] = json_choice values[index] = json_choice
elif isinstance(field, Integer): elif isinstance(field, Integer):
type = "Integer" editor_type = "Integer"
elif isinstance(field, Float): elif isinstance(field, Float):
type = "Float" editor_type = "Float"
metadata_fields[field.name] = {'field_name': field.name, metadata_fields[field.name] = {'field_name': field.name,
'type': type, 'type': editor_type,
'display_name': field.display_name, 'display_name': field.display_name,
'value': field.to_json(value), 'value': field.to_json(value),
'options': values, 'options': values,
...@@ -862,7 +862,7 @@ class ModuleSystem(object): ...@@ -862,7 +862,7 @@ class ModuleSystem(object):
class DoNothingCache(object): class DoNothingCache(object):
"""A duck-compatible object to use in ModuleSystem when there's no cache.""" """A duck-compatible object to use in ModuleSystem when there's no cache."""
def get(self, key): def get(self, _key):
return None return None
def set(self, key, value, timeout=None): def set(self, key, value, timeout=None):
......
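A compact sketch of the editor-type selection introduced above; the helper name is illustrative, and Integer and Float are the xblock.core field types imported in this file:

    from xblock.core import Integer, Float  # field types, as imported in the hunk above

    def pick_editor_type(field, values):
        # Mirrors the selection above: explicit value lists (including booleans)
        # get a Select editor, numeric fields get Integer/Float editors, and
        # everything else falls back to a generic JSON editor.
        if isinstance(values, tuple):
            values = list(values)
        if isinstance(values, list):
            if len(values) > 0:
                return "Select"
        elif isinstance(field, Integer):
            return "Integer"
        elif isinstance(field, Float):
            return "Float"
        return "Generic"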
...@@ -56,7 +56,6 @@ def get_metadata_from_xml(xml_object, remove=True): ...@@ -56,7 +56,6 @@ def get_metadata_from_xml(xml_object, remove=True):
if meta is None: if meta is None:
return '' return ''
dmdata = meta.text dmdata = meta.text
#log.debug('meta for %s loaded: %s' % (xml_object,dmdata))
if remove: if remove:
xml_object.remove(meta) xml_object.remove(meta)
return dmdata return dmdata
......
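A hypothetical end-to-end sketch of the metadata extraction above, assuming an lxml.etree element; the 'meta' tag name is an assumption, since the lookup itself falls outside this hunk:

    def get_meta_text(xml_object, remove=True):
        """Sketch: read the metadata child's text and optionally detach the node."""
        meta = xml_object.find('meta')  # tag name assumed for illustration
        if meta is None:
            return ''
        dmdata = meta.text
        if remove:
            xml_object.remove(meta)
        return dmdata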
...@@ -3,6 +3,11 @@ describe 'Logger', -> ...@@ -3,6 +3,11 @@ describe 'Logger', ->
expect(window.log_event).toBe Logger.log expect(window.log_event).toBe Logger.log
describe 'log', -> describe 'log', ->
it 'sends an event to Segment.io, if the event is whitelisted', ->
spyOn(analytics, 'track')
Logger.log 'seq_goto', 'data'
expect(analytics.track).toHaveBeenCalledWith 'seq_goto', 'data'
it 'sends a request to log event', -> it 'sends a request to log event', ->
spyOn $, 'getWithPrefix' spyOn $, 'getWithPrefix'
Logger.log 'example', 'data' Logger.log 'example', 'data'
......
class @Logger class @Logger
# events we want sent to Segment.io for tracking
SEGMENT_IO_WHITELIST = ["seq_goto", "seq_next", "seq_prev"]
@log: (event_type, data) -> @log: (event_type, data) ->
if event_type in SEGMENT_IO_WHITELIST
# Segment.io event tracking
analytics.track event_type, data
$.getWithPrefix '/event', $.getWithPrefix '/event',
event_type: event_type event_type: event_type
event: JSON.stringify(data) event: JSON.stringify(data)
......
[Vendored file: underscore.string v2.3.0 (minified). The source diff is too large to display inline.]
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
<script type="text/javascript" src="<%= common_js_root %>/vendor/tiny_mce/tiny_mce.js"></script> <script type="text/javascript" src="<%= common_js_root %>/vendor/tiny_mce/tiny_mce.js"></script>
<script type="text/javascript" src="<%= common_js_root %>/vendor/mathjax-MathJax-c9db6ac/MathJax.js?config=default"></script> <script type="text/javascript" src="<%= common_js_root %>/vendor/mathjax-MathJax-c9db6ac/MathJax.js?config=default"></script>
<script type="text/javascript" src="<%= common_js_root %>/vendor/jquery.timeago.js"></script> <script type="text/javascript" src="<%= common_js_root %>/vendor/jquery.timeago.js"></script>
<script type="text/javascript" src="<%= common_js_root %>/vendor/sinon-1.7.1.js"></script>
<script type="text/javascript" src="<%= common_js_root %>/vendor/analytics.js"></script>
<script type="text/javascript"> <script type="text/javascript">
AjaxPrefix.addAjaxPrefix(jQuery, function() { AjaxPrefix.addAjaxPrefix(jQuery, function() {
return ""; return "";
......
#pylint: disable=C0111 # pylint: disable=C0111
#pylint: disable=W0621 # pylint: disable=W0621
from __future__ import absolute_import from __future__ import absolute_import
......
...@@ -112,12 +112,12 @@ def assert_problem_has_answer(step, problem_type, answer_class): ...@@ -112,12 +112,12 @@ def assert_problem_has_answer(step, problem_type, answer_class):
@step(u'I reset the problem') @step(u'I reset the problem')
def reset_problem(step): def reset_problem(_step):
world.css_click('input.reset') world.css_click('input.reset')
@step(u'I press the button with the label "([^"]*)"$') @step(u'I press the button with the label "([^"]*)"$')
def press_the_button_with_label(step, buttonname): def press_the_button_with_label(_step, buttonname):
button_css = 'button span.show-label' button_css = 'button span.show-label'
elem = world.css_find(button_css).first elem = world.css_find(button_css).first
assert_equal(elem.text, buttonname) assert_equal(elem.text, buttonname)
...@@ -125,7 +125,7 @@ def press_the_button_with_label(step, buttonname): ...@@ -125,7 +125,7 @@ def press_the_button_with_label(step, buttonname):
@step(u'The "([^"]*)" button does( not)? appear') @step(u'The "([^"]*)" button does( not)? appear')
def action_button_present(step, buttonname, doesnt_appear): def action_button_present(_step, buttonname, doesnt_appear):
button_css = 'section.action input[value*="%s"]' % buttonname button_css = 'section.action input[value*="%s"]' % buttonname
if doesnt_appear: if doesnt_appear:
assert world.is_css_not_present(button_css) assert world.is_css_not_present(button_css)
......
#pylint: disable=C0111 # pylint: disable=C0111
#pylint: disable=W0621 # pylint: disable=W0621
from lettuce import world, step from lettuce import world, step
from lettuce.django import django_url from lettuce.django import django_url
...@@ -7,7 +7,7 @@ from common import TEST_COURSE_ORG, TEST_COURSE_NAME ...@@ -7,7 +7,7 @@ from common import TEST_COURSE_ORG, TEST_COURSE_NAME
@step('I register for the course "([^"]*)"$') @step('I register for the course "([^"]*)"$')
def i_register_for_the_course(step, course): def i_register_for_the_course(_step, course):
cleaned_name = TEST_COURSE_NAME.replace(' ', '_') cleaned_name = TEST_COURSE_NAME.replace(' ', '_')
url = django_url('courses/%s/%s/%s/about' % (TEST_COURSE_ORG, course, cleaned_name)) url = django_url('courses/%s/%s/%s/about' % (TEST_COURSE_ORG, course, cleaned_name))
world.browser.visit(url) world.browser.visit(url)
...@@ -20,13 +20,13 @@ def i_register_for_the_course(step, course): ...@@ -20,13 +20,13 @@ def i_register_for_the_course(step, course):
@step(u'I should see an empty dashboard message') @step(u'I should see an empty dashboard message')
def i_should_see_empty_dashboard(step): def i_should_see_empty_dashboard(_step):
empty_dash_css = 'section.empty-dashboard-message' empty_dash_css = 'section.empty-dashboard-message'
assert world.is_css_present(empty_dash_css) assert world.is_css_present(empty_dash_css)
@step(u'I should( NOT)? see the course numbered "([^"]*)" in my dashboard$') @step(u'I should( NOT)? see the course numbered "([^"]*)" in my dashboard$')
def i_should_see_that_course_in_my_dashboard(step, doesnt_appear, course): def i_should_see_that_course_in_my_dashboard(_step, doesnt_appear, course):
course_link_css = 'section.my-courses a[href*="%s"]' % course course_link_css = 'section.my-courses a[href*="%s"]' % course
if doesnt_appear: if doesnt_appear:
assert world.is_css_not_present(course_link_css) assert world.is_css_not_present(course_link_css)
...@@ -35,7 +35,7 @@ def i_should_see_that_course_in_my_dashboard(step, doesnt_appear, course): ...@@ -35,7 +35,7 @@ def i_should_see_that_course_in_my_dashboard(step, doesnt_appear, course):
@step(u'I unregister for the course numbered "([^"]*)"') @step(u'I unregister for the course numbered "([^"]*)"')
def i_unregister_for_that_course(step, course): def i_unregister_for_that_course(_step, course):
unregister_css = 'section.info a[href*="#unenroll-modal"][data-course-number*="%s"]' % course unregister_css = 'section.info a[href*="#unenroll-modal"][data-course-number*="%s"]' % course
world.css_click(unregister_css) world.css_click(unregister_css)
button_css = 'section#unenroll-modal input[value="Unregister"]' button_css = 'section#unenroll-modal input[value="Unregister"]'
......
...@@ -8,12 +8,12 @@ from common import TEST_COURSE_NAME, TEST_SECTION_NAME, i_am_registered_for_the_ ...@@ -8,12 +8,12 @@ from common import TEST_COURSE_NAME, TEST_SECTION_NAME, i_am_registered_for_the_
@step('when I view the video it has autoplay enabled') @step('when I view the video it has autoplay enabled')
def does_autoplay(step): def does_autoplay(_step):
assert(world.css_find('.video')[0]['data-autoplay'] == 'True') assert(world.css_find('.video')[0]['data-autoplay'] == 'True')
@step('the course has a Video component') @step('the course has a Video component')
def view_video(step): def view_video(_step):
coursename = TEST_COURSE_NAME.replace(' ', '_') coursename = TEST_COURSE_NAME.replace(' ', '_')
i_am_registered_for_the_course(step, coursename) i_am_registered_for_the_course(step, coursename)
......
...@@ -4,9 +4,9 @@ WE'RE USING MIGRATIONS! ...@@ -4,9 +4,9 @@ WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that, file and check it in at the same time as your model changes. To do that,
1. Go to the mitx dir 1. Go to the edx-platform dir
2. ./manage.py schemamigration courseware --auto description_of_your_change 2. ./manage.py schemamigration courseware --auto description_of_your_change
3. Add the migration file created in mitx/courseware/migrations/ 3. Add the migration file created in edx-platform/lms/djangoapps/courseware/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types ASSUMPTIONS: modules have unique IDs, even across different module_types
...@@ -17,6 +17,7 @@ from django.db import models ...@@ -17,6 +17,7 @@ from django.db import models
from django.db.models.signals import post_save from django.db.models.signals import post_save
from django.dispatch import receiver from django.dispatch import receiver
class StudentModule(models.Model): class StudentModule(models.Model):
""" """
Keeps student state for a particular module in a particular course. Keeps student state for a particular module in a particular course.
......
...@@ -121,7 +121,7 @@ def toc_for_course(user, request, course, active_chapter, active_section, model_ ...@@ -121,7 +121,7 @@ def toc_for_course(user, request, course, active_chapter, active_section, model_
def get_module(user, request, location, model_data_cache, course_id, def get_module(user, request, location, model_data_cache, course_id,
position=None, not_found_ok = False, wrap_xmodule_display=True, position=None, not_found_ok=False, wrap_xmodule_display=True,
grade_bucket_type=None, depth=0): grade_bucket_type=None, depth=0):
""" """
Get an instance of the xmodule class identified by location, Get an instance of the xmodule class identified by location,
...@@ -161,16 +161,49 @@ def get_module(user, request, location, model_data_cache, course_id, ...@@ -161,16 +161,49 @@ def get_module(user, request, location, model_data_cache, course_id,
return None return None
def get_xqueue_callback_url_prefix(request):
"""
Calculates default prefix based on request, but allows override via settings
This is separated from get_module_for_descriptor so that it can be called
by the LMS before submitting background tasks to run. The xqueue callbacks
should go back to the LMS, not to the worker.
"""
prefix = '{proto}://{host}'.format(
proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http'),
host=request.get_host()
)
return settings.XQUEUE_INTERFACE.get('callback_url', prefix)
def get_module_for_descriptor(user, request, descriptor, model_data_cache, course_id, def get_module_for_descriptor(user, request, descriptor, model_data_cache, course_id,
position=None, wrap_xmodule_display=True, grade_bucket_type=None): position=None, wrap_xmodule_display=True, grade_bucket_type=None):
""" """
Actually implement get_module. See docstring there for details. Implements get_module, extracting out the request-specific functionality.
"""
See get_module() docstring for further details.
"""
# allow course staff to masquerade as student # allow course staff to masquerade as student
if has_access(user, descriptor, 'staff', course_id): if has_access(user, descriptor, 'staff', course_id):
setup_masquerade(request, True) setup_masquerade(request, True)
track_function = make_track_function(request)
xqueue_callback_url_prefix = get_xqueue_callback_url_prefix(request)
return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
track_function, xqueue_callback_url_prefix,
position, wrap_xmodule_display, grade_bucket_type)
def get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
track_function, xqueue_callback_url_prefix,
position=None, wrap_xmodule_display=True, grade_bucket_type=None):
"""
Actually implement get_module, without requiring a request.
See get_module() docstring for further details.
"""
# Short circuit--if the user shouldn't have access, bail without doing any work # Short circuit--if the user shouldn't have access, bail without doing any work
if not has_access(user, descriptor, 'load', course_id): if not has_access(user, descriptor, 'load', course_id):
return None return None
...@@ -186,19 +219,13 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours ...@@ -186,19 +219,13 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
def make_xqueue_callback(dispatch='score_update'): def make_xqueue_callback(dispatch='score_update'):
# Fully qualified callback URL for external queueing system # Fully qualified callback URL for external queueing system
xqueue_callback_url = '{proto}://{host}'.format( relative_xqueue_callback_url = reverse('xqueue_callback',
host=request.get_host(),
proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http')
)
xqueue_callback_url = settings.XQUEUE_INTERFACE.get('callback_url',xqueue_callback_url) # allow override
xqueue_callback_url += reverse('xqueue_callback',
kwargs=dict(course_id=course_id, kwargs=dict(course_id=course_id,
userid=str(user.id), userid=str(user.id),
id=descriptor.location.url(), id=descriptor.location.url(),
dispatch=dispatch), dispatch=dispatch),
) )
return xqueue_callback_url return xqueue_callback_url_prefix + relative_xqueue_callback_url
# Default queuename is course-specific and is derived from the course that # Default queuename is course-specific and is derived from the course that
# contains the current module. # contains the current module.
...@@ -211,20 +238,20 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours ...@@ -211,20 +238,20 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS 'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
} }
#This is a hacky way to pass settings to the combined open ended xmodule # This is a hacky way to pass settings to the combined open ended xmodule
#It needs an S3 interface to upload images to S3 # It needs an S3 interface to upload images to S3
#It needs the open ended grading interface in order to get peer grading to be done # It needs the open ended grading interface in order to get peer grading to be done
#this first checks to see if the descriptor is the correct one, and only sends settings if it is # this first checks to see if the descriptor is the correct one, and only sends settings if it is
#Get descriptor metadata fields indicating needs for various settings # Get descriptor metadata fields indicating needs for various settings
needs_open_ended_interface = getattr(descriptor, "needs_open_ended_interface", False) needs_open_ended_interface = getattr(descriptor, "needs_open_ended_interface", False)
needs_s3_interface = getattr(descriptor, "needs_s3_interface", False) needs_s3_interface = getattr(descriptor, "needs_s3_interface", False)
#Initialize interfaces to None # Initialize interfaces to None
open_ended_grading_interface = None open_ended_grading_interface = None
s3_interface = None s3_interface = None
#Create interfaces if needed # Create interfaces if needed
if needs_open_ended_interface: if needs_open_ended_interface:
open_ended_grading_interface = settings.OPEN_ENDED_GRADING_INTERFACE open_ended_grading_interface = settings.OPEN_ENDED_GRADING_INTERFACE
open_ended_grading_interface['mock_peer_grading'] = settings.MOCK_PEER_GRADING open_ended_grading_interface['mock_peer_grading'] = settings.MOCK_PEER_GRADING
...@@ -238,10 +265,15 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours ...@@ -238,10 +265,15 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
def inner_get_module(descriptor): def inner_get_module(descriptor):
""" """
Delegate to get_module. It does an access check, so may return None Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set.
Because it does an access check, it may return None.
""" """
return get_module_for_descriptor(user, request, descriptor, # TODO: fix this so that make_xqueue_callback uses the descriptor passed into
model_data_cache, course_id, position) # inner_get_module, not the parent's callback. Add it as an argument....
return get_module_for_descriptor_internal(user, descriptor, model_data_cache, course_id,
track_function, make_xqueue_callback,
position, wrap_xmodule_display, grade_bucket_type)
def xblock_model_data(descriptor): def xblock_model_data(descriptor):
return DbModel( return DbModel(
...@@ -266,7 +298,7 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours ...@@ -266,7 +298,7 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
student_module.max_grade = event.get('max_value') student_module.max_grade = event.get('max_value')
student_module.save() student_module.save()
#Bin score into range and increment stats # Bin score into range and increment stats
score_bucket = get_score_bucket(student_module.grade, student_module.max_grade) score_bucket = get_score_bucket(student_module.grade, student_module.max_grade)
org, course_num, run = course_id.split("/") org, course_num, run = course_id.split("/")
...@@ -291,7 +323,7 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours ...@@ -291,7 +323,7 @@ def get_module_for_descriptor(user, request, descriptor, model_data_cache, cours
# TODO (cpennington): When modules are shared between courses, the static # TODO (cpennington): When modules are shared between courses, the static
# prefix is going to have to be specific to the module, not the directory # prefix is going to have to be specific to the module, not the directory
# that the xml was loaded from # that the xml was loaded from
system = ModuleSystem(track_function=make_track_function(request), system = ModuleSystem(track_function=track_function,
render_template=render_to_string, render_template=render_to_string,
ajax_url=ajax_url, ajax_url=ajax_url,
xqueue=xqueue, xqueue=xqueue,
...@@ -440,13 +472,13 @@ def modx_dispatch(request, dispatch, location, course_id): ...@@ -440,13 +472,13 @@ def modx_dispatch(request, dispatch, location, course_id):
inputfiles = request.FILES.getlist(fileinput_id) inputfiles = request.FILES.getlist(fileinput_id)
if len(inputfiles) > settings.MAX_FILEUPLOADS_PER_INPUT: if len(inputfiles) > settings.MAX_FILEUPLOADS_PER_INPUT:
too_many_files_msg = 'Submission aborted! Maximum %d files may be submitted at once' %\ too_many_files_msg = 'Submission aborted! Maximum %d files may be submitted at once' % \
settings.MAX_FILEUPLOADS_PER_INPUT settings.MAX_FILEUPLOADS_PER_INPUT
return HttpResponse(json.dumps({'success': too_many_files_msg})) return HttpResponse(json.dumps({'success': too_many_files_msg}))
for inputfile in inputfiles: for inputfile in inputfiles:
if inputfile.size > settings.STUDENT_FILEUPLOAD_MAX_SIZE: # Bytes if inputfile.size > settings.STUDENT_FILEUPLOAD_MAX_SIZE: # Bytes
file_too_big_msg = 'Submission aborted! Your file "%s" is too large (max size: %d MB)' %\ file_too_big_msg = 'Submission aborted! Your file "%s" is too large (max size: %d MB)' % \
(inputfile.name, settings.STUDENT_FILEUPLOAD_MAX_SIZE / (1000 ** 2)) (inputfile.name, settings.STUDENT_FILEUPLOAD_MAX_SIZE / (1000 ** 2))
return HttpResponse(json.dumps({'success': file_too_big_msg})) return HttpResponse(json.dumps({'success': file_too_big_msg}))
p[fileinput_id] = inputfiles p[fileinput_id] = inputfiles
......
...@@ -11,6 +11,7 @@ from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE ...@@ -11,6 +11,7 @@ from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.tests.factories import CourseFactory
class ProgressTestCase(TestCase): class ProgressTestCase(TestCase):
def setUp(self): def setUp(self):
...@@ -35,7 +36,7 @@ class ProgressTestCase(TestCase): ...@@ -35,7 +36,7 @@ class ProgressTestCase(TestCase):
self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course, self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course,
self.active_page1)[0].link, self.active_page1)[0].link,
reverse('progress', args = [self.course.id])) reverse('progress', args=[self.course.id]))
self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course, self.assertEqual(tabs._progress(self.tab, self.mockuser1, self.course,
self.active_page0)[0].is_active, False) self.active_page0)[0].is_active, False)
...@@ -129,14 +130,13 @@ class StaticTabTestCase(TestCase): ...@@ -129,14 +130,13 @@ class StaticTabTestCase(TestCase):
self.assertEqual(tabs._static_tab(self.tabby, self.user, self.assertEqual(tabs._static_tab(self.tabby, self.user,
self.course, self.active_page1)[0].link, self.course, self.active_page1)[0].link,
reverse('static_tab', args = [self.course.id, reverse('static_tab', args=[self.course.id,
self.tabby['url_slug']])) self.tabby['url_slug']]))
self.assertEqual(tabs._static_tab(self.tabby, self.user, self.assertEqual(tabs._static_tab(self.tabby, self.user,
self.course, self.active_page1)[0].is_active, self.course, self.active_page1)[0].is_active,
True) True)
self.assertEqual(tabs._static_tab(self.tabby, self.user, self.assertEqual(tabs._static_tab(self.tabby, self.user,
self.course, self.active_page0)[0].is_active, self.course, self.active_page0)[0].is_active,
False) False)
...@@ -206,6 +206,7 @@ class TextbooksTestCase(TestCase): ...@@ -206,6 +206,7 @@ class TextbooksTestCase(TestCase):
self.assertEqual(tabs._textbooks(self.tab, self.mockuser0, self.assertEqual(tabs._textbooks(self.tab, self.mockuser0,
self.course, self.active_pageX), []) self.course, self.active_pageX), [])
class KeyCheckerTestCase(TestCase): class KeyCheckerTestCase(TestCase):
def setUp(self): def setUp(self):
...@@ -225,37 +226,34 @@ class NullValidatorTestCase(TestCase): ...@@ -225,37 +226,34 @@ class NullValidatorTestCase(TestCase):
def setUp(self): def setUp(self):
self.d = {} self.dummy = {}
def test_null_validator(self): def test_null_validator(self):
self.assertIsNone(tabs.null_validator(self.dummy))
self.assertIsNone(tabs.null_validator(self.d))
class ValidateTabsTestCase(TestCase): class ValidateTabsTestCase(TestCase):
def setUp(self): def setUp(self):
self.courses = [MagicMock() for i in range(0,5)] self.courses = [MagicMock() for i in range(0, 5)]
self.courses[0].tabs = None self.courses[0].tabs = None
self.courses[1].tabs = [{'type':'courseware'}, {'type': 'fax'}] self.courses[1].tabs = [{'type': 'courseware'}, {'type': 'fax'}]
self.courses[2].tabs = [{'type':'shadow'}, {'type': 'course_info'}] self.courses[2].tabs = [{'type': 'shadow'}, {'type': 'course_info'}]
self.courses[3].tabs = [{'type':'courseware'},{'type':'course_info', 'name': 'alice'}, self.courses[3].tabs = [{'type': 'courseware'}, {'type': 'course_info', 'name': 'alice'},
{'type': 'wiki', 'name':'alice'}, {'type':'discussion', 'name': 'alice'}, {'type': 'wiki', 'name': 'alice'}, {'type': 'discussion', 'name': 'alice'},
{'type':'external_link', 'name': 'alice', 'link':'blink'}, {'type': 'external_link', 'name': 'alice', 'link': 'blink'},
{'type':'textbooks'}, {'type':'progress', 'name': 'alice'}, {'type': 'textbooks'}, {'type': 'progress', 'name': 'alice'},
{'type':'static_tab', 'name':'alice', 'url_slug':'schlug'}, {'type': 'static_tab', 'name': 'alice', 'url_slug': 'schlug'},
{'type': 'staff_grading'}] {'type': 'staff_grading'}]
self.courses[4].tabs = [{'type':'courseware'},{'type': 'course_info'}, {'type': 'flying'}] self.courses[4].tabs = [{'type': 'courseware'}, {'type': 'course_info'}, {'type': 'flying'}]
def test_validate_tabs(self): def test_validate_tabs(self):
self.assertIsNone(tabs.validate_tabs(self.courses[0])) self.assertIsNone(tabs.validate_tabs(self.courses[0]))
self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[1]) self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[1])
self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[2]) self.assertRaises(tabs.InvalidTabsException, tabs.validate_tabs, self.courses[2])
...@@ -268,15 +266,15 @@ class DiscussionLinkTestCase(ModuleStoreTestCase): ...@@ -268,15 +266,15 @@ class DiscussionLinkTestCase(ModuleStoreTestCase):
def setUp(self): def setUp(self):
self.tabs_with_discussion = [ self.tabs_with_discussion = [
{'type':'courseware'}, {'type': 'courseware'},
{'type':'course_info'}, {'type': 'course_info'},
{'type':'discussion'}, {'type': 'discussion'},
{'type':'textbooks'}, {'type': 'textbooks'},
] ]
self.tabs_without_discussion = [ self.tabs_without_discussion = [
{'type':'courseware'}, {'type': 'courseware'},
{'type':'course_info'}, {'type': 'course_info'},
{'type':'textbooks'}, {'type': 'textbooks'},
] ]
@staticmethod @staticmethod
......
...@@ -10,7 +10,6 @@ import os ...@@ -10,7 +10,6 @@ import os
import re import re
import requests import requests
from requests.status_codes import codes from requests.status_codes import codes
import urllib
from collections import OrderedDict from collections import OrderedDict
from StringIO import StringIO from StringIO import StringIO
...@@ -20,8 +19,10 @@ from django.contrib.auth.models import User, Group ...@@ -20,8 +19,10 @@ from django.contrib.auth.models import User, Group
from django.http import HttpResponse from django.http import HttpResponse
from django_future.csrf import ensure_csrf_cookie from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control from django.views.decorators.cache import cache_control
from mitxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse
import xmodule.graders as xmgraders
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware import grades from courseware import grades
from courseware.access import (has_access, get_access_group_name, from courseware.access import (has_access, get_access_group_name,
...@@ -33,13 +34,18 @@ from django_comment_common.models import (Role, ...@@ -33,13 +34,18 @@ from django_comment_common.models import (Role,
FORUM_ROLE_MODERATOR, FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA) FORUM_ROLE_COMMUNITY_TA)
from django_comment_client.utils import has_forum_access from django_comment_client.utils import has_forum_access
from instructor.offline_gradecalc import student_grades, offline_grades_available
from instructor_task.api import (get_running_instructor_tasks,
get_instructor_task_history,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students)
from instructor_task.views import get_task_completion_info
from mitxmako.shortcuts import render_to_response
from psychometrics import psychoanalyze from psychometrics import psychoanalyze
from student.models import CourseEnrollment, CourseEnrollmentAllowed from student.models import CourseEnrollment, CourseEnrollmentAllowed
from xmodule.modulestore.django import modulestore
import xmodule.graders as xmgraders
import track.views import track.views
from .offline_gradecalc import student_grades, offline_grades_available
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
...@@ -68,6 +74,7 @@ def instructor_dashboard(request, course_id): ...@@ -68,6 +74,7 @@ def instructor_dashboard(request, course_id):
msg = '' msg = ''
problems = [] problems = []
plots = [] plots = []
datatable = {}
# the instructor dashboard page is modal: grades, psychometrics, admin # the instructor dashboard page is modal: grades, psychometrics, admin
# keep that state in request.session (defaults to grades mode) # keep that state in request.session (defaults to grades mode)
...@@ -78,6 +85,7 @@ def instructor_dashboard(request, course_id): ...@@ -78,6 +85,7 @@ def instructor_dashboard(request, course_id):
idash_mode = request.session.get('idash_mode', 'Grades') idash_mode = request.session.get('idash_mode', 'Grades')
# assemble some course statistics for output to instructor # assemble some course statistics for output to instructor
def get_course_stats_table():
datatable = {'header': ['Statistic', 'Value'], datatable = {'header': ['Statistic', 'Value'],
'title': 'Course Statistics At A Glance', 'title': 'Course Statistics At A Glance',
} }
...@@ -96,8 +104,10 @@ def instructor_dashboard(request, course_id): ...@@ -96,8 +104,10 @@ def instructor_dashboard(request, course_id):
data.append(["{}.{}".format(namespace, field.name), json.dumps(field.read_json(course))]) data.append(["{}.{}".format(namespace, field.name), json.dumps(field.read_json(course))])
datatable['data'] = data datatable['data'] = data
return datatable
def return_csv(fn, datatable, fp=None): def return_csv(fn, datatable, fp=None):
"""Outputs a CSV file from the contents of a datatable."""
if fp is None: if fp is None:
response = HttpResponse(mimetype='text/csv') response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename={0}'.format(fn) response['Content-Disposition'] = 'attachment; filename={0}'.format(fn)
...@@ -111,12 +121,15 @@ def instructor_dashboard(request, course_id): ...@@ -111,12 +121,15 @@ def instructor_dashboard(request, course_id):
return response return response
def get_staff_group(course): def get_staff_group(course):
"""Get or create the staff access group"""
return get_group(course, 'staff') return get_group(course, 'staff')
def get_instructor_group(course): def get_instructor_group(course):
"""Get or create the instructor access group"""
return get_group(course, 'instructor') return get_group(course, 'instructor')
def get_group(course, groupname): def get_group(course, groupname):
"""Get or create an access group"""
grpname = get_access_group_name(course, groupname) grpname = get_access_group_name(course, groupname)
try: try:
group = Group.objects.get(name=grpname) group = Group.objects.get(name=grpname)
...@@ -136,6 +149,39 @@ def instructor_dashboard(request, course_id): ...@@ -136,6 +149,39 @@ def instructor_dashboard(request, course_id):
(group, _) = Group.objects.get_or_create(name=name) (group, _) = Group.objects.get_or_create(name=name)
return group return group
def get_module_url(urlname):
"""
Construct full URL for a module from its urlname.
Form is either urlname or modulename/urlname. If no modulename
is provided, "problem" is assumed.
"""
# tolerate an XML suffix in the urlname
if urlname[-4:] == ".xml":
urlname = urlname[:-4]
# implement default
if '/' not in urlname:
urlname = "problem/" + urlname
# complete the url using information about the current course:
(org, course_name, _) = course_id.split("/")
return "i4x://" + org + "/" + course_name + "/" + urlname
def get_student_from_identifier(unique_student_identifier):
"""Gets a student object using either an email address or username"""
msg = ""
try:
if "@" in unique_student_identifier:
student = User.objects.get(email=unique_student_identifier)
else:
student = User.objects.get(username=unique_student_identifier)
msg += "Found a single student. "
except User.DoesNotExist:
student = None
msg += "<font color='red'>Couldn't find student with that email or username. </font>"
return msg, student
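A minimal sketch of how the returned (msg, student) pair is meant to be consumed by the action handlers below; the email address is a made-up example:

    message, student = get_student_from_identifier("jane@example.com")
    msg += message
    if student is not None:
        pass  # proceed with the per-student action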
# process actions from form POST # process actions from form POST
action = request.POST.get('action', '') action = request.POST.get('action', '')
use_offline = request.POST.get('use_offline_grades', False) use_offline = request.POST.get('use_offline_grades', False)
...@@ -205,88 +251,138 @@ def instructor_dashboard(request, course_id): ...@@ -205,88 +251,138 @@ def instructor_dashboard(request, course_id):
track.views.server_track(request, action, {}, page='idashboard') track.views.server_track(request, action, {}, page='idashboard')
msg += dump_grading_context(course) msg += dump_grading_context(course)
elif "Reset student's attempts" in action or "Delete student state for problem" in action: elif "Rescore ALL students' problem submissions" in action:
problem_urlname = request.POST.get('problem_for_all_students', '')
problem_url = get_module_url(problem_urlname)
try:
instructor_task = submit_rescore_problem_for_all_students(request, course_id, problem_url)
if instructor_task is None:
msg += '<font color="red">Failed to create a background task for rescoring "{0}".</font>'.format(problem_url)
else:
track_msg = 'rescore problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except ItemNotFoundError as e:
msg += '<font color="red">Failed to create a background task for rescoring "{0}": problem not found.</font>'.format(problem_url)
except Exception as e:
log.error("Encountered exception from rescore: {0}".format(e))
msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(problem_url, e.message)
elif "Reset ALL students' attempts" in action:
problem_urlname = request.POST.get('problem_for_all_students', '')
problem_url = get_module_url(problem_urlname)
try:
instructor_task = submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
if instructor_task is None:
msg += '<font color="red">Failed to create a background task for resetting "{0}".</font>'.format(problem_url)
else:
track_msg = 'reset problem {problem} for all students in {course}'.format(problem=problem_url, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except ItemNotFoundError as e:
log.error('Failure to reset: unknown problem "{0}"'.format(e))
msg += '<font color="red">Failed to create a background task for resetting "{0}": problem not found.</font>'.format(problem_url)
except Exception as e:
log.error("Encountered exception from reset: {0}".format(e))
msg += '<font color="red">Failed to create a background task for resetting "{0}": {1}.</font>'.format(problem_url, e.message)
elif "Show Background Task History for Student" in action:
# keep this case before the non-student "Show Background Task History" case: the "in" test matches that shorter string too, so this longer action would otherwise never be reached
unique_student_identifier = request.POST.get('unique_student_identifier', '')
message, student = get_student_from_identifier(unique_student_identifier)
if student is None:
msg += message
else:
problem_urlname = request.POST.get('problem_for_student', '')
problem_url = get_module_url(problem_urlname)
message, datatable = get_background_task_table(course_id, problem_url, student)
msg += message
elif "Show Background Task History" in action:
problem_urlname = request.POST.get('problem_for_all_students', '')
problem_url = get_module_url(problem_urlname)
message, datatable = get_background_task_table(course_id, problem_url)
msg += message
elif ("Reset student's attempts" in action or
"Delete student state for module" in action or
"Rescore student's problem submission" in action):
# get the form data # get the form data
unique_student_identifier = request.POST.get('unique_student_identifier', '') unique_student_identifier = request.POST.get('unique_student_identifier', '')
problem_to_reset = request.POST.get('problem_to_reset', '') problem_urlname = request.POST.get('problem_for_student', '')
module_state_key = get_module_url(problem_urlname)
if problem_to_reset[-4:] == ".xml":
problem_to_reset = problem_to_reset[:-4]
# try to uniquely id student by email address or username # try to uniquely id student by email address or username
try: message, student = get_student_from_identifier(unique_student_identifier)
if "@" in unique_student_identifier: msg += message
student_to_reset = User.objects.get(email=unique_student_identifier) student_module = None
else: if student is not None:
student_to_reset = User.objects.get(username=unique_student_identifier)
msg += "Found a single student to reset. "
except:
student_to_reset = None
msg += "<font color='red'>Couldn't find student with that email or username. </font>"
if student_to_reset is not None:
# find the module in question # find the module in question
if '/' not in problem_to_reset: # allow state of modules other than problem to be reset
problem_to_reset = "problem/" + problem_to_reset # but problem is the default
try: try:
(org, course_name, _) = course_id.split("/") student_module = StudentModule.objects.get(student_id=student.id,
module_state_key = "i4x://" + org + "/" + course_name + "/" + problem_to_reset
module_to_reset = StudentModule.objects.get(student_id=student_to_reset.id,
course_id=course_id, course_id=course_id,
module_state_key=module_state_key) module_state_key=module_state_key)
msg += "Found module to reset. " msg += "Found module. "
except Exception: except StudentModule.DoesNotExist:
msg += "<font color='red'>Couldn't find module with that urlname. </font>" msg += "<font color='red'>Couldn't find module with that urlname. </font>"
if "Delete student state for problem" in action: if student_module is not None:
if "Delete student state for module" in action:
# delete the state # delete the state
try: try:
module_to_reset.delete() student_module.delete()
msg += "<font color='red'>Deleted student module state for %s!</font>" % module_state_key msg += "<font color='red'>Deleted student module state for %s!</font>" % module_state_key
track_format = 'delete student module state for problem {problem} for student {student} in {course}'
track_msg = track_format.format(problem=module_state_key, student=unique_student_identifier, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except: except:
msg += "Failed to delete module state for %s/%s" % (unique_student_identifier, problem_to_reset) msg += "Failed to delete module state for %s/%s" % (unique_student_identifier, problem_urlname)
else: elif "Reset student's attempts" in action:
# modify the problem's state # modify the problem's state
try: try:
# load the state json # load the state json
problem_state = json.loads(module_to_reset.state) problem_state = json.loads(student_module.state)
old_number_of_attempts = problem_state["attempts"] old_number_of_attempts = problem_state["attempts"]
problem_state["attempts"] = 0 problem_state["attempts"] = 0
# save # save
module_to_reset.state = json.dumps(problem_state) student_module.state = json.dumps(problem_state)
module_to_reset.save() student_module.save()
track.views.server_track(request, track_format = '{instructor} reset attempts from {old_attempts} to 0 for {student} on problem {problem} in {course}'
'{instructor} reset attempts from {old_attempts} to 0 for {student} on problem {problem} in {course}'.format( track_msg = track_format.format(old_attempts=old_number_of_attempts,
old_attempts=old_number_of_attempts, student=student,
student=student_to_reset, problem=student_module.module_state_key,
problem=module_to_reset.module_state_key,
instructor=request.user, instructor=request.user,
course=course_id), course=course_id)
{}, track.views.server_track(request, track_msg, {}, page='idashboard')
page='idashboard')
msg += "<font color='green'>Module state successfully reset!</font>" msg += "<font color='green'>Module state successfully reset!</font>"
except: except:
msg += "<font color='red'>Couldn't reset module state. </font>" msg += "<font color='red'>Couldn't reset module state. </font>"
else:
# "Rescore student's problem submission" case
try:
instructor_task = submit_rescore_problem_for_student(request, course_id, module_state_key, student)
if instructor_task is None:
msg += '<font color="red">Failed to create a background task for rescoring "{0}" for student {1}.</font>'.format(module_state_key, unique_student_identifier)
else:
track_msg = 'rescore problem {problem} for student {student} in {course}'.format(problem=module_state_key, student=unique_student_identifier, course=course_id)
track.views.server_track(request, track_msg, {}, page='idashboard')
except Exception as e:
log.exception("Encountered exception from rescore: {0}")
msg += '<font color="red">Failed to create a background task for rescoring "{0}": {1}.</font>'.format(module_state_key, e.message)
elif "Get link to student's progress page" in action: elif "Get link to student's progress page" in action:
unique_student_identifier = request.POST.get('unique_student_identifier', '') unique_student_identifier = request.POST.get('unique_student_identifier', '')
try: # try to uniquely id student by email address or username
if "@" in unique_student_identifier: message, student = get_student_from_identifier(unique_student_identifier)
student_to_reset = User.objects.get(email=unique_student_identifier) msg += message
else: if student is not None:
student_to_reset = User.objects.get(username=unique_student_identifier) progress_url = reverse('student_progress', kwargs={'course_id': course_id, 'student_id': student.id})
progress_url = reverse('student_progress', kwargs={'course_id': course_id, 'student_id': student_to_reset.id})
track.views.server_track(request, track.views.server_track(request,
'{instructor} requested progress page for {student} in {course}'.format( '{instructor} requested progress page for {student} in {course}'.format(
student=student_to_reset, student=student,
instructor=request.user, instructor=request.user,
course=course_id), course=course_id),
{}, {},
page='idashboard') page='idashboard')
msg += "<a href='{0}' target='_blank'> Progress page for username: {1} with email address: {2}</a>.".format(progress_url, student_to_reset.username, student_to_reset.email) msg += "<a href='{0}' target='_blank'> Progress page for username: {1} with email address: {2}</a>.".format(progress_url, student.username, student.email)
except:
msg += "<font color='red'>Couldn't find student with that username. </font>"
#---------------------------------------- #----------------------------------------
# export grades to remote gradebook # export grades to remote gradebook
...@@ -427,7 +523,7 @@ def instructor_dashboard(request, course_id): ...@@ -427,7 +523,7 @@ def instructor_dashboard(request, course_id):
if problem_to_dump[-4:] == ".xml": if problem_to_dump[-4:] == ".xml":
problem_to_dump = problem_to_dump[:-4] problem_to_dump = problem_to_dump[:-4]
try: try:
(org, course_name, run) = course_id.split("/") (org, course_name, _) = course_id.split("/")
module_state_key = "i4x://" + org + "/" + course_name + "/problem/" + problem_to_dump module_state_key = "i4x://" + org + "/" + course_name + "/problem/" + problem_to_dump
smdat = StudentModule.objects.filter(course_id=course_id, smdat = StudentModule.objects.filter(course_id=course_id,
module_state_key=module_state_key) module_state_key=module_state_key)
...@@ -625,6 +721,16 @@ def instructor_dashboard(request, course_id): ...@@ -625,6 +721,16 @@ def instructor_dashboard(request, course_id):
if use_offline: if use_offline:
msg += "<br/><font color='orange'>Grades from %s</font>" % offline_grades_available(course_id) msg += "<br/><font color='orange'>Grades from %s</font>" % offline_grades_available(course_id)
# generate list of pending background tasks
if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
instructor_tasks = get_running_instructor_tasks(course_id)
else:
instructor_tasks = None
# display course stats only if there is no other table to display:
course_stats = None
if not datatable:
course_stats = get_course_stats_table()
#---------------------------------------- #----------------------------------------
# context for rendering # context for rendering
...@@ -634,12 +740,13 @@ def instructor_dashboard(request, course_id): ...@@ -634,12 +740,13 @@ def instructor_dashboard(request, course_id):
'instructor_access': instructor_access, 'instructor_access': instructor_access,
'forum_admin_access': forum_admin_access, 'forum_admin_access': forum_admin_access,
'datatable': datatable, 'datatable': datatable,
'course_stats': course_stats,
'msg': msg, 'msg': msg,
'modeflag': {idash_mode: 'selectedmode'}, 'modeflag': {idash_mode: 'selectedmode'},
'problems': problems, # psychometrics 'problems': problems, # psychometrics
'plots': plots, # psychometrics 'plots': plots, # psychometrics
'course_errors': modulestore().get_item_errors(course.location), 'course_errors': modulestore().get_item_errors(course.location),
'instructor_tasks': instructor_tasks,
'djangopid': os.getpid(), 'djangopid': os.getpid(),
'mitx_version': getattr(settings, 'MITX_VERSION_STRING', ''), 'mitx_version': getattr(settings, 'MITX_VERSION_STRING', ''),
'offline_grade_log': offline_grades_available(course_id), 'offline_grade_log': offline_grades_available(course_id),
...@@ -1030,7 +1137,7 @@ def _do_unenroll_students(course_id, students): ...@@ -1030,7 +1137,7 @@ def _do_unenroll_students(course_id, students):
"""Do the actual work of un-enrolling multiple students, presented as a string """Do the actual work of un-enrolling multiple students, presented as a string
of emails separated by commas or returns""" of emails separated by commas or returns"""
old_students, old_students_lc = get_and_clean_student_list(students) old_students, _ = get_and_clean_student_list(students)
status = dict([x, 'unprocessed'] for x in old_students) status = dict([x, 'unprocessed'] for x in old_students)
for student in old_students: for student in old_students:
...@@ -1054,7 +1161,7 @@ def _do_unenroll_students(course_id, students): ...@@ -1054,7 +1161,7 @@ def _do_unenroll_students(course_id, students):
try: try:
ce[0].delete() ce[0].delete()
status[student] = "un-enrolled" status[student] = "un-enrolled"
except Exception as err: except Exception:
if not isok: if not isok:
status[student] = "Error! Failed to un-enroll" status[student] = "Error! Failed to un-enroll"
...@@ -1113,11 +1220,11 @@ def get_answers_distribution(request, course_id): ...@@ -1113,11 +1220,11 @@ def get_answers_distribution(request, course_id):
def compute_course_stats(course): def compute_course_stats(course):
''' """
Compute course statistics, including number of problems, videos, html. Compute course statistics, including number of problems, videos, html.
course is a CourseDescriptor from the xmodule system. course is a CourseDescriptor from the xmodule system.
''' """
# walk the course by using get_children() until we come to the leaves; count the # walk the course by using get_children() until we come to the leaves; count the
# number of different leaf types # number of different leaf types
...@@ -1137,10 +1244,10 @@ def compute_course_stats(course): ...@@ -1137,10 +1244,10 @@ def compute_course_stats(course):
def dump_grading_context(course): def dump_grading_context(course):
''' """
Dump information about course grading context (eg which problems are graded in what assignments) Dump information about course grading context (eg which problems are graded in what assignments)
Very useful for debugging grading_policy.json and policy.json Very useful for debugging grading_policy.json and policy.json
''' """
msg = "-----------------------------------------------------------------------------\n" msg = "-----------------------------------------------------------------------------\n"
msg += "Course grader:\n" msg += "Course grader:\n"
...@@ -1164,10 +1271,10 @@ def dump_grading_context(course): ...@@ -1164,10 +1271,10 @@ def dump_grading_context(course):
msg += "--> Section %s:\n" % (gs) msg += "--> Section %s:\n" % (gs)
for sec in gsvals: for sec in gsvals:
s = sec['section_descriptor'] s = sec['section_descriptor']
format = getattr(s.lms, 'format', None) grade_format = getattr(s.lms, 'format', None)
aname = '' aname = ''
if format in graders: if grade_format in graders:
g = graders[format] g = graders[grade_format]
aname = '%s %02d' % (g.short_label, g.index) aname = '%s %02d' % (g.short_label, g.index)
g.index += 1 g.index += 1
elif s.display_name in graders: elif s.display_name in graders:
...@@ -1176,8 +1283,73 @@ def dump_grading_context(course): ...@@ -1176,8 +1283,73 @@ def dump_grading_context(course):
notes = '' notes = ''
if getattr(s, 'score_by_attempt', False): if getattr(s, 'score_by_attempt', False):
notes = ', score by attempt!' notes = ', score by attempt!'
msg += " %s (format=%s, Assignment=%s%s)\n" % (s.display_name, format, aname, notes) msg += " %s (grade_format=%s, Assignment=%s%s)\n" % (s.display_name, grade_format, aname, notes)
msg += "all descriptors:\n" msg += "all descriptors:\n"
msg += "length=%d\n" % len(gc['all_descriptors']) msg += "length=%d\n" % len(gc['all_descriptors'])
msg = '<pre>%s</pre>' % msg.replace('<', '&lt;') msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')
return msg return msg
def get_background_task_table(course_id, problem_url, student=None):
"""
Construct the "datatable" structure to represent background task history.
Filters the background task history to the specified course and problem.
If a student is provided, filters to only those tasks for which that student
was specified.
Returns a tuple of (msg, datatable), where msg is a possible error message,
and datatable is the table structure to be used for display.
"""
history_entries = get_instructor_task_history(course_id, problem_url, student)
datatable = {}
msg = ""
# first check to see if there is any history at all
# (note that we don't have to check that the arguments are valid; it
# just won't find any entries.)
if history_entries.count() == 0:
if student is not None:
template = '<font color="red">Failed to find any background tasks for course "{course}", module "{problem}" and student "{student}".</font>'
msg += template.format(course=course_id, problem=problem_url, student=student.username)
else:
msg += '<font color="red">Failed to find any background tasks for course "{course}" and module "{problem}".</font>'.format(course=course_id, problem=problem_url)
else:
datatable['header'] = ["Task Type",
"Task Id",
"Requester",
"Submitted",
"Duration (sec)",
"Task State",
"Task Status",
"Task Output"]
datatable['data'] = []
for instructor_task in history_entries:
# get duration info, if known:
duration_sec = 'unknown'
if hasattr(instructor_task, 'task_output') and instructor_task.task_output is not None:
task_output = json.loads(instructor_task.task_output)
if 'duration_ms' in task_output:
duration_sec = int(task_output['duration_ms'] / 1000.0)
# get progress status message:
success, task_message = get_task_completion_info(instructor_task)
status = "Complete" if success else "Incomplete"
# generate row for this task:
row = [str(instructor_task.task_type),
str(instructor_task.task_id),
str(instructor_task.requester),
instructor_task.created.isoformat(' '),
duration_sec,
str(instructor_task.task_state),
status,
task_message]
datatable['data'].append(row)
if student is not None:
datatable['title'] = "{course_id} > {location} > {student}".format(course_id=course_id,
location=problem_url,
student=student.username)
else:
datatable['title'] = "{course_id} > {location}".format(course_id=course_id, location=problem_url)
return msg, datatable
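For orientation, the datatable returned when history is found has roughly the shape sketched below; every value shown is invented for illustration:

    # {
    #     'header': ['Task Type', 'Task Id', 'Requester', 'Submitted',
    #                'Duration (sec)', 'Task State', 'Task Status', 'Task Output'],
    #     'data': [['rescore_problem', '7c6d0b...', 'staff', '2013-06-20 14:02:11', 4,
    #               'SUCCESS', 'Complete', 'rescored 120 of 120 submissions']],
    #     'title': 'MITx/6.002x/2013_Spring > i4x://MITx/6.002x/problem/L1Node1 > jane',
    # }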
"""
API for submitting background tasks by an instructor for a course.
Also includes methods for getting information about tasks that have
already been submitted, filtered either by running state or input
arguments.
"""
from celery.states import READY_STATES
from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask
from instructor_task.tasks import (rescore_problem,
reset_problem_attempts,
delete_problem_state)
from instructor_task.api_helper import (check_arguments_for_rescoring,
encode_problem_and_student_input,
submit_task)
def get_running_instructor_tasks(course_id):
"""
Returns a query of InstructorTask objects of running tasks for a given course.
Used to generate a list of tasks to display on the instructor dashboard.
"""
instructor_tasks = InstructorTask.objects.filter(course_id=course_id)
# exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
for state in READY_STATES:
instructor_tasks = instructor_tasks.exclude(task_state=state)
return instructor_tasks.order_by('-id')
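The exclude loop above is equivalent to a single `task_state__in` exclusion; a sketch of that alternative form (not the code in this commit):

    def get_running_instructor_tasks_alt(course_id):
        """Same query expressed with one exclude() call (illustrative only)."""
        return (InstructorTask.objects.filter(course_id=course_id)
                .exclude(task_state__in=READY_STATES)
                .order_by('-id'))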
def get_instructor_task_history(course_id, problem_url, student=None):
"""
Returns a query of InstructorTask objects of historical tasks for a given course,
that match a particular problem and optionally a student.
"""
_, task_key = encode_problem_and_student_input(problem_url, student)
instructor_tasks = InstructorTask.objects.filter(course_id=course_id, task_key=task_key)
return instructor_tasks.order_by('-id')
def submit_rescore_problem_for_student(request, course_id, problem_url, student):
"""
Request a problem to be rescored as a background task.
The problem will be rescored for the specified student only. Parameters are the `course_id`,
the `problem_url`, and the `student` as a User object.
The url must specify the location of the problem, using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being rescored for this student, or NotImplementedError if
the problem doesn't support rescoring.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: let exceptions return up to the caller.
check_arguments_for_rescoring(course_id, problem_url)
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(problem_url, student)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
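An illustrative call site, assuming it runs inside a view (like instructor_dashboard above) where `request` and `course_id` are in scope; the username and problem url are made up:

    from django.contrib.auth.models import User

    student = User.objects.get(username='jane')
    problem_url = 'i4x://MITx/6.002x/problem/L1Node1'
    try:
        task = submit_rescore_problem_for_student(request, course_id, problem_url, student)
    except NotImplementedError:
        pass  # the module does not support rescoring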
def submit_rescore_problem_for_all_students(request, course_id, problem_url):
"""
Request a problem to be rescored as a background task.
The problem will be rescored for all students who have accessed the
particular problem in a course and have provided and checked an answer.
Parameters are the `course_id` and the `problem_url`.
The url must specify the location of the problem, using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being rescored, or NotImplementedError if the problem doesn't
support rescoring.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: let exceptions return up to the caller.
check_arguments_for_rescoring(course_id, problem_url)
# check to see if task is already running, and reserve it otherwise
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(problem_url)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
def submit_reset_problem_attempts_for_all_students(request, course_id, problem_url):
"""
Request to have attempts reset for a problem as a background task.
The problem's attempts will be reset for all students who have accessed the
particular problem in a course. Parameters are the `course_id` and
the `problem_url`. The url must specify the location of the problem,
using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being reset.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: make sure that the problem_url is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist,
# an exception will be raised. Let it pass up to the caller.
modulestore().get_instance(course_id, problem_url)
task_type = 'reset_problem_attempts'
task_class = reset_problem_attempts
task_input, task_key = encode_problem_and_student_input(problem_url)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
def submit_delete_problem_state_for_all_students(request, course_id, problem_url):
"""
Request to have state deleted for a problem as a background task.
The problem's state will be deleted for all students who have accessed the
particular problem in a course. Parameters are the `course_id` and
the `problem_url`. The url must specify the location of the problem,
using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the particular problem's state is already being deleted.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: make sure that the problem_url is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist,
# an exception will be raised. Let it pass up to the caller.
modulestore().get_instance(course_id, problem_url)
task_type = 'delete_problem_state'
task_class = delete_problem_state
task_input, task_key = encode_problem_and_student_input(problem_url)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
import hashlib
import json
import logging
from django.db import transaction
from celery.result import AsyncResult
from celery.states import READY_STATES, SUCCESS, FAILURE, REVOKED
from courseware.module_render import get_xqueue_callback_url_prefix
from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask, PROGRESS
log = logging.getLogger(__name__)
class AlreadyRunningError(Exception):
"""Exception indicating that a background task is already running"""
pass
def _task_is_running(course_id, task_type, task_key):
"""Checks if a particular task is already running"""
    running_tasks = InstructorTask.objects.filter(course_id=course_id, task_type=task_type, task_key=task_key)
    # exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
    for state in READY_STATES:
        running_tasks = running_tasks.exclude(task_state=state)
    return len(running_tasks) > 0
@transaction.autocommit
def _reserve_task(course_id, task_type, task_key, task_input, requester):
"""
Creates a database entry to indicate that a task is in progress.
Throws AlreadyRunningError if the task is already in progress.
Includes the creation of an arbitrary value for task_id, to be
submitted with the task call to celery.
Autocommit annotation makes sure the database entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, this autocommit here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
Note that there is a chance of a race condition here, when two users
try to run the same task at almost exactly the same time. One user
could be after the check and before the create when the second user
gets to the check. At that point, both users are able to run their
tasks simultaneously. This is deemed a small enough risk to not
put in further safeguards.
"""
if _task_is_running(course_id, task_type, task_key):
raise AlreadyRunningError("requested task is already running")
# Create log entry now, so that future requests will know it's running.
return InstructorTask.create(course_id, task_type, task_key, task_input, requester)
def _get_xmodule_instance_args(request):
"""
Calculate parameters needed for instantiating xmodule instances.
The `request_info` will be passed to a tracking log function, to provide information
about the source of the task request. The `xqueue_callback_url_prefix` is used to
permit old-style xqueue callbacks directly to the appropriate module in the LMS.
"""
request_info = {'username': request.user.username,
'ip': request.META['REMOTE_ADDR'],
'agent': request.META.get('HTTP_USER_AGENT', ''),
'host': request.META['SERVER_NAME'],
}
xmodule_instance_args = {'xqueue_callback_url_prefix': get_xqueue_callback_url_prefix(request),
'request_info': request_info,
}
return xmodule_instance_args
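An example of the structure this returns; all field values are illustrative:

    # {
    #     'xqueue_callback_url_prefix': 'https://lms.example.com',
    #     'request_info': {
    #         'username': 'staff',
    #         'ip': '10.0.0.1',
    #         'agent': 'Mozilla/5.0 ...',
    #         'host': 'lms.example.com',
    #     },
    # }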
def _update_instructor_task(instructor_task, task_result):
"""
Updates and possibly saves a InstructorTask entry based on a task Result.
Used when updated status is requested.
The `instructor_task` that is passed in is updated in-place, but
is usually not saved. In general, tasks that have finished (either with
success or failure) should have their entries updated by the task itself,
so are not updated here. Tasks that are still running are not updated
while they run. So the one exception to the no-save rule is tasks that
are in a "revoked" state. This may mean that the task never had the
opportunity to update the InstructorTask entry.
Calculates json to store in "task_output" field of the `instructor_task`,
as well as updating the task_state.
For a successful task, the json contains the output of the task result.
For a failed task, the json contains "exception", "message", and "traceback"
keys. A revoked task just has a "message" stating it was revoked.
"""
# Pull values out of the result object as close to each other as possible.
# If we wait and check the values later, the values for the state and result
# are more likely to have changed. Pull the state out first, and
# then code assuming that the result may not exactly match the state.
task_id = task_result.task_id
result_state = task_result.state
returned_result = task_result.result
result_traceback = task_result.traceback
# Assume we don't always update the InstructorTask entry if we don't have to:
entry_needs_saving = False
task_output = None
if result_state in [PROGRESS, SUCCESS]:
# construct a status message directly from the task result's result:
# it needs to go back with the entry passed in.
log.info("background task (%s), state %s: result: %s", task_id, result_state, returned_result)
task_output = InstructorTask.create_output_for_success(returned_result)
elif result_state == FAILURE:
# on failure, the result's result contains the exception that caused the failure
exception = returned_result
traceback = result_traceback if result_traceback is not None else ''
log.warning("background task (%s) failed: %s %s", task_id, returned_result, traceback)
task_output = InstructorTask.create_output_for_failure(exception, result_traceback)
elif result_state == REVOKED:
# on revocation, the result's result doesn't contain anything
# but we cannot rely on the worker thread to set this status,
# so we set it here.
entry_needs_saving = True
log.warning("background task (%s) revoked.", task_id)
task_output = InstructorTask.create_output_for_revoked()
# save progress and state into the entry, even if it's not being saved:
# when celery is run in "ALWAYS_EAGER" mode, progress needs to go back
# with the entry passed in.
instructor_task.task_state = result_state
if task_output is not None:
instructor_task.task_output = task_output
if entry_needs_saving:
instructor_task.save()
def get_updated_instructor_task(task_id):
"""
Returns InstructorTask object corresponding to a given `task_id`.
If the InstructorTask thinks the task is still running, then
the task's result is checked to return an updated state and output.
"""
# First check if the task_id is known
try:
instructor_task = InstructorTask.objects.get(task_id=task_id)
except InstructorTask.DoesNotExist:
log.warning("query for InstructorTask status failed: task_id=(%s) not found", task_id)
return None
# if the task is not already known to be done, then we need to query
# the underlying task's result object:
if instructor_task.task_state not in READY_STATES:
result = AsyncResult(task_id)
_update_instructor_task(instructor_task, result)
return instructor_task
def get_status_from_instructor_task(instructor_task):
"""
Get the status for a given InstructorTask entry.
Returns a dict, with the following keys:
'task_id': id assigned by LMS and used by celery.
'task_state': state of task as stored in celery's result store.
'in_progress': boolean indicating if task is still running.
'task_progress': dict containing progress information. This includes:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
'duration_ms': how long the task has (or had) been running.
'exception': name of exception class raised in failed tasks.
'message': returned for failed and revoked tasks.
'traceback': optional, returned if task failed and produced a traceback.
"""
status = {}
if instructor_task is not None:
# status basic information matching what's stored in InstructorTask:
status['task_id'] = instructor_task.task_id
status['task_state'] = instructor_task.task_state
status['in_progress'] = instructor_task.task_state not in READY_STATES
if instructor_task.task_output is not None:
status['task_progress'] = json.loads(instructor_task.task_output)
return status
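An example of the status dict for a rescore task still in progress; the numbers are invented:

    # {
    #     'task_id': '7c6d0b...',
    #     'task_state': 'PROGRESS',
    #     'in_progress': True,
    #     'task_progress': {'action_name': 'rescored', 'attempted': 40,
    #                       'updated': 38, 'total': 120, 'duration_ms': 3500},
    # }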
def check_arguments_for_rescoring(course_id, problem_url):
"""
Do simple checks on the descriptor to confirm that it supports rescoring.
Confirms first that the problem_url is defined (since that's currently typed
in). An ItemNotFoundException is raised if the corresponding module
descriptor doesn't exist. NotImplementedError is raised if the
corresponding module doesn't support rescoring calls.
"""
descriptor = modulestore().get_instance(course_id, problem_url)
if not hasattr(descriptor, 'module_class') or not hasattr(descriptor.module_class, 'rescore_problem'):
msg = "Specified module does not support rescoring."
raise NotImplementedError(msg)
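The capability test reduces to a hasattr check on the descriptor's module class; a sketch of that condition (the assumption here is that a capa problem's module_class is one that exposes rescore_problem):

    # supports_rescore = (hasattr(descriptor, 'module_class') and
    #                     hasattr(descriptor.module_class, 'rescore_problem'))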
def encode_problem_and_student_input(problem_url, student=None):
"""
Encode problem_url and optional student into task_key and task_input values.
`problem_url` is the full URL of the problem.
`student` is the User object of the student.
"""
if student is not None:
task_input = {'problem_url': problem_url, 'student': student.username}
task_key_stub = "{student}_{problem}".format(student=student.id, problem=problem_url)
else:
task_input = {'problem_url': problem_url}
task_key_stub = "_{problem}".format(problem=problem_url)
# create the key value by using MD5 hash:
task_key = hashlib.md5(task_key_stub).hexdigest()
return task_input, task_key
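A small sketch of the key derivation for the all-students case (Python 2; the problem url is hypothetical):

    import hashlib

    problem_url = 'i4x://MITx/6.002x/problem/L1Node1'
    task_key_stub = "_{problem}".format(problem=problem_url)   # no student id prefix
    task_key = hashlib.md5(task_key_stub).hexdigest()          # 32-character hex digest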
def submit_task(request, task_type, task_class, course_id, task_input, task_key):
"""
Helper method to submit a task.
Reserves the requested task, based on the `course_id`, `task_type`, and `task_key`,
checking to see if the task is already running. The `task_input` is also passed so that
it can be stored in the resulting InstructorTask entry. Arguments are extracted from
the `request` provided by the originating server request. Then the task is submitted to run
asynchronously, using the specified `task_class` and using the task_id constructed for it.
`AlreadyRunningError` is raised if the task is already running.
The _reserve_task method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check to see if task is already running, and reserve it otherwise:
instructor_task = _reserve_task(course_id, task_type, task_key, task_input, request.user)
# submit task:
task_id = instructor_task.task_id
task_args = [instructor_task.id, _get_xmodule_instance_args(request)]
task_class.apply_async(task_args, task_id=task_id)
return instructor_task
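For a rescore-all request, the resulting celery invocation is roughly equivalent to the following (a sketch of what apply_async receives):

    #   rescore_problem.apply_async(
    #       [instructor_task.id, _get_xmodule_instance_args(request)],
    #       task_id=instructor_task.task_id)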
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'InstructorTask'
db.create_table('instructor_task_instructortask', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_input', self.gf('django.db.models.fields.CharField')(max_length=255)),
('task_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_state', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, db_index=True)),
('task_output', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('instructor_task', ['InstructorTask'])
def backwards(self, orm):
# Deleting model 'InstructorTask'
db.delete_table('instructor_task_instructortask')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'instructor_task.instructortask': {
'Meta': {'object_name': 'InstructorTask'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['instructor_task']
\ No newline at end of file
"""
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration instructor_task --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/instructor_task/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
from uuid import uuid4
import json
from django.contrib.auth.models import User
from django.db import models, transaction
# define custom states used by InstructorTask
QUEUING = 'QUEUING'
PROGRESS = 'PROGRESS'
class InstructorTask(models.Model):
"""
Stores information about background tasks that have been submitted to
perform work by an instructor (or course staff).
Examples include grading and rescoring.
`task_type` identifies the kind of task being performed, e.g. rescoring.
`course_id` uses the course run's unique id to identify the course.
`task_key` stores the relevant input arguments encoded into a key value, used (together
with task_type and course_id) to check whether the task is already running.
`task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
Examples include url of problem being rescored, id of student if only one student being rescored.
`task_id` stores the id used by celery for the background task.
`task_state` stores the last known state of the celery task
`task_output` stores the output of the celery task.
Format is a JSON-serialized dict. Content varies by task_type and task_state.
`requester` stores the id of the user who submitted the task.
`created` stores the date that the entry was first created.
`updated` stores the date that the entry was last modified.
"""
task_type = models.CharField(max_length=50, db_index=True)
course_id = models.CharField(max_length=255, db_index=True)
task_key = models.CharField(max_length=255, db_index=True)
task_input = models.CharField(max_length=255)
task_id = models.CharField(max_length=255, db_index=True) # max_length from celery_taskmeta
task_state = models.CharField(max_length=50, null=True, db_index=True) # max_length from celery_taskmeta
task_output = models.CharField(max_length=1024, null=True)
requester = models.ForeignKey(User, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True)
def __repr__(self):
return 'InstructorTask<%r>' % ({
'task_type': self.task_type,
'course_id': self.course_id,
'task_input': self.task_input,
'task_id': self.task_id,
'task_state': self.task_state,
'task_output': self.task_output,
},)
def __unicode__(self):
return unicode(repr(self))
@classmethod
def create(cls, course_id, task_type, task_key, task_input, requester):
# create the task_id here, and pass it into celery:
task_id = str(uuid4())
json_task_input = json.dumps(task_input)
# check the length of the encoded task_input, and raise an exception if it's too long:
if len(json_task_input) > 255:
fmt = 'Task input longer than 255: "{input}" for "{task}" of "{course}"'
msg = fmt.format(input=json_task_input, task=task_type, course=course_id)
raise ValueError(msg)
# create the task, then save it:
instructor_task = cls(course_id=course_id,
task_type=task_type,
task_id=task_id,
task_key=task_key,
task_input=json_task_input,
task_state=QUEUING,
requester=requester)
instructor_task.save_now()
return instructor_task
@transaction.autocommit
def save_now(self):
"""Writes InstructorTask immediately, ensuring the transaction is committed."""
self.save()
@staticmethod
def create_output_for_success(returned_result):
"""
Converts successful result to output format.
Raises a ValueError exception if the output is too long.
"""
# In the future, there should be a check here that the resulting JSON
# will fit in the column. In the meantime, just raise an exception.
json_output = json.dumps(returned_result)
if len(json_output) > 1023:
raise ValueError("Length of task output is too long: {0}".format(json_output))
return json_output
@staticmethod
def create_output_for_failure(exception, traceback_string):
"""
Converts failed result information to output format.
Traceback information is truncated or not included if it would result in an output string
that would not fit in the database. If the output is still too long, then the
exception message is also truncated.
Truncation is indicated by adding "..." to the end of the value.
"""
tag = '...'
task_progress = {'exception': type(exception).__name__, 'message': str(exception.message)}
if traceback_string is not None:
# truncate any traceback that goes into the InstructorTask model:
task_progress['traceback'] = traceback_string
json_output = json.dumps(task_progress)
# if the resulting output is too long, then first shorten the
# traceback, and then the message, until it fits.
too_long = len(json_output) - 1023
if too_long > 0:
if traceback_string is not None:
if too_long >= len(traceback_string) - len(tag):
# remove the traceback entry entirely (so no key or value)
del task_progress['traceback']
too_long -= (len(traceback_string) + len('traceback'))
else:
# truncate the traceback:
task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag
too_long = 0
if too_long > 0:
# we need to shorten the message:
task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag
json_output = json.dumps(task_progress)
return json_output
@staticmethod
def create_output_for_revoked():
"""Creates standard message to store in output format for revoked tasks."""
return json.dumps({'message': 'Task revoked before running'})
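# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming a Django context with a saved `requester` User, of how
# calling code is expected to create an InstructorTask entry and later record its
# output. The course id, problem url, and result values below are hypothetical.
def _example_instructor_task_lifecycle(requester):
    """Hypothetical helper showing the expected InstructorTask call pattern."""
    entry = InstructorTask.create(
        course_id='edx/1.23x/test_course',
        task_type='rescore_problem',
        task_key='example-key',
        task_input={'problem_url': 'i4x://edx/1.23x/problem/Example'},
        requester=requester,
    )
    # ... a background task runs and produces a result dict ...
    result = {'attempted': 3, 'updated': 2, 'total': 5,
              'action_name': 'rescored', 'duration_ms': 1200}
    entry.task_output = InstructorTask.create_output_for_success(result)
    entry.task_state = 'SUCCESS'  # same string value that celery.states.SUCCESS holds
    entry.save_now()
    return entry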
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.
At present, these tasks all operate on StudentModule objects in one way or another,
so they share a visitor architecture. Each task defines an "update function" that
takes a module_descriptor, a particular StudentModule object, and xmodule_instance_args.
A task may optionally specify a "filter function" that takes a query for StudentModule
objects, and adds additional filter clauses.
A task also passes through "xmodule_instance_args", which is used to provide
information to the code that instantiates xmodule instances.
The task definition then calls the traversal function, passing in the three arguments
above, along with the id value for an InstructorTask object. The InstructorTask
object's 'task_input' field holds a JSON-encoded dict containing
a problem URL and optionally a student. These are used to set up the initial value
of the query for traversing StudentModule objects.
"""
from celery import task
from instructor_task.tasks_helper import (update_problem_module_state,
rescore_problem_module_state,
reset_attempts_module_state,
delete_problem_module_state)
@task
def rescore_problem(entry_id, xmodule_instance_args):
"""Rescores a problem in a course, for all students or one specific student.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
'student': the identifier (username or email) of a particular user whose
problem submission should be rescored. If not specified, all problem
submissions for the problem will be rescored.
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
action_name = 'rescored'
update_fcn = rescore_problem_module_state
filter_fcn = lambda modules_to_update: modules_to_update.filter(state__contains='"done": true')
return update_problem_module_state(entry_id,
update_fcn, action_name, filter_fcn=filter_fcn,
xmodule_instance_args=xmodule_instance_args)
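# Illustration (hypothetical values, not executed): after JSON decoding, the `task_input`
# stored on the InstructorTask entry for rescore_problem is expected to look roughly like
#     {'problem_url': 'i4x://edx/1.23x/problem/H1P1', 'student': 'student@edx.org'}
# where the 'student' key is omitted when all submissions are to be rescored.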
@task
def reset_problem_attempts(entry_id, xmodule_instance_args):
"""Resets problem attempts to zero for a particular problem for all students in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem whose attempts are to be reset. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
action_name = 'reset'
update_fcn = reset_attempts_module_state
return update_problem_module_state(entry_id,
update_fcn, action_name, filter_fcn=None,
xmodule_instance_args=xmodule_instance_args)
@task
def delete_problem_state(entry_id, xmodule_instance_args):
"""Deletes problem state entirely for all students on a particular problem in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem whose state is to be deleted. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
action_name = 'deleted'
update_fcn = delete_problem_module_state
return update_problem_module_state(entry_id,
update_fcn, action_name, filter_fcn=None,
xmodule_instance_args=xmodule_instance_args)
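# --- Illustrative sketch (not part of the original module) ---
# A hypothetical additional task following the same visitor pattern: `noop_module_state`
# and `example_visit_problem` are made-up names used purely to show the wiring into
# update_problem_module_state; they are not part of the instructor_task app.
@task
def example_visit_problem(entry_id, xmodule_instance_args):
    """Hypothetical task that visits matching StudentModule entries without changing them."""
    def noop_module_state(_module_descriptor, _student_module, _xmodule_instance_args=None):
        # Returning True tells the traversal to count this module as updated.
        return True
    return update_problem_module_state(entry_id,
                                       noop_module_state, 'visited', filter_fcn=None,
                                       xmodule_instance_args=xmodule_instance_args)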
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
from time import time
from sys import exc_info
from traceback import format_exc
from celery import current_task
from celery.utils.log import get_task_logger
from celery.signals import worker_process_init
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.db import transaction
from dogapi import dog_stats_api
from xmodule.modulestore.django import modulestore
import mitxmako.middleware as middleware
from track.views import task_track
from courseware.models import StudentModule
from courseware.model_data import ModelDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_task.models import InstructorTask, PROGRESS
# define different loggers for use within tasks and on client side
TASK_LOG = get_task_logger(__name__)
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
def initialize_mako(sender=None, conf=None, **kwargs):
"""
Get mako templates to work on celery worker server's worker thread.
The initialization of Mako templating is usually done when Django is
initializing middleware packages as part of processing a server request.
When this is run on a celery worker server, no such initialization is
called.
To make sure that we don't initialize this twice (just in case), we look for its
result: the definition of the lookup paths for templates.
"""
if 'main' not in middleware.lookup:
TASK_LOG.info("Initializing Mako middleware explicitly")
middleware.MakoMiddleware()
# Actually make the call to define the hook:
worker_process_init.connect(initialize_mako)
class UpdateProblemModuleStateError(Exception):
"""
Error signaling a fatal condition while updating problem modules.
Used when the current module cannot be processed and no more
modules should be attempted.
"""
pass
def _get_current_task():
"""Stub to make it easier to test without actually running Celery"""
return current_task
def _perform_module_state_update(course_id, module_state_key, student_identifier, update_fcn, action_name, filter_fcn,
xmodule_instance_args):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
It is passed three arguments: the module_descriptor for the module pointed to by the
module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
passed through. If the value returned by the update function evaluates to a boolean True,
the update is successful; False indicates the update on the particular student module failed.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
The return value is a dict containing the task's results, with the following keys:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
result object.
"""
# get start time for task:
start_time = time()
# find the problem descriptor:
module_descriptor = modulestore().get_instance(course_id, module_state_key)
# find the module in question
modules_to_update = StudentModule.objects.filter(course_id=course_id,
module_state_key=module_state_key)
# give the option of updating an individual student. If not specified,
# then update all students who have responded to the problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in student_identifier:
student = User.objects.get(email=student_identifier)
else:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
if filter_fcn is not None:
modules_to_update = filter_fcn(modules_to_update)
# perform the main loop
num_updated = 0
num_attempted = 0
num_total = modules_to_update.count()
def get_task_progress():
"""Return a dict containing info about current task"""
current_time = time()
progress = {'action_name': action_name,
'attempted': num_attempted,
'updated': num_updated,
'total': num_total,
'duration_ms': int((current_time - start_time) * 1000),
}
return progress
task_progress = get_task_progress()
_get_current_task().update_state(state=PROGRESS, meta=task_progress)
for module_to_update in modules_to_update:
num_attempted += 1
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=['action:{name}'.format(name=action_name)]):
if update_fcn(module_descriptor, module_to_update, xmodule_instance_args):
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
num_updated += 1
# update task status:
task_progress = get_task_progress()
_get_current_task().update_state(state=PROGRESS, meta=task_progress)
return task_progress
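# Illustration (hypothetical values): the progress dict returned above, and reported to
# Celery as PROGRESS metadata, has the shape
#     {'action_name': 'rescored', 'attempted': 4, 'updated': 3, 'total': 10, 'duration_ms': 1520}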
def update_problem_module_state(entry_id, update_fcn, action_name, filter_fcn,
xmodule_instance_args):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
The `entry_id` is the primary key for the InstructorTask entry representing the task. This function
updates the entry on success and failure of the _perform_module_state_update function it
wraps. It sets the entry's task_state to the value that Celery will report once the task
returns: FAILURE if an exception is encountered, and SUCCESS if it returns normally.
Other arguments are pass-throughs to _perform_module_state_update, and documented there.
If no exceptions are raised, a dict containing the task's result is returned, with the following keys:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Before returning, this is also JSON-serialized and stored in the task_output column of the InstructorTask entry.
If an exception is raised internally, it is caught and recorded in the InstructorTask entry.
This is also a JSON-serialized dict, stored in the task_output column, containing the following keys:
'exception': type of exception object
'message': error message from exception object
'traceback': traceback information (truncated if necessary)
Once the exception is caught, it is raised again and allowed to pass up to the
task-running level, so that it can also set the failure modes and capture the error trace in the
result object that Celery creates.
"""
# get the InstructorTask to be updated. If this fails, then let the exception return to Celery.
# There's no point in catching it here.
entry = InstructorTask.objects.get(pk=entry_id)
# get inputs to use in this task from the entry:
task_id = entry.task_id
course_id = entry.course_id
task_input = json.loads(entry.task_input)
module_state_key = task_input.get('problem_url')
student_ident = task_input['student'] if 'student' in task_input else None
fmt = 'Starting to update problem modules as task "{task_id}": course "{course_id}" problem "{state_key}": nothing {action} yet'
TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, action=action_name))
# add task_id to xmodule_instance_args, so that it can be output with tracking info:
if xmodule_instance_args is not None:
xmodule_instance_args['task_id'] = task_id
# Now that we have an entry we can try to catch failures:
task_progress = None
try:
# Check that the task_id submitted in the InstructorTask matches the current task
# that is running.
request_task_id = _get_current_task().request.id
if task_id != request_task_id:
fmt = 'Requested task "{task_id}" did not match actual task "{actual_id}"'
message = fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, actual_id=request_task_id)
TASK_LOG.error(message)
raise UpdateProblemModuleStateError(message)
# Now do the work:
with dog_stats_api.timer('instructor_tasks.module.time.overall', tags=['action:{name}'.format(name=action_name)]):
task_progress = _perform_module_state_update(course_id, module_state_key, student_ident, update_fcn,
action_name, filter_fcn, xmodule_instance_args)
# If we get here, we assume we've succeeded, so update the InstructorTask entry in anticipation.
# But we do this within the try, in case creating the task_output causes an exception to be
# raised.
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = SUCCESS
entry.save_now()
except Exception:
# try to write out the failure to the entry before failing
_, exception, traceback = exc_info()
traceback_string = format_exc(traceback) if traceback is not None else ''
TASK_LOG.warning("background task (%s) failed: %s %s", task_id, exception, traceback_string)
entry.task_output = InstructorTask.create_output_for_failure(exception, traceback_string)
entry.task_state = FAILURE
entry.save_now()
raise
# log and exit, returning task_progress info as task result:
fmt = 'Finishing task "{task_id}": course "{course_id}" problem "{state_key}": final: {progress}'
TASK_LOG.info(fmt.format(task_id=task_id, course_id=course_id, state_key=module_state_key, progress=task_progress))
return task_progress
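# Illustration (hypothetical values): on success, entry.task_output holds JSON such as
#     '{"action_name": "rescored", "attempted": 4, "updated": 3, "total": 10, "duration_ms": 1520}'
# while on failure it holds JSON such as
#     '{"exception": "ZeroDivisionError", "message": "bad things happened", "traceback": "..."}'
# (the traceback value is truncated by InstructorTask.create_output_for_failure when necessary).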
def _get_task_id_from_xmodule_args(xmodule_instance_args):
"""Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
grade_bucket_type=None):
"""
Fetches an XModule instance for a given `course_id`, `student` object, and `module_descriptor`.
`xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
the need for a Request object when instantiating an xmodule instance.
"""
# reconstitute the problem's corresponding XModule:
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
def make_track_function():
'''
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
'''
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')
xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
if xmodule_instance_args is not None else ''
return get_module_for_descriptor_internal(student, module_descriptor, model_data_cache, course_id,
make_track_function(), xqueue_callback_url_prefix,
grade_bucket_type=grade_bucket_type)
@transaction.autocommit
def rescore_problem_module_state(module_descriptor, student_module, xmodule_instance_args=None):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs rescoring on the student's problem submission.
Raises an exception if the rescoring failure is fatal, so that any surrounding loop should be aborted.
In particular, raises UpdateProblemModuleStateError if the module fails to instantiate,
or if the module doesn't support rescoring.
Returns True if the problem was successfully rescored for the given student, and False
if the problem encountered some kind of error during rescoring.
'''
# unpack the StudentModule:
course_id = student_module.course_id
student = student_module.student
module_state_key = student_module.module_state_key
instance = _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args, grade_bucket_type='rescore')
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = "No module {loc} for student {student}--access denied?".format(loc=module_state_key,
student=student)
TASK_LOG.debug(msg)
raise UpdateProblemModuleStateError(msg)
if not hasattr(instance, 'rescore_problem'):
# This should also not happen, since it should already be checked in the caller,
# but check here to be sure.
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
result = instance.rescore_problem()
if 'success' not in result:
# don't consider these fatal, but false means that the individual call didn't complete:
TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
"unexpected response {msg}".format(msg=result, course=course_id, loc=module_state_key, student=student))
return False
elif result['success'] not in ['correct', 'incorrect']:
TASK_LOG.warning(u"error processing rescore call for course {course}, problem {loc} and student {student}: "
"{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
return False
else:
TASK_LOG.debug(u"successfully processed rescore call for course {course}, problem {loc} and student {student}: "
"{msg}".format(msg=result['success'], course=course_id, loc=module_state_key, student=student))
return True
@transaction.autocommit
def reset_attempts_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
"""
Resets problem attempts to zero for specified `student_module`.
Always returns True, indicating success, if it doesn't raise an exception due to a database error.
"""
problem_state = json.loads(student_module.state) if student_module.state else {}
if 'attempts' in problem_state:
old_number_of_attempts = problem_state["attempts"]
if old_number_of_attempts > 0:
problem_state["attempts"] = 0
# convert back to json and save
student_module.state = json.dumps(problem_state)
student_module.save()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
task_track(request_info, task_info, 'problem_reset_attempts', event_info, page='x_module_task')
# consider the reset to be successful, even if no update was performed. (It's just "optimized".)
return True
@transaction.autocommit
def delete_problem_module_state(_module_descriptor, student_module, xmodule_instance_args=None):
"""
Delete the StudentModule entry.
Always returns True, indicating success, if it doesn't raise an exception due to a database error.
"""
student_module.delete()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student_module.student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
task_track(request_info, task_info, 'problem_delete_state', {}, page='x_module_task')
return True
import json
from factory import DjangoModelFactory, SubFactory
from student.tests.factories import UserFactory as StudentUserFactory
from instructor_task.models import InstructorTask
from celery.states import PENDING
class InstructorTaskFactory(DjangoModelFactory):
FACTORY_FOR = InstructorTask
task_type = 'rescore_problem'
course_id = "MITx/999/Robot_Super_Course"
task_input = json.dumps({})
task_key = None
task_id = None
task_state = PENDING
task_output = None
requester = SubFactory(StudentUserFactory)
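# --- Illustrative usage (not part of the original factory module) ---
# A minimal sketch, assuming it runs inside a Django test case, of creating an entry
# with this factory; the task id and key shown here are made-up values.
def _example_factory_usage():
    """Hypothetical illustration of building a test InstructorTask with the factory."""
    return InstructorTaskFactory.create(
        course_id="MITx/999/Robot_Super_Course",
        task_id="11111111-2222-3333-4444-555555555555",
        task_key="",
        requester=StudentUserFactory.create(),
    )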
"""
Tests for LMS instructor background task queue management.
"""
from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware.tests.factories import UserFactory
from instructor_task.api import (get_running_instructor_tasks,
get_instructor_task_history,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students)
from instructor_task.api_helper import AlreadyRunningError
from instructor_task.models import InstructorTask, PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
InstructorTaskModuleTestCase,
TEST_COURSE_ID)
class InstructorTaskReportTest(InstructorTaskTestCase):
"""
Tests API and view methods that involve the reporting of status for background tasks.
"""
def test_get_running_instructor_tasks(self):
# when fetching running tasks, we get all running tasks, and only running tasks
for _ in range(1, 5):
self._create_failure_entry()
self._create_success_entry()
progress_task_ids = [self._create_progress_entry().task_id for _ in range(1, 5)]
task_ids = [instructor_task.task_id for instructor_task in get_running_instructor_tasks(TEST_COURSE_ID)]
self.assertEquals(set(task_ids), set(progress_task_ids))
def test_get_instructor_task_history(self):
# when fetching historical tasks, we get all tasks, including running tasks
expected_ids = []
for _ in range(1, 5):
expected_ids.append(self._create_failure_entry().task_id)
expected_ids.append(self._create_success_entry().task_id)
expected_ids.append(self._create_progress_entry().task_id)
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(TEST_COURSE_ID, self.problem_url)]
self.assertEquals(set(task_ids), set(expected_ids))
class InstructorTaskSubmitTest(InstructorTaskModuleTestCase):
"""Tests API methods that involve the submission of background tasks."""
def setUp(self):
self.initialize_course()
self.student = UserFactory.create(username="student", email="student@edx.org")
self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
def test_submit_nonexistent_modules(self):
# confirm that a rescore of a non-existent module raises an exception
problem_url = InstructorTaskModuleTestCase.problem_location("NonexistentProblem")
course_id = self.course.id
request = None
with self.assertRaises(ItemNotFoundError):
submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
with self.assertRaises(ItemNotFoundError):
submit_rescore_problem_for_all_students(request, course_id, problem_url)
with self.assertRaises(ItemNotFoundError):
submit_reset_problem_attempts_for_all_students(request, course_id, problem_url)
with self.assertRaises(ItemNotFoundError):
submit_delete_problem_state_for_all_students(request, course_id, problem_url)
def test_submit_nonrescorable_modules(self):
# confirm that a rescore of an existing but unscorable module raises an exception
# (Note that it is easier to test a scoreable but non-rescorable module in test_tasks,
# where we are creating real modules.)
problem_url = self.problem_section.location.url()
course_id = self.course.id
request = None
with self.assertRaises(NotImplementedError):
submit_rescore_problem_for_student(request, course_id, problem_url, self.student)
with self.assertRaises(NotImplementedError):
submit_rescore_problem_for_all_students(request, course_id, problem_url)
def _test_submit_with_long_url(self, task_function, student=None):
problem_url_name = 'x' * 255
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
with self.assertRaises(ValueError):
if student is not None:
task_function(self.create_task_request(self.instructor), self.course.id, location, student)
else:
task_function(self.create_task_request(self.instructor), self.course.id, location)
def test_submit_rescore_all_with_long_url(self):
self._test_submit_with_long_url(submit_rescore_problem_for_all_students)
def test_submit_rescore_student_with_long_url(self):
self._test_submit_with_long_url(submit_rescore_problem_for_student, self.student)
def test_submit_reset_all_with_long_url(self):
self._test_submit_with_long_url(submit_reset_problem_attempts_for_all_students)
def test_submit_delete_all_with_long_url(self):
self._test_submit_with_long_url(submit_delete_problem_state_for_all_students)
def _test_submit_task(self, task_function, student=None):
# tests submit, and then tests a second identical submission.
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
if student is not None:
instructor_task = task_function(self.create_task_request(self.instructor),
self.course.id, location, student)
else:
instructor_task = task_function(self.create_task_request(self.instructor),
self.course.id, location)
# test resubmitting, by updating the existing record:
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
instructor_task.task_state = PROGRESS
instructor_task.save()
with self.assertRaises(AlreadyRunningError):
if student is not None:
task_function(self.create_task_request(self.instructor), self.course.id, location, student)
else:
task_function(self.create_task_request(self.instructor), self.course.id, location)
def test_submit_rescore_all(self):
self._test_submit_task(submit_rescore_problem_for_all_students)
def test_submit_rescore_student(self):
self._test_submit_task(submit_rescore_problem_for_student, self.student)
def test_submit_reset_all(self):
self._test_submit_task(submit_reset_problem_attempts_for_all_students)
def test_submit_delete_all(self):
self._test_submit_task(submit_delete_problem_state_for_all_students)
"""
Base test classes for LMS instructor-initiated background tasks
"""
import json
from uuid import uuid4
from mock import Mock
from celery.states import SUCCESS, FAILURE
from django.test.testcases import TestCase
from django.contrib.auth.models import User
from django.test.utils import override_settings
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from courseware.model_data import StudentModule
from courseware.tests.tests import LoginEnrollmentTestCase, TEST_DATA_MONGO_MODULESTORE
from instructor_task.api_helper import encode_problem_and_student_input
from instructor_task.models import PROGRESS, QUEUING
from instructor_task.tests.factories import InstructorTaskFactory
from instructor_task.views import instructor_task_status
TEST_COURSE_ORG = 'edx'
TEST_COURSE_NAME = 'Test Course'
TEST_COURSE_NUMBER = '1.23x'
TEST_SECTION_NAME = "Problem"
TEST_COURSE_ID = 'edx/1.23x/test_course'
TEST_FAILURE_MESSAGE = 'task failed horribly'
TEST_FAILURE_EXCEPTION = 'RandomCauseError'
OPTION_1 = 'Option 1'
OPTION_2 = 'Option 2'
class InstructorTaskTestCase(TestCase):
"""
Tests API and view methods that involve the reporting of status for background tasks.
"""
def setUp(self):
self.student = UserFactory.create(username="student", email="student@edx.org")
self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
self.problem_url = InstructorTaskTestCase.problem_location("test_urlname")
@staticmethod
def problem_location(problem_url_name):
"""
Create an internal location for a test problem.
"""
return "i4x://{org}/{number}/problem/{problem_url_name}".format(org='edx',
number='1.23x',
problem_url_name=problem_url_name)
def _create_entry(self, task_state=QUEUING, task_output=None, student=None):
"""Creates a InstructorTask entry for testing."""
task_id = str(uuid4())
progress_json = json.dumps(task_output) if task_output is not None else None
task_input, task_key = encode_problem_and_student_input(self.problem_url, student)
instructor_task = InstructorTaskFactory.create(course_id=TEST_COURSE_ID,
requester=self.instructor,
task_input=json.dumps(task_input),
task_key=task_key,
task_id=task_id,
task_state=task_state,
task_output=progress_json)
return instructor_task
def _create_failure_entry(self):
"""Creates a InstructorTask entry representing a failed task."""
# view task entry for task failure
progress = {'message': TEST_FAILURE_MESSAGE,
'exception': TEST_FAILURE_EXCEPTION,
}
return self._create_entry(task_state=FAILURE, task_output=progress)
def _create_success_entry(self, student=None):
"""Creates a InstructorTask entry representing a successful task."""
return self._create_progress_entry(student, task_state=SUCCESS)
def _create_progress_entry(self, student=None, task_state=PROGRESS):
"""Creates a InstructorTask entry representing a task in progress."""
progress = {'attempted': 3,
'updated': 2,
'total': 5,
'action_name': 'rescored',
}
return self._create_entry(task_state=task_state, task_output=progress, student=student)
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class InstructorTaskModuleTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Base test class for InstructorTask-related tests that require
the setup of a course and problem in order to access StudentModule state.
"""
course = None
current_user = None
def initialize_course(self):
"""Create a course in the store, with a chapter and section."""
self.module_store = modulestore()
# Create the course
self.course = CourseFactory.create(org=TEST_COURSE_ORG,
number=TEST_COURSE_NUMBER,
display_name=TEST_COURSE_NAME)
# Add a chapter to the course
chapter = ItemFactory.create(parent_location=self.course.location,
display_name=TEST_SECTION_NAME)
# add a sequence to the course to which the problems can be added
self.problem_section = ItemFactory.create(parent_location=chapter.location,
template='i4x://edx/templates/sequential/Empty',
display_name=TEST_SECTION_NAME)
@staticmethod
def get_user_email(username):
"""Generate email address based on username"""
return '{0}@test.com'.format(username)
def login_username(self, username):
"""Login the user, given the `username`."""
if self.current_user != username:
self.login(InstructorTaskModuleTestCase.get_user_email(username), "test")
self.current_user = username
def _create_user(self, username, is_staff=False):
"""Creates a user and enrolls them in the test course."""
email = InstructorTaskModuleTestCase.get_user_email(username)
thisuser = UserFactory.create(username=username, email=email, is_staff=is_staff)
CourseEnrollmentFactory.create(user=thisuser, course_id=self.course.id)
return thisuser
def create_instructor(self, username):
"""Creates an instructor for the test course."""
return self._create_user(username, is_staff=True)
def create_student(self, username):
"""Creates a student for the test course."""
return self._create_user(username, is_staff=False)
@staticmethod
def problem_location(problem_url_name):
"""
Create an internal location for a test problem.
"""
if "i4x:" in problem_url_name:
return problem_url_name
else:
return "i4x://{org}/{number}/problem/{problem_url_name}".format(org=TEST_COURSE_ORG,
number=TEST_COURSE_NUMBER,
problem_url_name=problem_url_name)
def define_option_problem(self, problem_url_name):
"""Create the problem definition so the answer is Option 1"""
factory = OptionResponseXMLFactory()
factory_args = {'question_text': 'The correct answer is {0}'.format(OPTION_1),
'options': [OPTION_1, OPTION_2],
'correct_option': OPTION_1,
'num_responses': 2}
problem_xml = factory.build_xml(**factory_args)
ItemFactory.create(parent_location=self.problem_section.location,
template="i4x://edx/templates/problem/Blank_Common_Problem",
display_name=str(problem_url_name),
data=problem_xml)
def redefine_option_problem(self, problem_url_name):
"""Change the problem definition so the answer is Option 2"""
factory = OptionResponseXMLFactory()
factory_args = {'question_text': 'The correct answer is {0}'.format(OPTION_2),
'options': [OPTION_1, OPTION_2],
'correct_option': OPTION_2,
'num_responses': 2}
problem_xml = factory.build_xml(**factory_args)
location = InstructorTaskTestCase.problem_location(problem_url_name)
self.module_store.update_item(location, problem_xml)
def get_student_module(self, username, descriptor):
"""Get StudentModule object for test course, given the `username` and the problem's `descriptor`."""
return StudentModule.objects.get(course_id=self.course.id,
student=User.objects.get(username=username),
module_type=descriptor.location.category,
module_state_key=descriptor.location.url(),
)
@staticmethod
def get_task_status(task_id):
"""Use api method to fetch task status, using mock request."""
mock_request = Mock()
mock_request.REQUEST = {'task_id': task_id}
response = instructor_task_status(mock_request)
status = json.loads(response.content)
return status
def create_task_request(self, requester_username):
"""Generate request that can be used for submitting tasks"""
request = Mock()
request.user = User.objects.get(username=requester_username)
request.get_host = Mock(return_value="testhost")
request.META = {'REMOTE_ADDR': '0:0:0:0', 'SERVER_NAME': 'testhost'}
request.is_secure = Mock(return_value=False)
return request
"""
Integration tests for LMS instructor-initiated background tasks.
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
import logging
import json
from mock import patch
import textwrap
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
CustomResponseXMLFactory)
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware.model_data import StudentModule
from instructor_task.api import (submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students)
from instructor_task.models import InstructorTask
from instructor_task.tests.test_base import (InstructorTaskModuleTestCase, TEST_COURSE_ORG, TEST_COURSE_NUMBER,
OPTION_1, OPTION_2)
from capa.responsetypes import StudentInputError
log = logging.getLogger(__name__)
class TestIntegrationTask(InstructorTaskModuleTestCase):
"""
Base class to provide general methods used for "integration" testing of particular tasks.
"""
def submit_student_answer(self, username, problem_url_name, responses):
"""
Use ajax interface to submit a student answer.
Assumes the input list of responses has two values.
"""
def get_input_id(response_id):
"""Creates input id using information about the test course and the current problem."""
# Note that this is a capa-specific convention. The form is a version of the problem's
# URL, modified so that it can be easily stored in html, prepended with "input-" and
# appended with a sequence identifier for the particular response the input goes to.
return 'input_i4x-{0}-{1}-problem-{2}_{3}'.format(TEST_COURSE_ORG.lower(),
TEST_COURSE_NUMBER.replace('.', '_'),
problem_url_name, response_id)
# make sure that the requested user is logged in, so that the ajax call works
# on the right problem:
self.login_username(username)
# make ajax call:
modx_url = reverse('modx_dispatch',
kwargs={'course_id': self.course.id,
'location': InstructorTaskModuleTestCase.problem_location(problem_url_name),
'dispatch': 'problem_check', })
# we assume we have two responses, so assign them the correct identifiers.
resp = self.client.post(modx_url, {
get_input_id('2_1'): responses[0],
get_input_id('3_1'): responses[1],
})
return resp
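# Illustration (hypothetical values): for TEST_COURSE_ORG 'edx', TEST_COURSE_NUMBER '1.23x',
# problem_url_name 'H1P1' and response id '2_1', get_input_id above produces
#     'input_i4x-edx-1_23x-problem-H1P1_2_1'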
def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
"""Confirm that expected values are stored in InstructorTask on task failure."""
instructor_task = InstructorTask.objects.get(id=entry_id)
self.assertEqual(instructor_task.task_state, FAILURE)
self.assertEqual(instructor_task.requester.username, 'instructor')
self.assertEqual(instructor_task.task_type, task_type)
task_input = json.loads(instructor_task.task_input)
self.assertFalse('student' in task_input)
self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name))
status = json.loads(instructor_task.task_output)
self.assertEqual(status['exception'], 'ZeroDivisionError')
self.assertEqual(status['message'], expected_message)
# check status returned:
status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
self.assertEqual(status['message'], expected_message)
class TestRescoringTask(TestIntegrationTask):
"""
Integration-style tests for rescoring problems in a background task.
Exercises real problems with a minimum of patching.
"""
def setUp(self):
self.initialize_course()
self.create_instructor('instructor')
self.create_student('u1')
self.create_student('u2')
self.create_student('u3')
self.create_student('u4')
self.logout()
def render_problem(self, username, problem_url_name):
"""
Use ajax interface to request html for a problem.
"""
# make sure that the requested user is logged in, so that the ajax call works
# on the right problem:
self.login_username(username)
# make ajax call:
modx_url = reverse('modx_dispatch',
kwargs={'course_id': self.course.id,
'location': InstructorTaskModuleTestCase.problem_location(problem_url_name),
'dispatch': 'problem_get', })
resp = self.client.post(modx_url, {})
return resp
def check_state(self, username, descriptor, expected_score, expected_max_score, expected_attempts):
"""
Check that the StudentModule state contains the expected values.
The student module is found for the test course, given the `username` and problem `descriptor`.
Values checked include the number of attempts, the score, and the max score for a problem.
"""
module = self.get_student_module(username, descriptor)
self.assertEqual(module.grade, expected_score)
self.assertEqual(module.max_grade, expected_max_score)
state = json.loads(module.state)
attempts = state['attempts']
self.assertEqual(attempts, expected_attempts)
if attempts > 0:
self.assertTrue('correct_map' in state)
self.assertTrue('student_answers' in state)
self.assertGreater(len(state['correct_map']), 0)
self.assertGreater(len(state['student_answers']), 0)
def submit_rescore_all_student_answers(self, instructor, problem_url_name):
"""Submits the particular problem for rescoring"""
return submit_rescore_problem_for_all_students(self.create_task_request(instructor), self.course.id,
InstructorTaskModuleTestCase.problem_location(problem_url_name))
def submit_rescore_one_student_answer(self, instructor, problem_url_name, student):
"""Submits the particular problem for rescoring for a particular student"""
return submit_rescore_problem_for_student(self.create_task_request(instructor), self.course.id,
InstructorTaskModuleTestCase.problem_location(problem_url_name),
student)
def test_rescoring_option_problem(self):
"""Run rescore scenario on option problem"""
# get descriptor:
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
descriptor = self.module_store.get_instance(self.course.id, location)
# first store answers for each of the separate users:
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
self.submit_student_answer('u2', problem_url_name, [OPTION_1, OPTION_2])
self.submit_student_answer('u3', problem_url_name, [OPTION_2, OPTION_1])
self.submit_student_answer('u4', problem_url_name, [OPTION_2, OPTION_2])
self.check_state('u1', descriptor, 2, 2, 1)
self.check_state('u2', descriptor, 1, 2, 1)
self.check_state('u3', descriptor, 1, 2, 1)
self.check_state('u4', descriptor, 0, 2, 1)
# update the data in the problem definition
self.redefine_option_problem(problem_url_name)
# confirm that simply rendering the problem again does not result in a change
# in the grade:
self.render_problem('u1', problem_url_name)
self.check_state('u1', descriptor, 2, 2, 1)
# rescore the problem for only one student -- only that student's grade should change:
self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
self.check_state('u1', descriptor, 0, 2, 1)
self.check_state('u2', descriptor, 1, 2, 1)
self.check_state('u3', descriptor, 1, 2, 1)
self.check_state('u4', descriptor, 0, 2, 1)
# rescore the problem for all students
self.submit_rescore_all_student_answers('instructor', problem_url_name)
self.check_state('u1', descriptor, 0, 2, 1)
self.check_state('u2', descriptor, 1, 2, 1)
self.check_state('u3', descriptor, 1, 2, 1)
self.check_state('u4', descriptor, 2, 2, 1)
def test_rescoring_failure(self):
"""Simulate a failure in rescoring a problem"""
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
expected_message = "bad things happened"
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = ZeroDivisionError(expected_message)
instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)
def test_rescoring_bad_unicode_input(self):
"""Generate a real failure in rescoring a problem, with an answer including unicode"""
# At one point, the student answers that resulted in StudentInputErrors were being
# persisted (even though they were not counted as an attempt). That is not possible
# now, so it's harder to generate a test for how such input is handled.
problem_url_name = 'H1P1'
# set up an option problem -- doesn't matter really what problem it is, but we need
# it to have an answer.
self.define_option_problem(problem_url_name)
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
# return an input error as if it were a numerical response, with an embedded unicode character:
expected_message = u"Could not interpret '2/3\u03a9' as a number"
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = StudentInputError(expected_message)
instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
# check instructor_task returned
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
self.assertEqual(instructor_task.task_state, 'SUCCESS')
self.assertEqual(instructor_task.requester.username, 'instructor')
self.assertEqual(instructor_task.task_type, 'rescore_problem')
task_input = json.loads(instructor_task.task_input)
self.assertFalse('student' in task_input)
self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name))
status = json.loads(instructor_task.task_output)
self.assertEqual(status['attempted'], 1)
self.assertEqual(status['updated'], 0)
self.assertEqual(status['total'], 1)
def define_code_response_problem(self, problem_url_name):
"""
Define an arbitrary code-response problem.
We'll end up mocking its evaluation later.
"""
factory = CodeResponseXMLFactory()
grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
problem_xml = factory.build_xml(initial_display="def square(x):",
answer_display="answer",
grader_payload=grader_payload,
num_responses=2)
ItemFactory.create(parent_location=self.problem_section.location,
template="i4x://edx/templates/problem/Blank_Common_Problem",
display_name=str(problem_url_name),
data=problem_xml)
def test_rescoring_code_problem(self):
"""Run rescore scenario on problem with code submission"""
problem_url_name = 'H1P2'
self.define_code_response_problem(problem_url_name)
# we fully create the CodeResponse problem, but just pretend that we're queuing it:
with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
mock_send_to_queue.return_value = (0, "Successfully queued")
self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])
instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
self.assertEqual(instructor_task.task_state, FAILURE)
status = json.loads(instructor_task.task_output)
self.assertEqual(status['exception'], 'NotImplementedError')
self.assertEqual(status['message'], "Problem's definition does not support rescoring")
status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
self.assertEqual(status['message'], "Problem's definition does not support rescoring")
def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
"""
Defines a custom response problem that uses a random value to determine correctness.
Generated answer is also returned as the `msg`, so that the value can be used as a
correct answer by a test.
If the `redefine` flag is set, then change the definition of correctness (from equals
to not-equals).
"""
factory = CustomResponseXMLFactory()
script = textwrap.dedent("""
def check_func(expect, answer_given):
expected = str(random.randint(0, 100))
return {'ok': answer_given %s expected, 'msg': expected}
""" % ('!=' if redefine else '=='))
problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
if redefine:
self.module_store.update_item(InstructorTaskModuleTestCase.problem_location(problem_url_name), problem_xml)
else:
# Use "per-student" rerandomization so that check-problem can be called more than once.
# Using "always" means we cannot check a problem twice, but we want to call once to get the
# correct answer, and call a second time with that answer to confirm it's graded as correct.
# Per-student rerandomization will at least generate different seeds for different users, so
# we get a little more test coverage.
ItemFactory.create(parent_location=self.problem_section.location,
template="i4x://edx/templates/problem/Blank_Common_Problem",
display_name=str(problem_url_name),
data=problem_xml,
metadata={"rerandomize": "per_student"})
def test_rescoring_randomized_problem(self):
"""Run rescore scenario on custom problem that uses randomize"""
# First define the custom response problem:
problem_url_name = 'H1P1'
self.define_randomized_custom_response_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
descriptor = self.module_store.get_instance(self.course.id, location)
# run with more than one user
userlist = ['u1', 'u2', 'u3', 'u4']
for username in userlist:
# first render the problem, so that a seed will be created for this user
self.render_problem(username, problem_url_name)
# submit a bogus answer, in order to get the problem to tell us its real answer
dummy_answer = "1000"
self.submit_student_answer(username, problem_url_name, [dummy_answer, dummy_answer])
# we should have gotten the problem wrong, since we're way out of range:
self.check_state(username, descriptor, 0, 1, 1)
# dig the correct answer out of the problem's message
module = self.get_student_module(username, descriptor)
state = json.loads(module.state)
correct_map = state['correct_map']
log.info("Correct Map: %s", correct_map)
# only one response, so pull it out:
answer = correct_map.values()[0]['msg']
self.submit_student_answer(username, problem_url_name, [answer, answer])
# we should now get the problem right, with a second attempt:
self.check_state(username, descriptor, 1, 1, 2)
# redefine the problem (as stored in Mongo) so that the definition of correct changes
self.define_randomized_custom_response_problem(problem_url_name, redefine=True)
# confirm that simply rendering the problem again does not result in a change
# in the grade (or the attempts):
self.render_problem('u1', problem_url_name)
self.check_state('u1', descriptor, 1, 1, 2)
# rescore the problem for only one student -- only that student's grade should change
# (and none of the attempts):
self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
for username in userlist:
self.check_state(username, descriptor, 0 if username == 'u1' else 1, 1, 2)
# rescore the problem for all students
self.submit_rescore_all_student_answers('instructor', problem_url_name)
# all grades should change to being wrong (with no change in attempts)
for username in userlist:
self.check_state(username, descriptor, 0, 1, 2)
class TestResetAttemptsTask(TestIntegrationTask):
"""
Integration-style tests for resetting problem attempts in a background task.
Exercises real problems with a minimum of patching.
"""
userlist = ['u1', 'u2', 'u3', 'u4']
def setUp(self):
self.initialize_course()
self.create_instructor('instructor')
for username in self.userlist:
self.create_student(username)
self.logout()
def get_num_attempts(self, username, descriptor):
"""returns number of attempts stored for `username` on problem `descriptor` for test course"""
module = self.get_student_module(username, descriptor)
state = json.loads(module.state)
return state['attempts']
def reset_problem_attempts(self, instructor, problem_url_name):
"""Submits the current problem for resetting"""
return submit_reset_problem_attempts_for_all_students(self.create_task_request(instructor), self.course.id,
InstructorTaskModuleTestCase.problem_location(problem_url_name))
def test_reset_attempts_on_problem(self):
"""Run reset-attempts scenario on option problem"""
# get descriptor:
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
descriptor = self.module_store.get_instance(self.course.id, location)
num_attempts = 3
# first store answers for each of the separate users:
for _ in range(num_attempts):
for username in self.userlist:
self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
for username in self.userlist:
self.assertEquals(self.get_num_attempts(username, descriptor), num_attempts)
self.reset_problem_attempts('instructor', problem_url_name)
for username in self.userlist:
self.assertEquals(self.get_num_attempts(username, descriptor), 0)
def test_reset_failure(self):
"""Simulate a failure in resetting attempts on a problem"""
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
expected_message = "bad things happened"
with patch('courseware.models.StudentModule.save') as mock_save:
mock_save.side_effect = ZeroDivisionError(expected_message)
instructor_task = self.reset_problem_attempts('instructor', problem_url_name)
self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)
def test_reset_non_problem(self):
"""confirm that a non-problem can still be successfully reset"""
problem_url_name = self.problem_section.location.url()
instructor_task = self.reset_problem_attempts('instructor', problem_url_name)
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
self.assertEqual(instructor_task.task_state, SUCCESS)
class TestDeleteProblemTask(TestIntegrationTask):
"""
Integration-style tests for deleting problem state in a background task.
Exercises real problems with a minimum of patching.
"""
userlist = ['u1', 'u2', 'u3', 'u4']
def setUp(self):
self.initialize_course()
self.create_instructor('instructor')
for username in self.userlist:
self.create_student(username)
self.logout()
def delete_problem_state(self, instructor, problem_url_name):
"""Submits the current problem for deletion"""
return submit_delete_problem_state_for_all_students(self.create_task_request(instructor), self.course.id,
InstructorTaskModuleTestCase.problem_location(problem_url_name))
def test_delete_problem_state(self):
"""Run delete-state scenario on option problem"""
# get descriptor:
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
descriptor = self.module_store.get_instance(self.course.id, location)
# first store answers for each of the separate users:
for username in self.userlist:
self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
# confirm that state exists:
for username in self.userlist:
self.assertTrue(self.get_student_module(username, descriptor) is not None)
# run delete task:
self.delete_problem_state('instructor', problem_url_name)
# confirm that no state can be found:
for username in self.userlist:
with self.assertRaises(StudentModule.DoesNotExist):
self.get_student_module(username, descriptor)
def test_delete_failure(self):
"""Simulate a failure in deleting state of a problem"""
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
expected_message = "bad things happened"
with patch('courseware.models.StudentModule.delete') as mock_delete:
mock_delete.side_effect = ZeroDivisionError(expected_message)
instructor_task = self.delete_problem_state('instructor', problem_url_name)
self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)
def test_delete_non_problem(self):
"""confirm that a non-problem can still be successfully deleted"""
problem_url_name = self.problem_section.location.url()
instructor_task = self.delete_problem_state('instructor', problem_url_name)
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
self.assertEqual(instructor_task.task_state, SUCCESS)
"""
Unit tests for LMS instructor-initiated background tasks.
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
import json
from uuid import uuid4
from mock import Mock, patch
from celery.states import SUCCESS, FAILURE
from xmodule.modulestore.exceptions import ItemNotFoundError
from courseware.model_data import StudentModule
from courseware.tests.factories import StudentModuleFactory
from student.tests.factories import UserFactory
from instructor_task.models import InstructorTask
from instructor_task.tests.test_base import InstructorTaskModuleTestCase, TEST_COURSE_ORG, TEST_COURSE_NUMBER
from instructor_task.tests.factories import InstructorTaskFactory
from instructor_task.tasks import rescore_problem, reset_problem_attempts, delete_problem_state
from instructor_task.tasks_helper import UpdateProblemModuleStateError, update_problem_module_state
PROBLEM_URL_NAME = "test_urlname"
class TestTaskFailure(Exception):
pass
class TestInstructorTasks(InstructorTaskModuleTestCase):
def setUp(self):
super(InstructorTaskModuleTestCase, self).setUp()
self.initialize_course()
self.instructor = self.create_instructor('instructor')
self.problem_url = InstructorTaskModuleTestCase.problem_location(PROBLEM_URL_NAME)
def _create_input_entry(self, student_ident=None):
"""Creates a InstructorTask entry for testing."""
task_id = str(uuid4())
task_input = {'problem_url': self.problem_url}
if student_ident is not None:
task_input['student'] = student_ident
instructor_task = InstructorTaskFactory.create(course_id=self.course.id,
requester=self.instructor,
task_input=json.dumps(task_input),
task_key='dummy value',
task_id=task_id)
return instructor_task
def _get_xmodule_instance_args(self):
"""
Calculate dummy values for parameters needed for instantiating xmodule instances.
"""
return {'xqueue_callback_url_prefix': 'dummy_value',
'request_info': {},
}
def _run_task_with_mock_celery(self, task_function, entry_id, task_id, expected_failure_message=None):
self.current_task = Mock()
self.current_task.request = Mock()
self.current_task.request.id = task_id
self.current_task.update_state = Mock()
if expected_failure_message is not None:
self.current_task.update_state.side_effect = TestTaskFailure(expected_failure_message)
with patch('instructor_task.tasks_helper._get_current_task') as mock_get_task:
mock_get_task.return_value = self.current_task
return task_function(entry_id, self._get_xmodule_instance_args())
def _test_missing_current_task(self, task_function):
# run without (mock) Celery running
task_entry = self._create_input_entry()
with self.assertRaises(UpdateProblemModuleStateError):
task_function(task_entry.id, self._get_xmodule_instance_args())
def test_rescore_missing_current_task(self):
self._test_missing_current_task(rescore_problem)
def test_reset_missing_current_task(self):
self._test_missing_current_task(reset_problem_attempts)
def test_delete_missing_current_task(self):
self._test_missing_current_task(delete_problem_state)
def _test_undefined_problem(self, task_function):
# run with celery, but no problem defined
task_entry = self._create_input_entry()
with self.assertRaises(ItemNotFoundError):
self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)
def test_rescore_undefined_problem(self):
self._test_undefined_problem(rescore_problem)
def test_reset_undefined_problem(self):
self._test_undefined_problem(reset_problem_attempts)
def test_delete_undefined_problem(self):
self._test_undefined_problem(delete_problem_state)
def _test_run_with_task(self, task_function, action_name, expected_num_updated):
# run with some StudentModules for the problem
task_entry = self._create_input_entry()
status = self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)
# check return value
self.assertEquals(status.get('attempted'), expected_num_updated)
self.assertEquals(status.get('updated'), expected_num_updated)
self.assertEquals(status.get('total'), expected_num_updated)
self.assertEquals(status.get('action_name'), action_name)
self.assertGreater(status.get('duration_ms'), 0)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(json.loads(entry.task_output), status)
self.assertEquals(entry.task_state, SUCCESS)
def _test_run_with_no_state(self, task_function, action_name):
# run with no StudentModules for the problem
self.define_option_problem(PROBLEM_URL_NAME)
self._test_run_with_task(task_function, action_name, 0)
def test_rescore_with_no_state(self):
self._test_run_with_no_state(rescore_problem, 'rescored')
def test_reset_with_no_state(self):
self._test_run_with_no_state(reset_problem_attempts, 'reset')
def test_delete_with_no_state(self):
self._test_run_with_no_state(delete_problem_state, 'deleted')
def _create_students_with_state(self, num_students, state=None):
"""Create students, a problem, and StudentModule objects for testing"""
self.define_option_problem(PROBLEM_URL_NAME)
students = [
UserFactory.create(username='robot%d' % i, email='robot+test+%d@edx.org' % i)
for i in xrange(num_students)
]
for student in students:
StudentModuleFactory.create(course_id=self.course.id,
module_state_key=self.problem_url,
student=student,
state=state)
return students
def _assert_num_attempts(self, students, num_attempts):
"""Check the number attempts for all students is the same"""
for student in students:
module = StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.problem_url)
state = json.loads(module.state)
self.assertEquals(state['attempts'], num_attempts)
def test_reset_with_some_state(self):
initial_attempts = 3
input_state = json.dumps({'attempts': initial_attempts})
num_students = 10
students = self._create_students_with_state(num_students, input_state)
# check that entries were set correctly
self._assert_num_attempts(students, initial_attempts)
# run the task
self._test_run_with_task(reset_problem_attempts, 'reset', num_students)
# check that entries were reset
self._assert_num_attempts(students, 0)
def test_delete_with_some_state(self):
# This will create StudentModule entries -- we don't have to worry about
# the state inside them.
num_students = 10
students = self._create_students_with_state(num_students)
# check that entries were created correctly
for student in students:
StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.problem_url)
self._test_run_with_task(delete_problem_state, 'deleted', num_students)
# confirm that no state can be found anymore:
for student in students:
with self.assertRaises(StudentModule.DoesNotExist):
StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.problem_url)
def _test_reset_with_student(self, use_email):
# run with some StudentModules for the problem
num_students = 10
initial_attempts = 3
input_state = json.dumps({'attempts': initial_attempts})
students = self._create_students_with_state(num_students, input_state)
# check that entries were set correctly
for student in students:
module = StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.problem_url)
state = json.loads(module.state)
self.assertEquals(state['attempts'], initial_attempts)
if use_email:
student_ident = students[3].email
else:
student_ident = students[3].username
task_entry = self._create_input_entry(student_ident)
status = self._run_task_with_mock_celery(reset_problem_attempts, task_entry.id, task_entry.task_id)
# check return value
self.assertEquals(status.get('attempted'), 1)
self.assertEquals(status.get('updated'), 1)
self.assertEquals(status.get('total'), 1)
self.assertEquals(status.get('action_name'), 'reset')
self.assertGreater(status.get('duration_ms'), 0)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(json.loads(entry.task_output), status)
self.assertEquals(entry.task_state, SUCCESS)
# check that the correct entry was reset
for index, student in enumerate(students):
module = StudentModule.objects.get(course_id=self.course.id,
student=student,
module_state_key=self.problem_url)
state = json.loads(module.state)
if index == 3:
self.assertEquals(state['attempts'], 0)
else:
self.assertEquals(state['attempts'], initial_attempts)
def test_reset_with_student_username(self):
self._test_reset_with_student(False)
def test_reset_with_student_email(self):
self._test_reset_with_student(True)
def _test_run_with_failure(self, task_function, expected_message):
# run with no StudentModules for the problem,
# because we will fail before entering the loop.
task_entry = self._create_input_entry()
self.define_option_problem(PROBLEM_URL_NAME)
with self.assertRaises(TestTaskFailure):
self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id, expected_message)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(entry.task_state, FAILURE)
output = json.loads(entry.task_output)
self.assertEquals(output['exception'], 'TestTaskFailure')
self.assertEquals(output['message'], expected_message)
def test_rescore_with_failure(self):
self._test_run_with_failure(rescore_problem, 'We expected this to fail')
def test_reset_with_failure(self):
self._test_run_with_failure(reset_problem_attempts, 'We expected this to fail')
def test_delete_with_failure(self):
self._test_run_with_failure(delete_problem_state, 'We expected this to fail')
def _test_run_with_long_error_msg(self, task_function):
# run with an error message that is so long it will require
# truncation (as well as the jettisoning of the traceback).
task_entry = self._create_input_entry()
self.define_option_problem(PROBLEM_URL_NAME)
expected_message = "x" * 1500
with self.assertRaises(TestTaskFailure):
self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id, expected_message)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(entry.task_state, FAILURE)
self.assertGreater(1023, len(entry.task_output))
output = json.loads(entry.task_output)
self.assertEquals(output['exception'], 'TestTaskFailure')
self.assertEquals(output['message'], expected_message[:len(output['message']) - 3] + "...")
self.assertTrue('traceback' not in output)
def test_rescore_with_long_error_msg(self):
self._test_run_with_long_error_msg(rescore_problem)
def test_reset_with_long_error_msg(self):
self._test_run_with_long_error_msg(reset_problem_attempts)
def test_delete_with_long_error_msg(self):
self._test_run_with_long_error_msg(delete_problem_state)
def _test_run_with_short_error_msg(self, task_function):
# run with an error message that is short enough to fit
# in the output, but long enough that the traceback won't.
# Confirm that the traceback is truncated.
task_entry = self._create_input_entry()
self.define_option_problem(PROBLEM_URL_NAME)
expected_message = "x" * 900
with self.assertRaises(TestTaskFailure):
self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id, expected_message)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(entry.task_state, FAILURE)
self.assertGreater(1023, len(entry.task_output))
output = json.loads(entry.task_output)
self.assertEquals(output['exception'], 'TestTaskFailure')
self.assertEquals(output['message'], expected_message)
self.assertEquals(output['traceback'][-3:], "...")
def test_rescore_with_short_error_msg(self):
self._test_run_with_short_error_msg(rescore_problem)
def test_reset_with_short_error_msg(self):
self._test_run_with_short_error_msg(reset_problem_attempts)
def test_delete_with_short_error_msg(self):
self._test_run_with_short_error_msg(delete_problem_state)
def test_successful_result_too_long(self):
# while we don't expect the existing tasks to generate output that is too
# long, we can test the framework will handle such an occurrence.
task_entry = self._create_input_entry()
self.define_option_problem(PROBLEM_URL_NAME)
action_name = 'x' * 1000
update_fcn = lambda _module_descriptor, _student_module, _xmodule_instance_args: True
task_function = (lambda entry_id, xmodule_instance_args:
update_problem_module_state(entry_id,
update_fcn, action_name, filter_fcn=None,
xmodule_instance_args=None))
with self.assertRaises(ValueError):
self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)
# compare with entry in table:
entry = InstructorTask.objects.get(id=task_entry.id)
self.assertEquals(entry.task_state, FAILURE)
self.assertGreater(1023, len(entry.task_output))
output = json.loads(entry.task_output)
self.assertEquals(output['exception'], 'ValueError')
self.assertTrue("Length of task output is too long" in output['message'])
self.assertTrue('traceback' not in output)
"""
Test for LMS instructor background task queue management
"""
import json
from celery.states import SUCCESS, FAILURE, REVOKED, PENDING
from mock import Mock, patch
from django.utils.datastructures import MultiValueDict
from instructor_task.models import PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
TEST_FAILURE_MESSAGE,
TEST_FAILURE_EXCEPTION)
from instructor_task.views import instructor_task_status, get_task_completion_info
class InstructorTaskReportTest(InstructorTaskTestCase):
"""
Tests API and view methods that involve the reporting of status for background tasks.
"""
def _get_instructor_task_status(self, task_id):
"""Returns status corresponding to task_id via api method."""
request = Mock()
request.REQUEST = {'task_id': task_id}
return instructor_task_status(request)
def test_instructor_task_status(self):
instructor_task = self._create_failure_entry()
task_id = instructor_task.task_id
request = Mock()
request.REQUEST = {'task_id': task_id}
response = instructor_task_status(request)
output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id)
def test_missing_instructor_task_status(self):
task_id = "missing_id"
request = Mock()
request.REQUEST = {'task_id': task_id}
response = instructor_task_status(request)
output = json.loads(response.content)
self.assertEquals(output, {})
def test_instructor_task_status_list(self):
# Fetch status for existing tasks by arg list, as if called from ajax.
# Note that ajax does something funny with the marshalling of
# list data, so the key value has "[]" appended to it.
task_ids = [(self._create_failure_entry()).task_id for _ in range(1, 5)]
request = Mock()
request.REQUEST = MultiValueDict({'task_ids[]': task_ids})
response = instructor_task_status(request)
output = json.loads(response.content)
self.assertEquals(len(output), len(task_ids))
for task_id in task_ids:
self.assertEquals(output[task_id]['task_id'], task_id)
def test_get_status_from_failure(self):
# get status for a task that has already failed
instructor_task = self._create_failure_entry()
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], TEST_FAILURE_MESSAGE)
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], FAILURE)
self.assertFalse(output['in_progress'])
expected_progress = {'exception': TEST_FAILURE_EXCEPTION,
'message': TEST_FAILURE_MESSAGE}
self.assertEquals(output['task_progress'], expected_progress)
def test_get_status_from_success(self):
# get status for a task that has already succeeded
instructor_task = self._create_success_entry()
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], "Problem rescored for 2 of 3 students (out of 5)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress'])
expected_progress = {'attempted': 3,
'updated': 2,
'total': 5,
'action_name': 'rescored'}
self.assertEquals(output['task_progress'], expected_progress)
def _test_get_status_from_result(self, task_id, mock_result):
"""
Provides mock result to caller of instructor_task_status, and returns resulting output.
"""
with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
mock_result_ctor.return_value = mock_result
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id)
return output
def test_get_status_to_pending(self):
# get status for a task that hasn't begun to run yet
instructor_task = self._create_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = PENDING
output = self._test_get_status_from_result(task_id, mock_result)
for key in ['message', 'succeeded', 'task_progress']:
self.assertTrue(key not in output)
self.assertEquals(output['task_state'], 'PENDING')
self.assertTrue(output['in_progress'])
def test_update_progress_to_progress(self):
# view task entry for task in progress
instructor_task = self._create_progress_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = PROGRESS
mock_result.result = {'attempted': 5,
'updated': 4,
'total': 10,
'action_name': 'rescored'}
output = self._test_get_status_from_result(task_id, mock_result)
self.assertEquals(output['message'], "Progress: rescored 4 of 5 so far (out of 10)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], PROGRESS)
self.assertTrue(output['in_progress'])
self.assertEquals(output['task_progress'], mock_result.result)
def test_update_progress_to_failure(self):
# view task entry for task in progress that later fails
instructor_task = self._create_progress_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = FAILURE
mock_result.result = NotImplementedError("This task later failed.")
mock_result.traceback = "random traceback"
output = self._test_get_status_from_result(task_id, mock_result)
self.assertEquals(output['message'], "This task later failed.")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], FAILURE)
self.assertFalse(output['in_progress'])
expected_progress = {'exception': 'NotImplementedError',
'message': "This task later failed.",
'traceback': "random traceback"}
self.assertEquals(output['task_progress'], expected_progress)
def test_update_progress_to_revoked(self):
# view task entry for task in progress that is later revoked
instructor_task = self._create_progress_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = REVOKED
output = self._test_get_status_from_result(task_id, mock_result)
self.assertEquals(output['message'], "Task revoked before running")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], REVOKED)
self.assertFalse(output['in_progress'])
expected_progress = {'message': "Task revoked before running"}
self.assertEquals(output['task_progress'], expected_progress)
def _get_output_for_task_success(self, attempted, updated, total, student=None):
"""returns the task_id and the result returned by instructor_task_status()."""
# view task entry for task in progress
instructor_task = self._create_progress_entry(student)
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = SUCCESS
mock_result.result = {'attempted': attempted,
'updated': updated,
'total': total,
'action_name': 'rescored'}
output = self._test_get_status_from_result(task_id, mock_result)
return output
def test_update_progress_to_success(self):
output = self._get_output_for_task_success(10, 8, 10)
self.assertEquals(output['message'], "Problem rescored for 8 of 10 students")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress'])
expected_progress = {'attempted': 10,
'updated': 8,
'total': 10,
'action_name': 'rescored'}
self.assertEquals(output['task_progress'], expected_progress)
def test_success_messages(self):
output = self._get_output_for_task_success(0, 0, 10)
self.assertEqual(output['message'], "Unable to find any students with submissions to be rescored (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(10, 0, 10)
self.assertEqual(output['message'], "Problem failed to be rescored for any of 10 students")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(10, 8, 10)
self.assertEqual(output['message'], "Problem rescored for 8 of 10 students")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(9, 8, 10)
self.assertEqual(output['message'], "Problem rescored for 8 of 9 students (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(10, 10, 10)
self.assertEqual(output['message'], "Problem successfully rescored for 10 students")
self.assertTrue(output['succeeded'])
output = self._get_output_for_task_success(0, 0, 1, student=self.student)
self.assertTrue("Unable to find submission to be rescored for student" in output['message'])
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(1, 0, 1, student=self.student)
self.assertTrue("Problem failed to be rescored for student" in output['message'])
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(1, 1, 1, student=self.student)
self.assertTrue("Problem successfully rescored for student" in output['message'])
self.assertTrue(output['succeeded'])
def test_get_info_for_queuing_task(self):
# get status for a task that is still running:
instructor_task = self._create_entry()
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No status information available")
def test_get_info_for_missing_output(self):
# check for missing task_output
instructor_task = self._create_success_entry()
instructor_task.task_output = None
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No status information available")
def test_get_info_for_broken_output(self):
# check for non-JSON task_output
instructor_task = self._create_success_entry()
instructor_task.task_output = "{ bad"
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No parsable status information available")
def test_get_info_for_empty_output(self):
# check for JSON task_output with missing keys
instructor_task = self._create_success_entry()
instructor_task.task_output = "{}"
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No progress status information available")
def test_get_info_for_broken_input(self):
# check for non-JSON task_input, but then just ignore it
instructor_task = self._create_success_entry()
instructor_task.task_input = "{ bad"
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "Problem rescored for 2 of 3 students (out of 5)")
import json
import logging
from django.http import HttpResponse
from celery.states import FAILURE, REVOKED, READY_STATES
from instructor_task.api_helper import (get_status_from_instructor_task,
get_updated_instructor_task)
from instructor_task.models import PROGRESS
log = logging.getLogger(__name__)
# return status for completed tasks and tasks in progress
STATES_WITH_STATUS = [state for state in READY_STATES] + [PROGRESS]
def _get_instructor_task_status(task_id):
"""
Returns status for a specific task.
Written as an internal method here (rather than as a helper)
so that get_task_completion_info() can be called without
causing a circular dependency (since it's also called directly).
"""
instructor_task = get_updated_instructor_task(task_id)
status = get_status_from_instructor_task(instructor_task)
if instructor_task is not None and instructor_task.task_state in STATES_WITH_STATUS:
succeeded, message = get_task_completion_info(instructor_task)
status['message'] = message
status['succeeded'] = succeeded
return status
def instructor_task_status(request):
"""
View method that returns the status of a course-related task or tasks.
Status is returned as a JSON-serialized dict, wrapped as the content of an HttpResponse.
The task_id can be specified to this view in one of two ways:
* by making a request containing 'task_id' as a parameter with a single value
Returns a dict containing status information for the specified task_id
* by making a request containing 'task_ids' as a parameter,
with a list of task_id values.
Returns a dict of dicts, with the task_id as key, and the corresponding
dict containing status information for the specified task_id
Task_id values that are unrecognized are skipped.
The dict with status information for a task contains the following keys:
'message': on complete tasks, status message reporting on final progress,
or providing exception message if failed. For tasks in progress,
indicates the current progress.
'succeeded': on complete tasks or tasks in progress, boolean value indicates if the
task outcome was successful: did it achieve what it set out to do.
This is in contrast with a successful task_state, which indicates that the
task merely completed.
'task_id': id assigned by LMS and used by celery.
'task_state': state of task as stored in celery's result store.
'in_progress': boolean indicating if task is still running.
'task_progress': dict containing progress information. This includes:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
'duration_ms': how long the task has (or had) been running.
'exception': name of exception class raised in failed tasks.
'message': returned for failed and revoked tasks.
'traceback': optional, returned if task failed and produced a traceback.
"""
output = {}
if 'task_id' in request.REQUEST:
task_id = request.REQUEST['task_id']
output = _get_instructor_task_status(task_id)
elif 'task_ids[]' in request.REQUEST:
tasks = request.REQUEST.getlist('task_ids[]')
for task_id in tasks:
task_output = _get_instructor_task_status(task_id)
if task_output is not None:
output[task_id] = task_output
return HttpResponse(json.dumps(output, indent=4))
def get_task_completion_info(instructor_task):
"""
Construct progress message from progress information in InstructorTask entry.
Returns (boolean, message string) duple, where the boolean indicates
whether the task completed without incident. (It is possible for a
task to attempt many sub-tasks, such as rescoring many students' problem
responses, and while the task runs to completion, some of the students'
responses could not be rescored.)
Used for providing messages to instructor_task_status(), as well as
external calls for providing course task submission history information.
"""
succeeded = False
if instructor_task.task_state not in STATES_WITH_STATUS:
return (succeeded, "No status information available")
# we're more surprised if there is no output for a completed task, but just warn:
if instructor_task.task_output is None:
log.warning("No task_output information found for instructor_task {0}".format(instructor_task.task_id))
return (succeeded, "No status information available")
try:
task_output = json.loads(instructor_task.task_output)
except ValueError:
fmt = "No parsable task_output information found for instructor_task {0}: {1}"
log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
return (succeeded, "No parsable status information available")
if instructor_task.task_state in [FAILURE, REVOKED]:
return (succeeded, task_output.get('message', 'No message provided'))
if any([key not in task_output for key in ['action_name', 'attempted', 'updated', 'total']]):
fmt = "Invalid task_output information found for instructor_task {0}: {1}"
log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
return (succeeded, "No progress status information available")
action_name = task_output['action_name']
num_attempted = task_output['attempted']
num_updated = task_output['updated']
num_total = task_output['total']
student = None
try:
task_input = json.loads(instructor_task.task_input)
except ValueError:
fmt = "No parsable task_input information found for instructor_task {0}: {1}"
log.warning(fmt.format(instructor_task.task_id, instructor_task.task_input))
else:
student = task_input.get('student')
if instructor_task.task_state == PROGRESS:
# special message for providing progress updates:
msg_format = "Progress: {action} {updated} of {attempted} so far"
elif student is not None:
if num_attempted == 0:
msg_format = "Unable to find submission to be {action} for student '{student}'"
elif num_updated == 0:
msg_format = "Problem failed to be {action} for student '{student}'"
else:
succeeded = True
msg_format = "Problem successfully {action} for student '{student}'"
elif num_attempted == 0:
msg_format = "Unable to find any students with submissions to be {action}"
elif num_updated == 0:
msg_format = "Problem failed to be {action} for any of {attempted} students"
elif num_updated == num_attempted:
succeeded = True
msg_format = "Problem successfully {action} for {attempted} students"
else: # num_updated < num_attempted
msg_format = "Problem {action} for {updated} of {attempted} students"
if student is None and num_attempted != num_total:
msg_format += " (out of {total})"
# Update status in task result object itself:
message = msg_format.format(action=action_name, updated=num_updated,
attempted=num_attempted, total=num_total,
student=student)
return (succeeded, message)
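# Illustrative example (hypothetical values, not taken from a real task): for a rescore
# task that completed with task_output of
#   '{"action_name": "rescored", "attempted": 9, "updated": 8, "total": 10}'
# and a task_input naming no particular student, this function would return
#   (False, "Problem rescored for 8 of 9 students (out of 10)")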
...@@ -102,7 +102,7 @@ MITX_FEATURES = {
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# segment.io for LMS--need to explicitly turn it on for production.
'SEGMENT_IO_LMS': False,
# Enables the student notes API and UI.
...@@ -122,7 +122,10 @@ MITX_FEATURES = {
'USE_CUSTOM_THEME': False,
# Do autoplay videos for students
'AUTOPLAY_VIDEOS': True,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
}
# Used for A/B testing
...@@ -691,6 +694,7 @@ INSTALLED_APPS = (
'util',
'certificates',
'instructor',
'instructor_task',
'open_ended_grading',
'psychometrics',
'licenses',
......
// Define an InstructorTaskProgress object for updating a table on the instructor
// dashboard that shows the current background tasks that are currently running
// for the instructor's course. Any tasks that were running when the page is
// first displayed are passed in as instructor_tasks, and populate the "Pending Instructor
// Task" table. The InstructorTaskProgress is bound to this table, and periodically
// polls the LMS to see if any of the tasks has completed. Once a task is complete,
// it is not included in any further polling.
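// As an illustration (the task id below is made up), each entry row rendered by the
// instructor dashboard template is expected to look roughly like:
//
//   <tr class="task-progress-entry" data-task-id="468a80b6-..." data-in-progress="true">
//     ... <td class="task-state">PROGRESS</td>
//     <td class="task-duration">unknown</td>
//     <td class="task-progress">unknown</td> ...
//   </tr>
//
// The poller reads data-task-id from each row and rewrites the .task-state,
// .task-duration and .task-progress cells as status responses arrive.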
(function() {
var __bind = function(fn, me){ return function(){ return fn.apply(me, arguments); }; };
this.InstructorTaskProgress = (function() {
function InstructorTaskProgress(element) {
this.update_progress = __bind(this.update_progress, this);
this.get_status = __bind(this.get_status, this);
this.element = element;
this.entries = $(element).find('.task-progress-entry')
if (window.queuePollerID) {
window.clearTimeout(window.queuePollerID);
}
// Hardcode the initial delay before the first refresh to one second:
window.queuePollerID = window.setTimeout(this.get_status, 1000);
}
InstructorTaskProgress.prototype.$ = function(selector) {
return $(selector, this.element);
};
InstructorTaskProgress.prototype.update_progress = function(response) {
var _this = this;
// Response should be a dict with an entry for each requested task_id,
// with a "task-state" and "in_progress" key and optionally a "message"
// and a "task_progress.duration" key.
var something_in_progress = false;
for (var task_id in response) {
var task_dict = response[task_id];
// find the corresponding entry, and update it:
var entry = $(_this.element).find('[data-task-id="' + task_id + '"]');
entry.find('.task-state').text(task_dict.task_state);
var duration_value = (task_dict.task_progress && task_dict.task_progress.duration_ms
&& Math.round(task_dict.task_progress.duration_ms/1000)) || 'unknown';
entry.find('.task-duration').text(duration_value);
var progress_value = task_dict.message || '';
entry.find('.task-progress').text(progress_value);
// if the task is complete, then change the entry so it won't
// be queried again. Otherwise set a flag.
if (task_dict.in_progress === true) {
something_in_progress = true;
} else {
entry.data('inProgress', "False")
}
}
// if some entries are still incomplete, then repoll:
// Hardcode the refresh interval to be every five seconds.
// TODO: allow the refresh interval to be set. (And if it is disabled,
// then don't set the timeout at all.)
if (something_in_progress) {
window.queuePollerID = window.setTimeout(_this.get_status, 5000);
} else {
delete window.queuePollerID;
}
}
InstructorTaskProgress.prototype.get_status = function() {
var _this = this;
var task_ids = [];
// Construct the array of ids to get status for, by
// including the subset of entries that are still in progress.
this.entries.each(function(idx, element) {
var task_id = $(element).data('taskId');
var in_progress = $(element).data('inProgress');
if (in_progress="True") {
task_ids.push(task_id);
}
});
// Make call to get status for these ids.
// Note that the keyname here ends up with "[]" being appended
// in the POST parameter that shows up on the Django server.
// TODO: add error handler.
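// For instance, polling two entries produces a request body along the lines of
//   task_ids[]=<task_id_1>&task_ids[]=<task_id_2>
// which the Django view reads back via request.REQUEST.getlist('task_ids[]').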
var ajax_url = '/instructor_task_status/';
var data = {'task_ids': task_ids };
$.post(ajax_url, data).done(this.update_progress);
};
return InstructorTaskProgress;
})();
}).call(this);
// once the page is rendered, create the progress object
var instructorTaskProgress;
$(document).ready(function() {
instructorTaskProgress = new InstructorTaskProgress($('#task-progress-wrapper'));
});
...@@ -9,7 +9,9 @@
<script type="text/javascript" src="${static.url('js/vendor/jquery-jvectormap-1.1.1/jquery-jvectormap-1.1.1.min.js')}"></script>
<script type="text/javascript" src="${static.url('js/vendor/jquery-jvectormap-1.1.1/jquery-jvectormap-world-mill-en.js')}"></script>
<script type="text/javascript" src="${static.url('js/course_groups/cohorts.js')}"></script>
%if instructor_tasks is not None:
<script type="text/javascript" src="${static.url('js/pending_tasks.js')}"></script>>
%endif
</%block>
<%include file="/courseware/course_navigation.html" args="active_page='instructor'" />
...@@ -194,19 +196,77 @@ function goto( mode)
<hr width="40%" style="align:left">
%endif
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
<H2>Course-specific grade adjustment</h2>
<p>
Specify a particular problem in the course here by its url:
<input type="text" name="problem_for_all_students" size="60">
</p>
<p>
You may use just the "urlname" if a problem, or "modulename/urlname" if not.
(For example, if the location is <tt>i4x://university/course/problem/problemname</tt>,
then just provide the <tt>problemname</tt>.
If the location is <tt>i4x://university/course/notaproblem/someothername</tt>, then
provide <tt>notaproblem/someothername</tt>.)
</p>
<p>
Then select an action:
<input type="submit" name="action" value="Reset ALL students' attempts">
<input type="submit" name="action" value="Rescore ALL students' problem submissions">
</p>
<p>These actions run in the background, and status for active tasks will appear in a table below.
To see status for all tasks submitted for this problem, click on this button:
</p>
<p>
<input type="submit" name="action" value="Show Background Task History">
</p>
<hr width="40%" style="align:left">
%endif
<H2>Student-specific grade inspection and adjustment</h2>
<p>
Specify the edX email address or username of a student here:
<input type="text" name="unique_student_identifier">
</p>
<p>
Click this, and a link to student's progress page will appear below:
<input type="submit" name="action" value="Get link to student's progress page">
</p>
<p>
Specify a particular problem in the course here by its url:
<input type="text" name="problem_for_student" size="60">
</p>
<p>
You may use just the "urlname" if a problem, or "modulename/urlname" if not.
(For example, if the location is <tt>i4x://university/course/problem/problemname</tt>,
then just provide the <tt>problemname</tt>.
If the location is <tt>i4x://university/course/notaproblem/someothername</tt>, then
provide <tt>notaproblem/someothername</tt>.)
</p>
<p>
Then select an action:
<input type="submit" name="action" value="Reset student's attempts">
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
<input type="submit" name="action" value="Rescore student's problem submission">
%endif
</p>
%if instructor_access:
<p>
You may also delete the entire state of a student for the specified module:
<input type="submit" name="action" value="Delete student state for module">
</p>
%endif
%if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
<p>Rescoring runs in the background, and status for active tasks will appear in a table below.
To see status for all tasks submitted for this course and student, click on this button:
</p>
<p>
<input type="submit" name="action" value="Show Background Task History for Student">
</p>
%endif
%endif
...@@ -234,6 +294,7 @@ function goto( mode)
##-----------------------------------------------------------------------------
%if modeflag.get('Admin'):
%if instructor_access:
<hr width="40%" style="align:left">
<p>
...@@ -373,6 +434,7 @@ function goto( mode)
%if msg:
<p></p><p>${msg}</p>
%endif
##-----------------------------------------------------------------------------
%if modeflag.get('Analytics'):
...@@ -559,6 +621,69 @@ function goto( mode)
</p>
%endif
## Output tasks in progress
%if instructor_tasks is not None and len(instructor_tasks) > 0:
<hr width="100%">
<h2>Pending Instructor Tasks</h2>
<div id="task-progress-wrapper">
<table class="stat_table">
<tr>
<th>Task Type</th>
<th>Task inputs</th>
<th>Task Id</th>
<th>Requester</th>
<th>Submitted</th>
<th>Task State</th>
<th>Duration (sec)</th>
<th>Task Progress</th>
</tr>
%for tasknum, instructor_task in enumerate(instructor_tasks):
<tr id="task-progress-entry-${tasknum}" class="task-progress-entry"
data-task-id="${instructor_task.task_id}"
data-in-progress="true">
<td>${instructor_task.task_type}</td>
<td>${instructor_task.task_input}</td>
<td class="task-id">${instructor_task.task_id}</td>
<td>${instructor_task.requester}</td>
<td>${instructor_task.created}</td>
<td class="task-state">${instructor_task.task_state}</td>
<td class="task-duration">unknown</td>
<td class="task-progress">unknown</td>
</tr>
%endfor
</table>
</div>
<br/>
%endif
##-----------------------------------------------------------------------------
%if course_stats and modeflag.get('Psychometrics') is None:
<br/>
<br/>
<p>
<hr width="100%">
<h2>${course_stats['title'] | h}</h2>
<table class="stat_table">
<tr>
%for hname in course_stats['header']:
<th>${hname | h}</th>
%endfor
</tr>
%for row in course_stats['data']:
<tr>
%for value in row:
<td>${value | h}</td>
%endfor
</tr>
%endfor
</table>
</p>
%endif
##-----------------------------------------------------------------------------
%if modeflag.get('Psychometrics'):
......
...@@ -394,6 +394,11 @@ if settings.MITX_FEATURES.get('ENABLE_SERVICE_STATUS'):
url(r'^status/', include('service_status.urls')),
)
if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
urlpatterns += (
url(r'^instructor_task_status/$', 'instructor_task.views.instructor_task_status', name='instructor_task_status'),
)
# FoldIt views
urlpatterns += (
# The path is hardcoded into their app...
......
...@@ -3,11 +3,11 @@
# Third-party:
-e git://github.com/edx/django-staticfiles.git@6d2504e5c8#egg=django-staticfiles
-e git://github.com/edx/django-pipeline.git#egg=django-pipeline
-e git://github.com/edx/django-wiki.git@ac906abe#egg=django-wiki
-e git://github.com/dementrock/pystache_custom.git@776973740bdaad83a3b029f96e415a7d1e8bec2f#egg=pystache_custom-dev
-e git://github.com/eventbrite/zendesk.git@d53fe0e81b623f084e91776bcf6369f8b7b63879#egg=zendesk
# Our libraries:
-e git+https://github.com/edx/XBlock.git@4d8735e883#egg=XBlock
-e git+https://github.com/edx/codejail.git@0a1b468#egg=codejail
-e git+https://github.com/edx/diff-cover.git@v0.1.2#egg=diff_cover