Commit 531eb80e by Calen Pennington

Merge remote-tracking branch 'origin/master' into feature/alex/poll-merged

Conflicts:
	cms/djangoapps/contentstore/tests/test_contentstore.py
	cms/djangoapps/contentstore/views.py
	common/lib/xmodule/xmodule/combined_open_ended_module.py
	common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
	common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
	common/lib/xmodule/xmodule/open_ended_grading_classes/openendedchild.py
	common/lib/xmodule/xmodule/peer_grading_module.py
	common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
	common/lib/xmodule/xmodule/tests/test_self_assessment.py
	lms/djangoapps/open_ended_grading/tests.py
parents 5379a9fd 0b2226b0
@@ -12,7 +12,7 @@ profile=no
 # Add files or directories to the blacklist. They should be base names, not
 # paths.
-ignore=CVS
+ignore=CVS, migrations

 # Pickle collected data for later comparisons.
 persistent=yes
@@ -33,7 +33,11 @@ load-plugins=
 # can either give multiple identifier separated by comma (,) or put this option
 # multiple time (only on the command line, not in the configuration file where
 # it should appear only once).
-disable=E1102,W0142
+disable=
+# W0141: Used builtin function 'map'
+# W0142: Used * or ** magic
+# R0903: Too few public methods (1/2)
+    W0141,W0142,R0903

 [REPORTS]
@@ -43,7 +47,7 @@ disable=E1102,W0142
 output-format=text

 # Include message's id in output
-include-ids=no
+include-ids=yes

 # Put messages in a separate file for each module / package specified on the
 # command line instead of printing them on stdout. Reports (if any) will be
@@ -97,7 +101,7 @@ bad-functions=map,filter,apply,input
 module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

 # Regular expression which should only match correct module level names
-const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__)|log|urlpatterns)$

 # Regular expression which should only match correct class names
 class-rgx=[A-Z_][a-zA-Z0-9]+$
@@ -106,7 +110,7 @@ class-rgx=[A-Z_][a-zA-Z0-9]+$
 function-rgx=[a-z_][a-z0-9_]{2,30}$

 # Regular expression which should only match correct method names
-method-rgx=[a-z_][a-z0-9_]{2,30}$
+method-rgx=([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$

 # Regular expression which should only match correct instance attribute names
 attr-rgx=[a-z_][a-z0-9_]{2,30}$
......
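Aside on the `method-rgx` change above: the widened pattern keeps snake_case enforcement (now up to 60 characters) but whitelists the camelCase unittest hooks. A minimal check of the new regex with Python's `re` module (illustrative, not part of the commit):

import re

METHOD_RGX = r'([a-z_][a-z0-9_]{2,60}|setUp|set[Uu]pClass|tearDown|tear[Dd]ownClass|assert[A-Z]\w*)$'

# snake_case names and the unittest hooks are accepted...
for name in ['get_cohort', 'setUp', 'setUpClass', 'tearDownClass', 'assertGreater']:
    assert re.match(METHOD_RGX, name)

# ...while ordinary camelCase method names still get flagged.
assert re.match(METHOD_RGX, 'getCohort') is None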
@@ -114,7 +114,7 @@ class ContentStoreToyCourseTest(ModuleStoreTestCase):
         self.assertTrue(sequential.location.url() in chapter.children)

         self.client.post(reverse('delete_item'),
-                         json.dumps({'id': sequential.location.url(), 'delete_children': 'true'}),
+                         json.dumps({'id': sequential.location.url(), 'delete_children': 'true', 'delete_all_versions': 'true'}),
                          "application/json")

         found = False
......
@@ -643,17 +643,17 @@ def delete_item(request):
         modulestore('direct').delete_item(item.location)

     # cdodge: we need to remove our parent's pointer to us so that it is no longer dangling
-    parent_locs = modulestore('direct').get_parent_locations(item_loc, None)
-    for parent_loc in parent_locs:
-        parent = modulestore('direct').get_item(parent_loc)
-        item_url = item_loc.url()
-        if item_url in parent.children:
-            children = parent.children
-            children.remove(item_url)
-            parent.children = children
-            modulestore('direct').update_children(parent.location, parent.children)
+    if delete_all_versions:
+        parent_locs = modulestore('direct').get_parent_locations(item_loc, None)
+        for parent_loc in parent_locs:
+            parent = modulestore('direct').get_item(parent_loc)
+            item_url = item_loc.url()
+            if item_url in parent.children:
+                children = parent.children
+                children.remove(item_url)
+                parent.children = children
+                modulestore('direct').update_children(parent.location, parent.children)

     return HttpResponse()
......
@@ -65,23 +65,23 @@ def is_commentable_cohorted(course_id, commentable_id):
                                                                ans))
     return ans


 def get_cohorted_commentables(course_id):
     """
     Given a course_id return a list of strings representing cohorted commentables
     """
     course = courses.get_course_by_id(course_id)

     if not course.is_cohorted:
         # this is the easy case :)
         ans = []
     else:
         ans = course.cohorted_discussions

     return ans


 def get_cohort(user, course_id):
     """
     Given a django User and a course_id, return the user's cohort in that
@@ -120,7 +120,8 @@ def get_cohort(user, course_id):
         return None

     choices = course.auto_cohort_groups
-    if len(choices) == 0:
+    n = len(choices)
+    if n == 0:
         # Nowhere to put user
         log.warning("Course %s is auto-cohorted, but there are no"
                     " auto_cohort_groups specified",
@@ -128,12 +129,19 @@ def get_cohort(user, course_id):
         return None

     # Put user in a random group, creating it if needed
-    group_name = random.choice(choices)
+    choice = random.randrange(0, n)
+    group_name = choices[choice]
+
+    # Victor: we are seeing very strange behavior on prod, where almost all users
+    # end up in the same group.  Log at INFO to try to figure out what's going on.
+    log.info("DEBUG: adding user {0} to cohort {1}. choice={2}".format(
+        user, group_name, choice))
+
     group, created = CourseUserGroup.objects.get_or_create(
         course_id=course_id,
         group_type=CourseUserGroup.COHORT,
         name=group_name)

     user.course_groups.add(group)
     return group
......
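Aside on the `get_cohort` change above: `choices[random.randrange(0, n)]` draws from the same uniform distribution as the old `random.choice(choices)`; the rewrite only makes the drawn index observable so it can be logged while the prod clustering is investigated. A quick sanity check of the distribution (illustrative, not part of the commit):

import random
from collections import Counter

choices = ["group_{0}".format(i) for i in range(5)]
n = len(choices)

counts = Counter()
for _ in xrange(10000):
    counts[choices[random.randrange(0, n)]] += 1

# With a healthy RNG each of the 5 groups should land near 2000 draws.
print counts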
@@ -6,7 +6,7 @@ from django.test.utils import override_settings
 from course_groups.models import CourseUserGroup
 from course_groups.cohorts import (get_cohort, get_course_cohorts,
-                                   is_commentable_cohorted)
+                                   is_commentable_cohorted, get_cohort_by_name)

 from xmodule.modulestore.django import modulestore, _MODULESTORES
@@ -180,6 +180,37 @@ class TestCohorts(django.test.TestCase):
                          "user2 should still be in originally placed cohort")

+    def test_auto_cohorting_randomization(self):
+        """
+        Make sure get_cohort() randomizes properly.
+        """
+        course = modulestore().get_course("edX/toy/2012_Fall")
+        self.assertEqual(course.id, "edX/toy/2012_Fall")
+        self.assertFalse(course.is_cohorted)
+
+        groups = ["group_{0}".format(n) for n in range(5)]
+        self.config_course_cohorts(course, [], cohorted=True,
+                                   auto_cohort=True,
+                                   auto_cohort_groups=groups)
+
+        # Assign 100 users to cohorts
+        for i in range(100):
+            user = User.objects.create(username="test_{0}".format(i),
+                                       email="a@b{0}.com".format(i))
+            get_cohort(user, course.id)
+
+        # Now make sure that the assignment was at least vaguely random:
+        # each cohort should have more than 1, and fewer than 50, students.
+        # (with 5 groups, the probability of 0 users landing in any given
+        # group is about 0.8**100 = 2.0e-10)
+        for cohort_name in groups:
+            cohort = get_cohort_by_name(course.id, cohort_name)
+            num_users = cohort.users.count()
+            self.assertGreater(num_users, 1)
+            self.assertLess(num_users, 50)
+
     def test_get_course_cohorts(self):
         course1_id = 'a/b/c'
         course2_id = 'e/f/g'
......
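Aside on the probability cited in the new test: each of the 100 users misses a given group with probability 4/5, so the chance a group stays empty is 0.8**100, and even the chance it receives at most one user (what `assertGreater(num_users, 1)` actually guards against) stays negligible. Checking the arithmetic (illustrative, not part of the commit):

p_empty = 0.8 ** 100
print p_empty            # ~2.04e-10

# Probability a given group receives at most one of the 100 users:
p_at_most_one = p_empty + 100 * 0.2 * (0.8 ** 99)
print p_at_most_one      # ~5.3e-09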
@@ -89,7 +89,6 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
     def __init__(self, system, location, descriptor, model_data):
         XModule.__init__(self, system, location, descriptor, model_data)
-
         """
         Definition file should have one or many task blocks, a rubric block, and a prompt block:
@@ -152,13 +151,13 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
         attributes = self.student_attributes + self.settings_attributes

         static_data = {
-            'rewrite_content_links' : self.rewrite_content_links,
+            'rewrite_content_links': self.rewrite_content_links,
         }

-        instance_state = { k: getattr(self,k) for k in attributes}
+        instance_state = {k: getattr(self, k) for k in attributes}
         self.child_descriptor = descriptors[version_index](self.system)
         self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(self.data), self.system)
         self.child_module = modules[version_index](self.system, location, self.child_definition, self.child_descriptor,
-                                                   instance_state = instance_state, static_data= static_data, attributes=attributes)
+                                                   instance_state=instance_state, static_data=static_data, attributes=attributes)
         self.save_instance_data()

     def get_html(self):
@@ -190,9 +189,9 @@ class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
     def save_instance_data(self):
         for attribute in self.student_attributes:
-            child_attr = getattr(self.child_module,attribute)
+            child_attr = getattr(self.child_module, attribute)
             if child_attr != getattr(self, attribute):
-                setattr(self,attribute, getattr(self.child_module,attribute))
+                setattr(self, attribute, getattr(self.child_module, attribute))

 class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
......
@@ -89,7 +89,7 @@ class FolditModule(FolditFields, XModule):
         from foldit.models import Score

         leaders = [(e['username'], e['score']) for e in Score.get_tops_n(10)]
-        leaders.sort(key=lambda x: x[1])
+        leaders.sort(key=lambda x: -x[1])

         return leaders
......
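Aside on the foldit fix above: negating the sort key orders the leaderboard by score descending, highest first (illustrative, not part of the commit):

leaders = [('alice', 10), ('bob', 30), ('carol', 20)]
leaders.sort(key=lambda x: -x[1])
print leaders            # [('bob', 30), ('carol', 20), ('alice', 10)]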
@@ -38,14 +38,15 @@ ACCEPT_FILE_UPLOAD = False
 TRUE_DICT = ["True", True, "TRUE", "true"]

 HUMAN_TASK_TYPE = {
-    'selfassessment' : "Self Assessment",
-    'openended' : "edX Assessment",
+    'selfassessment': "Self Assessment",
+    'openended': "edX Assessment",
 }

 #Default value that controls whether or not to skip basic spelling checks in the controller
 #Metadata overrides this
 SKIP_BASIC_CHECKS = False

+
 class CombinedOpenEndedV1Module():
     """
     This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
@@ -81,7 +82,7 @@ class CombinedOpenEndedV1Module():
     TEMPLATE_DIR = "combinedopenended"

     def __init__(self, system, location, definition, descriptor,
-                 instance_state=None, shared_state=None, metadata = None, static_data = None, **kwargs):
+                 instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
         """
         Definition file should have one or many task blocks, a rubric block, and a prompt block:
@@ -120,7 +121,7 @@ class CombinedOpenEndedV1Module():
         self.instance_state = instance_state
         self.display_name = instance_state.get('display_name', "Open Ended")
-        self.rewrite_content_links = static_data.get('rewrite_content_links',"")
+        self.rewrite_content_links = static_data.get('rewrite_content_links', "")

         #We need to set the location here so the child modules can use it
         system.set('location', location)
@@ -168,10 +169,10 @@ class CombinedOpenEndedV1Module():
             'rubric': definition['rubric'],
             'display_name': self.display_name,
             'accept_file_upload': self.accept_file_upload,
-            'close_date' : self.timeinfo.close_date,
-            's3_interface' : self.system.s3_interface,
-            'skip_basic_checks' : self.skip_basic_checks,
+            'close_date': self.timeinfo.close_date,
+            's3_interface': self.system.s3_interface,
+            'skip_basic_checks': self.skip_basic_checks,
         }

         self.task_xml = definition['task_xml']
         self.location = location
@@ -214,15 +215,15 @@ class CombinedOpenEndedV1Module():
         child_modules = {
             'openended': open_ended_module.OpenEndedModule,
             'selfassessment': self_assessment_module.SelfAssessmentModule,
         }

         child_descriptors = {
             'openended': open_ended_module.OpenEndedDescriptor,
             'selfassessment': self_assessment_module.SelfAssessmentDescriptor,
         }

         children = {
             'modules': child_modules,
             'descriptors': child_descriptors,
         }

         return children

     def setup_next_task(self, reset=False):
@@ -258,7 +259,8 @@ class CombinedOpenEndedV1Module():
         self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
         if current_task_state is None and self.current_task_number == 0:
             self.current_task = child_task_module(self.system, self.location,
-                                                  self.current_task_parsed_xml, self.current_task_descriptor, self.static_data)
+                                                  self.current_task_parsed_xml, self.current_task_descriptor,
+                                                  self.static_data)
             self.task_states.append(self.current_task.get_instance_state())
             self.state = self.ASSESSING
         elif current_task_state is None and self.current_task_number > 0:
@@ -271,18 +273,20 @@ class CombinedOpenEndedV1Module():
                 'child_attempts': 0,
                 'child_created': True,
                 'child_history': [{'answer': last_response}],
             })
             self.current_task = child_task_module(self.system, self.location,
-                                                  self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
-                                                  instance_state=current_task_state)
+                                                  self.current_task_parsed_xml, self.current_task_descriptor,
+                                                  self.static_data,
+                                                  instance_state=current_task_state)
             self.task_states.append(self.current_task.get_instance_state())
             self.state = self.ASSESSING
         else:
             if self.current_task_number > 0 and not reset:
                 current_task_state = self.overwrite_state(current_task_state)
             self.current_task = child_task_module(self.system, self.location,
-                                                  self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
-                                                  instance_state=current_task_state)
+                                                  self.current_task_parsed_xml, self.current_task_descriptor,
+                                                  self.static_data,
+                                                  instance_state=current_task_state)

         return True
@@ -298,8 +302,8 @@ class CombinedOpenEndedV1Module():
         last_response_data = self.get_last_response(self.current_task_number - 1)
         current_response_data = self.get_current_attributes(self.current_task_number)

-        if(current_response_data['min_score_to_attempt'] > last_response_data['score']
-           or current_response_data['max_score_to_attempt'] < last_response_data['score']):
+        if (current_response_data['min_score_to_attempt'] > last_response_data['score']
+                or current_response_data['max_score_to_attempt'] < last_response_data['score']):
             self.state = self.DONE
             self.ready_to_reset = True
@@ -325,8 +329,8 @@ class CombinedOpenEndedV1Module():
             'display_name': self.display_name,
             'accept_file_upload': self.accept_file_upload,
             'location': self.location,
-            'legend_list' : LEGEND_LIST,
+            'legend_list': LEGEND_LIST,
         }

         return context
@@ -395,7 +399,7 @@ class CombinedOpenEndedV1Module():
         task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system)
         task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor,
                                               self.static_data, instance_state=task_state)
         last_response = task.latest_answer()
         last_score = task.latest_score()
         last_post_assessment = task.latest_post_assessment(self.system)
@@ -417,10 +421,10 @@ class CombinedOpenEndedV1Module():
             rubric_scores = rubric_data['rubric_scores']
             grader_types = rubric_data['grader_types']
             feedback_items = rubric_data['feedback_items']
             feedback_dicts = rubric_data['feedback_dicts']
             grader_ids = rubric_data['grader_ids']
             submission_ids = rubric_data['submission_ids']
-        elif task_type== "selfassessment":
+        elif task_type == "selfassessment":
             rubric_scores = last_post_assessment
             grader_types = ['SA']
             feedback_items = ['']
@@ -437,7 +441,7 @@ class CombinedOpenEndedV1Module():
             human_state = task.HUMAN_NAMES[state]
         else:
             human_state = state
-        if len(grader_types)>0:
+        if len(grader_types) > 0:
             grader_type = grader_types[0]
         else:
             grader_type = "IN"
@@ -459,15 +463,15 @@ class CombinedOpenEndedV1Module():
             'correct': last_correctness,
             'min_score_to_attempt': min_score_to_attempt,
             'max_score_to_attempt': max_score_to_attempt,
-            'rubric_scores' : rubric_scores,
-            'grader_types' : grader_types,
-            'feedback_items' : feedback_items,
-            'grader_type' : grader_type,
-            'human_grader_type' : human_grader_name,
-            'feedback_dicts' : feedback_dicts,
-            'grader_ids' : grader_ids,
-            'submission_ids' : submission_ids,
+            'rubric_scores': rubric_scores,
+            'grader_types': grader_types,
+            'feedback_items': feedback_items,
+            'grader_type': grader_type,
+            'human_grader_type': human_grader_name,
+            'feedback_dicts': feedback_dicts,
+            'grader_ids': grader_ids,
+            'submission_ids': submission_ids,
         }
         return last_response_dict

     def update_task_states(self):
@@ -510,20 +514,27 @@ class CombinedOpenEndedV1Module():
         Output: Dictionary to be rendered via ajax that contains the result html.
         """
         all_responses = []
-        loop_up_to_task = self.current_task_number+1
-        for i in xrange(0,loop_up_to_task):
+        loop_up_to_task = self.current_task_number + 1
+        for i in xrange(0, loop_up_to_task):
             all_responses.append(self.get_last_response(i))
-        rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['rubric_scores'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
-        grader_types = [all_responses[i]['grader_types'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['grader_types'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
-        feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['feedback_items'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
-        rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), rubric_scores,
-                                                                  grader_types, feedback_items)
+        rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0, len(all_responses)) if
+                         len(all_responses[i]['rubric_scores']) > 0 and all_responses[i]['grader_types'][
+                             0] in HUMAN_GRADER_TYPE.keys()]
+        grader_types = [all_responses[i]['grader_types'] for i in xrange(0, len(all_responses)) if
+                        len(all_responses[i]['grader_types']) > 0 and all_responses[i]['grader_types'][
+                            0] in HUMAN_GRADER_TYPE.keys()]
+        feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0, len(all_responses)) if
+                          len(all_responses[i]['feedback_items']) > 0 and all_responses[i]['grader_types'][
+                              0] in HUMAN_GRADER_TYPE.keys()]
+        rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']),
+                                                                  rubric_scores,
+                                                                  grader_types, feedback_items)
         response_dict = all_responses[-1]
         context = {
             'results': rubric_html,
-            'task_name' : 'Scored Rubric',
-            'class_name' : 'combined-rubric-container'
+            'task_name': 'Scored Rubric',
+            'class_name': 'combined-rubric-container'
         }
         html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
         return {'html': html, 'success': True}
@@ -535,8 +546,8 @@ class CombinedOpenEndedV1Module():
         Output: Dictionary to be rendered via ajax that contains the result html.
         """
         context = {
-            'legend_list' : LEGEND_LIST,
+            'legend_list': LEGEND_LIST,
         }
         html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context)
         return {'html': html, 'success': True}
@@ -547,15 +558,16 @@ class CombinedOpenEndedV1Module():
         Output: Dictionary to be rendered via ajax that contains the result html.
         """
         self.update_task_states()
-        loop_up_to_task = self.current_task_number+1
-        all_responses =[]
-        for i in xrange(0,loop_up_to_task):
+        loop_up_to_task = self.current_task_number + 1
+        all_responses = []
+        for i in xrange(0, loop_up_to_task):
             all_responses.append(self.get_last_response(i))
         context_list = []
         for ri in all_responses:
-            for i in xrange(0,len(ri['rubric_scores'])):
-                feedback = ri['feedback_dicts'][i].get('feedback','')
-                rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), ri['rubric_scores'][i])
+            for i in xrange(0, len(ri['rubric_scores'])):
+                feedback = ri['feedback_dicts'][i].get('feedback', '')
+                rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']),
+                                                                 ri['rubric_scores'][i])
                 if rubric_data['success']:
                     rubric_html = rubric_data['html']
                 else:
@@ -563,23 +575,23 @@ class CombinedOpenEndedV1Module():
                 context = {
                     'rubric_html': rubric_html,
                     'grader_type': ri['grader_type'],
-                    'feedback' : feedback,
-                    'grader_id' : ri['grader_ids'][i],
-                    'submission_id' : ri['submission_ids'][i],
+                    'feedback': feedback,
+                    'grader_id': ri['grader_ids'][i],
+                    'submission_id': ri['submission_ids'][i],
                 }
                 context_list.append(context)
         feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), {
-            'context_list' : context_list,
-            'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
-            'human_grader_types' : HUMAN_GRADER_TYPE,
+            'context_list': context_list,
+            'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+            'human_grader_types': HUMAN_GRADER_TYPE,
             'rows': 50,
             'cols': 50,
         })
         context = {
             'results': feedback_table,
-            'task_name' : "Feedback",
-            'class_name' : "result-container",
+            'task_name': "Feedback",
+            'class_name': "result-container",
         }
         html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
         return {'html': html, 'success': True}
@@ -608,8 +620,8 @@ class CombinedOpenEndedV1Module():
             'reset': self.reset,
             'get_results': self.get_results,
             'get_combined_rubric': self.get_rubric,
-            'get_status' : self.get_status_ajax,
-            'get_legend' : self.get_legend,
+            'get_status': self.get_status_ajax,
+            'get_legend': self.get_legend,
         }

         if dispatch not in handlers:
@@ -672,7 +684,7 @@ class CombinedOpenEndedV1Module():
             'task_states': self.task_states,
             'student_attempts': self.student_attempts,
             'ready_to_reset': self.ready_to_reset,
         }

         return json.dumps(state)
@@ -690,11 +702,12 @@ class CombinedOpenEndedV1Module():
         context = {
             'status_list': status,
-            'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
-            'legend_list' : LEGEND_LIST,
-            'render_via_ajax' : render_via_ajax,
+            'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+            'legend_list': LEGEND_LIST,
+            'render_via_ajax': render_via_ajax,
         }
-        status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), context)
+        status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR),
+                                                  context)

         return status_html
@@ -727,7 +740,7 @@ class CombinedOpenEndedV1Module():
         score_dict = {
             'score': score,
             'total': max_score,
         }

         return score_dict
@@ -787,7 +800,9 @@ class CombinedOpenEndedV1Descriptor():
         for child in expected_children:
             if len(xml_object.xpath(child)) == 0:
                 #This is a staff_facing_error
-                raise ValueError("Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+                raise ValueError(
+                    "Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(
+                        child))

         def parse_task(k):
             """Assumes that xml_object has child k"""
......
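Aside on the `handle_ajax` hunk above: the module routes AJAX calls through a dict of bound methods keyed by dispatch name. A condensed, self-contained sketch of that dispatch pattern (hypothetical class and handler names, not part of the commit):

class AjaxDispatchExample(object):
    def handle_ajax(self, dispatch, data):
        # Map dispatch strings to bound handler methods, then look up by name.
        handlers = {
            'get_legend': self.get_legend,
        }
        if dispatch not in handlers:
            return {'error': 'Unknown dispatch: {0}'.format(dispatch)}
        return handlers[dispatch](data)

    def get_legend(self, data):
        return {'html': '<div>legend</div>', 'success': True}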
@@ -4,24 +4,26 @@ from lxml import etree
 log = logging.getLogger(__name__)

 GRADER_TYPE_IMAGE_DICT = {
-    'SA' : '/static/images/self_assessment_icon.png',
-    'PE' : '/static/images/peer_grading_icon.png',
-    'ML' : '/static/images/ml_grading_icon.png',
-    'IN' : '/static/images/peer_grading_icon.png',
-    'BC' : '/static/images/ml_grading_icon.png',
+    'SA': '/static/images/self_assessment_icon.png',
+    'PE': '/static/images/peer_grading_icon.png',
+    'ML': '/static/images/ml_grading_icon.png',
+    'IN': '/static/images/peer_grading_icon.png',
+    'BC': '/static/images/ml_grading_icon.png',
 }

 HUMAN_GRADER_TYPE = {
-    'SA' : 'Self-Assessment',
-    'PE' : 'Peer-Assessment',
-    'IN' : 'Instructor-Assessment',
-    'ML' : 'AI-Assessment',
-    'BC' : 'AI-Assessment',
+    'SA': 'Self-Assessment',
+    'PE': 'Peer-Assessment',
+    'IN': 'Instructor-Assessment',
+    'ML': 'AI-Assessment',
+    'BC': 'AI-Assessment',
 }

 DO_NOT_DISPLAY = ['BC', 'IN']

-LEGEND_LIST = [{'name' : HUMAN_GRADER_TYPE[k], 'image' : GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() if k not in DO_NOT_DISPLAY ]
+LEGEND_LIST = [{'name': HUMAN_GRADER_TYPE[k], 'image': GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys()
+               if k not in DO_NOT_DISPLAY]

 class RubricParsingError(Exception):
     def __init__(self, msg):
@@ -29,15 +31,14 @@ class RubricParsingError(Exception):

 class CombinedOpenEndedRubric(object):
     TEMPLATE_DIR = "combinedopenended/openended"

-    def __init__ (self, system, view_only = False):
+    def __init__(self, system, view_only=False):
         self.has_score = False
         self.view_only = view_only
         self.system = system

-    def render_rubric(self, rubric_xml, score_list = None):
+    def render_rubric(self, rubric_xml, score_list=None):
         '''
         render_rubric: takes in an xml string and outputs the corresponding
         html for that xml, given the type of rubric we're generating
@@ -50,11 +51,11 @@ class CombinedOpenEndedRubric(object):
         success = False
         try:
             rubric_categories = self.extract_categories(rubric_xml)
-            if score_list and len(score_list)==len(rubric_categories):
-                for i in xrange(0,len(rubric_categories)):
+            if score_list and len(score_list) == len(rubric_categories):
+                for i in xrange(0, len(rubric_categories)):
                     category = rubric_categories[i]
-                    for j in xrange(0,len(category['options'])):
-                        if score_list[i]==j:
+                    for j in xrange(0, len(category['options'])):
+                        if score_list[i] == j:
                             rubric_categories[i]['options'][j]['selected'] = True
             rubric_scores = [cat['score'] for cat in rubric_categories]
             max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
@@ -63,19 +64,20 @@ class CombinedOpenEndedRubric(object):
             if self.view_only:
                 rubric_template = '{0}/open_ended_view_only_rubric.html'.format(self.TEMPLATE_DIR)
             html = self.system.render_template(rubric_template,
                                                {'categories': rubric_categories,
                                                 'has_score': self.has_score,
                                                 'view_only': self.view_only,
                                                 'max_score': max_score,
-                                                'combined_rubric' : False
+                                                'combined_rubric': False
                                                })
             success = True
         except:
             #This is a staff_facing_error
-            error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(rubric_xml)
+            error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(
+                rubric_xml)
             log.exception(error_message)
             raise RubricParsingError(error_message)
-        return {'success' : success, 'html' : html, 'rubric_scores' : rubric_scores}
+        return {'success': success, 'html': html, 'rubric_scores': rubric_scores}

     def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score):
         rubric_dict = self.render_rubric(rubric_string)
@@ -83,7 +85,8 @@ class CombinedOpenEndedRubric(object):
         rubric_feedback = rubric_dict['html']
         if not success:
             #This is a staff_facing_error
-            error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(rubric_string, location.url())
+            error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(
+                rubric_string, location.url())
             log.error(error_message)
             raise RubricParsingError(error_message)
@@ -101,7 +104,7 @@ class CombinedOpenEndedRubric(object):
         if int(total) != int(max_score):
             #This is a staff_facing_error
             error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}. Contact the learning sciences group for assistance.".format(
                 max_score, location, total)
             log.error(error_msg)
             raise RubricParsingError(error_msg)
@@ -123,12 +126,13 @@ class CombinedOpenEndedRubric(object):
         for category in element:
             if category.tag != 'category':
                 #This is a staff_facing_error
-                raise RubricParsingError("[extract_categories] Expected a <category> tag: got {0} instead. Contact the learning sciences group for assistance.".format(category.tag))
+                raise RubricParsingError(
+                    "[extract_categories] Expected a <category> tag: got {0} instead. Contact the learning sciences group for assistance.".format(
+                        category.tag))
             else:
                 categories.append(self.extract_category(category))
         return categories

     def extract_category(self, category):
         '''
         construct an individual category
@@ -150,13 +154,17 @@ class CombinedOpenEndedRubric(object):
         # if we are missing the score tag and we are expecting one
         elif self.has_score:
             #This is a staff_facing_error
-            raise RubricParsingError("[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(descriptionxml.text))
+            raise RubricParsingError(
+                "[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(
+                    descriptionxml.text))

         # parse description
         if descriptionxml.tag != 'description':
             #This is a staff_facing_error
-            raise RubricParsingError("[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(descriptionxml.tag))
+            raise RubricParsingError(
+                "[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(
+                    descriptionxml.tag))

         description = descriptionxml.text
@@ -167,7 +175,9 @@ class CombinedOpenEndedRubric(object):
         for option in optionsxml:
             if option.tag != 'option':
                 #This is a staff_facing_error
-                raise RubricParsingError("[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(option.tag))
+                raise RubricParsingError(
+                    "[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(
+                        option.tag))
             else:
                 pointstr = option.get("points")
                 if pointstr:
@@ -177,13 +187,16 @@ class CombinedOpenEndedRubric(object):
                         points = int(pointstr)
                     except ValueError:
                         #This is a staff_facing_error
-                        raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(pointstr))
+                        raise RubricParsingError(
+                            "[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(
+                                pointstr))
                 elif autonumbering:
                     # use the generated one if we're in the right mode
                     points = cur_points
                     cur_points = cur_points + 1
                 else:
-                    raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
+                    raise Exception(
+                        "[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")

                 selected = score == points
                 optiontext = option.text
@@ -193,34 +206,34 @@ class CombinedOpenEndedRubric(object):
         options = sorted(options, key=lambda option: option['points'])
         CombinedOpenEndedRubric.validate_options(options)

-        return {'description': description, 'options': options, 'score' : score}
+        return {'description': description, 'options': options, 'score': score}

-    def render_combined_rubric(self,rubric_xml,scores,score_types,feedback_types):
-        success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores,score_types,feedback_types)
+    def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types):
+        success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types,
+                                                                                      feedback_types)
         rubric_categories = self.extract_categories(rubric_xml)
         max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
         max_score = max(max_scores)
-        for i in xrange(0,len(rubric_categories)):
+        for i in xrange(0, len(rubric_categories)):
             category = rubric_categories[i]
-            for j in xrange(0,len(category['options'])):
+            for j in xrange(0, len(category['options'])):
                 rubric_categories[i]['options'][j]['grader_types'] = []
                 for tuple in score_tuples:
-                    if tuple[1] == i and tuple[2] ==j:
+                    if tuple[1] == i and tuple[2] == j:
                         for grader_type in tuple[3]:
                             rubric_categories[i]['options'][j]['grader_types'].append(grader_type)

         html = self.system.render_template('{0}/open_ended_combined_rubric.html'.format(self.TEMPLATE_DIR),
                                            {'categories': rubric_categories,
                                             'has_score': True,
                                             'view_only': True,
                                             'max_score': max_score,
-                                            'combined_rubric' : True,
-                                            'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
-                                            'human_grader_types' : HUMAN_GRADER_TYPE,
+                                            'combined_rubric': True,
+                                            'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+                                            'human_grader_types': HUMAN_GRADER_TYPE,
                                            })
         return html

     @staticmethod
     def validate_options(options):
         '''
@@ -228,14 +241,16 @@ class CombinedOpenEndedRubric(object):
         '''
         if len(options) == 0:
             #This is a staff_facing_error
-            raise RubricParsingError("[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
+            raise RubricParsingError(
+                "[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
         if len(options) == 1:
             return
         prev = options[0]['points']
         for option in options[1:]:
             if prev == option['points']:
                 #This is a staff_facing_error
-                raise RubricParsingError("[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
+                raise RubricParsingError(
+                    "[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
             else:
                 prev = option['points']
@@ -250,7 +265,7 @@ class CombinedOpenEndedRubric(object):
         @return:
         """
         success = False
-        if len(scores)==0:
+        if len(scores) == 0:
             #This is a dev_facing_error
             log.error("Score length is 0 when trying to reformat rubric scores for rendering.")
             return success, ""
@@ -264,25 +279,25 @@ class CombinedOpenEndedRubric(object):
         score_lists = []
         score_type_list = []
         feedback_type_list = []
-        for i in xrange(0,len(scores)):
+        for i in xrange(0, len(scores)):
             score_cont_list = scores[i]
-            for j in xrange(0,len(score_cont_list)):
+            for j in xrange(0, len(score_cont_list)):
                 score_list = score_cont_list[j]
                 score_lists.append(score_list)
                 score_type_list.append(score_types[i][j])
                 feedback_type_list.append(feedback_types[i][j])

         score_list_len = len(score_lists[0])
-        for i in xrange(0,len(score_lists)):
+        for i in xrange(0, len(score_lists)):
             score_list = score_lists[i]
-            if len(score_list)!=score_list_len:
+            if len(score_list) != score_list_len:
                 return success, ""

         score_tuples = []
-        for i in xrange(0,len(score_lists)):
-            for j in xrange(0,len(score_lists[i])):
-                tuple = [1,j,score_lists[i][j],[],[]]
-                score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples,tuple)
+        for i in xrange(0, len(score_lists)):
+            for j in xrange(0, len(score_lists[i])):
+                tuple = [1, j, score_lists[i][j], [], []]
+                score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples, tuple)
                 score_tuples[tup_ind][0] += 1
                 score_tuples[tup_ind][3].append(score_type_list[i])
                 score_tuples[tup_ind][4].append(feedback_type_list[i])
@@ -302,18 +317,12 @@ class CombinedOpenEndedRubric(object):
         category = tuple[1]
         score = tuple[2]
         tup_ind = -1
-        for t in xrange(0,len(tuples)):
+        for t in xrange(0, len(tuples)):
             if tuples[t][1] == category and tuples[t][2] == score:
                 tup_ind = t
                 break
         if tup_ind == -1:
-            tuples.append([0,category,score,[],[]])
-            tup_ind = len(tuples)-1
+            tuples.append([0, category, score, [], []])
+            tup_ind = len(tuples) - 1
         return tuples, tup_ind
@@ -8,6 +8,7 @@ class ControllerQueryService(GradingService):
     """
     Interface to staff grading backend.
     """
+
     def __init__(self, config, system):
         config['system'] = system
         super(ControllerQueryService, self).__init__(config)
@@ -59,7 +60,7 @@ class ControllerQueryService(GradingService):
     def get_flagged_problem_list(self, course_id):
         params = {
             'course_id': course_id,
         }

         response = self.get(self.flagged_problem_list_url, params)
         return response
@@ -70,20 +71,21 @@ class ControllerQueryService(GradingService):
             'student_id': student_id,
             'submission_id': submission_id,
             'action_type': action_type
         }

         response = self.post(self.take_action_on_flags_url, params)
         return response


 def convert_seconds_to_human_readable(seconds):
     if seconds < 60:
         human_string = "{0} seconds".format(seconds)
     elif seconds < 60 * 60:
-        human_string = "{0} minutes".format(round(seconds/60,1))
-    elif seconds < (24*60*60):
-        human_string = "{0} hours".format(round(seconds/(60*60),1))
+        human_string = "{0} minutes".format(round(seconds / 60, 1))
+    elif seconds < (24 * 60 * 60):
+        human_string = "{0} hours".format(round(seconds / (60 * 60), 1))
     else:
-        human_string = "{0} days".format(round(seconds/(60*60*24),1))
+        human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1))

     eta_string = "{0}".format(human_string)
     return eta_string
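For reference, the reformatted `convert_seconds_to_human_readable` above produces strings like the following (assuming float input; under Python 2 integer division, an int argument above 60 would truncate the fractional part):

print convert_seconds_to_human_readable(45)        # "45 seconds"
print convert_seconds_to_human_readable(90.0)      # "1.5 minutes"
print convert_seconds_to_human_readable(5400.0)    # "1.5 hours"
print convert_seconds_to_human_readable(129600.0)  # "1.5 days"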
@@ -19,6 +19,7 @@ class GradingService(object):
     """
     Interface to staff grading backend.
     """
+
     def __init__(self, config):
         self.username = config['username']
         self.password = config['password']
@@ -34,8 +35,8 @@ class GradingService(object):
         Returns the decoded json dict of the response.
         """
         response = self.session.post(self.login_url,
                                      {'username': self.username,
                                       'password': self.password, })

         response.raise_for_status()
@@ -47,7 +48,7 @@ class GradingService(object):
         """
         try:
             op = lambda: self.session.post(url, data=data,
                                            allow_redirects=allow_redirects)
             r = self._try_with_login(op)
         except (RequestException, ConnectionError, HTTPError) as err:
             # reraise as promised GradingServiceError, but preserve stacktrace.
@@ -63,8 +64,8 @@ class GradingService(object):
         """
         log.debug(params)
         op = lambda: self.session.get(url,
                                       allow_redirects=allow_redirects,
                                       params=params)
         try:
             r = self._try_with_login(op)
         except (RequestException, ConnectionError, HTTPError) as err:
@@ -92,7 +93,7 @@ class GradingService(object):
             r = self._login()
             if r and not r.get('success'):
                 log.warning("Couldn't log into staff_grading backend. Response: %s",
                             r)
             # try again
             response = operation()
             response.raise_for_status()
......
...@@ -5,6 +5,7 @@ to send them to S3. ...@@ -5,6 +5,7 @@ to send them to S3.
try: try:
from PIL import Image from PIL import Image
ENABLE_PIL = True ENABLE_PIL = True
except: except:
ENABLE_PIL = False ENABLE_PIL = False
...@@ -51,6 +52,7 @@ class ImageProperties(object): ...@@ -51,6 +52,7 @@ class ImageProperties(object):
""" """
Class to check properties of an image and to validate if they are allowed. Class to check properties of an image and to validate if they are allowed.
""" """
def __init__(self, image_data): def __init__(self, image_data):
""" """
Initializes class variables Initializes class variables
...@@ -92,7 +94,7 @@ class ImageProperties(object): ...@@ -92,7 +94,7 @@ class ImageProperties(object):
g = rgb[1] g = rgb[1]
b = rgb[2] b = rgb[2]
check_r = (r > 60) check_r = (r > 60)
check_g = (r * 0.4) < g < (r * 0.85) check_g = (r * 0.4) < g < (r * 0.85)
check_b = (r * 0.2) < b < (r * 0.7) check_b = (r * 0.2) < b < (r * 0.7)
colors_okay = check_r and check_b and check_g colors_okay = check_r and check_b and check_g
except: except:
...@@ -141,6 +143,7 @@ class URLProperties(object): ...@@ -141,6 +143,7 @@ class URLProperties(object):
Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable
links to the peer grading image functionality of the external grading service. links to the peer grading image functionality of the external grading service.
""" """
def __init__(self, url_string): def __init__(self, url_string):
self.url_string = url_string self.url_string = url_string
...@@ -212,7 +215,7 @@ def run_image_tests(image): ...@@ -212,7 +215,7 @@ def run_image_tests(image):
success = image_properties.run_tests() success = image_properties.run_tests()
except: except:
log.exception("Cannot run image tests in combined open ended xmodule. May be an issue with a particular image," log.exception("Cannot run image tests in combined open ended xmodule. May be an issue with a particular image,"
"or an issue with the deployment configuration of PIL/Pillow") "or an issue with the deployment configuration of PIL/Pillow")
    return success
...@@ -252,7 +255,8 @@ def upload_to_s3(file_to_upload, keyname, s3_interface):
        return True, public_url
    except:
        #This is a dev_facing_error
        error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(
            bucketname.lower())
        log.error(error_message)
        return False, error_message
...
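The color thresholds above implement a rough per-pixel skin-tone check. The same heuristic as a standalone function (thresholds copied from the hunk; the function name is illustrative only):

    def pixel_color_is_acceptable(rgb):
        # Red must clear a floor; green and blue must fall in bands scaled from red.
        r, g, b = rgb[0], rgb[1], rgb[2]
        check_r = (r > 60)
        check_g = (r * 0.4) < g < (r * 0.85)
        check_b = (r * 0.2) < b < (r * 0.7)
        return check_r and check_g and check_b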
...@@ -10,7 +10,7 @@ import logging
from lxml import etree
import capa.xqueue_interface as xqueue_interface
from xmodule.capa_module import ComplexEncoder
from xmodule.editing_module import EditingDescriptor
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
...@@ -77,7 +77,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        self.send_to_grader(self.latest_answer(), system)
        self.child_created = False
    def _parse(self, oeparam, prompt, rubric, system):
        '''
        Parse OpenEndedResponse XML:
...@@ -104,7 +103,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            # response types)
        except (TypeError, ValueError):
            #This is a dev_facing_error
            log.exception(
                "Grader payload from external open ended grading server is not a json object! Object: {0}".format(
                    grader_payload))
        self.initial_display = find_with_default(oeparam, 'initial_display', '')
        self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
...@@ -148,7 +149,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
            if tag not in survey_responses:
                #This is a student_facing_error
                return {'success': False,
                        'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(
                            tag)}
        try:
            submission_id = int(survey_responses['submission_id'])
            grader_id = int(survey_responses['grader_id'])
...@@ -188,7 +191,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        }
        (error, msg) = qinterface.send_to_queue(header=xheader,
                                                body=json.dumps(contents))
        #Convert error to a success value
        success = True
...@@ -222,8 +225,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
                                                 str(len(self.child_history)))
        xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'],
                                                lms_key=queuekey,
                                                queue_name=self.queue_name)
        contents = self.payload.copy()
...@@ -241,7 +244,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        # Submit request. When successful, 'msg' is the prior length of the queue
        (error, msg) = qinterface.send_to_queue(header=xheader,
                                                body=json.dumps(contents))
        # State associated with the queueing request
        queuestate = {'key': queuekey,
...@@ -266,7 +269,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        return True
    def get_answers(self):
        """
        Gets and shows the answer for this problem.
...@@ -300,7 +302,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        # We want to display available feedback in a particular order.
        # This dictionary specifies which goes first--lower first.
        priorities = {# These go at the start of the feedback
            'spelling': 0,
            'grammar': 1,
            # needs to be after all the other feedback
...@@ -400,7 +402,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        if not response_items['success']:
            return system.render_template("{0}/open_ended_error.html".format(self.TEMPLATE_DIR),
                                          {'errors': feedback})
        feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), {
            'grader_type': response_items['grader_type'],
...@@ -411,7 +413,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        return feedback_template, rubric_scores
    def _parse_score_msg(self, score_msg, system, join_feedback=True):
        """
        Grader reply is a JSON-dump of the following dict
...@@ -437,13 +438,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            'valid': False,
            'score': 0,
            'feedback': '',
            'rubric_scores': [[0]],
            'grader_types': [''],
            'feedback_items': [''],
            'feedback_dicts': [{}],
            'grader_ids': [0],
            'submission_ids': [0],
        }
        try:
            score_result = json.loads(score_msg)
        except (TypeError, ValueError):
...@@ -470,7 +471,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            log.error(error_message)
            fail['feedback'] = error_message
            return fail
        #This is to support peer grading
        if isinstance(score_result['score'], list):
            feedback_items = []
            rubric_scores = []
...@@ -527,12 +528,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            'valid': True,
            'score': score,
            'feedback': feedback,
            'rubric_scores': rubric_scores,
            'grader_types': grader_types,
            'feedback_items': feedback_items,
            'feedback_dicts': feedback_dicts,
            'grader_ids': grader_ids,
            'submission_ids': submission_ids,
        }
    def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
...@@ -545,7 +546,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            return ""
        feedback_dict = self._parse_score_msg(self.child_history[-1].get('post_assessment', ""), system,
                                              join_feedback=join_feedback)
        if not short_feedback:
            return feedback_dict['feedback'] if feedback_dict['valid'] else ''
        if feedback_dict['valid']:
...@@ -585,7 +586,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            #This is a dev_facing_error
            log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
            #This is a dev_facing_error
            return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
        before = self.get_progress()
        d = handlers[dispatch](get, system)
...@@ -679,7 +680,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            correct = ""
        previous_answer = self.initial_display
        context = {
            'prompt': self.child_prompt,
            'previous_answer': previous_answer,
...@@ -692,7 +692,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            'child_type': 'openended',
            'correct': correct,
            'accept_file_upload': self.accept_file_upload,
            'eta_message': eta_string,
        }
        html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
        return html
...@@ -726,7 +726,9 @@ class OpenEndedDescriptor():
        for child in ['openendedparam']:
            if len(xml_object.xpath(child)) != 1:
                #This is a staff_facing_error
                raise ValueError(
                    "Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
                        child))
        def parse(k):
            """Assumes that xml_object has child k"""
...
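Several of the hunks in this file re-wrap one and the same xqueue hand-off. Condensed, using only the signatures visible in the diff (payload contents are placeholders):

    # Build the xqueue header, then submit the payload as JSON.
    xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'],
                                            lms_key=queuekey,
                                            queue_name=self.queue_name)
    contents = self.payload.copy()  # grader settings, student response, etc.
    (error, msg) = qinterface.send_to_queue(header=xheader, body=json.dumps(contents))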
...@@ -100,7 +100,8 @@ class OpenEndedChild(object):
        # completion (doesn't matter if you self-assessed correct/incorrect).
        if system.open_ended_grading_interface:
            self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
            self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,
                                                                                 system)
        else:
            self.peer_gs = MockPeerGradingService()
            self.controller_qs = None
...@@ -142,7 +143,9 @@ class OpenEndedChild(object):
            return True, {
                'success': False,
                #This is a student_facing_error
                'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(
                    self.child_attempts, self.max_attempts
                )
            }
        else:
            return False, {}
...@@ -170,8 +173,8 @@ class OpenEndedChild(object):
        try:
            answer = autolink_html(answer)
            cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=True,
                              host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS,
                              whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
            clean_html = cleaner.clean_html(answer)
            clean_html = re.sub(r'</p>$', '', re.sub(r'^<p>', '', clean_html))
        except:
...@@ -272,7 +275,7 @@ class OpenEndedChild(object):
        """
        #This is a dev_facing_error
        log.warning("Open ended child state out sync. state: %r, get: %r. %s",
                    self.child_state, get, msg)
        #This is a student_facing_error
        return {'success': False,
                'error': 'The problem state got out-of-sync. Please try reloading the page.'}
...@@ -298,7 +301,7 @@ class OpenEndedChild(object):
        @return: Boolean correct.
        """
        correct = False
        if (isinstance(score, (int, long, float, complex))):
            score_ratio = int(score) / float(self.max_score())
            correct = (score_ratio >= 0.66)
        return correct
...@@ -332,7 +335,8 @@ class OpenEndedChild(object):
        try:
            image_data.seek(0)
            success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key,
                                                                              self.s3_interface)
        except:
            log.exception("Could not upload image to S3.")
...@@ -394,9 +398,9 @@ class OpenEndedChild(object):
                #In this case, an image was submitted by the student, but the image could not be uploaded to S3. Likely
                #a config issue (development vs deployment). For now, just treat this as a "success"
log.exception("Student AJAX post to combined open ended xmodule indicated that it contained an image, " log.exception("Student AJAX post to combined open ended xmodule indicated that it contained an image, "
"but the image was not able to be uploaded to S3. This could indicate a config" "but the image was not able to be uploaded to S3. This could indicate a config"
"issue with this deployment, but it could also indicate a problem with S3 or with the" "issue with this deployment, but it could also indicate a problem with S3 or with the"
"student image itself.") "student image itself.")
                overall_success = True
        elif not has_file_to_upload:
            #If there is no file to upload, probably the student has embedded the link in the answer text
...@@ -435,7 +439,7 @@ class OpenEndedChild(object):
        response = {}
        #This is a student_facing_error
        error_string = ("You need to peer grade {0} more in order to make another submission. "
                        "You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
        try:
            response = self.peer_gs.get_data_for_location(self.location_string, student_id)
            count_graded = response['count_graded']
...@@ -444,16 +448,18 @@ class OpenEndedChild(object):
            success = True
        except:
            #This is a dev_facing_error
            log.error("Could not contact external open ended graders for location {0} and student {1}".format(
                self.location_string, student_id))
            #This is a student_facing_error
            error_message = "Could not contact the graders. Please notify course staff."
            return success, allowed_to_submit, error_message
        if count_graded >= count_required:
            return success, allowed_to_submit, ""
        else:
            allowed_to_submit = False
            #This is a student_facing_error
            error_message = error_string.format(count_required - count_graded, count_graded, count_required,
                                                student_sub_count)
            return success, allowed_to_submit, error_message
    def get_eta(self):
...@@ -468,7 +474,7 @@ class OpenEndedChild(object):
        success = response['success']
        if isinstance(success, basestring):
            success = (success.lower() == "true")
        if success:
            eta = controller_query_service.convert_seconds_to_human_readable(response['eta'])
...@@ -477,6 +483,3 @@ class OpenEndedChild(object):
            eta_string = ""
        return eta_string
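The submission gate being reflowed in the hunks above reduces to one comparison. As a standalone sketch (the function is illustrative; the names follow the hunk):

    def allowed_to_make_another_submission(count_graded, count_required):
        # A student may submit again only after peer grading enough essays.
        return count_graded >= count_required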
...@@ -14,6 +14,7 @@ class PeerGradingService(GradingService):
    """
    Interface with the grading controller for peer grading
    """
    def __init__(self, config, system):
        config['system'] = system
        super(PeerGradingService, self).__init__(config)
...@@ -36,10 +37,11 @@ class PeerGradingService(GradingService):
    def get_next_submission(self, problem_location, grader_id):
        response = self.get(self.get_next_submission_url,
                            {'location': problem_location, 'grader_id': grader_id})
        return self.try_to_decode(self._render_rubric(response))
    def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores,
                   submission_flagged):
        data = {'grader_id': grader_id,
                'submission_id': submission_id,
                'score': score,
...@@ -89,6 +91,7 @@ class PeerGradingService(GradingService):
            pass
        return text
"""
This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
...@@ -122,7 +125,7 @@ class MockPeerGradingService(object):
                'max_score': 4})
    def save_calibration_essay(self, problem_location, grader_id,
                               calibration_essay_id, submission_key, score,
                               feedback, rubric_scores):
        return {'success': True, 'actual_score': 2}
...
...@@ -75,7 +75,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
        html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context)
        return html
    def handle_ajax(self, dispatch, get, system):
        """
        This is called by courseware.module_render, to handle an AJAX call.
...@@ -97,7 +96,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
            #This is a dev_facing_error
            log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
            #This is a dev_facing_error
            return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
        before = self.get_progress()
        d = handlers[dispatch](get, system)
...@@ -161,7 +160,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
        return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)
    def save_answer(self, get, system):
        """
        After the answer is submitted, show the rubric.
...@@ -226,7 +224,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
        try:
            score = int(get['assessment'])
            score_list = get.getlist('score_list[]')
            for i in xrange(0, len(score_list)):
                score_list[i] = int(score_list[i])
        except ValueError:
            #This is a dev_facing_error
...@@ -270,7 +268,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
                'allow_reset': self._allow_reset()}
    def latest_post_assessment(self, system):
        latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
        try:
            rubric_scores = json.loads(latest_post_assessment)
        except:
...@@ -310,7 +308,9 @@ class SelfAssessmentDescriptor():
        for child in expected_children:
            if len(xml_object.xpath(child)) != 1:
                #This is a staff_facing_error
                raise ValueError(
                    "Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
                        child))
        def parse(k):
            """Assumes that xml_object has child k"""
...
...@@ -44,7 +44,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
        resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'),
        resource_string(__name__, 'js/src/collapsible.coffee'),
        resource_string(__name__, 'js/src/javascript_loader.coffee'),
    ]}
    js_module_name = "PeerGrading"
    css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
...@@ -55,7 +55,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
        #We need to set the location here so the child modules can use it
        system.set('location', location)
        self.system = system
        if (self.system.open_ended_grading_interface):
            self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system)
        else:
            self.peer_gs = MockPeerGradingService()
...@@ -139,13 +139,13 @@ class PeerGradingModule(PeerGradingFields, XModule):
            'save_grade': self.save_grade,
            'save_calibration_essay': self.save_calibration_essay,
            'problem': self.peer_grading_problem,
        }
        if dispatch not in handlers:
            #This is a dev_facing_error
            log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
            #This is a dev_facing_error
            return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
        d = handlers[dispatch](get)
...@@ -182,9 +182,10 @@ class PeerGradingModule(PeerGradingFields, XModule):
            except:
                success, response = self.query_data_for_location()
                if not success:
                    log.exception(
                        "No instance data found and could not get data from controller for loc {0} student {1}".format(
                            self.system.location.url(), self.system.anonymous_student_id
                        ))
                    return None
                count_graded = response['count_graded']
                count_required = response['count_required']
...@@ -195,7 +196,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
        score_dict = {
            'score': int(count_graded >= count_required),
            'total': self.max_grade,
        }
        return score_dict
...@@ -244,7 +245,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
                          .format(self.peer_gs.url, location, grader_id))
            #This is a student_facing_error
            return {'success': False,
                    'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
    def save_grade(self, get):
        """
...@@ -262,7 +263,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
            error: if there was an error in the submission, this is the error message
        """
        required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]',
                        'submission_flagged'])
        success, message = self._check_required(get, required)
        if not success:
            return self._err_response(message)
...@@ -278,14 +280,14 @@ class PeerGradingModule(PeerGradingFields, XModule):
        try:
            response = self.peer_gs.save_grade(location, grader_id, submission_id,
                                               score, feedback, submission_key, rubric_scores, submission_flagged)
            return response
        except GradingServiceError:
            #This is a dev_facing_error
            log.exception("""Error saving grade to open ended grading service. server url: {0}, location: {1}, submission_id:{2},
                          submission_key: {3}, score: {4}"""
                          .format(self.peer_gs.url,
                                  location, submission_id, submission_key, score)
                          )
            #This is a student_facing_error
            return {
...@@ -373,7 +375,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
                          .format(self.peer_gs.url, location))
            #This is a student_facing_error
            return {'success': False,
                    'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
        # if we can't parse the rubric into HTML,
        except etree.XMLSyntaxError:
            #This is a dev_facing_error
...@@ -381,7 +383,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
                          .format(rubric))
            #This is a student_facing_error
            return {'success': False,
                    'error': 'Error displaying submission. Please notify course staff.'}
    def save_calibration_essay(self, get):
...@@ -417,11 +419,13 @@ class PeerGradingModule(PeerGradingFields, XModule):
        try:
            response = self.peer_gs.save_calibration_essay(location, grader_id, calibration_essay_id,
                                                           submission_key, score, feedback, rubric_scores)
            return response
        except GradingServiceError:
            #This is a dev_facing_error
            log.exception(
                "Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(
                    location, submission_id, submission_key, grader_id))
            #This is a student_facing_error
            return self._err_response('There was an error saving your score. Please notify course staff.')
...@@ -431,7 +435,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
        '''
        html = self.system.render_template('peer_grading/peer_grading_closed.html', {
            'use_for_single_location': self.use_for_single_location
        })
        return html
...@@ -501,7 +505,6 @@ class PeerGradingModule(PeerGradingFields, XModule):
            problem['due'] = None
            problem['closed'] = False
        ajax_url = self.ajax_url
        html = self.system.render_template('peer_grading/peer_grading.html', {
            'course_id': self.system.course_id,
...@@ -512,7 +515,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
            # Checked above
            'staff_access': False,
            'use_single_location': self.use_for_single_location,
        })
        return html
...@@ -524,7 +527,8 @@ class PeerGradingModule(PeerGradingFields, XModule):
        if self.use_for_single_location not in TRUE_DICT:
            #This is an error case, because it must be set to use a single location to be called without get parameters
            #This is a dev_facing_error
            log.error(
                "Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
            return {'html': "", 'success': False}
        problem_location = self.link_to_location
...@@ -540,7 +544,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
            # Checked above
            'staff_access': False,
            'use_single_location': self.use_for_single_location,
        })
        return {'html': html, 'success': True}
...@@ -553,7 +557,7 @@ class PeerGradingModule(PeerGradingFields, XModule):
        state = {
            'student_data_for_location': self.student_data_for_location,
        }
        return json.dumps(state)
...
...@@ -14,6 +14,7 @@ from datetime import datetime
from . import test_system
import test_util_open_ended
"""
Tests for the various pieces of the CombinedOpenEndedGrading system
...@@ -39,41 +40,38 @@ class OpenEndedChildTest(unittest.TestCase):
    max_score = 1
    static_data = {
        'max_attempts': 20,
        'prompt': prompt,
        'rubric': rubric,
        'max_score': max_score,
        'display_name': 'Name',
        'accept_file_upload': False,
        'close_date': None,
        's3_interface': "",
        'open_ended_grading_interface': {},
        'skip_basic_checks': False,
    }
    definition = Mock()
    descriptor = Mock()
    def setUp(self):
        self.test_system = test_system()
        self.openendedchild = OpenEndedChild(self.test_system, self.location,
                                             self.definition, self.descriptor, self.static_data, self.metadata)
    def test_latest_answer_empty(self):
        answer = self.openendedchild.latest_answer()
        self.assertEqual(answer, "")
    def test_latest_score_empty(self):
        answer = self.openendedchild.latest_score()
        self.assertEqual(answer, None)
    def test_latest_post_assessment_empty(self):
        answer = self.openendedchild.latest_post_assessment(self.test_system)
        self.assertEqual(answer, "")
    def test_new_history_entry(self):
        new_answer = "New Answer"
        self.openendedchild.new_history_entry(new_answer)
...@@ -99,7 +97,6 @@ class OpenEndedChildTest(unittest.TestCase):
        score = self.openendedchild.latest_score()
        self.assertEqual(score, 4)
    def test_record_latest_post_assessment(self):
        new_answer = "New Answer"
        self.openendedchild.new_history_entry(new_answer)
...@@ -107,7 +104,7 @@ class OpenEndedChildTest(unittest.TestCase):
        post_assessment = "Post assessment"
        self.openendedchild.record_latest_post_assessment(post_assessment)
        self.assertEqual(post_assessment,
                         self.openendedchild.latest_post_assessment(self.test_system))
    def test_get_score(self):
        new_answer = "New Answer"
...@@ -124,24 +121,22 @@ class OpenEndedChildTest(unittest.TestCase):
        self.assertEqual(score['score'], new_score)
        self.assertEqual(score['total'], self.static_data['max_score'])
    def test_reset(self):
        self.openendedchild.reset(self.test_system)
        state = json.loads(self.openendedchild.get_instance_state())
        self.assertEqual(state['child_state'], OpenEndedChild.INITIAL)
    def test_is_last_response_correct(self):
        new_answer = "New Answer"
        self.openendedchild.new_history_entry(new_answer)
        self.openendedchild.record_latest_score(self.static_data['max_score'])
        self.assertEqual(self.openendedchild.is_last_response_correct(),
                         'correct')
        self.openendedchild.new_history_entry(new_answer)
        self.openendedchild.record_latest_score(0)
        self.assertEqual(self.openendedchild.is_last_response_correct(),
                         'incorrect')
class OpenEndedModuleTest(unittest.TestCase):
...@@ -159,18 +154,18 @@ class OpenEndedModuleTest(unittest.TestCase):
    max_score = 4
    static_data = {
        'max_attempts': 20,
        'prompt': prompt,
        'rubric': rubric,
        'max_score': max_score,
        'display_name': 'Name',
        'accept_file_upload': False,
        'rewrite_content_links': "",
        'close_date': None,
        's3_interface': test_util_open_ended.S3_INTERFACE,
        'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
        'skip_basic_checks': False,
    }
    oeparam = etree.XML('''
    <openendedparam>
...@@ -188,25 +183,26 @@ class OpenEndedModuleTest(unittest.TestCase):
        self.test_system.location = self.location
        self.mock_xqueue = MagicMock()
        self.mock_xqueue.send_to_queue.return_value = (None, "Message")
        self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue',
                                   'waittime': 1}
        self.openendedmodule = OpenEndedModule(self.test_system, self.location,
                                               self.definition, self.descriptor, self.static_data, self.metadata)
    def test_message_post(self):
        get = {'feedback': 'feedback text',
               'submission_id': '1',
               'grader_id': '1',
               'score': 3}
        qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
        student_info = {'anonymous_student_id': self.test_system.anonymous_student_id,
                        'submission_time': qtime}
        contents = {
            'feedback': get['feedback'],
            'submission_id': int(get['submission_id']),
            'grader_id': int(get['grader_id']),
            'score': get['score'],
            'student_info': json.dumps(student_info)
        }
        result = self.openendedmodule.message_post(get, self.test_system)
        self.assertTrue(result['success'])
...@@ -220,13 +216,13 @@ class OpenEndedModuleTest(unittest.TestCase):
        submission = "This is a student submission"
        qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
        student_info = {'anonymous_student_id': self.test_system.anonymous_student_id,
                        'submission_time': qtime}
        contents = self.openendedmodule.payload.copy()
        contents.update({
            'student_info': json.dumps(student_info),
            'student_response': submission,
            'max_score': self.max_score
        })
        result = self.openendedmodule.send_to_grader(submission, self.test_system)
        self.assertTrue(result)
        self.mock_xqueue.send_to_queue.assert_called_with(body=json.dumps(contents), header=ANY)
...@@ -234,36 +230,36 @@ class OpenEndedModuleTest(unittest.TestCase):
    def update_score_single(self):
        self.openendedmodule.new_history_entry("New Entry")
        score_msg = {
            'correct': True,
            'score': 4,
            'msg': 'Grader Message',
            'feedback': "Grader Feedback"
        }
        get = {'queuekey': "abcd",
               'xqueue_body': score_msg}
        self.openendedmodule.update_score(get, self.test_system)
    def update_score_single(self):
        self.openendedmodule.new_history_entry("New Entry")
        feedback = {
            "success": True,
            "feedback": "Grader Feedback"
        }
        score_msg = {
            'correct': True,
            'score': 4,
            'msg': 'Grader Message',
            'feedback': json.dumps(feedback),
            'grader_type': 'IN',
            'grader_id': '1',
            'submission_id': '1',
            'success': True,
            'rubric_scores': [0],
            'rubric_scores_complete': True,
            'rubric_xml': etree.tostring(self.rubric)
        }
        get = {'queuekey': "abcd",
               'xqueue_body': json.dumps(score_msg)}
        self.openendedmodule.update_score(get, self.test_system)
    def test_latest_post_assessment(self):
...@@ -296,18 +292,18 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
    metadata = {'attempts': '10', 'max_score': max_score}
    static_data = {
        'max_attempts': 20,
        'prompt': prompt,
        'rubric': rubric,
        'max_score': max_score,
        'display_name': 'Name',
        'accept_file_upload': False,
        'rewrite_content_links': "",
        'close_date': "",
        's3_interface': test_util_open_ended.S3_INTERFACE,
        'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
        'skip_basic_checks': False,
    }
    oeparam = etree.XML('''
    <openendedparam>
...@@ -329,12 +325,12 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
    '''
    task_xml2 = '''
    <openended min_score_to_attempt="1" max_score_to_attempt="1">
        <openendedparam>
            <initial_display>Enter essay here.</initial_display>
            <answer_display>This is the answer.</answer_display>
            <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
        </openendedparam>
    </openended>'''
    definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
    descriptor = Mock()
...
...@@ -10,8 +10,8 @@ from . import test_system
import test_util_open_ended

class SelfAssessmentTest(unittest.TestCase):
    rubric = '''<rubric><rubric>
        <category>
        <description>Response Quality</description>
...@@ -24,7 +24,7 @@ class SelfAssessmentTest(unittest.TestCase):
        'prompt': prompt,
        'submitmessage': 'Shall we submit now?',
        'hintprompt': 'Consider this...',
    }
    location = Location(["i4x", "edX", "sa_test", "selfassessment",
                         "SampleQuestion"])
...@@ -39,22 +39,22 @@ class SelfAssessmentTest(unittest.TestCase):
                                      'attempts': 2})
        static_data = {
            'max_attempts': 10,
            'rubric': etree.XML(self.rubric),
            'prompt': self.prompt,
            'max_score': 1,
            'display_name': "Name",
            'accept_file_upload': False,
            'close_date': None,
            's3_interface': test_util_open_ended.S3_INTERFACE,
            'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
            'skip_basic_checks': False,
        }
        self.module = SelfAssessmentModule(test_system(), self.location,
                                           self.definition,
                                           self.descriptor,
                                           static_data)
    def test_get_html(self):
        html = self.module.get_html(self.module.system)
...@@ -62,14 +62,15 @@ class SelfAssessmentTest(unittest.TestCase):
    def test_self_assessment_flow(self):
        responses = {'assessment': '0', 'score_list[]': ['0', '0']}
        def get_fake_item(name):
            return responses[name]
        def get_data_for_location(self, location, student):
            return {
                'count_graded': 0,
                'count_required': 0,
                'student_sub_count': 0,
            }
        mock_query_dict = MagicMock()
...@@ -87,7 +88,6 @@ class SelfAssessmentTest(unittest.TestCase):
        self.module.save_assessment(mock_query_dict, self.module.system)
        self.assertEqual(self.module.child_state, self.module.DONE)
        d = self.module.reset({})
        self.assertTrue(d['success'])
        self.assertEqual(self.module.child_state, self.module.INITIAL)
...
OPEN_ENDED_GRADING_INTERFACE = {
    'url': 'http://127.0.0.1:3033/',
    'username': 'incorrect',
    'password': 'incorrect',
    'staff_grading': 'staff_grading',
    'peer_grading': 'peer_grading',
    'grading_controller': 'grading_controller'
}
S3_INTERFACE = {
    'aws_access_key': "",
    'aws_secret_key': "",
    "aws_bucket_name": "",
}
\ No newline at end of file
'''
This is a one-off command aimed at fixing a temporary problem encountered where partial credit was awarded for
code problems, but the resulting score (or grade) was mistakenly set to zero because of a bug in
CorrectMap.get_npoints().
'''
import json
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from courseware.models import StudentModule
from capa.correctmap import CorrectMap
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
'''
The fix here is to recalculate the score/grade based on the partial credit.
To narrow down the set of problems that might need fixing, the StudentModule
    objects to be checked are filtered down to those where:
created < '2013-03-08 15:45:00' (the problem must have been answered before the fix was installed,
on Prod and Edge)
modified > '2013-03-07 20:18:00' (the problem must have been visited after the bug was introduced)
state like '%"npoints": 0.%' (the problem must have some form of partial credit).
'''
num_visited = 0
num_changed = 0
option_list = BaseCommand.option_list + (
make_option('--save',
action='store_true',
dest='save_changes',
default=False,
help='Persist the changes that were encountered. If not set, no changes are saved.'), )
def fix_studentmodules(self, save_changes):
'''Identify the list of StudentModule objects that might need fixing, and then fix each one'''
modules = StudentModule.objects.filter(modified__gt='2013-03-07 20:18:00',
created__lt='2013-03-08 15:45:00',
state__contains='"npoints": 0.')
for module in modules:
self.fix_studentmodule_grade(module, save_changes)
def fix_studentmodule_grade(self, module, save_changes):
''' Fix the grade assigned to a StudentModule'''
module_state = module.state
if module_state is None:
# not likely, since we filter on it. But in general...
LOG.info("No state found for {type} module {id} for student {student} in course {course_id}"
.format(type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
return
state_dict = json.loads(module_state)
self.num_visited += 1
        # LoncapaProblem.get_score() checks student_answers -- if there are none, it returns a grade of 0.
        # Make the same check here, but sooner, before doing any of the other grading work.
        student_answers = state_dict['student_answers']
        if not student_answers:
# we should not have a grade here:
if module.grade != 0:
LOG.error("No answer found but grade {grade} exists for {type} module {id} for student {student} "
"in course {course_id}".format(grade=module.grade,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
            else:
                LOG.debug("No answer and no grade found for {type} module {id} for student {student} "
                          "in course {course_id}".format(type=module.module_type, id=module.module_state_key,
                                                         student=module.student.username, course_id=module.course_id))
return
# load into a CorrectMap, as done in LoncapaProblem.__init__():
correct_map = CorrectMap()
if 'correct_map' in state_dict:
correct_map.set_dict(state_dict['correct_map'])
# calculate score the way LoncapaProblem.get_score() works, by deferring to
# CorrectMap's get_npoints implementation.
correct = 0
for key in correct_map:
correct += correct_map.get_npoints(key)
if module.grade == correct:
# nothing to change
LOG.debug("Grade matches for {type} module {id} for student {student} in course {course_id}"
.format(type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
elif save_changes:
# make the change
LOG.info("Grade changing from {0} to {1} for {type} module {id} for student {student} "
"in course {course_id}".format(module.grade, correct,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
module.grade = correct
module.save()
self.num_changed += 1
else:
# don't make the change, but log that the change would be made
LOG.info("Grade would change from {0} to {1} for {type} module {id} for student {student} "
"in course {course_id}".format(module.grade, correct,
type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
self.num_changed += 1
    def handle(self, *args, **options):
'''Handle management command request'''
save_changes = options['save_changes']
LOG.info("Starting run: save_changes = {0}".format(save_changes))
self.fix_studentmodules(save_changes)
LOG.info("Finished run: updating {0} of {1} modules".format(self.num_changed, self.num_visited))
...@@ -22,7 +22,7 @@ NOTIFICATION_TYPES = ( ...@@ -22,7 +22,7 @@ NOTIFICATION_TYPES = (
('staff_needs_to_grade', 'staff_grading', 'Staff Grading'), ('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted'), ('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted'),
('flagged_submissions_exist', 'open_ended_flagged_problems', 'Flagged Submissions') ('flagged_submissions_exist', 'open_ended_flagged_problems', 'Flagged Submissions')
) )
def staff_grading_notifications(course, user): def staff_grading_notifications(course, user):
...@@ -46,7 +46,9 @@ def staff_grading_notifications(course, user): ...@@ -46,7 +46,9 @@ def staff_grading_notifications(course, user):
#Non catastrophic error, so no real action #Non catastrophic error, so no real action
notifications = {} notifications = {}
#This is a dev_facing_error #This is a dev_facing_error
log.info("Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id, student_id)) log.info(
"Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading: if pending_grading:
img_path = "/static/images/grading_notification.png" img_path = "/static/images/grading_notification.png"
...@@ -87,7 +89,9 @@ def peer_grading_notifications(course, user): ...@@ -87,7 +89,9 @@ def peer_grading_notifications(course, user):
#Non catastrophic error, so no real action #Non catastrophic error, so no real action
notifications = {} notifications = {}
#This is a dev_facing_error #This is a dev_facing_error
log.info("Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id, student_id)) log.info(
"Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading: if pending_grading:
img_path = "/static/images/grading_notification.png" img_path = "/static/images/grading_notification.png"
...@@ -119,7 +123,9 @@ def combined_notifications(course, user): ...@@ -119,7 +123,9 @@ def combined_notifications(course, user):
return notification_dict return notification_dict
min_time_to_query = user.last_login min_time_to_query = user.last_login
last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id, modified__gt=min_time_to_query).values('modified').order_by('-modified') last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id,
modified__gt=min_time_to_query).values('modified').order_by(
'-modified')
last_module_seen_count = last_module_seen.count() last_module_seen_count = last_module_seen.count()
if last_module_seen_count > 0: if last_module_seen_count > 0:
...@@ -131,7 +137,8 @@ def combined_notifications(course, user): ...@@ -131,7 +137,8 @@ def combined_notifications(course, user):
img_path = "" img_path = ""
try: try:
controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff, last_time_viewed) controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff,
last_time_viewed)
log.debug(controller_response) log.debug(controller_response)
notifications = json.loads(controller_response) notifications = json.loads(controller_response)
if notifications['success']: if notifications['success']:
...@@ -141,7 +148,9 @@ def combined_notifications(course, user): ...@@ -141,7 +148,9 @@ def combined_notifications(course, user):
#Non catastrophic error, so no real action #Non catastrophic error, so no real action
notifications = {} notifications = {}
#This is a dev_facing_error #This is a dev_facing_error
log.exception("Problem with getting notifications from controller query service for course {0} user {1}.".format(course_id, student_id)) log.exception(
"Problem with getting notifications from controller query service for course {0} user {1}.".format(
course_id, student_id))
if pending_grading: if pending_grading:
img_path = "/static/images/grading_notification.png" img_path = "/static/images/grading_notification.png"
...@@ -165,7 +174,8 @@ def set_value_in_cache(student_id, course_id, notification_type, value): ...@@ -165,7 +174,8 @@ def set_value_in_cache(student_id, course_id, notification_type, value):
def create_key_name(student_id, course_id, notification_type): def create_key_name(student_id, course_id, notification_type):
key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id, student=student_id) key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id,
student=student_id)
return key_name return key_name
......
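For reference, create_key_name above produces cache keys of the following shape; KEY_PREFIX is defined outside this hunk, so it is left symbolic and the ids are illustrative:

# create_key_name('student_42', 'edX/toy/2012_Fall', 'staff_needs_to_grade')
# == KEY_PREFIX + 'staff_needs_to_grade_edX/toy/2012_Fall_student_42'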
...@@ -15,6 +15,7 @@ class StaffGrading(object): ...@@ -15,6 +15,7 @@ class StaffGrading(object):
""" """
Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views. Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views.
""" """
def __init__(self, course): def __init__(self, course):
self.course = course self.course = course
......
...@@ -20,10 +20,12 @@ log = logging.getLogger(__name__) ...@@ -20,10 +20,12 @@ log = logging.getLogger(__name__)
STAFF_ERROR_MESSAGE = 'Could not contact the external grading server. Please contact the development team. If you do not have a point of contact, you can contact Vik at vik@edx.org.' STAFF_ERROR_MESSAGE = 'Could not contact the external grading server. Please contact the development team. If you do not have a point of contact, you can contact Vik at vik@edx.org.'
class MockStaffGradingService(object): class MockStaffGradingService(object):
""" """
    A simple mockup of a staff grading service, for testing. A simple mockup of a staff grading service, for testing.
""" """
def __init__(self): def __init__(self):
self.cnt = 0 self.cnt = 0
...@@ -43,15 +45,18 @@ class MockStaffGradingService(object): ...@@ -43,15 +45,18 @@ class MockStaffGradingService(object):
def get_problem_list(self, course_id, grader_id): def get_problem_list(self, course_id, grader_id):
self.cnt += 1 self.cnt += 1
return json.dumps({'success': True, return json.dumps({'success': True,
'problem_list': [ 'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10}), 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5,
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', 'min_for_ml': 10}),
'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10}) json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
]}) 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5,
'min_for_ml': 10})
]})
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged):
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
submission_flagged):
return self.get_next(course_id, 'fake location', grader_id) return self.get_next(course_id, 'fake location', grader_id)
...@@ -59,6 +64,7 @@ class StaffGradingService(GradingService): ...@@ -59,6 +64,7 @@ class StaffGradingService(GradingService):
""" """
Interface to staff grading backend. Interface to staff grading backend.
""" """
def __init__(self, config): def __init__(self, config):
config['system'] = ModuleSystem( config['system'] = ModuleSystem(
ajax_url=None, ajax_url=None,
...@@ -116,12 +122,13 @@ class StaffGradingService(GradingService): ...@@ -116,12 +122,13 @@ class StaffGradingService(GradingService):
GradingServiceError: something went wrong with the connection. GradingServiceError: something went wrong with the connection.
""" """
response = self.get(self.get_next_url, response = self.get(self.get_next_url,
params={'location': location, params={'location': location,
'grader_id': grader_id}) 'grader_id': grader_id})
return json.dumps(self._render_rubric(response)) return json.dumps(self._render_rubric(response))
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged): def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
submission_flagged):
""" """
Save a score and feedback for a submission. Save a score and feedback for a submission.
...@@ -260,14 +267,14 @@ def get_problem_list(request, course_id): ...@@ -260,14 +267,14 @@ def get_problem_list(request, course_id):
try: try:
response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user)) response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user))
return HttpResponse(response, return HttpResponse(response,
mimetype="application/json") mimetype="application/json")
except GradingServiceError: except GradingServiceError:
#This is a dev_facing_error #This is a dev_facing_error
log.exception("Error from staff grading service in open ended grading. server url: {0}" log.exception("Error from staff grading service in open ended grading. server url: {0}"
.format(staff_grading_service().url)) .format(staff_grading_service().url))
#This is a staff_facing_error #This is a staff_facing_error
return HttpResponse(json.dumps({'success': False, return HttpResponse(json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE})) 'error': STAFF_ERROR_MESSAGE}))
def _get_next(course_id, grader_id, location): def _get_next(course_id, grader_id, location):
...@@ -279,7 +286,7 @@ def _get_next(course_id, grader_id, location): ...@@ -279,7 +286,7 @@ def _get_next(course_id, grader_id, location):
except GradingServiceError: except GradingServiceError:
#This is a dev facing error #This is a dev facing error
log.exception("Error from staff grading service in open ended grading. server url: {0}" log.exception("Error from staff grading service in open ended grading. server url: {0}"
.format(staff_grading_service().url)) .format(staff_grading_service().url))
#This is a staff_facing_error #This is a staff_facing_error
return json.dumps({'success': False, return json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE}) 'error': STAFF_ERROR_MESSAGE})
...@@ -304,7 +311,7 @@ def save_grade(request, course_id): ...@@ -304,7 +311,7 @@ def save_grade(request, course_id):
if request.method != 'POST': if request.method != 'POST':
raise Http404 raise Http404
required = set(['score', 'feedback', 'submission_id', 'location','submission_flagged', 'rubric_scores[]']) required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged', 'rubric_scores[]'])
actual = set(request.POST.keys()) actual = set(request.POST.keys())
missing = required - actual missing = required - actual
if len(missing) > 0: if len(missing) > 0:
...@@ -314,22 +321,23 @@ def save_grade(request, course_id): ...@@ -314,22 +321,23 @@ def save_grade(request, course_id):
grader_id = unique_id_for_user(request.user) grader_id = unique_id_for_user(request.user)
p = request.POST p = request.POST
location = p['location'] location = p['location']
skipped = 'skipped' in p skipped = 'skipped' in p
try: try:
result_json = staff_grading_service().save_grade(course_id, result_json = staff_grading_service().save_grade(course_id,
grader_id, grader_id,
p['submission_id'], p['submission_id'],
p['score'], p['score'],
p['feedback'], p['feedback'],
skipped, skipped,
p.getlist('rubric_scores[]'), p.getlist('rubric_scores[]'),
p['submission_flagged']) p['submission_flagged'])
except GradingServiceError: except GradingServiceError:
#This is a dev_facing_error #This is a dev_facing_error
log.exception("Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(request, course_id)) log.exception(
"Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(
request, course_id))
#This is a staff_facing_error #This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE) return _err_response(STAFF_ERROR_MESSAGE)
...@@ -337,13 +345,16 @@ def save_grade(request, course_id): ...@@ -337,13 +345,16 @@ def save_grade(request, course_id):
result = json.loads(result_json) result = json.loads(result_json)
except ValueError: except ValueError:
#This is a dev_facing_error #This is a dev_facing_error
log.exception("save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(result_json)) log.exception(
"save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(
result_json))
#This is a staff_facing_error #This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE) return _err_response(STAFF_ERROR_MESSAGE)
if not result.get('success', False): if not result.get('success', False):
#This is a dev_facing_error #This is a dev_facing_error
log.warning('Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json)) log.warning(
'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))
return _err_response(STAFF_ERROR_MESSAGE) return _err_response(STAFF_ERROR_MESSAGE)
# Ok, save_grade seemed to work. Get the next submission to grade. # Ok, save_grade seemed to work. Get the next submission to grade.
......
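As a usage sketch of the save_grade view above: a POST must carry at least the keys the view checks for. A hypothetical Django test-client call (the URL shape is an assumption; course_id is captured from the URL, and the values are illustrative):

from django.test import Client

client = Client()
response = client.post('/courses/edX/toy/2012_Fall/staff_grading/save_grade', {
    'score': '2',
    'feedback': 'Good work',
    'submission_id': '1',
    'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
    'submission_flagged': 'false',
    'rubric_scores[]': ['0', '1'],
    # include 'skipped' to skip the submission instead of grading it
})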
...@@ -7,7 +7,7 @@ django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open ...@@ -7,7 +7,7 @@ django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open
from django.test import TestCase from django.test import TestCase
from open_ended_grading import staff_grading_service from open_ended_grading import staff_grading_service
from xmodule.open_ended_grading_classes import peer_grading_service from xmodule.open_ended_grading_classes import peer_grading_service
from xmodule import peer_grading_module from xmodule import peer_grading_module
from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group from django.contrib.auth.models import Group
...@@ -22,6 +22,7 @@ from xmodule.x_module import ModuleSystem ...@@ -22,6 +22,7 @@ from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string from mitxmako.shortcuts import render_to_string
import logging import logging
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
from django.test.utils import override_settings from django.test.utils import override_settings
from django.http import QueryDict from django.http import QueryDict
...@@ -36,6 +37,7 @@ class TestStaffGradingService(ct.PageLoader): ...@@ -36,6 +37,7 @@ class TestStaffGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the access control and error handling logic -- all the actual work is on the
backend. backend.
''' '''
def setUp(self): def setUp(self):
xmodule.modulestore.django._MODULESTORES = {} xmodule.modulestore.django._MODULESTORES = {}
...@@ -50,6 +52,7 @@ class TestStaffGradingService(ct.PageLoader): ...@@ -50,6 +52,7 @@ class TestStaffGradingService(ct.PageLoader):
self.course_id = "edX/toy/2012_Fall" self.course_id = "edX/toy/2012_Fall"
self.toy = modulestore().get_course(self.course_id) self.toy = modulestore().get_course(self.course_id)
def make_instructor(course): def make_instructor(course):
group_name = _course_staff_group_name(course.location) group_name = _course_staff_group_name(course.location)
g = Group.objects.create(name=group_name) g = Group.objects.create(name=group_name)
...@@ -130,6 +133,7 @@ class TestPeerGradingService(ct.PageLoader): ...@@ -130,6 +133,7 @@ class TestPeerGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the access control and error handling logic -- all the actual work is on the
backend. backend.
''' '''
def setUp(self): def setUp(self):
xmodule.modulestore.django._MODULESTORES = {} xmodule.modulestore.django._MODULESTORES = {}
...@@ -145,16 +149,16 @@ class TestPeerGradingService(ct.PageLoader): ...@@ -145,16 +149,16 @@ class TestPeerGradingService(ct.PageLoader):
self.course_id = "edX/toy/2012_Fall" self.course_id = "edX/toy/2012_Fall"
self.toy = modulestore().get_course(self.course_id) self.toy = modulestore().get_course(self.course_id)
location = "i4x://edX/toy/peergrading/init" location = "i4x://edX/toy/peergrading/init"
model_data = {'data' : "<peergrading/>"} model_data = {'data': "<peergrading/>"}
self.mock_service = peer_grading_service.MockPeerGradingService() self.mock_service = peer_grading_service.MockPeerGradingService()
self.system = ModuleSystem( self.system = ModuleSystem(
ajax_url=location, ajax_url=location,
track_function=None, track_function=None,
get_module = None, get_module=None,
render_template=render_to_string, render_template=render_to_string,
replace_urls=None, replace_urls=None,
xblock_model_data= {}, xblock_model_data={},
s3_interface = test_util_open_ended.S3_INTERFACE, s3_interface=test_util_open_ended.S3_INTERFACE,
open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE
) )
self.descriptor = peer_grading_module.PeerGradingDescriptor(self.system, location, model_data) self.descriptor = peer_grading_module.PeerGradingDescriptor(self.system, location, model_data)
...@@ -182,18 +186,20 @@ class TestPeerGradingService(ct.PageLoader): ...@@ -182,18 +186,20 @@ class TestPeerGradingService(ct.PageLoader):
def test_save_grade_success(self): def test_save_grade_success(self):
data = { data = {
'rubric_scores[]': [0, 0], 'rubric_scores[]': [0, 0],
'location': self.location, 'location': self.location,
'submission_id': 1, 'submission_id': 1,
'submission_key': 'fake key', 'submission_key': 'fake key',
'score': 2, 'score': 2,
'feedback': 'feedback', 'feedback': 'feedback',
'submission_flagged': 'false' 'submission_flagged': 'false'
} }
qdict = MagicMock() qdict = MagicMock()
def fake_get_item(key): def fake_get_item(key):
return data[key] return data[key]
qdict.__getitem__.side_effect = fake_get_item qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item qdict.getlist = fake_get_item
qdict.keys = data.keys qdict.keys = data.keys
...@@ -244,18 +250,20 @@ class TestPeerGradingService(ct.PageLoader): ...@@ -244,18 +250,20 @@ class TestPeerGradingService(ct.PageLoader):
def test_save_calibration_essay_success(self): def test_save_calibration_essay_success(self):
data = { data = {
'rubric_scores[]': [0, 0], 'rubric_scores[]': [0, 0],
'location': self.location, 'location': self.location,
'submission_id': 1, 'submission_id': 1,
'submission_key': 'fake key', 'submission_key': 'fake key',
'score': 2, 'score': 2,
'feedback': 'feedback', 'feedback': 'feedback',
'submission_flagged': 'false' 'submission_flagged': 'false'
} }
qdict = MagicMock() qdict = MagicMock()
def fake_get_item(key): def fake_get_item(key):
return data[key] return data[key]
qdict.__getitem__.side_effect = fake_get_item qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item qdict.getlist = fake_get_item
qdict.keys = data.keys qdict.keys = data.keys
......
...@@ -57,22 +57,24 @@ def _reverse_without_slash(url_name, course_id): ...@@ -57,22 +57,24 @@ def _reverse_without_slash(url_name, course_id):
ajax_url = reverse(url_name, kwargs={'course_id': course_id}) ajax_url = reverse(url_name, kwargs={'course_id': course_id})
return ajax_url return ajax_url
DESCRIPTION_DICT = { DESCRIPTION_DICT = {
'Peer Grading': "View all problems that require peer assessment in this particular course.", 'Peer Grading': "View all problems that require peer assessment in this particular course.",
'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.", 'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
'Problems you have submitted': "View open ended problems that you have previously submitted for grading.", 'Problems you have submitted': "View open ended problems that you have previously submitted for grading.",
'Flagged Submissions': "View submissions that have been flagged by students as inappropriate." 'Flagged Submissions': "View submissions that have been flagged by students as inappropriate."
} }
ALERT_DICT = { ALERT_DICT = {
'Peer Grading': "New submissions to grade", 'Peer Grading': "New submissions to grade",
'Staff Grading': "New submissions to grade", 'Staff Grading': "New submissions to grade",
'Problems you have submitted': "New grades have been returned", 'Problems you have submitted': "New grades have been returned",
'Flagged Submissions': "Submissions have been flagged for review" 'Flagged Submissions': "Submissions have been flagged for review"
} }
STUDENT_ERROR_MESSAGE = "Error occurred while contacting the grading service. Please notify course staff." STUDENT_ERROR_MESSAGE = "Error occurred while contacting the grading service. Please notify course staff."
STAFF_ERROR_MESSAGE = "Error occurred while contacting the grading service. Please notify the development team. If you do not have a point of contact, please email Vik at vik@edx.org" STAFF_ERROR_MESSAGE = "Error occurred while contacting the grading service. Please notify the development team. If you do not have a point of contact, please email Vik at vik@edx.org"
@cache_control(no_cache=True, no_store=True, must_revalidate=True) @cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id): def staff_grading(request, course_id):
""" """
...@@ -99,10 +101,10 @@ def peer_grading(request, course_id): ...@@ -99,10 +101,10 @@ def peer_grading(request, course_id):
#Get the current course #Get the current course
course = get_course_with_access(request.user, course_id, 'load') course = get_course_with_access(request.user, course_id, 'load')
course_id_parts = course.id.split("/") course_id_parts = course.id.split("/")
false_dict = [False,"False", "false", "FALSE"] false_dict = [False, "False", "false", "FALSE"]
#Reverse the base course url #Reverse the base course url
base_course_url = reverse('courses') base_course_url = reverse('courses')
try: try:
#TODO: This will not work with multiple runs of a course. Make it work. The last key in the Location passed #TODO: This will not work with multiple runs of a course. Make it work. The last key in the Location passed
#to get_items is called revision. Is this the same as run? #to get_items is called revision. Is this the same as run?
...@@ -154,7 +156,7 @@ def student_problem_list(request, course_id): ...@@ -154,7 +156,7 @@ def student_problem_list(request, course_id):
success = False success = False
error_text = "" error_text = ""
problem_list = [] problem_list = []
base_course_url = reverse('courses') base_course_url = reverse('courses')
try: try:
problem_list_json = controller_qs.get_grading_status_list(course_id, unique_id_for_user(request.user)) problem_list_json = controller_qs.get_grading_status_list(course_id, unique_id_for_user(request.user))
...@@ -181,7 +183,7 @@ def student_problem_list(request, course_id): ...@@ -181,7 +183,7 @@ def student_problem_list(request, course_id):
except: except:
#This is a student_facing_error #This is a student_facing_error
eta_string = "Error getting ETA." eta_string = "Error getting ETA."
problem_list[i].update({'eta_string' : eta_string}) problem_list[i].update({'eta_string': eta_string})
except GradingServiceError: except GradingServiceError:
#This is a student_facing_error #This is a student_facing_error
...@@ -222,7 +224,7 @@ def flagged_problem_list(request, course_id): ...@@ -222,7 +224,7 @@ def flagged_problem_list(request, course_id):
success = False success = False
error_text = "" error_text = ""
problem_list = [] problem_list = []
base_course_url = reverse('courses') base_course_url = reverse('courses')
try: try:
problem_list_json = controller_qs.get_flagged_problem_list(course_id) problem_list_json = controller_qs.get_flagged_problem_list(course_id)
...@@ -250,14 +252,14 @@ def flagged_problem_list(request, course_id): ...@@ -250,14 +252,14 @@ def flagged_problem_list(request, course_id):
ajax_url = _reverse_with_slash('open_ended_flagged_problems', course_id) ajax_url = _reverse_with_slash('open_ended_flagged_problems', course_id)
context = { context = {
'course': course, 'course': course,
'course_id': course_id, 'course_id': course_id,
'ajax_url': ajax_url, 'ajax_url': ajax_url,
'success': success, 'success': success,
'problem_list': problem_list, 'problem_list': problem_list,
'error_text': error_text, 'error_text': error_text,
# Checked above # Checked above
'staff_access': True, 'staff_access': True,
} }
return render_to_response('open_ended_problems/open_ended_flagged_problems.html', context) return render_to_response('open_ended_problems/open_ended_flagged_problems.html', context)
...@@ -312,7 +314,7 @@ def combined_notifications(request, course_id): ...@@ -312,7 +314,7 @@ def combined_notifications(request, course_id):
} }
return render_to_response('open_ended_problems/combined_notifications.html', return render_to_response('open_ended_problems/combined_notifications.html',
combined_dict combined_dict
) )
...@@ -325,13 +327,14 @@ def take_action_on_flags(request, course_id): ...@@ -325,13 +327,14 @@ def take_action_on_flags(request, course_id):
if request.method != 'POST': if request.method != 'POST':
raise Http404 raise Http404
required = ['submission_id', 'action_type', 'student_id'] required = ['submission_id', 'action_type', 'student_id']
for key in required: for key in required:
if key not in request.POST: if key not in request.POST:
#This is a staff_facing_error #This is a staff_facing_error
return HttpResponse(json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(key)}), return HttpResponse(json.dumps({'success': False,
mimetype="application/json") 'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(
key)}),
mimetype="application/json")
p = request.POST p = request.POST
submission_id = p['submission_id'] submission_id = p['submission_id']
...@@ -345,5 +348,7 @@ def take_action_on_flags(request, course_id): ...@@ -345,5 +348,7 @@ def take_action_on_flags(request, course_id):
return HttpResponse(response, mimetype="application/json") return HttpResponse(response, mimetype="application/json")
except GradingServiceError: except GradingServiceError:
#This is a dev_facing_error #This is a dev_facing_error
log.exception("Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(submission_id, action_type, grader_id)) log.exception(
"Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(
submission_id, action_type, grader_id))
return _err_response(STAFF_ERROR_MESSAGE) return _err_response(STAFF_ERROR_MESSAGE)
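take_action_on_flags above performs the same kind of key validation before calling the grading controller; a hedged example payload (action_type values are not enumerated in this hunk, so 'unflag' is illustrative):

# POST body for the take_action_on_flags endpoint
# {'submission_id': '1', 'action_type': 'unflag', 'student_id': 'student_42'}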