Commit 554cb752 by Vik Paruchuri

Pep8 cleanup

parent dfd66c65
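The hunks below are mechanical PEP 8 reformatting; behavior is unchanged. As a minimal sketch of the three patterns that account for nearly every change (the names here are illustrative stand-ins, not taken from the modules below):

    # 1. Dictionary literals: no space before ':', one space after it.
    grader_names = {'SA': "Self Assessment"}    # was: {'SA' : "Self Assessment"}

    # 2. Spaces around comparison operators and after commas, but none
    #    around '=' in keyword arguments and default values.
    def save_grade(score, feedback=None):       # was: feedback = None
        return score > 0                        # was: score>0

    # 3. Lines over the length limit are wrapped at an open parenthesis,
    #    with the continuation aligned beneath it.
    result = save_grade(100,
                        feedback="Meets the rubric.")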
@@ -10,7 +10,6 @@ from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import Comb
 log = logging.getLogger("mitx.courseware")
-
 VERSION_TUPLES = (
     ('1', CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module),
 )
@@ -18,6 +17,7 @@ VERSION_TUPLES = (
 DEFAULT_VERSION = 1
 DEFAULT_VERSION = str(DEFAULT_VERSION)

+
 class CombinedOpenEndedModule(XModule):
     """
     This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
@@ -129,13 +129,15 @@ class CombinedOpenEndedModule(XModule):
        version_index = versions.index(self.version)
        static_data = {
-           'rewrite_content_links' : self.rewrite_content_links,
+           'rewrite_content_links': self.rewrite_content_links,
        }
        self.child_descriptor = descriptors[version_index](self.system)
-       self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['data']), self.system)
+       self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['data']),
+                                                                              self.system)
        self.child_module = modules[version_index](self.system, location, self.child_definition, self.child_descriptor,
-                                                  instance_state = json.dumps(instance_state), metadata = self.metadata, static_data= static_data)
+                                                  instance_state=json.dumps(instance_state), metadata=self.metadata,
+                                                  static_data=static_data)

    def get_html(self):
        return self.child_module.get_html()
...
@@ -40,14 +40,15 @@ ACCEPT_FILE_UPLOAD = False
 TRUE_DICT = ["True", True, "TRUE", "true"]
 HUMAN_TASK_TYPE = {
-    'selfassessment' : "Self Assessment",
-    'openended' : "edX Assessment",
+    'selfassessment': "Self Assessment",
+    'openended': "edX Assessment",
 }
 #Default value that controls whether or not to skip basic spelling checks in the controller
 #Metadata overrides this
 SKIP_BASIC_CHECKS = False

+
 class CombinedOpenEndedV1Module():
     """
     This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
@@ -83,7 +84,7 @@ class CombinedOpenEndedV1Module():
    TEMPLATE_DIR = "combinedopenended"

    def __init__(self, system, location, definition, descriptor,
-                instance_state=None, shared_state=None, metadata = None, static_data = None, **kwargs):
+                instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
        """
        Definition file should have one or many task blocks, a rubric block, and a prompt block:
@@ -122,7 +123,7 @@ class CombinedOpenEndedV1Module():
        self.metadata = metadata
        self.display_name = metadata.get('display_name', "Open Ended")
-       self.rewrite_content_links = static_data.get('rewrite_content_links',"")
+       self.rewrite_content_links = static_data.get('rewrite_content_links', "")

        # Load instance state
@@ -177,9 +178,9 @@ class CombinedOpenEndedV1Module():
            'rubric': definition['rubric'],
            'display_name': self.display_name,
            'accept_file_upload': self.accept_file_upload,
-           'close_date' : self.timeinfo.close_date,
-           's3_interface' : self.system.s3_interface,
-           'skip_basic_checks' : self.skip_basic_checks,
+           'close_date': self.timeinfo.close_date,
+           's3_interface': self.system.s3_interface,
+           'skip_basic_checks': self.skip_basic_checks,
        }

        self.task_xml = definition['task_xml']
@@ -267,7 +268,8 @@ class CombinedOpenEndedV1Module():
        self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
        if current_task_state is None and self.current_task_number == 0:
            self.current_task = child_task_module(self.system, self.location,
-                                                 self.current_task_parsed_xml, self.current_task_descriptor, self.static_data)
+                                                 self.current_task_parsed_xml, self.current_task_descriptor,
+                                                 self.static_data)
            self.task_states.append(self.current_task.get_instance_state())
            self.state = self.ASSESSING
        elif current_task_state is None and self.current_task_number > 0:
@@ -282,7 +284,8 @@ class CombinedOpenEndedV1Module():
                'history': [{'answer': last_response}],
            })
            self.current_task = child_task_module(self.system, self.location,
-                                                 self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
+                                                 self.current_task_parsed_xml, self.current_task_descriptor,
+                                                 self.static_data,
                                                  instance_state=current_task_state)
            self.task_states.append(self.current_task.get_instance_state())
            self.state = self.ASSESSING
@@ -290,7 +293,8 @@ class CombinedOpenEndedV1Module():
            if self.current_task_number > 0 and not reset:
                current_task_state = self.overwrite_state(current_task_state)
            self.current_task = child_task_module(self.system, self.location,
-                                                 self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
+                                                 self.current_task_parsed_xml, self.current_task_descriptor,
+                                                 self.static_data,
                                                  instance_state=current_task_state)

        return True
@@ -307,7 +311,7 @@ class CombinedOpenEndedV1Module():
        last_response_data = self.get_last_response(self.current_task_number - 1)
        current_response_data = self.get_current_attributes(self.current_task_number)

-       if(current_response_data['min_score_to_attempt'] > last_response_data['score']
+       if (current_response_data['min_score_to_attempt'] > last_response_data['score']
            or current_response_data['max_score_to_attempt'] < last_response_data['score']):
            self.state = self.DONE
            self.allow_reset = True
@@ -334,7 +338,7 @@ class CombinedOpenEndedV1Module():
            'display_name': self.display_name,
            'accept_file_upload': self.accept_file_upload,
            'location': self.location,
-           'legend_list' : LEGEND_LIST,
+           'legend_list': LEGEND_LIST,
        }

        return context
@@ -429,7 +433,7 @@ class CombinedOpenEndedV1Module():
            feedback_dicts = rubric_data['feedback_dicts']
            grader_ids = rubric_data['grader_ids']
            submission_ids = rubric_data['submission_ids']
-       elif task_type== "selfassessment":
+       elif task_type == "selfassessment":
            rubric_scores = last_post_assessment
            grader_types = ['SA']
            feedback_items = ['']
@@ -446,7 +450,7 @@ class CombinedOpenEndedV1Module():
            human_state = task.HUMAN_NAMES[state]
        else:
            human_state = state
-       if len(grader_types)>0:
+       if len(grader_types) > 0:
            grader_type = grader_types[0]
        else:
            grader_type = "IN"
@@ -468,14 +472,14 @@ class CombinedOpenEndedV1Module():
            'correct': last_correctness,
            'min_score_to_attempt': min_score_to_attempt,
            'max_score_to_attempt': max_score_to_attempt,
-           'rubric_scores' : rubric_scores,
-           'grader_types' : grader_types,
-           'feedback_items' : feedback_items,
-           'grader_type' : grader_type,
-           'human_grader_type' : human_grader_name,
-           'feedback_dicts' : feedback_dicts,
-           'grader_ids' : grader_ids,
-           'submission_ids' : submission_ids,
+           'rubric_scores': rubric_scores,
+           'grader_types': grader_types,
+           'feedback_items': feedback_items,
+           'grader_type': grader_type,
+           'human_grader_type': human_grader_name,
+           'feedback_dicts': feedback_dicts,
+           'grader_ids': grader_ids,
+           'submission_ids': submission_ids,
        }
        return last_response_dict
@@ -519,20 +523,27 @@ class CombinedOpenEndedV1Module():
        Output: Dictionary to be rendered via ajax that contains the result html.
        """
        all_responses = []
-       loop_up_to_task = self.current_task_number+1
-       for i in xrange(0,loop_up_to_task):
+       loop_up_to_task = self.current_task_number + 1
+       for i in xrange(0, loop_up_to_task):
            all_responses.append(self.get_last_response(i))
-       rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['rubric_scores'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
-       grader_types = [all_responses[i]['grader_types'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['grader_types'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
-       feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['feedback_items'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
-       rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), rubric_scores,
+       rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0, len(all_responses)) if
+                        len(all_responses[i]['rubric_scores']) > 0 and all_responses[i]['grader_types'][
+                            0] in HUMAN_GRADER_TYPE.keys()]
+       grader_types = [all_responses[i]['grader_types'] for i in xrange(0, len(all_responses)) if
+                       len(all_responses[i]['grader_types']) > 0 and all_responses[i]['grader_types'][
+                           0] in HUMAN_GRADER_TYPE.keys()]
+       feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0, len(all_responses)) if
+                         len(all_responses[i]['feedback_items']) > 0 and all_responses[i]['grader_types'][
+                             0] in HUMAN_GRADER_TYPE.keys()]
+       rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']),
+                                                                 rubric_scores,
                                                                  grader_types, feedback_items)
        response_dict = all_responses[-1]
        context = {
            'results': rubric_html,
-           'task_name' : 'Scored Rubric',
-           'class_name' : 'combined-rubric-container'
+           'task_name': 'Scored Rubric',
+           'class_name': 'combined-rubric-container'
        }
        html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
        return {'html': html, 'success': True}
@@ -544,7 +555,7 @@ class CombinedOpenEndedV1Module():
        Output: Dictionary to be rendered via ajax that contains the result html.
        """
        context = {
-           'legend_list' : LEGEND_LIST,
+           'legend_list': LEGEND_LIST,
        }
        html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context)
        return {'html': html, 'success': True}
@@ -556,15 +567,16 @@ class CombinedOpenEndedV1Module():
        Output: Dictionary to be rendered via ajax that contains the result html.
        """
        self.update_task_states()
-       loop_up_to_task = self.current_task_number+1
-       all_responses =[]
-       for i in xrange(0,loop_up_to_task):
+       loop_up_to_task = self.current_task_number + 1
+       all_responses = []
+       for i in xrange(0, loop_up_to_task):
            all_responses.append(self.get_last_response(i))
        context_list = []
        for ri in all_responses:
-           for i in xrange(0,len(ri['rubric_scores'])):
-               feedback = ri['feedback_dicts'][i].get('feedback','')
-               rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), ri['rubric_scores'][i])
+           for i in xrange(0, len(ri['rubric_scores'])):
+               feedback = ri['feedback_dicts'][i].get('feedback', '')
+               rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']),
+                                                                ri['rubric_scores'][i])
                if rubric_data['success']:
                    rubric_html = rubric_data['html']
                else:
@@ -572,22 +584,22 @@ class CombinedOpenEndedV1Module():
                context = {
                    'rubric_html': rubric_html,
                    'grader_type': ri['grader_type'],
-                   'feedback' : feedback,
-                   'grader_id' : ri['grader_ids'][i],
-                   'submission_id' : ri['submission_ids'][i],
+                   'feedback': feedback,
+                   'grader_id': ri['grader_ids'][i],
+                   'submission_id': ri['submission_ids'][i],
                }
                context_list.append(context)
        feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), {
-           'context_list' : context_list,
-           'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
-           'human_grader_types' : HUMAN_GRADER_TYPE,
+           'context_list': context_list,
+           'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+           'human_grader_types': HUMAN_GRADER_TYPE,
            'rows': 50,
            'cols': 50,
        })
        context = {
            'results': feedback_table,
-           'task_name' : "Feedback",
-           'class_name' : "result-container",
+           'task_name': "Feedback",
+           'class_name': "result-container",
        }
        html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
        return {'html': html, 'success': True}
@@ -617,8 +629,8 @@ class CombinedOpenEndedV1Module():
            'reset': self.reset,
            'get_results': self.get_results,
            'get_combined_rubric': self.get_rubric,
-           'get_status' : self.get_status_ajax,
-           'get_legend' : self.get_legend,
+           'get_status': self.get_status_ajax,
+           'get_legend': self.get_legend,
        }

        if dispatch not in handlers:
@@ -699,11 +711,12 @@ class CombinedOpenEndedV1Module():
        context = {
            'status_list': status,
-           'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
-           'legend_list' : LEGEND_LIST,
-           'render_via_ajax' : render_via_ajax,
+           'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+           'legend_list': LEGEND_LIST,
+           'render_via_ajax': render_via_ajax,
        }
-       status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), context)
+       status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR),
+                                                 context)
        return status_html
@@ -793,7 +806,9 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor):
        for child in expected_children:
            if len(xml_object.xpath(child)) == 0:
                #This is a staff_facing_error
-               raise ValueError("Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+               raise ValueError(
+                   "Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(
+                       child))

        def parse_task(k):
            """Assumes that xml_object has child k"""
...
@@ -4,24 +4,26 @@ from lxml import etree
 log = logging.getLogger(__name__)

 GRADER_TYPE_IMAGE_DICT = {
-    'SA' : '/static/images/self_assessment_icon.png',
-    'PE' : '/static/images/peer_grading_icon.png',
-    'ML' : '/static/images/ml_grading_icon.png',
-    'IN' : '/static/images/peer_grading_icon.png',
-    'BC' : '/static/images/ml_grading_icon.png',
+    'SA': '/static/images/self_assessment_icon.png',
+    'PE': '/static/images/peer_grading_icon.png',
+    'ML': '/static/images/ml_grading_icon.png',
+    'IN': '/static/images/peer_grading_icon.png',
+    'BC': '/static/images/ml_grading_icon.png',
 }

 HUMAN_GRADER_TYPE = {
-    'SA' : 'Self-Assessment',
-    'PE' : 'Peer-Assessment',
-    'IN' : 'Instructor-Assessment',
-    'ML' : 'AI-Assessment',
-    'BC' : 'AI-Assessment',
+    'SA': 'Self-Assessment',
+    'PE': 'Peer-Assessment',
+    'IN': 'Instructor-Assessment',
+    'ML': 'AI-Assessment',
+    'BC': 'AI-Assessment',
 }

 DO_NOT_DISPLAY = ['BC', 'IN']

-LEGEND_LIST = [{'name' : HUMAN_GRADER_TYPE[k], 'image' : GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() if k not in DO_NOT_DISPLAY ]
+LEGEND_LIST = [{'name': HUMAN_GRADER_TYPE[k], 'image': GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys()
+               if k not in DO_NOT_DISPLAY]

 class RubricParsingError(Exception):
     def __init__(self, msg):
@@ -29,15 +31,14 @@ class RubricParsingError(Exception):
 class CombinedOpenEndedRubric(object):
    TEMPLATE_DIR = "combinedopenended/openended"

-   def __init__ (self, system, view_only = False):
+   def __init__(self, system, view_only=False):
        self.has_score = False
        self.view_only = view_only
        self.system = system

-   def render_rubric(self, rubric_xml, score_list = None):
+   def render_rubric(self, rubric_xml, score_list=None):
        '''
        render_rubric: takes in an xml string and outputs the corresponding
        html for that xml, given the type of rubric we're generating
@@ -50,11 +51,11 @@ class CombinedOpenEndedRubric(object):
        success = False
        try:
            rubric_categories = self.extract_categories(rubric_xml)
-           if score_list and len(score_list)==len(rubric_categories):
-               for i in xrange(0,len(rubric_categories)):
+           if score_list and len(score_list) == len(rubric_categories):
+               for i in xrange(0, len(rubric_categories)):
                    category = rubric_categories[i]
-                   for j in xrange(0,len(category['options'])):
-                       if score_list[i]==j:
+                   for j in xrange(0, len(category['options'])):
+                       if score_list[i] == j:
                            rubric_categories[i]['options'][j]['selected'] = True
            rubric_scores = [cat['score'] for cat in rubric_categories]
            max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
@@ -67,15 +68,16 @@ class CombinedOpenEndedRubric(object):
                'has_score': self.has_score,
                'view_only': self.view_only,
                'max_score': max_score,
-               'combined_rubric' : False
+               'combined_rubric': False
            })
            success = True
        except:
            #This is a staff_facing_error
-           error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(rubric_xml)
+           error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(
+               rubric_xml)
            log.exception(error_message)
            raise RubricParsingError(error_message)
-       return {'success' : success, 'html' : html, 'rubric_scores' : rubric_scores}
+       return {'success': success, 'html': html, 'rubric_scores': rubric_scores}

    def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score):
        rubric_dict = self.render_rubric(rubric_string)
@@ -83,7 +85,8 @@ class CombinedOpenEndedRubric(object):
        rubric_feedback = rubric_dict['html']
        if not success:
            #This is a staff_facing_error
-           error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(rubric_string, location.url())
+           error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(
+               rubric_string, location.url())
            log.error(error_message)
            raise RubricParsingError(error_message)
@@ -123,7 +126,9 @@ class CombinedOpenEndedRubric(object):
        for category in element:
            if category.tag != 'category':
                #This is a staff_facing_error
-               raise RubricParsingError("[extract_categories] Expected a <category> tag: got {0} instead. Contact the learning sciences group for assistance.".format(category.tag))
+               raise RubricParsingError(
+                   "[extract_categories] Expected a <category> tag: got {0} instead. Contact the learning sciences group for assistance.".format(
+                       category.tag))
            else:
                categories.append(self.extract_category(category))
        return categories
@@ -150,13 +155,17 @@ class CombinedOpenEndedRubric(object):
        # if we are missing the score tag and we are expecting one
        elif self.has_score:
            #This is a staff_facing_error
-           raise RubricParsingError("[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(descriptionxml.text))
+           raise RubricParsingError(
+               "[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(
+                   descriptionxml.text))

        # parse description
        if descriptionxml.tag != 'description':
            #This is a staff_facing_error
-           raise RubricParsingError("[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(descriptionxml.tag))
+           raise RubricParsingError(
+               "[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(
+                   descriptionxml.tag))

        description = descriptionxml.text
@@ -167,7 +176,9 @@ class CombinedOpenEndedRubric(object):
        for option in optionsxml:
            if option.tag != 'option':
                #This is a staff_facing_error
-               raise RubricParsingError("[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(option.tag))
+               raise RubricParsingError(
+                   "[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(
+                       option.tag))
            else:
                pointstr = option.get("points")
                if pointstr:
@@ -177,13 +188,16 @@ class CombinedOpenEndedRubric(object):
                        points = int(pointstr)
                    except ValueError:
                        #This is a staff_facing_error
-                       raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(pointstr))
+                       raise RubricParsingError(
+                           "[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(
+                               pointstr))
                elif autonumbering:
                    # use the generated one if we're in the right mode
                    points = cur_points
                    cur_points = cur_points + 1
                else:
-                   raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
+                   raise Exception(
+                       "[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")

                selected = score == points
                optiontext = option.text
@@ -193,19 +207,20 @@ class CombinedOpenEndedRubric(object):
        options = sorted(options, key=lambda option: option['points'])
        CombinedOpenEndedRubric.validate_options(options)

-       return {'description': description, 'options': options, 'score' : score}
+       return {'description': description, 'options': options, 'score': score}

-   def render_combined_rubric(self,rubric_xml,scores,score_types,feedback_types):
-       success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores,score_types,feedback_types)
+   def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types):
+       success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types,
+                                                                                     feedback_types)
        rubric_categories = self.extract_categories(rubric_xml)
        max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
        max_score = max(max_scores)
-       for i in xrange(0,len(rubric_categories)):
+       for i in xrange(0, len(rubric_categories)):
            category = rubric_categories[i]
-           for j in xrange(0,len(category['options'])):
+           for j in xrange(0, len(category['options'])):
                rubric_categories[i]['options'][j]['grader_types'] = []
                for tuple in score_tuples:
-                   if tuple[1] == i and tuple[2] ==j:
+                   if tuple[1] == i and tuple[2] == j:
                        for grader_type in tuple[3]:
                            rubric_categories[i]['options'][j]['grader_types'].append(grader_type)
@@ -214,9 +229,9 @@ class CombinedOpenEndedRubric(object):
            'has_score': True,
            'view_only': True,
            'max_score': max_score,
-           'combined_rubric' : True,
-           'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
-           'human_grader_types' : HUMAN_GRADER_TYPE,
+           'combined_rubric': True,
+           'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
+           'human_grader_types': HUMAN_GRADER_TYPE,
        })
        return html
@@ -228,14 +243,16 @@ class CombinedOpenEndedRubric(object):
        '''
        if len(options) == 0:
            #This is a staff_facing_error
-           raise RubricParsingError("[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
+           raise RubricParsingError(
+               "[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
        if len(options) == 1:
            return
        prev = options[0]['points']
        for option in options[1:]:
            if prev == option['points']:
                #This is a staff_facing_error
-               raise RubricParsingError("[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
+               raise RubricParsingError(
+                   "[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
            else:
                prev = option['points']
@@ -250,7 +267,7 @@ class CombinedOpenEndedRubric(object):
        @return:
        """
        success = False
-       if len(scores)==0:
+       if len(scores) == 0:
            #This is a dev_facing_error
            log.error("Score length is 0 when trying to reformat rubric scores for rendering.")
            return success, ""
@@ -264,25 +281,25 @@ class CombinedOpenEndedRubric(object):
        score_lists = []
        score_type_list = []
        feedback_type_list = []
-       for i in xrange(0,len(scores)):
+       for i in xrange(0, len(scores)):
            score_cont_list = scores[i]
-           for j in xrange(0,len(score_cont_list)):
+           for j in xrange(0, len(score_cont_list)):
                score_list = score_cont_list[j]
                score_lists.append(score_list)
                score_type_list.append(score_types[i][j])
                feedback_type_list.append(feedback_types[i][j])

        score_list_len = len(score_lists[0])
-       for i in xrange(0,len(score_lists)):
+       for i in xrange(0, len(score_lists)):
            score_list = score_lists[i]
-           if len(score_list)!=score_list_len:
+           if len(score_list) != score_list_len:
                return success, ""

        score_tuples = []
-       for i in xrange(0,len(score_lists)):
-           for j in xrange(0,len(score_lists[i])):
-               tuple = [1,j,score_lists[i][j],[],[]]
-               score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples,tuple)
+       for i in xrange(0, len(score_lists)):
+           for j in xrange(0, len(score_lists[i])):
+               tuple = [1, j, score_lists[i][j], [], []]
+               score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples, tuple)
                score_tuples[tup_ind][0] += 1
                score_tuples[tup_ind][3].append(score_type_list[i])
                score_tuples[tup_ind][4].append(feedback_type_list[i])
@@ -302,14 +319,14 @@ class CombinedOpenEndedRubric(object):
        category = tuple[1]
        score = tuple[2]
        tup_ind = -1
-       for t in xrange(0,len(tuples)):
+       for t in xrange(0, len(tuples)):
            if tuples[t][1] == category and tuples[t][2] == score:
                tup_ind = t
                break
        if tup_ind == -1:
-           tuples.append([0,category,score,[],[]])
-           tup_ind = len(tuples)-1
+           tuples.append([0, category, score, [], []])
+           tup_ind = len(tuples) - 1
        return tuples, tup_ind
...
@@ -8,6 +8,7 @@ class ControllerQueryService(GradingService):
    """
    Interface to staff grading backend.
    """
+
    def __init__(self, config, system):
        config['system'] = system
        super(ControllerQueryService, self).__init__(config)
@@ -75,15 +76,16 @@ class ControllerQueryService(GradingService):
        response = self.post(self.take_action_on_flags_url, params)
        return response

+
 def convert_seconds_to_human_readable(seconds):
    if seconds < 60:
        human_string = "{0} seconds".format(seconds)
    elif seconds < 60 * 60:
-       human_string = "{0} minutes".format(round(seconds/60,1))
-   elif seconds < (24*60*60):
-       human_string = "{0} hours".format(round(seconds/(60*60),1))
+       human_string = "{0} minutes".format(round(seconds / 60, 1))
+   elif seconds < (24 * 60 * 60):
+       human_string = "{0} hours".format(round(seconds / (60 * 60), 1))
    else:
-       human_string = "{0} days".format(round(seconds/(60*60*24),1))
+       human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1))

    eta_string = "{0}".format(human_string)
    return eta_string
@@ -19,6 +19,7 @@ class GradingService(object):
    """
    Interface to staff grading backend.
    """
+
    def __init__(self, config):
        self.username = config['username']
        self.password = config['password']
...
@@ -5,6 +5,7 @@ to send them to S3.
 try:
     from PIL import Image
+
     ENABLE_PIL = True
 except:
     ENABLE_PIL = False
@@ -51,6 +52,7 @@ class ImageProperties(object):
    """
    Class to check properties of an image and to validate if they are allowed.
    """
+
    def __init__(self, image_data):
        """
        Initializes class variables
@@ -141,6 +143,7 @@ class URLProperties(object):
    Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable
    links to the peer grading image functionality of the external grading service.
    """
+
    def __init__(self, url_string):
        self.url_string = url_string
@@ -252,7 +255,8 @@ def upload_to_s3(file_to_upload, keyname, s3_interface):
        return True, public_url
    except:
        #This is a dev_facing_error
-       error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(bucketname.lower())
+       error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(
+           bucketname.lower())
        log.error(error_message)
        return False, error_message
...
@@ -104,7 +104,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            # response types)
        except TypeError, ValueError:
            #This is a dev_facing_error
-           log.exception("Grader payload from external open ended grading server is not a json object! Object: {0}".format(grader_payload))
+           log.exception(
+               "Grader payload from external open ended grading server is not a json object! Object: {0}".format(
+                   grader_payload))
        self.initial_display = find_with_default(oeparam, 'initial_display', '')
        self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
@@ -148,7 +150,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
            if tag not in survey_responses:
                #This is a student_facing_error
-               return {'success': False, 'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(tag)}
+               return {'success': False,
+                       'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(
+                           tag)}
        try:
            submission_id = int(survey_responses['submission_id'])
            grader_id = int(survey_responses['grader_id'])
@@ -300,7 +304,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
        # We want to display available feedback in a particular order.
        # This dictionary specifies which goes first--lower first.
-       priorities = { # These go at the start of the feedback
+       priorities = {# These go at the start of the feedback
            'spelling': 0,
            'grammar': 1,
            # needs to be after all the other feedback
@@ -437,12 +441,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            'valid': False,
            'score': 0,
            'feedback': '',
-           'rubric_scores' : [[0]],
-           'grader_types' : [''],
-           'feedback_items' : [''],
-           'feedback_dicts' : [{}],
-           'grader_ids' : [0],
-           'submission_ids' : [0],
+           'rubric_scores': [[0]],
+           'grader_types': [''],
+           'feedback_items': [''],
+           'feedback_dicts': [{}],
+           'grader_ids': [0],
+           'submission_ids': [0],
        }
        try:
            score_result = json.loads(score_msg)
@@ -527,12 +531,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            'valid': True,
            'score': score,
            'feedback': feedback,
-           'rubric_scores' : rubric_scores,
-           'grader_types' : grader_types,
-           'feedback_items' : feedback_items,
-           'feedback_dicts' : feedback_dicts,
-           'grader_ids' : grader_ids,
-           'submission_ids' : submission_ids,
+           'rubric_scores': rubric_scores,
+           'grader_types': grader_types,
+           'feedback_items': feedback_items,
+           'feedback_dicts': feedback_dicts,
+           'grader_ids': grader_ids,
+           'submission_ids': submission_ids,
        }

    def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
@@ -585,7 +589,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            #This is a dev_facing_error
            log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
            #This is a dev_facing_error
-           return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+           return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})

        before = self.get_progress()
        d = handlers[dispatch](get, system)
@@ -679,7 +683,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            correct = ""
        previous_answer = self.initial_display
-
        context = {
            'prompt': self.prompt,
            'previous_answer': previous_answer,
@@ -692,7 +695,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
            'child_type': 'openended',
            'correct': correct,
            'accept_file_upload': self.accept_file_upload,
-           'eta_message' : eta_string,
+           'eta_message': eta_string,
        }
        html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
        return html
@@ -723,7 +726,9 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
        for child in ['openendedparam']:
            if len(xml_object.xpath(child)) != 1:
                #This is a staff_facing_error
-               raise ValueError("Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+               raise ValueError(
+                   "Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
+                       child))

        def parse(k):
            """Assumes that xml_object has child k"""
...
@@ -108,13 +108,12 @@ class OpenEndedChild(object):
        self._max_score = static_data['max_score']

        if system.open_ended_grading_interface:
            self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
-           self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,system)
+           self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,
+                                                                                system)
        else:
            self.peer_gs = MockPeerGradingService()
            self.controller_qs = None
-
        self.system = system
-
        self.location_string = location
@@ -152,7 +151,8 @@ class OpenEndedChild(object):
            return True, {
                'success': False,
                #This is a student_facing_error
-               'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(self.attempts, self.max_attempts)
+               'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(
+                   self.attempts, self.max_attempts)
            }
        else:
            return False, {}
@@ -308,7 +308,7 @@ class OpenEndedChild(object):
        @return: Boolean correct.
        """
        correct = False
-       if(isinstance(score, (int, long, float, complex))):
+       if (isinstance(score, (int, long, float, complex))):
            score_ratio = int(score) / float(self.max_score())
            correct = (score_ratio >= 0.66)
        return correct
@@ -342,7 +342,8 @@ class OpenEndedChild(object):
        try:
            image_data.seek(0)
-           success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key, self.s3_interface)
+           success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key,
+                                                                             self.s3_interface)
        except:
            log.exception("Could not upload image to S3.")
@@ -454,16 +455,18 @@ class OpenEndedChild(object):
            success = True
        except:
            #This is a dev_facing_error
-           log.error("Could not contact external open ended graders for location {0} and student {1}".format(self.location_string,student_id))
+           log.error("Could not contact external open ended graders for location {0} and student {1}".format(
+               self.location_string, student_id))
            #This is a student_facing_error
            error_message = "Could not contact the graders. Please notify course staff."
            return success, allowed_to_submit, error_message
-       if count_graded>=count_required:
+       if count_graded >= count_required:
            return success, allowed_to_submit, ""
        else:
            allowed_to_submit = False
            #This is a student_facing_error
-           error_message = error_string.format(count_required-count_graded, count_graded, count_required, student_sub_count)
+           error_message = error_string.format(count_required - count_graded, count_graded, count_required,
+                                               student_sub_count)
            return success, allowed_to_submit, error_message

    def get_eta(self):
@@ -478,7 +481,7 @@ class OpenEndedChild(object):
        success = response['success']
        if isinstance(success, basestring):
-           success = (success.lower()=="true")
+           success = (success.lower() == "true")

        if success:
            eta = controller_query_service.convert_seconds_to_human_readable(response['eta'])
...
@@ -14,6 +14,7 @@ class PeerGradingService(GradingService):
    """
    Interface with the grading controller for peer grading
    """
+
    def __init__(self, config, system):
        config['system'] = system
        super(PeerGradingService, self).__init__(config)
@@ -39,7 +40,8 @@ class PeerGradingService(GradingService):
                            {'location': problem_location, 'grader_id': grader_id})
        return self.try_to_decode(self._render_rubric(response))

-   def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged):
+   def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores,
+                  submission_flagged):
        data = {'grader_id': grader_id,
                'submission_id': submission_id,
                'score': score,
@@ -89,6 +91,7 @@ class PeerGradingService(GradingService):
            pass
        return text

+
 """
 This is a mock peer grading service that can be used for unit tests
 without making actual service calls to the grading controller
...
@@ -95,7 +95,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
            #This is a dev_facing_error
            log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
            #This is a dev_facing_error
-           return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
+           return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})

        before = self.get_progress()
        d = handlers[dispatch](get, system)
@@ -224,7 +224,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
        try:
            score = int(get['assessment'])
            score_list = get.getlist('score_list[]')
-           for i in xrange(0,len(score_list)):
+           for i in xrange(0, len(score_list)):
                score_list[i] = int(score_list[i])
        except ValueError:
            #This is a dev_facing_error
@@ -305,7 +305,9 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
        for child in expected_children:
            if len(xml_object.xpath(child)) != 1:
                #This is a staff_facing_error
-               raise ValueError("Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
+               raise ValueError(
+                   "Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
+                       child))

        def parse(k):
            """Assumes that xml_object has child k"""
...
...@@ -53,12 +53,11 @@ class PeerGradingModule(XModule): ...@@ -53,12 +53,11 @@ class PeerGradingModule(XModule):
#We need to set the location here so the child modules can use it #We need to set the location here so the child modules can use it
system.set('location', location) system.set('location', location)
self.system = system self.system = system
if(self.system.open_ended_grading_interface): if (self.system.open_ended_grading_interface):
self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system) self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system)
else: else:
self.peer_gs = MockPeerGradingService() self.peer_gs = MockPeerGradingService()
self.use_for_single_location = self.metadata.get('use_for_single_location', USE_FOR_SINGLE_LOCATION) self.use_for_single_location = self.metadata.get('use_for_single_location', USE_FOR_SINGLE_LOCATION)
if isinstance(self.use_for_single_location, basestring): if isinstance(self.use_for_single_location, basestring):
self.use_for_single_location = (self.use_for_single_location in TRUE_DICT) self.use_for_single_location = (self.use_for_single_location in TRUE_DICT)
...@@ -90,7 +89,6 @@ class PeerGradingModule(XModule): ...@@ -90,7 +89,6 @@ class PeerGradingModule(XModule):
self.display_due_date = self.timeinfo.display_due_date self.display_due_date = self.timeinfo.display_due_date
self.ajax_url = self.system.ajax_url self.ajax_url = self.system.ajax_url
if not self.ajax_url.endswith("/"): if not self.ajax_url.endswith("/"):
self.ajax_url = self.ajax_url + "/" self.ajax_url = self.ajax_url + "/"
...@@ -154,7 +152,7 @@ class PeerGradingModule(XModule): ...@@ -154,7 +152,7 @@ class PeerGradingModule(XModule):
#This is a dev_facing_error #This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch)) log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error #This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False}) return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
d = handlers[dispatch](get) d = handlers[dispatch](get)
...@@ -191,7 +189,8 @@ class PeerGradingModule(XModule): ...@@ -191,7 +189,8 @@ class PeerGradingModule(XModule):
except: except:
success, response = self.query_data_for_location() success, response = self.query_data_for_location()
if not success: if not success:
log.exception("No instance data found and could not get data from controller for loc {0} student {1}".format( log.exception(
"No instance data found and could not get data from controller for loc {0} student {1}".format(
self.system.location.url(), self.system.anonymous_student_id self.system.location.url(), self.system.anonymous_student_id
)) ))
return None return None
@@ -271,7 +270,8 @@ class PeerGradingModule(XModule):
            error: if there was an error in the submission, this is the error message
        """
        required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]',
                        'submission_flagged'])
        success, message = self._check_required(get, required)
        if not success:
            return self._err_response(message)
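Note: _check_required itself is outside this diff; a plausible sketch, assuming only the (success, message) contract visible at the call site above:

def _check_required(get, required):
    # Hypothetical reconstruction -- not the module's actual body.
    actual = set(get.keys())
    missing = required - actual
    if len(missing) > 0:
        return False, "Missing required keys: {0}".format(', '.join(sorted(missing)))
    return True, ""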
@@ -430,7 +430,9 @@ class PeerGradingModule(XModule):
            return response
        except GradingServiceError:
            #This is a dev_facing_error
            log.exception(
                "Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(
                    location, submission_id, submission_key, grader_id))
            #This is a student_facing_error
            return self._err_response('There was an error saving your score. Please notify course staff.')
@@ -508,7 +510,6 @@ class PeerGradingModule(XModule):
            problem['due'] = None
            problem['closed'] = False
        ajax_url = self.ajax_url
        html = self.system.render_template('peer_grading/peer_grading.html', {
            'course_id': self.system.course_id,
@@ -531,7 +532,8 @@ class PeerGradingModule(XModule):
        if not self.use_for_single_location:
            #This is an error case, because it must be set to use a single location to be called without get parameters
            #This is a dev_facing_error
            log.error(
                "Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
            return {'html': "", 'success': False}
        problem_location = self.link_to_location
@@ -596,7 +598,9 @@ class PeerGradingDescriptor(XmlDescriptor, EditingDescriptor):
        for child in expected_children:
            if len(xml_object.xpath(child)) == 0:
                #This is a staff_facing_error
                raise ValueError(
                    "Peer grading definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(
                        child))

        def parse_task(k):
            """Assumes that xml_object has child k"""
...
@@ -14,6 +14,7 @@ from datetime import datetime
from . import test_system
import test_util_open_ended

"""
Tests for the various pieces of the CombinedOpenEndedGrading system
@@ -46,9 +47,9 @@ class OpenEndedChildTest(unittest.TestCase):
        'display_name': 'Name',
        'accept_file_upload': False,
        'close_date': None,
        's3_interface': "",
        'open_ended_grading_interface': {},
        'skip_basic_checks': False,
    }
    definition = Mock()
    descriptor = Mock()
@@ -58,22 +59,18 @@ class OpenEndedChildTest(unittest.TestCase):
        self.openendedchild = OpenEndedChild(self.test_system, self.location,
                                             self.definition, self.descriptor, self.static_data, self.metadata)

    def test_latest_answer_empty(self):
        answer = self.openendedchild.latest_answer()
        self.assertEqual(answer, "")

    def test_latest_score_empty(self):
        answer = self.openendedchild.latest_score()
        self.assertEqual(answer, None)

    def test_latest_post_assessment_empty(self):
        answer = self.openendedchild.latest_post_assessment(self.test_system)
        self.assertEqual(answer, "")

    def test_new_history_entry(self):
        new_answer = "New Answer"
        self.openendedchild.new_history_entry(new_answer)
@@ -99,7 +96,6 @@ class OpenEndedChildTest(unittest.TestCase):
        score = self.openendedchild.latest_score()
        self.assertEqual(score, 4)

    def test_record_latest_post_assessment(self):
        new_answer = "New Answer"
        self.openendedchild.new_history_entry(new_answer)
@@ -124,13 +120,11 @@ class OpenEndedChildTest(unittest.TestCase):
        self.assertEqual(score['score'], new_score)
        self.assertEqual(score['total'], self.static_data['max_score'])

    def test_reset(self):
        self.openendedchild.reset(self.test_system)
        state = json.loads(self.openendedchild.get_instance_state())
        self.assertEqual(state['state'], OpenEndedChild.INITIAL)

    def test_is_last_response_correct(self):
        new_answer = "New Answer"
        self.openendedchild.new_history_entry(new_answer)
@@ -165,11 +159,11 @@ class OpenEndedModuleTest(unittest.TestCase):
        'max_score': max_score,
        'display_name': 'Name',
        'accept_file_upload': False,
        'rewrite_content_links': "",
        'close_date': None,
        's3_interface': test_util_open_ended.S3_INTERFACE,
        'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
        'skip_basic_checks': False,
    }
    oeparam = etree.XML('''
@@ -188,7 +182,8 @@ class OpenEndedModuleTest(unittest.TestCase):
        self.test_system.location = self.location
        self.mock_xqueue = MagicMock()
        self.mock_xqueue.send_to_queue.return_value = (None, "Message")
        self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue',
                                   'waittime': 1}
        self.openendedmodule = OpenEndedModule(self.test_system, self.location,
                                               self.definition, self.descriptor, self.static_data, self.metadata)
@@ -301,12 +296,12 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
        'rubric': rubric,
        'max_score': max_score,
        'display_name': 'Name',
        'accept_file_upload': False,
        'rewrite_content_links': "",
        'close_date': "",
        's3_interface': test_util_open_ended.S3_INTERFACE,
        'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
        'skip_basic_checks': False,
    }
    oeparam = etree.XML('''
@@ -344,7 +339,7 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
                                                  self.location,
                                                  self.definition,
                                                  self.descriptor,
                                                  static_data=self.static_data,
                                                  metadata=self.metadata)

    def test_get_tag_name(self):
...
@@ -10,8 +10,8 @@ from . import test_system
import test_util_open_ended


class SelfAssessmentTest(unittest.TestCase):
    rubric = '''<rubric><rubric>
        <category>
        <description>Response Quality</description>
@@ -48,9 +48,9 @@ class SelfAssessmentTest(unittest.TestCase):
            'display_name': "Name",
            'accept_file_upload': False,
            'close_date': None,
            's3_interface': test_util_open_ended.S3_INTERFACE,
            'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
            'skip_basic_checks': False,
        }
        self.module = SelfAssessmentModule(test_system(), self.location,
@@ -64,13 +64,14 @@ class SelfAssessmentTest(unittest.TestCase):
    def test_self_assessment_flow(self):
        responses = {'assessment': '0', 'score_list[]': ['0', '0']}

        def get_fake_item(name):
            return responses[name]

        def get_data_for_location(self, location, student):
            return {
                'count_graded': 0,
                'count_required': 0,
                'student_sub_count': 0,
            }
@@ -89,7 +90,6 @@ class SelfAssessmentTest(unittest.TestCase):
        self.module.save_assessment(mock_query_dict, self.module.system)
        self.assertEqual(self.module.state, self.module.DONE)
        d = self.module.reset({})
        self.assertTrue(d['success'])
        self.assertEqual(self.module.state, self.module.INITIAL)
...
OPEN_ENDED_GRADING_INTERFACE = {
    'url': 'http://127.0.0.1:3033/',
    'username': 'incorrect',
    'password': 'incorrect',
    'staff_grading': 'staff_grading',
    'peer_grading': 'peer_grading',
    'grading_controller': 'grading_controller'
}

S3_INTERFACE = {
    'aws_access_key': "",
    'aws_secret_key': "",
    "aws_bucket_name": "",
}
\ No newline at end of file
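Note: these dummy interface dicts exist only so tests can construct grading interfaces without a live service. Later hunks in this commit inject them into a test ModuleSystem roughly like this sketch (the location value is a placeholder; the imports mirror the test module shown further down in this diff):

from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
import test_util_open_ended

location = 'i4x://edX/toy/peergrading/init'  # placeholder for illustration
system = ModuleSystem(location, None, None, render_to_string, None,
                      s3_interface=test_util_open_ended.S3_INTERFACE,
                      open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE)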
@@ -22,7 +22,7 @@ NOTIFICATION_TYPES = (
    ('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
    ('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted'),
    ('flagged_submissions_exist', 'open_ended_flagged_problems', 'Flagged Submissions')
)


def staff_grading_notifications(course, user):
@@ -46,7 +46,9 @@ def staff_grading_notifications(course, user):
        #Non catastrophic error, so no real action
        notifications = {}
        #This is a dev_facing_error
        log.info(
            "Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id,
                                                                                                             student_id))
    if pending_grading:
        img_path = "/static/images/grading_notification.png"
@@ -80,7 +82,9 @@ def peer_grading_notifications(course, user):
        #Non catastrophic error, so no real action
        notifications = {}
        #This is a dev_facing_error
        log.info(
            "Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id,
                                                                                                            student_id))
    if pending_grading:
        img_path = "/static/images/grading_notification.png"
@@ -105,7 +109,9 @@ def combined_notifications(course, user):
        return notification_dict
    min_time_to_query = user.last_login
    last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id,
                                                    modified__gt=min_time_to_query).values('modified').order_by(
        '-modified')
    last_module_seen_count = last_module_seen.count()
    if last_module_seen_count > 0:
@@ -117,7 +123,8 @@ def combined_notifications(course, user):
    img_path = ""
    try:
        controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff,
                                                                         last_time_viewed)
        log.debug(controller_response)
        notifications = json.loads(controller_response)
        if notifications['success']:
@@ -127,7 +134,9 @@ def combined_notifications(course, user):
        #Non catastrophic error, so no real action
        notifications = {}
        #This is a dev_facing_error
        log.exception(
            "Problem with getting notifications from controller query service for course {0} user {1}.".format(
                course_id, student_id))
    if pending_grading:
        img_path = "/static/images/grading_notification.png"
@@ -151,7 +160,8 @@ def set_value_in_cache(student_id, course_id, notification_type, value):
def create_key_name(student_id, course_id, notification_type):
    key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id,
                                                          student=student_id)
    return key_name
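Note: create_key_name flattens (student, course, notification type) into a single cache key. A worked example; KEY_PREFIX's real value is defined outside this diff, so a stand-in is used here:

KEY_PREFIX = "notify_"  # hypothetical stand-in for the real prefix

key_name = "{prefix}{type}_{course}_{student}".format(
    prefix=KEY_PREFIX, type='staff_grading', course='edX/toy/2012_Fall', student=42)
# key_name == "notify_staff_grading_edX/toy/2012_Fall_42"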
...
@@ -15,6 +15,7 @@ class StaffGrading(object):
    """
    Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views.
    """

    def __init__(self, course):
        self.course = course
...
@@ -20,10 +20,12 @@ log = logging.getLogger(__name__)
STAFF_ERROR_MESSAGE = 'Could not contact the external grading server. Please contact the development team. If you do not have a point of contact, you can contact Vik at vik@edx.org.'


class MockStaffGradingService(object):
    """
    A simple mockup of a staff grading service, testing.
    """

    def __init__(self):
        self.cnt = 0
@@ -45,13 +47,16 @@ class MockStaffGradingService(object):
        return json.dumps({'success': True,
                           'problem_list': [
                               json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
                                           'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5,
                                           'min_for_ml': 10}),
                               json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
                                           'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5,
                                           'min_for_ml': 10})
                           ]})

    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
                   submission_flagged):
        return self.get_next(course_id, 'fake location', grader_id)
@@ -59,6 +64,7 @@ class StaffGradingService(GradingService):
    """
    Interface to staff grading backend.
    """

    def __init__(self, config):
        config['system'] = ModuleSystem(None, None, None, render_to_string, None)
        super(StaffGradingService, self).__init__(config)
@@ -114,7 +120,8 @@ class StaffGradingService(GradingService):
        return json.dumps(self._render_rubric(response))

    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
                   submission_flagged):
        """
        Save a score and feedback for a submission.
@@ -297,7 +304,7 @@ def save_grade(request, course_id):
    if request.method != 'POST':
        raise Http404

    required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged', 'rubric_scores[]'])
    actual = set(request.POST.keys())
    missing = required - actual
    if len(missing) > 0:
@@ -307,7 +314,6 @@ def save_grade(request, course_id):
    grader_id = unique_id_for_user(request.user)
    p = request.POST
    location = p['location']
    skipped = 'skipped' in p
@@ -322,7 +328,9 @@ def save_grade(request, course_id):
                                           p['submission_flagged'])
    except GradingServiceError:
        #This is a dev_facing_error
        log.exception(
            "Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(
                request, course_id))
        #This is a staff_facing_error
        return _err_response(STAFF_ERROR_MESSAGE)
@@ -330,13 +338,16 @@ def save_grade(request, course_id):
        result = json.loads(result_json)
    except ValueError:
        #This is a dev_facing_error
        log.exception(
            "save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(
                result_json))
        #This is a staff_facing_error
        return _err_response(STAFF_ERROR_MESSAGE)

    if not result.get('success', False):
        #This is a dev_facing_error
        log.warning(
            'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))
        return _err_response(STAFF_ERROR_MESSAGE)

    # Ok, save_grade seemed to work. Get the next submission to grade.
...
@@ -22,6 +22,7 @@ from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
import logging

log = logging.getLogger(__name__)
from django.test.utils import override_settings
from django.http import QueryDict
@@ -36,6 +37,7 @@ class TestStaffGradingService(ct.PageLoader):
    access control and error handling logic -- all the actual work is on the
    backend.
    '''

    def setUp(self):
        xmodule.modulestore.django._MODULESTORES = {}
@@ -50,6 +52,7 @@ class TestStaffGradingService(ct.PageLoader):
        self.course_id = "edX/toy/2012_Fall"
        self.toy = modulestore().get_course(self.course_id)

        def make_instructor(course):
            group_name = _course_staff_group_name(course.location)
            g = Group.objects.create(name=group_name)
@@ -130,6 +133,7 @@ class TestPeerGradingService(ct.PageLoader):
    access control and error handling logic -- all the actual work is on the
    backend.
    '''

    def setUp(self):
        xmodule.modulestore.django._MODULESTORES = {}
@@ -148,11 +152,12 @@ class TestPeerGradingService(ct.PageLoader):
        self.mock_service = peer_grading_service.MockPeerGradingService()
        self.system = ModuleSystem(location, None, None, render_to_string, None,
                                   s3_interface=test_util_open_ended.S3_INTERFACE,
                                   open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE
                                   )
        self.descriptor = peer_grading_module.PeerGradingDescriptor(self.system)
        self.peer_module = peer_grading_module.PeerGradingModule(self.system, location, "<peergrading/>",
                                                                 self.descriptor)
        self.peer_module.peer_gs = self.mock_service
        self.logout()
@@ -185,8 +190,10 @@ class TestPeerGradingService(ct.PageLoader):
        }
        qdict = MagicMock()

        def fake_get_item(key):
            return data[key]

        qdict.__getitem__.side_effect = fake_get_item
        qdict.getlist = fake_get_item
        qdict.keys = data.keys
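Note: the MagicMock above stands in for a Django QueryDict by faking only the three behaviors the module touches: item access, getlist, and keys. A self-contained sketch of the same trick, with illustrative data:

from mock import MagicMock

data = {'location': 'i4x://edX/toy/peergrading/init', 'score': '1'}
qdict = MagicMock()

def fake_get_item(key):
    return data[key]

qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item
qdict.keys = data.keys

assert qdict['score'] == '1'
assert 'location' in qdict.keys()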
@@ -247,8 +254,10 @@ class TestPeerGradingService(ct.PageLoader):
        }
        qdict = MagicMock()

        def fake_get_item(key):
            return data[key]

        qdict.__getitem__.side_effect = fake_get_item
        qdict.getlist = fake_get_item
        qdict.keys = data.keys
...
@@ -50,22 +50,24 @@ def _reverse_without_slash(url_name, course_id):
    ajax_url = reverse(url_name, kwargs={'course_id': course_id})
    return ajax_url


DESCRIPTION_DICT = {
    'Peer Grading': "View all problems that require peer assessment in this particular course.",
    'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
    'Problems you have submitted': "View open ended problems that you have previously submitted for grading.",
    'Flagged Submissions': "View submissions that have been flagged by students as inappropriate."
}

ALERT_DICT = {
    'Peer Grading': "New submissions to grade",
    'Staff Grading': "New submissions to grade",
    'Problems you have submitted': "New grades have been returned",
    'Flagged Submissions': "Submissions have been flagged for review"
}

STUDENT_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify course staff."
STAFF_ERROR_MESSAGE = "Error occured while contacting the grading service. Please notify the development team. If you do not have a point of contact, please email Vik at vik@edx.org"


@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
    """
@@ -92,7 +94,7 @@ def peer_grading(request, course_id):
    #Get the current course
    course = get_course_with_access(request.user, course_id, 'load')
    course_id_parts = course.id.split("/")
    false_dict = [False, "False", "false", "FALSE"]
    #Reverse the base course url
    base_course_url = reverse('courses')
@@ -174,7 +176,7 @@ def student_problem_list(request, course_id):
            except:
                #This is a student_facing_error
                eta_string = "Error getting ETA."
            problem_list[i].update({'eta_string': eta_string})
    except GradingServiceError:
        #This is a student_facing_error
@@ -318,12 +320,13 @@ def take_action_on_flags(request, course_id):
    if request.method != 'POST':
        raise Http404

    required = ['submission_id', 'action_type', 'student_id']
    for key in required:
        if key not in request.POST:
            #This is a staff_facing_error
            return HttpResponse(json.dumps({'success': False,
                                            'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(
                                                key)}),
                                mimetype="application/json")
    p = request.POST
@@ -338,5 +341,7 @@ def take_action_on_flags(request, course_id):
        return HttpResponse(response, mimetype="application/json")
    except GradingServiceError:
        #This is a dev_facing_error
        log.exception(
            "Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(
                submission_id, action_type, grader_id))
        return _err_response(STAFF_ERROR_MESSAGE)