Commit 554cb752 by Vik Paruchuri

PEP 8 cleanup

parent dfd66c65
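The diff below applies the same PEP 8 whitespace and line-length fixes throughout the open ended grading xmodules: spaces before colons in dict literals are dropped, keyword arguments lose the spaces around the equals sign, comparison operators and commas gain the standard surrounding space, and overlong calls are wrapped onto aligned continuation lines. A minimal sketch of the kind of change made in each hunk, using hypothetical names rather than code taken from this commit:

    # Before: the non-PEP 8 spacing this cleanup targets
    #   context = {'task_name' : task_name, 'score' : score+1}
    # After: PEP 8 spacing, as applied throughout the diff
    task_name = "Scored Rubric"   # illustrative values only
    score = 0
    context = {'task_name': task_name, 'score': score + 1}
    print(context)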
......@@ -10,7 +10,6 @@ from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import Comb
log = logging.getLogger("mitx.courseware")
VERSION_TUPLES = (
('1', CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module),
)
......@@ -18,6 +17,7 @@ VERSION_TUPLES = (
DEFAULT_VERSION = 1
DEFAULT_VERSION = str(DEFAULT_VERSION)
class CombinedOpenEndedModule(XModule):
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
......@@ -60,7 +60,7 @@ class CombinedOpenEndedModule(XModule):
def __init__(self, system, location, definition, descriptor,
instance_state=None, shared_state=None, **kwargs):
XModule.__init__(self, system, location, definition, descriptor,
instance_state, shared_state, **kwargs)
instance_state, shared_state, **kwargs)
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block:
......@@ -129,13 +129,15 @@ class CombinedOpenEndedModule(XModule):
version_index = versions.index(self.version)
static_data = {
'rewrite_content_links' : self.rewrite_content_links,
'rewrite_content_links': self.rewrite_content_links,
}
self.child_descriptor = descriptors[version_index](self.system)
self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['data']), self.system)
self.child_definition = descriptors[version_index].definition_from_xml(etree.fromstring(definition['data']),
self.system)
self.child_module = modules[version_index](self.system, location, self.child_definition, self.child_descriptor,
instance_state = json.dumps(instance_state), metadata = self.metadata, static_data= static_data)
instance_state=json.dumps(instance_state), metadata=self.metadata,
static_data=static_data)
def get_html(self):
return self.child_module.get_html()
......
......@@ -40,14 +40,15 @@ ACCEPT_FILE_UPLOAD = False
TRUE_DICT = ["True", True, "TRUE", "true"]
HUMAN_TASK_TYPE = {
'selfassessment' : "Self Assessment",
'openended' : "edX Assessment",
}
'selfassessment': "Self Assessment",
'openended': "edX Assessment",
}
#Default value that controls whether or not to skip basic spelling checks in the controller
#Metadata overrides this
SKIP_BASIC_CHECKS = False
class CombinedOpenEndedV1Module():
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
......@@ -83,7 +84,7 @@ class CombinedOpenEndedV1Module():
TEMPLATE_DIR = "combinedopenended"
def __init__(self, system, location, definition, descriptor,
instance_state=None, shared_state=None, metadata = None, static_data = None, **kwargs):
instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block:
......@@ -122,7 +123,7 @@ class CombinedOpenEndedV1Module():
self.metadata = metadata
self.display_name = metadata.get('display_name', "Open Ended")
self.rewrite_content_links = static_data.get('rewrite_content_links',"")
self.rewrite_content_links = static_data.get('rewrite_content_links', "")
# Load instance state
......@@ -152,10 +153,10 @@ class CombinedOpenEndedV1Module():
self.skip_basic_checks = self.metadata.get('skip_spelling_checks', SKIP_BASIC_CHECKS)
display_due_date_string = self.metadata.get('due', None)
grace_period_string = self.metadata.get('graceperiod', None)
try:
self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
except:
log.error("Error parsing due date information in location {0}".format(location))
raise
......@@ -177,10 +178,10 @@ class CombinedOpenEndedV1Module():
'rubric': definition['rubric'],
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
'close_date' : self.timeinfo.close_date,
's3_interface' : self.system.s3_interface,
'skip_basic_checks' : self.skip_basic_checks,
}
'close_date': self.timeinfo.close_date,
's3_interface': self.system.s3_interface,
'skip_basic_checks': self.skip_basic_checks,
}
self.task_xml = definition['task_xml']
self.location = location
......@@ -223,15 +224,15 @@ class CombinedOpenEndedV1Module():
child_modules = {
'openended': open_ended_module.OpenEndedModule,
'selfassessment': self_assessment_module.SelfAssessmentModule,
}
}
child_descriptors = {
'openended': open_ended_module.OpenEndedDescriptor,
'selfassessment': self_assessment_module.SelfAssessmentDescriptor,
}
}
children = {
'modules': child_modules,
'descriptors': child_descriptors,
}
}
return children
def setup_next_task(self, reset=False):
......@@ -267,7 +268,8 @@ class CombinedOpenEndedV1Module():
self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
if current_task_state is None and self.current_task_number == 0:
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data)
self.current_task_parsed_xml, self.current_task_descriptor,
self.static_data)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
elif current_task_state is None and self.current_task_number > 0:
......@@ -280,18 +282,20 @@ class CombinedOpenEndedV1Module():
'attempts': 0,
'created': True,
'history': [{'answer': last_response}],
})
})
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
instance_state=current_task_state)
self.current_task_parsed_xml, self.current_task_descriptor,
self.static_data,
instance_state=current_task_state)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
else:
if self.current_task_number > 0 and not reset:
current_task_state = self.overwrite_state(current_task_state)
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
instance_state=current_task_state)
self.current_task_parsed_xml, self.current_task_descriptor,
self.static_data,
instance_state=current_task_state)
return True
......@@ -307,8 +311,8 @@ class CombinedOpenEndedV1Module():
last_response_data = self.get_last_response(self.current_task_number - 1)
current_response_data = self.get_current_attributes(self.current_task_number)
if(current_response_data['min_score_to_attempt'] > last_response_data['score']
or current_response_data['max_score_to_attempt'] < last_response_data['score']):
if (current_response_data['min_score_to_attempt'] > last_response_data['score']
or current_response_data['max_score_to_attempt'] < last_response_data['score']):
self.state = self.DONE
self.allow_reset = True
......@@ -334,8 +338,8 @@ class CombinedOpenEndedV1Module():
'display_name': self.display_name,
'accept_file_upload': self.accept_file_upload,
'location': self.location,
'legend_list' : LEGEND_LIST,
}
'legend_list': LEGEND_LIST,
}
return context
......@@ -404,7 +408,7 @@ class CombinedOpenEndedV1Module():
task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system)
task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor,
self.static_data, instance_state=task_state)
self.static_data, instance_state=task_state)
last_response = task.latest_answer()
last_score = task.latest_score()
last_post_assessment = task.latest_post_assessment(self.system)
......@@ -426,10 +430,10 @@ class CombinedOpenEndedV1Module():
rubric_scores = rubric_data['rubric_scores']
grader_types = rubric_data['grader_types']
feedback_items = rubric_data['feedback_items']
feedback_dicts = rubric_data['feedback_dicts']
feedback_dicts = rubric_data['feedback_dicts']
grader_ids = rubric_data['grader_ids']
submission_ids = rubric_data['submission_ids']
elif task_type== "selfassessment":
submission_ids = rubric_data['submission_ids']
elif task_type == "selfassessment":
rubric_scores = last_post_assessment
grader_types = ['SA']
feedback_items = ['']
......@@ -446,7 +450,7 @@ class CombinedOpenEndedV1Module():
human_state = task.HUMAN_NAMES[state]
else:
human_state = state
if len(grader_types)>0:
if len(grader_types) > 0:
grader_type = grader_types[0]
else:
grader_type = "IN"
......@@ -468,15 +472,15 @@ class CombinedOpenEndedV1Module():
'correct': last_correctness,
'min_score_to_attempt': min_score_to_attempt,
'max_score_to_attempt': max_score_to_attempt,
'rubric_scores' : rubric_scores,
'grader_types' : grader_types,
'feedback_items' : feedback_items,
'grader_type' : grader_type,
'human_grader_type' : human_grader_name,
'feedback_dicts' : feedback_dicts,
'grader_ids' : grader_ids,
'submission_ids' : submission_ids,
}
'rubric_scores': rubric_scores,
'grader_types': grader_types,
'feedback_items': feedback_items,
'grader_type': grader_type,
'human_grader_type': human_grader_name,
'feedback_dicts': feedback_dicts,
'grader_ids': grader_ids,
'submission_ids': submission_ids,
}
return last_response_dict
def update_task_states(self):
......@@ -519,20 +523,27 @@ class CombinedOpenEndedV1Module():
Output: Dictionary to be rendered via ajax that contains the result html.
"""
all_responses = []
loop_up_to_task = self.current_task_number+1
for i in xrange(0,loop_up_to_task):
loop_up_to_task = self.current_task_number + 1
for i in xrange(0, loop_up_to_task):
all_responses.append(self.get_last_response(i))
rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['rubric_scores'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
grader_types = [all_responses[i]['grader_types'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['grader_types'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0,len(all_responses)) if len(all_responses[i]['feedback_items'])>0 and all_responses[i]['grader_types'][0] in HUMAN_GRADER_TYPE.keys()]
rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), rubric_scores,
grader_types, feedback_items)
rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0, len(all_responses)) if
len(all_responses[i]['rubric_scores']) > 0 and all_responses[i]['grader_types'][
0] in HUMAN_GRADER_TYPE.keys()]
grader_types = [all_responses[i]['grader_types'] for i in xrange(0, len(all_responses)) if
len(all_responses[i]['grader_types']) > 0 and all_responses[i]['grader_types'][
0] in HUMAN_GRADER_TYPE.keys()]
feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0, len(all_responses)) if
len(all_responses[i]['feedback_items']) > 0 and all_responses[i]['grader_types'][
0] in HUMAN_GRADER_TYPE.keys()]
rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']),
rubric_scores,
grader_types, feedback_items)
response_dict = all_responses[-1]
context = {
'results': rubric_html,
'task_name' : 'Scored Rubric',
'class_name' : 'combined-rubric-container'
'task_name': 'Scored Rubric',
'class_name': 'combined-rubric-container'
}
html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
......@@ -544,8 +555,8 @@ class CombinedOpenEndedV1Module():
Output: Dictionary to be rendered via ajax that contains the result html.
"""
context = {
'legend_list' : LEGEND_LIST,
}
'legend_list': LEGEND_LIST,
}
html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
......@@ -556,15 +567,16 @@ class CombinedOpenEndedV1Module():
Output: Dictionary to be rendered via ajax that contains the result html.
"""
self.update_task_states()
loop_up_to_task = self.current_task_number+1
all_responses =[]
for i in xrange(0,loop_up_to_task):
loop_up_to_task = self.current_task_number + 1
all_responses = []
for i in xrange(0, loop_up_to_task):
all_responses.append(self.get_last_response(i))
context_list = []
for ri in all_responses:
for i in xrange(0,len(ri['rubric_scores'])):
feedback = ri['feedback_dicts'][i].get('feedback','')
rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), ri['rubric_scores'][i])
for i in xrange(0, len(ri['rubric_scores'])):
feedback = ri['feedback_dicts'][i].get('feedback', '')
rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']),
ri['rubric_scores'][i])
if rubric_data['success']:
rubric_html = rubric_data['html']
else:
......@@ -572,23 +584,23 @@ class CombinedOpenEndedV1Module():
context = {
'rubric_html': rubric_html,
'grader_type': ri['grader_type'],
'feedback' : feedback,
'grader_id' : ri['grader_ids'][i],
'submission_id' : ri['submission_ids'][i],
'feedback': feedback,
'grader_id': ri['grader_ids'][i],
'submission_id': ri['submission_ids'][i],
}
context_list.append(context)
feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), {
'context_list' : context_list,
'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
'human_grader_types' : HUMAN_GRADER_TYPE,
'context_list': context_list,
'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
'human_grader_types': HUMAN_GRADER_TYPE,
'rows': 50,
'cols': 50,
})
context = {
'results': feedback_table,
'task_name' : "Feedback",
'class_name' : "result-container",
}
'task_name': "Feedback",
'class_name': "result-container",
}
html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
return {'html': html, 'success': True}
......@@ -617,8 +629,8 @@ class CombinedOpenEndedV1Module():
'reset': self.reset,
'get_results': self.get_results,
'get_combined_rubric': self.get_rubric,
'get_status' : self.get_status_ajax,
'get_legend' : self.get_legend,
'get_status': self.get_status_ajax,
'get_legend': self.get_legend,
}
if dispatch not in handlers:
......@@ -681,7 +693,7 @@ class CombinedOpenEndedV1Module():
'task_states': self.task_states,
'attempts': self.attempts,
'ready_to_reset': self.allow_reset,
}
}
return json.dumps(state)
......@@ -699,11 +711,12 @@ class CombinedOpenEndedV1Module():
context = {
'status_list': status,
'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
'legend_list' : LEGEND_LIST,
'render_via_ajax' : render_via_ajax,
'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
'legend_list': LEGEND_LIST,
'render_via_ajax': render_via_ajax,
}
status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), context)
status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR),
context)
return status_html
......@@ -736,7 +749,7 @@ class CombinedOpenEndedV1Module():
score_dict = {
'score': score,
'total': max_score,
}
}
return score_dict
......@@ -793,7 +806,9 @@ class CombinedOpenEndedV1Descriptor(XmlDescriptor, EditingDescriptor):
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
#This is a staff_facing_error
raise ValueError("Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
raise ValueError(
"Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))
def parse_task(k):
"""Assumes that xml_object has child k"""
......
......@@ -4,24 +4,26 @@ from lxml import etree
log = logging.getLogger(__name__)
GRADER_TYPE_IMAGE_DICT = {
'SA' : '/static/images/self_assessment_icon.png',
'PE' : '/static/images/peer_grading_icon.png',
'ML' : '/static/images/ml_grading_icon.png',
'IN' : '/static/images/peer_grading_icon.png',
'BC' : '/static/images/ml_grading_icon.png',
}
'SA': '/static/images/self_assessment_icon.png',
'PE': '/static/images/peer_grading_icon.png',
'ML': '/static/images/ml_grading_icon.png',
'IN': '/static/images/peer_grading_icon.png',
'BC': '/static/images/ml_grading_icon.png',
}
HUMAN_GRADER_TYPE = {
'SA' : 'Self-Assessment',
'PE' : 'Peer-Assessment',
'IN' : 'Instructor-Assessment',
'ML' : 'AI-Assessment',
'BC' : 'AI-Assessment',
}
'SA': 'Self-Assessment',
'PE': 'Peer-Assessment',
'IN': 'Instructor-Assessment',
'ML': 'AI-Assessment',
'BC': 'AI-Assessment',
}
DO_NOT_DISPLAY = ['BC', 'IN']
LEGEND_LIST = [{'name' : HUMAN_GRADER_TYPE[k], 'image' : GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys() if k not in DO_NOT_DISPLAY ]
LEGEND_LIST = [{'name': HUMAN_GRADER_TYPE[k], 'image': GRADER_TYPE_IMAGE_DICT[k]} for k in GRADER_TYPE_IMAGE_DICT.keys()
if k not in DO_NOT_DISPLAY]
class RubricParsingError(Exception):
def __init__(self, msg):
......@@ -29,15 +31,14 @@ class RubricParsingError(Exception):
class CombinedOpenEndedRubric(object):
TEMPLATE_DIR = "combinedopenended/openended"
def __init__ (self, system, view_only = False):
def __init__(self, system, view_only=False):
self.has_score = False
self.view_only = view_only
self.system = system
def render_rubric(self, rubric_xml, score_list = None):
def render_rubric(self, rubric_xml, score_list=None):
'''
render_rubric: takes in an xml string and outputs the corresponding
html for that xml, given the type of rubric we're generating
......@@ -50,11 +51,11 @@ class CombinedOpenEndedRubric(object):
success = False
try:
rubric_categories = self.extract_categories(rubric_xml)
if score_list and len(score_list)==len(rubric_categories):
for i in xrange(0,len(rubric_categories)):
if score_list and len(score_list) == len(rubric_categories):
for i in xrange(0, len(rubric_categories)):
category = rubric_categories[i]
for j in xrange(0,len(category['options'])):
if score_list[i]==j:
for j in xrange(0, len(category['options'])):
if score_list[i] == j:
rubric_categories[i]['options'][j]['selected'] = True
rubric_scores = [cat['score'] for cat in rubric_categories]
max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
......@@ -63,19 +64,20 @@ class CombinedOpenEndedRubric(object):
if self.view_only:
rubric_template = '{0}/open_ended_view_only_rubric.html'.format(self.TEMPLATE_DIR)
html = self.system.render_template(rubric_template,
{'categories': rubric_categories,
'has_score': self.has_score,
'view_only': self.view_only,
'max_score': max_score,
'combined_rubric' : False
})
{'categories': rubric_categories,
'has_score': self.has_score,
'view_only': self.view_only,
'max_score': max_score,
'combined_rubric': False
})
success = True
except:
#This is a staff_facing_error
error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(rubric_xml)
error_message = "[render_rubric] Could not parse the rubric with xml: {0}. Contact the learning sciences group for assistance.".format(
rubric_xml)
log.exception(error_message)
raise RubricParsingError(error_message)
return {'success' : success, 'html' : html, 'rubric_scores' : rubric_scores}
return {'success': success, 'html': html, 'rubric_scores': rubric_scores}
def check_if_rubric_is_parseable(self, rubric_string, location, max_score_allowed, max_score):
rubric_dict = self.render_rubric(rubric_string)
......@@ -83,7 +85,8 @@ class CombinedOpenEndedRubric(object):
rubric_feedback = rubric_dict['html']
if not success:
#This is a staff_facing_error
error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(rubric_string, location.url())
error_message = "Could not parse rubric : {0} for location {1}. Contact the learning sciences group for assistance.".format(
rubric_string, location.url())
log.error(error_message)
raise RubricParsingError(error_message)
......@@ -101,7 +104,7 @@ class CombinedOpenEndedRubric(object):
if total != max_score:
#This is a staff_facing_error
error_msg = "The max score {0} for problem {1} does not match the total number of points in the rubric {2}. Contact the learning sciences group for assistance.".format(
max_score, location, total)
max_score, location, total)
log.error(error_msg)
raise RubricParsingError(error_msg)
......@@ -123,7 +126,9 @@ class CombinedOpenEndedRubric(object):
for category in element:
if category.tag != 'category':
#This is a staff_facing_error
raise RubricParsingError("[extract_categories] Expected a <category> tag: got {0} instead. Contact the learning sciences group for assistance.".format(category.tag))
raise RubricParsingError(
"[extract_categories] Expected a <category> tag: got {0} instead. Contact the learning sciences group for assistance.".format(
category.tag))
else:
categories.append(self.extract_category(category))
return categories
......@@ -150,13 +155,17 @@ class CombinedOpenEndedRubric(object):
# if we are missing the score tag and we are expecting one
elif self.has_score:
#This is a staff_facing_error
raise RubricParsingError("[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(descriptionxml.text))
raise RubricParsingError(
"[extract_category] Category {0} is missing a score. Contact the learning sciences group for assistance.".format(
descriptionxml.text))
# parse description
if descriptionxml.tag != 'description':
#This is a staff_facing_error
raise RubricParsingError("[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(descriptionxml.tag))
raise RubricParsingError(
"[extract_category]: expected description tag, got {0} instead. Contact the learning sciences group for assistance.".format(
descriptionxml.tag))
description = descriptionxml.text
......@@ -167,7 +176,9 @@ class CombinedOpenEndedRubric(object):
for option in optionsxml:
if option.tag != 'option':
#This is a staff_facing_error
raise RubricParsingError("[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(option.tag))
raise RubricParsingError(
"[extract_category]: expected option tag, got {0} instead. Contact the learning sciences group for assistance.".format(
option.tag))
else:
pointstr = option.get("points")
if pointstr:
......@@ -177,13 +188,16 @@ class CombinedOpenEndedRubric(object):
points = int(pointstr)
except ValueError:
#This is a staff_facing_error
raise RubricParsingError("[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(pointstr))
raise RubricParsingError(
"[extract_category]: expected points to have int, got {0} instead. Contact the learning sciences group for assistance.".format(
pointstr))
elif autonumbering:
# use the generated one if we're in the right mode
points = cur_points
cur_points = cur_points + 1
else:
raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
raise Exception(
"[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly defined.")
selected = score == points
optiontext = option.text
......@@ -193,31 +207,32 @@ class CombinedOpenEndedRubric(object):
options = sorted(options, key=lambda option: option['points'])
CombinedOpenEndedRubric.validate_options(options)
return {'description': description, 'options': options, 'score' : score}
return {'description': description, 'options': options, 'score': score}
def render_combined_rubric(self,rubric_xml,scores,score_types,feedback_types):
success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores,score_types,feedback_types)
def render_combined_rubric(self, rubric_xml, scores, score_types, feedback_types):
success, score_tuples = CombinedOpenEndedRubric.reformat_scores_for_rendering(scores, score_types,
feedback_types)
rubric_categories = self.extract_categories(rubric_xml)
max_scores = map((lambda cat: cat['options'][-1]['points']), rubric_categories)
max_score = max(max_scores)
for i in xrange(0,len(rubric_categories)):
for i in xrange(0, len(rubric_categories)):
category = rubric_categories[i]
for j in xrange(0,len(category['options'])):
for j in xrange(0, len(category['options'])):
rubric_categories[i]['options'][j]['grader_types'] = []
for tuple in score_tuples:
if tuple[1] == i and tuple[2] ==j:
if tuple[1] == i and tuple[2] == j:
for grader_type in tuple[3]:
rubric_categories[i]['options'][j]['grader_types'].append(grader_type)
html = self.system.render_template('{0}/open_ended_combined_rubric.html'.format(self.TEMPLATE_DIR),
{'categories': rubric_categories,
'has_score': True,
'view_only': True,
'max_score': max_score,
'combined_rubric' : True,
'grader_type_image_dict' : GRADER_TYPE_IMAGE_DICT,
'human_grader_types' : HUMAN_GRADER_TYPE,
})
{'categories': rubric_categories,
'has_score': True,
'view_only': True,
'max_score': max_score,
'combined_rubric': True,
'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
'human_grader_types': HUMAN_GRADER_TYPE,
})
return html
......@@ -228,14 +243,16 @@ class CombinedOpenEndedRubric(object):
'''
if len(options) == 0:
#This is a staff_facing_error
raise RubricParsingError("[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
raise RubricParsingError(
"[extract_category]: no options associated with this category. Contact the learning sciences group for assistance.")
if len(options) == 1:
return
prev = options[0]['points']
for option in options[1:]:
if prev == option['points']:
#This is a staff_facing_error
raise RubricParsingError("[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
raise RubricParsingError(
"[extract_category]: found duplicate point values between two different options. Contact the learning sciences group for assistance.")
else:
prev = option['points']
......@@ -250,7 +267,7 @@ class CombinedOpenEndedRubric(object):
@return:
"""
success = False
if len(scores)==0:
if len(scores) == 0:
#This is a dev_facing_error
log.error("Score length is 0 when trying to reformat rubric scores for rendering.")
return success, ""
......@@ -264,25 +281,25 @@ class CombinedOpenEndedRubric(object):
score_lists = []
score_type_list = []
feedback_type_list = []
for i in xrange(0,len(scores)):
for i in xrange(0, len(scores)):
score_cont_list = scores[i]
for j in xrange(0,len(score_cont_list)):
for j in xrange(0, len(score_cont_list)):
score_list = score_cont_list[j]
score_lists.append(score_list)
score_type_list.append(score_types[i][j])
feedback_type_list.append(feedback_types[i][j])
score_list_len = len(score_lists[0])
for i in xrange(0,len(score_lists)):
for i in xrange(0, len(score_lists)):
score_list = score_lists[i]
if len(score_list)!=score_list_len:
if len(score_list) != score_list_len:
return success, ""
score_tuples = []
for i in xrange(0,len(score_lists)):
for j in xrange(0,len(score_lists[i])):
tuple = [1,j,score_lists[i][j],[],[]]
score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples,tuple)
for i in xrange(0, len(score_lists)):
for j in xrange(0, len(score_lists[i])):
tuple = [1, j, score_lists[i][j], [], []]
score_tuples, tup_ind = CombinedOpenEndedRubric.check_for_tuple_matches(score_tuples, tuple)
score_tuples[tup_ind][0] += 1
score_tuples[tup_ind][3].append(score_type_list[i])
score_tuples[tup_ind][4].append(feedback_type_list[i])
......@@ -302,14 +319,14 @@ class CombinedOpenEndedRubric(object):
category = tuple[1]
score = tuple[2]
tup_ind = -1
for t in xrange(0,len(tuples)):
for t in xrange(0, len(tuples)):
if tuples[t][1] == category and tuples[t][2] == score:
tup_ind = t
break
if tup_ind == -1:
tuples.append([0,category,score,[],[]])
tup_ind = len(tuples)-1
tuples.append([0, category, score, [], []])
tup_ind = len(tuples) - 1
return tuples, tup_ind
......
......@@ -8,6 +8,7 @@ class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
"""
def __init__(self, config, system):
config['system'] = system
super(ControllerQueryService, self).__init__(config)
......@@ -59,7 +60,7 @@ class ControllerQueryService(GradingService):
def get_flagged_problem_list(self, course_id):
params = {
'course_id': course_id,
}
}
response = self.get(self.flagged_problem_list_url, params)
return response
......@@ -70,20 +71,21 @@ class ControllerQueryService(GradingService):
'student_id': student_id,
'submission_id': submission_id,
'action_type': action_type
}
}
response = self.post(self.take_action_on_flags_url, params)
return response
def convert_seconds_to_human_readable(seconds):
if seconds < 60:
human_string = "{0} seconds".format(seconds)
elif seconds < 60 * 60:
human_string = "{0} minutes".format(round(seconds/60,1))
elif seconds < (24*60*60):
human_string = "{0} hours".format(round(seconds/(60*60),1))
human_string = "{0} minutes".format(round(seconds / 60, 1))
elif seconds < (24 * 60 * 60):
human_string = "{0} hours".format(round(seconds / (60 * 60), 1))
else:
human_string = "{0} days".format(round(seconds/(60*60*24),1))
human_string = "{0} days".format(round(seconds / (60 * 60 * 24), 1))
eta_string = "{0}".format(human_string)
return eta_string
......@@ -19,6 +19,7 @@ class GradingService(object):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
self.username = config['username']
self.password = config['password']
......@@ -34,8 +35,8 @@ class GradingService(object):
Returns the decoded json dict of the response.
"""
response = self.session.post(self.login_url,
{'username': self.username,
'password': self.password, })
{'username': self.username,
'password': self.password, })
response.raise_for_status()
......@@ -47,7 +48,7 @@ class GradingService(object):
"""
try:
op = lambda: self.session.post(url, data=data,
allow_redirects=allow_redirects)
allow_redirects=allow_redirects)
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
# reraise as promised GradingServiceError, but preserve stacktrace.
......@@ -63,8 +64,8 @@ class GradingService(object):
"""
log.debug(params)
op = lambda: self.session.get(url,
allow_redirects=allow_redirects,
params=params)
allow_redirects=allow_redirects,
params=params)
try:
r = self._try_with_login(op)
except (RequestException, ConnectionError, HTTPError) as err:
......@@ -92,7 +93,7 @@ class GradingService(object):
r = self._login()
if r and not r.get('success'):
log.warning("Couldn't log into staff_grading backend. Response: %s",
r)
r)
# try again
response = operation()
response.raise_for_status()
......
......@@ -5,6 +5,7 @@ to send them to S3.
try:
from PIL import Image
ENABLE_PIL = True
except:
ENABLE_PIL = False
......@@ -51,6 +52,7 @@ class ImageProperties(object):
"""
Class to check properties of an image and to validate if they are allowed.
"""
def __init__(self, image_data):
"""
Initializes class variables
......@@ -92,7 +94,7 @@ class ImageProperties(object):
g = rgb[1]
b = rgb[2]
check_r = (r > 60)
check_g = (r * 0.4) < g < (r * 0.85)
check_g = (r * 0.4) < g < (r * 0.85)
check_b = (r * 0.2) < b < (r * 0.7)
colors_okay = check_r and check_b and check_g
except:
......@@ -141,6 +143,7 @@ class URLProperties(object):
Checks to see if a URL points to acceptable content. Added to check if students are submitting reasonable
links to the peer grading image functionality of the external grading service.
"""
def __init__(self, url_string):
self.url_string = url_string
......@@ -212,7 +215,7 @@ def run_image_tests(image):
success = image_properties.run_tests()
except:
log.exception("Cannot run image tests in combined open ended xmodule. May be an issue with a particular image,"
"or an issue with the deployment configuration of PIL/Pillow")
"or an issue with the deployment configuration of PIL/Pillow")
return success
......@@ -252,7 +255,8 @@ def upload_to_s3(file_to_upload, keyname, s3_interface):
return True, public_url
except:
#This is a dev_facing_error
error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(bucketname.lower())
error_message = "Could not connect to S3 to upload peer grading image. Trying to utilize bucket: {0}".format(
bucketname.lower())
log.error(error_message)
return False, error_message
......
......@@ -10,7 +10,7 @@ import logging
from lxml import etree
import capa.xqueue_interface as xqueue_interface
from xmodule.capa_module import ComplexEncoder
from xmodule.capa_module import ComplexEncoder
from xmodule.editing_module import EditingDescriptor
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
......@@ -104,7 +104,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# response types)
except TypeError, ValueError:
#This is a dev_facing_error
log.exception("Grader payload from external open ended grading server is not a json object! Object: {0}".format(grader_payload))
log.exception(
"Grader payload from external open ended grading server is not a json object! Object: {0}".format(
grader_payload))
self.initial_display = find_with_default(oeparam, 'initial_display', '')
self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
......@@ -148,7 +150,9 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
#This is a student_facing_error
return {'success': False, 'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(tag)}
return {'success': False,
'msg': "Could not find needed tag {0} in the survey responses. Please try submitting again.".format(
tag)}
try:
submission_id = int(survey_responses['submission_id'])
grader_id = int(survey_responses['grader_id'])
......@@ -188,7 +192,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
}
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
body=json.dumps(contents))
#Convert error to a success value
success = True
......@@ -222,8 +226,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
str(len(self.history)))
xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'],
lms_key=queuekey,
queue_name=self.queue_name)
lms_key=queuekey,
queue_name=self.queue_name)
contents = self.payload.copy()
......@@ -241,7 +245,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# Submit request. When successful, 'msg' is the prior length of the queue
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
body=json.dumps(contents))
# State associated with the queueing request
queuestate = {'key': queuekey,
......@@ -300,7 +304,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# We want to display available feedback in a particular order.
# This dictionary specifies which goes first--lower first.
priorities = { # These go at the start of the feedback
priorities = {# These go at the start of the feedback
'spelling': 0,
'grammar': 1,
# needs to be after all the other feedback
......@@ -400,7 +404,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
if not response_items['success']:
return system.render_template("{0}/open_ended_error.html".format(self.TEMPLATE_DIR),
{'errors': feedback})
{'errors': feedback})
feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), {
'grader_type': response_items['grader_type'],
......@@ -437,13 +441,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'valid': False,
'score': 0,
'feedback': '',
'rubric_scores' : [[0]],
'grader_types' : [''],
'feedback_items' : [''],
'feedback_dicts' : [{}],
'grader_ids' : [0],
'submission_ids' : [0],
}
'rubric_scores': [[0]],
'grader_types': [''],
'feedback_items': [''],
'feedback_dicts': [{}],
'grader_ids': [0],
'submission_ids': [0],
}
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
......@@ -470,7 +474,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
log.error(error_message)
fail['feedback'] = error_message
return fail
#This is to support peer grading
#This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items = []
rubric_scores = []
......@@ -527,12 +531,12 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'valid': True,
'score': score,
'feedback': feedback,
'rubric_scores' : rubric_scores,
'grader_types' : grader_types,
'feedback_items' : feedback_items,
'feedback_dicts' : feedback_dicts,
'grader_ids' : grader_ids,
'submission_ids' : submission_ids,
'rubric_scores': rubric_scores,
'grader_types': grader_types,
'feedback_items': feedback_items,
'feedback_dicts': feedback_dicts,
'grader_ids': grader_ids,
'submission_ids': submission_ids,
}
def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
......@@ -545,7 +549,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return ""
feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), system,
join_feedback=join_feedback)
join_feedback=join_feedback)
if not short_feedback:
return feedback_dict['feedback'] if feedback_dict['valid'] else ''
if feedback_dict['valid']:
......@@ -585,7 +589,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
d = handlers[dispatch](get, system)
......@@ -679,7 +683,6 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
correct = ""
previous_answer = self.initial_display
context = {
'prompt': self.prompt,
'previous_answer': previous_answer,
......@@ -692,7 +695,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'child_type': 'openended',
'correct': correct,
'accept_file_upload': self.accept_file_upload,
'eta_message' : eta_string,
'eta_message': eta_string,
}
html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
return html
......@@ -723,7 +726,9 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
for child in ['openendedparam']:
if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error
raise ValueError("Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
raise ValueError(
"Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))
def parse(k):
"""Assumes that xml_object has child k"""
......
......@@ -74,7 +74,7 @@ class OpenEndedChild(object):
'done': 'Done',
}
def __init__(self, system, location, definition, descriptor, static_data,
def __init__(self, system, location, definition, descriptor, static_data,
instance_state=None, shared_state=None, **kwargs):
# Load instance state
if instance_state is not None:
......@@ -108,15 +108,14 @@ class OpenEndedChild(object):
self._max_score = static_data['max_score']
if system.open_ended_grading_interface:
self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,system)
self.controller_qs = controller_query_service.ControllerQueryService(system.open_ended_grading_interface,
system)
else:
self.peer_gs = MockPeerGradingService()
self.controller_qs = None
self.controller_qs = None
self.system = system
self.location_string = location
try:
self.location_string = self.location_string.url()
......@@ -152,7 +151,8 @@ class OpenEndedChild(object):
return True, {
'success': False,
#This is a student_facing_error
'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(self.attempts, self.max_attempts)
'error': 'You have attempted this problem {0} times. You are allowed {1} attempts.'.format(
self.attempts, self.max_attempts)
}
else:
return False, {}
......@@ -180,8 +180,8 @@ class OpenEndedChild(object):
try:
answer = autolink_html(answer)
cleaner = Cleaner(style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=True,
host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS,
whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
host_whitelist=open_ended_image_submission.TRUSTED_IMAGE_DOMAINS,
whitelist_tags=set(['embed', 'iframe', 'a', 'img']))
clean_html = cleaner.clean_html(answer)
clean_html = re.sub(r'</p>$', '', re.sub(r'^<p>', '', clean_html))
except:
......@@ -282,7 +282,7 @@ class OpenEndedChild(object):
"""
#This is a dev_facing_error
log.warning("Open ended child state out sync. state: %r, get: %r. %s",
self.state, get, msg)
self.state, get, msg)
#This is a student_facing_error
return {'success': False,
'error': 'The problem state got out-of-sync. Please try reloading the page.'}
......@@ -308,7 +308,7 @@ class OpenEndedChild(object):
@return: Boolean correct.
"""
correct = False
if(isinstance(score, (int, long, float, complex))):
if (isinstance(score, (int, long, float, complex))):
score_ratio = int(score) / float(self.max_score())
correct = (score_ratio >= 0.66)
return correct
......@@ -342,7 +342,8 @@ class OpenEndedChild(object):
try:
image_data.seek(0)
success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key, self.s3_interface)
success, s3_public_url = open_ended_image_submission.upload_to_s3(image_data, image_key,
self.s3_interface)
except:
log.exception("Could not upload image to S3.")
......@@ -404,9 +405,9 @@ class OpenEndedChild(object):
#In this case, an image was submitted by the student, but the image could not be uploaded to S3. Likely
#a config issue (development vs deployment). For now, just treat this as a "success"
log.exception("Student AJAX post to combined open ended xmodule indicated that it contained an image, "
"but the image was not able to be uploaded to S3. This could indicate a config"
"issue with this deployment, but it could also indicate a problem with S3 or with the"
"student image itself.")
"but the image was not able to be uploaded to S3. This could indicate a config"
"issue with this deployment, but it could also indicate a problem with S3 or with the"
"student image itself.")
overall_success = True
elif not has_file_to_upload:
#If there is no file to upload, probably the student has embedded the link in the answer text
......@@ -445,7 +446,7 @@ class OpenEndedChild(object):
response = {}
#This is a student_facing_error
error_string = ("You need to peer grade {0} more in order to make another submission. "
"You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
"You have graded {1}, and {2} are required. You have made {3} successful peer grading submissions.")
try:
response = self.peer_gs.get_data_for_location(self.location_string, student_id)
count_graded = response['count_graded']
......@@ -454,16 +455,18 @@ class OpenEndedChild(object):
success = True
except:
#This is a dev_facing_error
log.error("Could not contact external open ended graders for location {0} and student {1}".format(self.location_string,student_id))
log.error("Could not contact external open ended graders for location {0} and student {1}".format(
self.location_string, student_id))
#This is a student_facing_error
error_message = "Could not contact the graders. Please notify course staff."
return success, allowed_to_submit, error_message
if count_graded>=count_required:
if count_graded >= count_required:
return success, allowed_to_submit, ""
else:
allowed_to_submit = False
#This is a student_facing_error
error_message = error_string.format(count_required-count_graded, count_graded, count_required, student_sub_count)
error_message = error_string.format(count_required - count_graded, count_graded, count_required,
student_sub_count)
return success, allowed_to_submit, error_message
def get_eta(self):
......@@ -478,7 +481,7 @@ class OpenEndedChild(object):
success = response['success']
if isinstance(success, basestring):
success = (success.lower()=="true")
success = (success.lower() == "true")
if success:
eta = controller_query_service.convert_seconds_to_human_readable(response['eta'])
......
......@@ -14,6 +14,7 @@ class PeerGradingService(GradingService):
"""
Interface with the grading controller for peer grading
"""
def __init__(self, config, system):
config['system'] = system
super(PeerGradingService, self).__init__(config)
......@@ -36,10 +37,11 @@ class PeerGradingService(GradingService):
def get_next_submission(self, problem_location, grader_id):
response = self.get(self.get_next_submission_url,
{'location': problem_location, 'grader_id': grader_id})
{'location': problem_location, 'grader_id': grader_id})
return self.try_to_decode(self._render_rubric(response))
def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged):
def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores,
submission_flagged):
data = {'grader_id': grader_id,
'submission_id': submission_id,
'score': score,
......@@ -89,6 +91,7 @@ class PeerGradingService(GradingService):
pass
return text
"""
This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
......@@ -122,7 +125,7 @@ class MockPeerGradingService(object):
'max_score': 4})
def save_calibration_essay(self, problem_location, grader_id,
calibration_essay_id, submission_key, score,
calibration_essay_id, submission_key, score,
feedback, rubric_scores):
return {'success': True, 'actual_score': 2}
......
......@@ -95,7 +95,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
before = self.get_progress()
d = handlers[dispatch](get, system)
......@@ -224,7 +224,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
try:
score = int(get['assessment'])
score_list = get.getlist('score_list[]')
for i in xrange(0,len(score_list)):
for i in xrange(0, len(score_list)):
score_list[i] = int(score_list[i])
except ValueError:
#This is a dev_facing_error
......@@ -268,7 +268,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'allow_reset': self._allow_reset()}
def latest_post_assessment(self, system):
latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
try:
rubric_scores = json.loads(latest_post_assessment)
except:
......@@ -305,7 +305,9 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
for child in expected_children:
if len(xml_object.xpath(child)) != 1:
#This is a staff_facing_error
raise ValueError("Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
raise ValueError(
"Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))
def parse(k):
"""Assumes that xml_object has child k"""
......
......@@ -5,7 +5,7 @@ from lxml import etree
from datetime import datetime
from pkg_resources import resource_string
from .capa_module import ComplexEncoder
from .capa_module import ComplexEncoder
from .editing_module import EditingDescriptor
from .stringify import stringify_children
from .x_module import XModule
......@@ -34,7 +34,7 @@ class PeerGradingModule(XModule):
resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
]}
]}
js_module_name = "PeerGrading"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
......@@ -42,7 +42,7 @@ class PeerGradingModule(XModule):
def __init__(self, system, location, definition, descriptor,
instance_state=None, shared_state=None, **kwargs):
XModule.__init__(self, system, location, definition, descriptor,
instance_state, shared_state, **kwargs)
instance_state, shared_state, **kwargs)
# Load instance state
if instance_state is not None:
......@@ -53,12 +53,11 @@ class PeerGradingModule(XModule):
#We need to set the location here so the child modules can use it
system.set('location', location)
self.system = system
if(self.system.open_ended_grading_interface):
if (self.system.open_ended_grading_interface):
self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system)
else:
self.peer_gs = MockPeerGradingService()
self.use_for_single_location = self.metadata.get('use_for_single_location', USE_FOR_SINGLE_LOCATION)
if isinstance(self.use_for_single_location, basestring):
self.use_for_single_location = (self.use_for_single_location in TRUE_DICT)
......@@ -83,14 +82,13 @@ class PeerGradingModule(XModule):
grace_period_string = self.metadata.get('graceperiod', None)
try:
self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
self.timeinfo = TimeInfo(display_due_date_string, grace_period_string)
except:
log.error("Error parsing due date information in location {0}".format(location))
raise
self.display_due_date = self.timeinfo.display_due_date
self.ajax_url = self.system.ajax_url
if not self.ajax_url.endswith("/"):
self.ajax_url = self.ajax_url + "/"
......@@ -148,13 +146,13 @@ class PeerGradingModule(XModule):
'save_grade': self.save_grade,
'save_calibration_essay': self.save_calibration_essay,
'problem': self.peer_grading_problem,
}
}
if dispatch not in handlers:
#This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
#This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success' : False})
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
d = handlers[dispatch](get)
......@@ -191,9 +189,10 @@ class PeerGradingModule(XModule):
except:
success, response = self.query_data_for_location()
if not success:
log.exception("No instance data found and could not get data from controller for loc {0} student {1}".format(
self.system.location.url(), self.system.anonymous_student_id
))
log.exception(
"No instance data found and could not get data from controller for loc {0} student {1}".format(
self.system.location.url(), self.system.anonymous_student_id
))
return None
count_graded = response['count_graded']
count_required = response['count_required']
......@@ -204,7 +203,7 @@ class PeerGradingModule(XModule):
score_dict = {
'score': int(count_graded >= count_required),
'total': self.max_grade,
}
}
return score_dict
......@@ -253,7 +252,7 @@ class PeerGradingModule(XModule):
.format(self.peer_gs.url, location, grader_id))
#This is a student_facing_error
return {'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
def save_grade(self, get):
"""
......@@ -271,7 +270,8 @@ class PeerGradingModule(XModule):
error: if there was an error in the submission, this is the error message
"""
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged'])
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]',
'submission_flagged'])
success, message = self._check_required(get, required)
if not success:
return self._err_response(message)
......@@ -287,14 +287,14 @@ class PeerGradingModule(XModule):
try:
response = self.peer_gs.save_grade(location, grader_id, submission_id,
score, feedback, submission_key, rubric_scores, submission_flagged)
score, feedback, submission_key, rubric_scores, submission_flagged)
return response
except GradingServiceError:
#This is a dev_facing_error
log.exception("""Error saving grade to open ended grading service. server url: {0}, location: {1}, submission_id:{2},
submission_key: {3}, score: {4}"""
.format(self.peer_gs.url,
location, submission_id, submission_key, score)
location, submission_id, submission_key, score)
)
#This is a student_facing_error
return {
......@@ -382,7 +382,7 @@ class PeerGradingModule(XModule):
.format(self.peer_gs.url, location))
#This is a student_facing_error
return {'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
#This is a dev_facing_error
......@@ -390,7 +390,7 @@ class PeerGradingModule(XModule):
.format(rubric))
#This is a student_facing_error
return {'success': False,
'error': 'Error displaying submission. Please notify course staff.'}
'error': 'Error displaying submission. Please notify course staff.'}
def save_calibration_essay(self, get):
......@@ -426,11 +426,13 @@ class PeerGradingModule(XModule):
try:
response = self.peer_gs.save_calibration_essay(location, grader_id, calibration_essay_id,
submission_key, score, feedback, rubric_scores)
submission_key, score, feedback, rubric_scores)
return response
except GradingServiceError:
#This is a dev_facing_error
log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id))
log.exception(
"Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(
location, submission_id, submission_key, grader_id))
#This is a student_facing_error
return self._err_response('There was an error saving your score. Please notify course staff.')
......@@ -440,7 +442,7 @@ class PeerGradingModule(XModule):
'''
html = self.system.render_template('peer_grading/peer_grading_closed.html', {
'use_for_single_location': self.use_for_single_location
})
})
return html
......@@ -503,12 +505,11 @@ class PeerGradingModule(XModule):
problem['closed'] = True
else:
problem['closed'] = False
else:
# if we can't find the due date, assume that it doesn't have one
else:
# if we can't find the due date, assume that it doesn't have one
problem['due'] = None
problem['closed'] = False
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading.html', {
'course_id': self.system.course_id,
......@@ -519,7 +520,7 @@ class PeerGradingModule(XModule):
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location,
})
})
return html
......@@ -531,7 +532,8 @@ class PeerGradingModule(XModule):
if not self.use_for_single_location:
#This is an error case, because it must be set to use a single location to be called without get parameters
#This is a dev_facing_error
log.error("Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
log.error(
"Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
return {'html': "", 'success': False}
problem_location = self.link_to_location
......@@ -547,7 +549,7 @@ class PeerGradingModule(XModule):
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location,
})
})
return {'html': html, 'success': True}
......@@ -560,7 +562,7 @@ class PeerGradingModule(XModule):
state = {
'student_data_for_location': self.student_data_for_location,
}
}
return json.dumps(state)
......@@ -596,7 +598,9 @@ class PeerGradingDescriptor(XmlDescriptor, EditingDescriptor):
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
#This is a staff_facing_error
raise ValueError("Peer grading definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(child))
raise ValueError(
"Peer grading definition must include at least one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))
def parse_task(k):
"""Assumes that xml_object has child k"""
......
......@@ -14,6 +14,7 @@ from datetime import datetime
from . import test_system
import test_util_open_ended
"""
Tests for the various pieces of the CombinedOpenEndedGrading system
......@@ -39,41 +40,37 @@ class OpenEndedChildTest(unittest.TestCase):
max_score = 1
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface' : "",
'open_ended_grading_interface' : {},
'skip_basic_checks' : False,
}
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface': "",
'open_ended_grading_interface': {},
'skip_basic_checks': False,
}
definition = Mock()
descriptor = Mock()
def setUp(self):
self.test_system = test_system()
self.openendedchild = OpenEndedChild(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
self.definition, self.descriptor, self.static_data, self.metadata)
def test_latest_answer_empty(self):
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, "")
def test_latest_score_empty(self):
answer = self.openendedchild.latest_score()
self.assertEqual(answer, None)
def test_latest_post_assessment_empty(self):
answer = self.openendedchild.latest_post_assessment(self.test_system)
self.assertEqual(answer, "")
def test_new_history_entry(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
......@@ -99,7 +96,6 @@ class OpenEndedChildTest(unittest.TestCase):
score = self.openendedchild.latest_score()
self.assertEqual(score, 4)
def test_record_latest_post_assessment(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
......@@ -107,7 +103,7 @@ class OpenEndedChildTest(unittest.TestCase):
post_assessment = "Post assessment"
self.openendedchild.record_latest_post_assessment(post_assessment)
self.assertEqual(post_assessment,
self.openendedchild.latest_post_assessment(self.test_system))
self.openendedchild.latest_post_assessment(self.test_system))
def test_get_score(self):
new_answer = "New Answer"
......@@ -124,24 +120,22 @@ class OpenEndedChildTest(unittest.TestCase):
self.assertEqual(score['score'], new_score)
self.assertEqual(score['total'], self.static_data['max_score'])
def test_reset(self):
self.openendedchild.reset(self.test_system)
state = json.loads(self.openendedchild.get_instance_state())
self.assertEqual(state['state'], OpenEndedChild.INITIAL)
def test_is_last_response_correct(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(self.static_data['max_score'])
self.assertEqual(self.openendedchild.is_last_response_correct(),
'correct')
'correct')
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(0)
self.assertEqual(self.openendedchild.is_last_response_correct(),
'incorrect')
'incorrect')
class OpenEndedModuleTest(unittest.TestCase):
......@@ -159,18 +153,18 @@ class OpenEndedModuleTest(unittest.TestCase):
max_score = 4
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'rewrite_content_links' : "",
'close_date': None,
's3_interface' : test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks' : False,
}
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'rewrite_content_links': "",
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
}
oeparam = etree.XML('''
<openendedparam>
......@@ -188,25 +182,26 @@ class OpenEndedModuleTest(unittest.TestCase):
self.test_system.location = self.location
self.mock_xqueue = MagicMock()
self.mock_xqueue.send_to_queue.return_value = (None, "Message")
self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 1}
self.test_system.xqueue = {'interface': self.mock_xqueue, 'callback_url': '/', 'default_queuename': 'testqueue',
'waittime': 1}
self.openendedmodule = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
self.definition, self.descriptor, self.static_data, self.metadata)
def test_message_post(self):
get = {'feedback': 'feedback text',
'submission_id': '1',
'grader_id': '1',
'score': 3}
'submission_id': '1',
'grader_id': '1',
'score': 3}
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
student_info = {'anonymous_student_id': self.test_system.anonymous_student_id,
'submission_time': qtime}
'submission_time': qtime}
contents = {
'feedback': get['feedback'],
'submission_id': int(get['submission_id']),
'grader_id': int(get['grader_id']),
'score': get['score'],
'student_info': json.dumps(student_info)
}
'feedback': get['feedback'],
'submission_id': int(get['submission_id']),
'grader_id': int(get['grader_id']),
'score': get['score'],
'student_info': json.dumps(student_info)
}
result = self.openendedmodule.message_post(get, self.test_system)
self.assertTrue(result['success'])
......@@ -220,13 +215,13 @@ class OpenEndedModuleTest(unittest.TestCase):
submission = "This is a student submission"
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
student_info = {'anonymous_student_id': self.test_system.anonymous_student_id,
'submission_time': qtime}
'submission_time': qtime}
contents = self.openendedmodule.payload.copy()
contents.update({
'student_info': json.dumps(student_info),
'student_response': submission,
'max_score': self.max_score
})
})
result = self.openendedmodule.send_to_grader(submission, self.test_system)
self.assertTrue(result)
self.mock_xqueue.send_to_queue.assert_called_with(body=json.dumps(contents), header=ANY)
......@@ -234,36 +229,36 @@ class OpenEndedModuleTest(unittest.TestCase):
def update_score_single(self):
self.openendedmodule.new_history_entry("New Entry")
score_msg = {
'correct': True,
'score': 4,
'msg': 'Grader Message',
'feedback': "Grader Feedback"
}
'correct': True,
'score': 4,
'msg': 'Grader Message',
'feedback': "Grader Feedback"
}
get = {'queuekey': "abcd",
'xqueue_body': score_msg}
'xqueue_body': score_msg}
self.openendedmodule.update_score(get, self.test_system)
def update_score_single(self):
self.openendedmodule.new_history_entry("New Entry")
feedback = {
"success": True,
"feedback": "Grader Feedback"
}
"success": True,
"feedback": "Grader Feedback"
}
score_msg = {
'correct': True,
'score': 4,
'msg': 'Grader Message',
'feedback': json.dumps(feedback),
'grader_type': 'IN',
'grader_id': '1',
'submission_id': '1',
'success': True,
'rubric_scores': [0],
'rubric_scores_complete': True,
'rubric_xml': etree.tostring(self.rubric)
}
'correct': True,
'score': 4,
'msg': 'Grader Message',
'feedback': json.dumps(feedback),
'grader_type': 'IN',
'grader_id': '1',
'submission_id': '1',
'success': True,
'rubric_scores': [0],
'rubric_scores_complete': True,
'rubric_xml': etree.tostring(self.rubric)
}
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(score_msg)}
'xqueue_body': json.dumps(score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def test_latest_post_assessment(self):
......@@ -296,18 +291,18 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
metadata = {'attempts': '10', 'max_score': max_score}
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload' : False,
'rewrite_content_links' : "",
'close_date' : "",
's3_interface' : test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks' : False,
}
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'rewrite_content_links': "",
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
}
oeparam = etree.XML('''
<openendedparam>
......@@ -329,23 +324,23 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>'''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>'''
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
descriptor = Mock()
def setUp(self):
self.test_system = test_system()
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data = self.static_data,
metadata=self.metadata)
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata)
def test_get_tag_name(self):
name = self.combinedoe.get_tag_name("<t>Tag</t>")
......
......@@ -10,8 +10,8 @@ from . import test_system
import test_util_open_ended
class SelfAssessmentTest(unittest.TestCase):
class SelfAssessmentTest(unittest.TestCase):
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
......@@ -24,7 +24,7 @@ class SelfAssessmentTest(unittest.TestCase):
'prompt': prompt,
'submitmessage': 'Shall we submit now?',
'hintprompt': 'Consider this...',
}
}
location = Location(["i4x", "edX", "sa_test", "selfassessment",
"SampleQuestion"])
......@@ -41,22 +41,22 @@ class SelfAssessmentTest(unittest.TestCase):
'attempts': 2})
static_data = {
'max_attempts': 10,
'rubric': etree.XML(self.rubric),
'prompt': self.prompt,
'max_score': 1,
'display_name': "Name",
'accept_file_upload': False,
'close_date': None,
's3_interface' : test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface' : test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks' : False,
}
'max_attempts': 10,
'rubric': etree.XML(self.rubric),
'prompt': self.prompt,
'max_score': 1,
'display_name': "Name",
'accept_file_upload': False,
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
}
self.module = SelfAssessmentModule(test_system(), self.location,
self.definition, self.descriptor,
static_data,
state, metadata=self.metadata)
self.definition, self.descriptor,
static_data,
state, metadata=self.metadata)
def test_get_html(self):
html = self.module.get_html(self.module.system)
......@@ -64,14 +64,15 @@ class SelfAssessmentTest(unittest.TestCase):
def test_self_assessment_flow(self):
responses = {'assessment': '0', 'score_list[]': ['0', '0']}
def get_fake_item(name):
return responses[name]
def get_data_for_location(self,location,student):
def get_data_for_location(self, location, student):
return {
'count_graded' : 0,
'count_required' : 0,
'student_sub_count': 0,
'count_graded': 0,
'count_required': 0,
'student_sub_count': 0,
}
mock_query_dict = MagicMock()
......@@ -82,20 +83,19 @@ class SelfAssessmentTest(unittest.TestCase):
self.assertEqual(self.module.get_score()['score'], 0)
self.module.save_answer({'student_answer': "I am an answer"},
self.module.save_answer({'student_answer': "I am an answer"},
self.module.system)
self.assertEqual(self.module.state, self.module.ASSESSING)
self.module.save_assessment(mock_query_dict, self.module.system)
self.assertEqual(self.module.state, self.module.DONE)
d = self.module.reset({})
self.assertTrue(d['success'])
self.assertEqual(self.module.state, self.module.INITIAL)
# if we now assess as right, skip the REQUEST_HINT state
self.module.save_answer({'student_answer': 'answer 4'},
self.module.save_answer({'student_answer': 'answer 4'},
self.module.system)
responses['assessment'] = '1'
self.module.save_assessment(mock_query_dict, self.module.system)
......
OPEN_ENDED_GRADING_INTERFACE = {
'url' : 'http://127.0.0.1:3033/',
'username' : 'incorrect',
'password' : 'incorrect',
'staff_grading' : 'staff_grading',
'peer_grading' : 'peer_grading',
'grading_controller' : 'grading_controller'
'url': 'http://127.0.0.1:3033/',
'username': 'incorrect',
'password': 'incorrect',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
S3_INTERFACE = {
'aws_access_key' : "",
'aws_secret_key' : "",
"aws_bucket_name" : "",
'aws_access_key': "",
'aws_secret_key': "",
"aws_bucket_name": "",
}
\ No newline at end of file
......@@ -22,7 +22,7 @@ NOTIFICATION_TYPES = (
('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted'),
('flagged_submissions_exist', 'open_ended_flagged_problems', 'Flagged Submissions')
)
)
def staff_grading_notifications(course, user):
......@@ -46,7 +46,9 @@ def staff_grading_notifications(course, user):
#Non-catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.info("Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id, student_id))
log.info(
"Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
......@@ -80,7 +82,9 @@ def peer_grading_notifications(course, user):
#Non-catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.info("Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id, student_id))
log.info(
"Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
......@@ -105,7 +109,9 @@ def combined_notifications(course, user):
return notification_dict
min_time_to_query = user.last_login
last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id, modified__gt=min_time_to_query).values('modified').order_by('-modified')
last_module_seen = StudentModule.objects.filter(student=user, course_id=course_id,
modified__gt=min_time_to_query).values('modified').order_by(
'-modified')
last_module_seen_count = last_module_seen.count()
if last_module_seen_count > 0:
......@@ -117,7 +123,8 @@ def combined_notifications(course, user):
img_path = ""
try:
controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff, last_time_viewed)
controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff,
last_time_viewed)
log.debug(controller_response)
notifications = json.loads(controller_response)
if notifications['success']:
......@@ -127,7 +134,9 @@ def combined_notifications(course, user):
#Non-catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.exception("Problem with getting notifications from controller query service for course {0} user {1}.".format(course_id, student_id))
log.exception(
"Problem with getting notifications from controller query service for course {0} user {1}.".format(
course_id, student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
......@@ -151,7 +160,8 @@ def set_value_in_cache(student_id, course_id, notification_type, value):
def create_key_name(student_id, course_id, notification_type):
key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id, student=student_id)
key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id,
student=student_id)
return key_name
......
......@@ -15,6 +15,7 @@ class StaffGrading(object):
"""
Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views.
"""
def __init__(self, course):
self.course = course
......
......@@ -20,10 +20,12 @@ log = logging.getLogger(__name__)
STAFF_ERROR_MESSAGE = 'Could not contact the external grading server. Please contact the development team. If you do not have a point of contact, you can contact Vik at vik@edx.org.'
class MockStaffGradingService(object):
"""
A simple mockup of a staff grading service, for testing.
"""
def __init__(self):
self.cnt = 0
......@@ -43,15 +45,18 @@ class MockStaffGradingService(object):
def get_problem_list(self, course_id, grader_id):
self.cnt += 1
return json.dumps({'success': True,
'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10}),
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10})
]})
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged):
'problem_list': [
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5,
'min_for_ml': 10}),
json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5,
'min_for_ml': 10})
]})
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
submission_flagged):
return self.get_next(course_id, 'fake location', grader_id)
......@@ -59,6 +64,7 @@ class StaffGradingService(GradingService):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
config['system'] = ModuleSystem(None, None, None, render_to_string, None)
super(StaffGradingService, self).__init__(config)
......@@ -109,12 +115,13 @@ class StaffGradingService(GradingService):
GradingServiceError: something went wrong with the connection.
"""
response = self.get(self.get_next_url,
params={'location': location,
'grader_id': grader_id})
params={'location': location,
'grader_id': grader_id})
return json.dumps(self._render_rubric(response))
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged):
def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
submission_flagged):
"""
Save a score and feedback for a submission.
......@@ -253,14 +260,14 @@ def get_problem_list(request, course_id):
try:
response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user))
return HttpResponse(response,
mimetype="application/json")
mimetype="application/json")
except GradingServiceError:
#This is a dev_facing_error
log.exception("Error from staff grading service in open ended grading. server url: {0}"
.format(staff_grading_service().url))
.format(staff_grading_service().url))
#This is a staff_facing_error
return HttpResponse(json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE}))
'error': STAFF_ERROR_MESSAGE}))
def _get_next(course_id, grader_id, location):
......@@ -272,7 +279,7 @@ def _get_next(course_id, grader_id, location):
except GradingServiceError:
#This is a dev facing error
log.exception("Error from staff grading service in open ended grading. server url: {0}"
.format(staff_grading_service().url))
.format(staff_grading_service().url))
#This is a staff_facing_error
return json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE})
......@@ -297,7 +304,7 @@ def save_grade(request, course_id):
if request.method != 'POST':
raise Http404
required = set(['score', 'feedback', 'submission_id', 'location','submission_flagged', 'rubric_scores[]'])
required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged', 'rubric_scores[]'])
actual = set(request.POST.keys())
missing = required - actual
if len(missing) > 0:
......@@ -307,22 +314,23 @@ def save_grade(request, course_id):
grader_id = unique_id_for_user(request.user)
p = request.POST
location = p['location']
skipped = 'skipped' in p
skipped = 'skipped' in p
try:
result_json = staff_grading_service().save_grade(course_id,
grader_id,
p['submission_id'],
p['score'],
p['feedback'],
skipped,
p.getlist('rubric_scores[]'),
p['submission_flagged'])
grader_id,
p['submission_id'],
p['score'],
p['feedback'],
skipped,
p.getlist('rubric_scores[]'),
p['submission_flagged'])
except GradingServiceError:
#This is a dev_facing_error
log.exception("Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(request, course_id))
log.exception(
"Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}".format(
request, course_id))
#This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE)
......@@ -330,13 +338,16 @@ def save_grade(request, course_id):
result = json.loads(result_json)
except ValueError:
#This is a dev_facing_error
log.exception("save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(result_json))
log.exception(
"save_grade returned broken json in the staff grading interface in open ended grading: {0}".format(
result_json))
#This is a staff_facing_error
return _err_response(STAFF_ERROR_MESSAGE)
if not result.get('success', False):
#This is a dev_facing_error
log.warning('Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))
log.warning(
'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))
return _err_response(STAFF_ERROR_MESSAGE)
# Ok, save_grade seemed to work. Get the next submission to grade.
......
......@@ -7,7 +7,7 @@ django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open
from django.test import TestCase
from open_ended_grading import staff_grading_service
from xmodule.open_ended_grading_classes import peer_grading_service
from xmodule import peer_grading_module
from xmodule import peer_grading_module
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
......@@ -22,6 +22,7 @@ from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
import logging
log = logging.getLogger(__name__)
from django.test.utils import override_settings
from django.http import QueryDict
......@@ -36,6 +37,7 @@ class TestStaffGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the
backend.
'''
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
......@@ -50,6 +52,7 @@ class TestStaffGradingService(ct.PageLoader):
self.course_id = "edX/toy/2012_Fall"
self.toy = modulestore().get_course(self.course_id)
def make_instructor(course):
group_name = _course_staff_group_name(course.location)
g = Group.objects.create(name=group_name)
......@@ -130,6 +133,7 @@ class TestPeerGradingService(ct.PageLoader):
access control and error handling logic -- all the actual work is on the
backend.
'''
def setUp(self):
xmodule.modulestore.django._MODULESTORES = {}
......@@ -148,11 +152,12 @@ class TestPeerGradingService(ct.PageLoader):
self.mock_service = peer_grading_service.MockPeerGradingService()
self.system = ModuleSystem(location, None, None, render_to_string, None,
s3_interface = test_util_open_ended.S3_INTERFACE,
open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE
s3_interface=test_util_open_ended.S3_INTERFACE,
open_ended_grading_interface=test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE
)
self.descriptor = peer_grading_module.PeerGradingDescriptor(self.system)
self.peer_module = peer_grading_module.PeerGradingModule(self.system, location, "<peergrading/>", self.descriptor)
self.peer_module = peer_grading_module.PeerGradingModule(self.system, location, "<peergrading/>",
self.descriptor)
self.peer_module.peer_gs = self.mock_service
self.logout()
......@@ -175,18 +180,20 @@ class TestPeerGradingService(ct.PageLoader):
def test_save_grade_success(self):
data = {
'rubric_scores[]': [0, 0],
'location': self.location,
'submission_id': 1,
'submission_key': 'fake key',
'score': 2,
'feedback': 'feedback',
'submission_flagged': 'false'
}
'rubric_scores[]': [0, 0],
'location': self.location,
'submission_id': 1,
'submission_key': 'fake key',
'score': 2,
'feedback': 'feedback',
'submission_flagged': 'false'
}
qdict = MagicMock()
def fake_get_item(key):
return data[key]
qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item
qdict.keys = data.keys
......@@ -237,18 +244,20 @@ class TestPeerGradingService(ct.PageLoader):
def test_save_calibration_essay_success(self):
data = {
'rubric_scores[]': [0, 0],
'location': self.location,
'submission_id': 1,
'submission_key': 'fake key',
'score': 2,
'feedback': 'feedback',
'submission_flagged': 'false'
}
'rubric_scores[]': [0, 0],
'location': self.location,
'submission_id': 1,
'submission_key': 'fake key',
'score': 2,
'feedback': 'feedback',
'submission_flagged': 'false'
}
qdict = MagicMock()
def fake_get_item(key):
return data[key]
qdict.__getitem__.side_effect = fake_get_item
qdict.getlist = fake_get_item
qdict.keys = data.keys
......
......@@ -50,22 +50,24 @@ def _reverse_without_slash(url_name, course_id):
ajax_url = reverse(url_name, kwargs={'course_id': course_id})
return ajax_url
DESCRIPTION_DICT = {
'Peer Grading': "View all problems that require peer assessment in this particular course.",
'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
'Problems you have submitted': "View open ended problems that you have previously submitted for grading.",
'Flagged Submissions': "View submissions that have been flagged by students as inappropriate."
}
'Peer Grading': "View all problems that require peer assessment in this particular course.",
'Staff Grading': "View ungraded submissions submitted by students for the open ended problems in the course.",
'Problems you have submitted': "View open ended problems that you have previously submitted for grading.",
'Flagged Submissions': "View submissions that have been flagged by students as inappropriate."
}
ALERT_DICT = {
'Peer Grading': "New submissions to grade",
'Staff Grading': "New submissions to grade",
'Problems you have submitted': "New grades have been returned",
'Flagged Submissions': "Submissions have been flagged for review"
}
'Peer Grading': "New submissions to grade",
'Staff Grading': "New submissions to grade",
'Problems you have submitted': "New grades have been returned",
'Flagged Submissions': "Submissions have been flagged for review"
}
STUDENT_ERROR_MESSAGE = "Error occurred while contacting the grading service. Please notify course staff."
STAFF_ERROR_MESSAGE = "Error occurred while contacting the grading service. Please notify the development team. If you do not have a point of contact, please email Vik at vik@edx.org"
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
"""
......@@ -92,10 +94,10 @@ def peer_grading(request, course_id):
#Get the current course
course = get_course_with_access(request.user, course_id, 'load')
course_id_parts = course.id.split("/")
false_dict = [False,"False", "false", "FALSE"]
false_dict = [False, "False", "false", "FALSE"]
#Reverse the base course url
base_course_url = reverse('courses')
base_course_url = reverse('courses')
try:
#TODO: This will not work with multiple runs of a course. Make it work. The last key in the Location passed
#to get_items is called revision. Is this the same as run?
......@@ -147,7 +149,7 @@ def student_problem_list(request, course_id):
success = False
error_text = ""
problem_list = []
base_course_url = reverse('courses')
base_course_url = reverse('courses')
try:
problem_list_json = controller_qs.get_grading_status_list(course_id, unique_id_for_user(request.user))
......@@ -174,7 +176,7 @@ def student_problem_list(request, course_id):
except:
#This is a student_facing_error
eta_string = "Error getting ETA."
problem_list[i].update({'eta_string' : eta_string})
problem_list[i].update({'eta_string': eta_string})
except GradingServiceError:
#This is a student_facing_error
......@@ -215,7 +217,7 @@ def flagged_problem_list(request, course_id):
success = False
error_text = ""
problem_list = []
base_course_url = reverse('courses')
base_course_url = reverse('courses')
try:
problem_list_json = controller_qs.get_flagged_problem_list(course_id)
......@@ -243,14 +245,14 @@ def flagged_problem_list(request, course_id):
ajax_url = _reverse_with_slash('open_ended_flagged_problems', course_id)
context = {
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
'success': success,
'problem_list': problem_list,
'error_text': error_text,
# Checked above
'staff_access': True,
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
'success': success,
'problem_list': problem_list,
'error_text': error_text,
# Checked above
'staff_access': True,
}
return render_to_response('open_ended_problems/open_ended_flagged_problems.html', context)
......@@ -305,7 +307,7 @@ def combined_notifications(request, course_id):
}
return render_to_response('open_ended_problems/combined_notifications.html',
combined_dict
combined_dict
)
......@@ -318,13 +320,14 @@ def take_action_on_flags(request, course_id):
if request.method != 'POST':
raise Http404
required = ['submission_id', 'action_type', 'student_id']
for key in required:
if key not in request.POST:
#This is a staff_facing_error
return HttpResponse(json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(key)}),
mimetype="application/json")
return HttpResponse(json.dumps({'success': False,
'error': STAFF_ERROR_MESSAGE + 'Missing key {0} from submission. Please reload and try again.'.format(
key)}),
mimetype="application/json")
p = request.POST
submission_id = p['submission_id']
......@@ -338,5 +341,7 @@ def take_action_on_flags(request, course_id):
return HttpResponse(response, mimetype="application/json")
except GradingServiceError:
#This is a dev_facing_error
log.exception("Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(submission_id, action_type, grader_id))
log.exception(
"Error taking action on flagged peer grading submissions, submission_id: {0}, action_type: {1}, grader_id: {2}".format(
submission_id, action_type, grader_id))
return _err_response(STAFF_ERROR_MESSAGE)