Commit bcbf65e2 by Vik Paruchuri

Code reformat: fix line lengths and replace hard-coded state strings with named constants

parent 8dbbb021
@@ -45,14 +45,14 @@ class CombinedOpenEndedModule(XModule):
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
INTERMEDIATE_DONE='intermediate_done'
INTERMEDIATE_DONE = 'intermediate_done'
DONE = 'done'
TASK_TYPES=["self", "ml", "instructor", "peer"]
TASK_TYPES = ["self", "ml", "instructor", "peer"]
js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
]}
]}
js_module_name = "CombinedOpenEnded"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
@@ -88,7 +88,8 @@ class CombinedOpenEndedModule(XModule):
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
<grader_payload>{"grader_settings" : "ml_grading.conf",
"problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
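
Note on the grader_payload wrap in the hunk above: json.loads treats the newline inside the payload as insignificant whitespace, so splitting the JSON across two lines in the docstring sample does not change the parsed result. A minimal standalone check (illustrative only, not part of the commit):

    import json

    payload = '''{"grader_settings" : "ml_grading.conf",
        "problem_id" : "6.002x/Welcome/OETest"}'''
    # Whitespace between JSON tokens is ignored by the parser.
    assert json.loads(payload)["problem_id"] == "6.002x/Welcome/OETest"
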
@@ -108,9 +109,9 @@ class CombinedOpenEndedModule(XModule):
#Tells the system which xml definition to load
self.current_task_number = instance_state.get('current_task_number', 0)
#This loads the states of the individual children
self.task_states= instance_state.get('task_states', [])
self.task_states = instance_state.get('task_states', [])
#Overall state of the combined open ended module
self.state = instance_state.get('state', 'initial')
self.state = instance_state.get('state', self.INITIAL)
self.attempts = instance_state.get('attempts', 0)
@@ -124,13 +125,13 @@ class CombinedOpenEndedModule(XModule):
#Static data is passed to the child modules to render
self.static_data = {
'max_score' : self._max_score,
'max_attempts' : self.max_attempts,
'prompt' : definition['prompt'],
'rubric' : definition['rubric']
'max_score': self._max_score,
'max_attempts': self.max_attempts,
'prompt': definition['prompt'],
'rubric': definition['rubric']
}
self.task_xml=definition['task_xml']
self.task_xml = definition['task_xml']
self.setup_next_task()
def get_tag_name(self, xml):
@@ -139,7 +140,7 @@ class CombinedOpenEndedModule(XModule):
Input: XML string
Output: The name of the root tag
"""
tag=etree.fromstring(xml).tag
tag = etree.fromstring(xml).tag
return tag
def overwrite_state(self, current_task_state):
@@ -149,15 +150,15 @@ class CombinedOpenEndedModule(XModule):
Input: Task state json string
Output: Task state json string
"""
last_response_data=self.get_last_response(self.current_task_number-1)
last_response_data = self.get_last_response(self.current_task_number - 1)
last_response = last_response_data['response']
loaded_task_state=json.loads(current_task_state)
if loaded_task_state['state']== self.INITIAL:
loaded_task_state['state']=self.ASSESSING
loaded_task_state = json.loads(current_task_state)
if loaded_task_state['state'] == self.INITIAL:
loaded_task_state['state'] = self.ASSESSING
loaded_task_state['created'] = "True"
loaded_task_state['history'].append({'answer' : last_response})
current_task_state=json.dumps(loaded_task_state)
loaded_task_state['history'].append({'answer': last_response})
current_task_state = json.dumps(loaded_task_state)
return current_task_state
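
A minimal illustration of the overwrite_state round trip above, with an assumed input state (standalone sketch, not part of the commit):

    import json

    # An 'initial' child state is promoted to 'assessing' and seeded with
    # the previous task's answer before being serialized back to JSON.
    current_task_state = json.dumps({'state': 'initial', 'history': []})
    loaded_task_state = json.loads(current_task_state)
    if loaded_task_state['state'] == 'initial':
        loaded_task_state['state'] = 'assessing'
        loaded_task_state['created'] = "True"
        loaded_task_state['history'].append({'answer': 'essay from prior task'})
    current_task_state = json.dumps(loaded_task_state)
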
def child_modules(self):
@@ -167,17 +168,17 @@ class CombinedOpenEndedModule(XModule):
Input: None
Output: A dictionary of dictionaries containing the descriptor functions and module functions
"""
child_modules={
'openended' : open_ended_module.OpenEndedModule,
'selfassessment' : self_assessment_module.SelfAssessmentModule,
child_modules = {
'openended': open_ended_module.OpenEndedModule,
'selfassessment': self_assessment_module.SelfAssessmentModule,
}
child_descriptors={
'openended' : open_ended_module.OpenEndedDescriptor,
'selfassessment' : self_assessment_module.SelfAssessmentDescriptor,
child_descriptors = {
'openended': open_ended_module.OpenEndedDescriptor,
'selfassessment': self_assessment_module.SelfAssessmentDescriptor,
}
children={
'modules' : child_modules,
'descriptors' : child_descriptors,
children = {
'modules': child_modules,
'descriptors': child_descriptors,
}
return children
@@ -188,41 +189,47 @@ class CombinedOpenEndedModule(XModule):
Input: A boolean indicating whether or not the reset function is calling.
Output: Boolean True (not useful right now)
"""
current_task_state=None
if len(self.task_states)>self.current_task_number:
current_task_state=self.task_states[self.current_task_number]
current_task_state = None
if len(self.task_states) > self.current_task_number:
current_task_state = self.task_states[self.current_task_number]
self.current_task_xml=self.task_xml[self.current_task_number]
self.current_task_xml = self.task_xml[self.current_task_number]
if self.current_task_number>0:
self.allow_reset=self.check_allow_reset()
if self.current_task_number > 0:
self.allow_reset = self.check_allow_reset()
if self.allow_reset:
self.current_task_number=self.current_task_number-1
self.current_task_number = self.current_task_number - 1
current_task_type=self.get_tag_name(self.current_task_xml)
current_task_type = self.get_tag_name(self.current_task_xml)
children=self.child_modules()
children = self.child_modules()
self.current_task_descriptor=children['descriptors'][current_task_type](self.system)
etree_xml=etree.fromstring(self.current_task_xml)
self.current_task_descriptor = children['descriptors'][current_task_type](self.system)
etree_xml = etree.fromstring(self.current_task_xml)
self.current_task_parsed_xml=self.current_task_descriptor.definition_from_xml(etree_xml,self.system)
if current_task_state is None and self.current_task_number==0:
self.current_task=children['modules'][current_task_type](self.system, self.location, self.current_task_parsed_xml, self.current_task_descriptor, self.static_data)
self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
if current_task_state is None and self.current_task_number == 0:
self.current_task = children['modules'][current_task_type](self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data)
self.task_states.append(self.current_task.get_instance_state())
self.state=self.ASSESSING
elif current_task_state is None and self.current_task_number>0:
last_response_data =self.get_last_response(self.current_task_number-1)
self.state = self.ASSESSING
elif current_task_state is None and self.current_task_number > 0:
last_response_data = self.get_last_response(self.current_task_number - 1)
last_response = last_response_data['response']
current_task_state = ('{"state": "assessing", "version": 1, "max_score": ' + str(self._max_score) + ', ' +
'"attempts": 0, "created": "True", "history": [{"answer": "' + str(last_response) + '"}]}')
self.current_task=children['modules'][current_task_type](self.system, self.location, self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, instance_state=current_task_state)
current_task_state = (
'{"state": "' + str(self.ASSESSING) + '", "version": 1, "max_score": ' + str(self._max_score) + ', ' +
'"attempts": 0, "created": "True", "history": [{"answer": "' + str(last_response) + '"}]}')
self.current_task = children['modules'][current_task_type](self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
instance_state=current_task_state)
self.task_states.append(self.current_task.get_instance_state())
self.state=self.ASSESSING
self.state = self.ASSESSING
else:
if self.current_task_number>0 and not reset:
current_task_state=self.overwrite_state(current_task_state)
self.current_task=children['modules'][current_task_type](self.system, self.location, self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, instance_state=current_task_state)
if self.current_task_number > 0 and not reset:
current_task_state = self.overwrite_state(current_task_state)
self.current_task = children['modules'][current_task_type](self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
instance_state=current_task_state)
log.debug(current_task_state)
return True
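
One caveat on the hunk above: current_task_state is still built by string concatenation, so a last_response containing quotes or newlines would yield invalid JSON. A hedged sketch of a json.dumps-based alternative (build_task_state is a hypothetical helper, not in this commit):

    import json

    def build_task_state(assessing_state, max_score, last_response):
        # Mirrors the keys of the concatenated string above; json.dumps
        # escapes the answer text correctly.
        return json.dumps({
            'state': assessing_state,
            'version': 1,
            'max_score': max_score,
            'attempts': 0,
            'created': "True",
            'history': [{'answer': last_response}],
        })
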
@@ -235,13 +242,14 @@ class CombinedOpenEndedModule(XModule):
Output: the allow_reset attribute of the current module.
"""
if not self.allow_reset:
if self.current_task_number>0:
last_response_data=self.get_last_response(self.current_task_number-1)
current_response_data=self.get_current_attributes(self.current_task_number)
if self.current_task_number > 0:
last_response_data = self.get_last_response(self.current_task_number - 1)
current_response_data = self.get_current_attributes(self.current_task_number)
if current_response_data['min_score_to_attempt']>last_response_data['score'] or current_response_data['max_score_to_attempt']<last_response_data['score']:
self.state=self.DONE
self.allow_reset=True
if(current_response_data['min_score_to_attempt'] > last_response_data['score']
or current_response_data['max_score_to_attempt'] < last_response_data['score']):
self.state = self.DONE
self.allow_reset = True
return self.allow_reset
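
The reformatted range check above could also use Python's chained comparison, which reads closer to the intent (a hypothetical rewrite, not part of this commit):

    def score_out_of_range(score, min_score, max_score):
        # Equivalent to: min_score > score or max_score < score
        return not (min_score <= score <= max_score)
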
@@ -251,18 +259,18 @@ class CombinedOpenEndedModule(XModule):
Input: None
Output: A dictionary that can be rendered into the combined open ended template.
"""
task_html=self.get_html_base()
task_html = self.get_html_base()
#set context variables and render template
context = {
'items': [{'content' : task_html}],
'items': [{'content': task_html}],
'ajax_url': self.system.ajax_url,
'allow_reset': self.allow_reset,
'state' : self.state,
'task_count' : len(self.task_xml),
'task_number' : self.current_task_number+1,
'status' : self.get_status(),
}
'state': self.state,
'task_count': len(self.task_xml),
'task_number': self.current_task_number + 1,
'status': self.get_status(),
}
return context
@@ -272,7 +280,7 @@ class CombinedOpenEndedModule(XModule):
Input: None
Output: rendered html
"""
context=self.get_context()
context = self.get_context()
html = self.system.render_template('combined_open_ended.html', context)
return html
@@ -283,7 +291,7 @@ class CombinedOpenEndedModule(XModule):
Input: None
Output: HTML rendered directly via Mako
"""
context=self.get_context()
context = self.get_context()
html = render_to_string('combined_open_ended.html', context)
return html
@@ -304,11 +312,11 @@ class CombinedOpenEndedModule(XModule):
Input: The number of the task.
Output: The minimum and maximum scores needed to move on to the specified task.
"""
task_xml=self.task_xml[task_number]
etree_xml=etree.fromstring(task_xml)
min_score_to_attempt=int(etree_xml.attrib.get('min_score_to_attempt',0))
max_score_to_attempt=int(etree_xml.attrib.get('max_score_to_attempt',self._max_score))
return {'min_score_to_attempt' : min_score_to_attempt, 'max_score_to_attempt' : max_score_to_attempt}
task_xml = self.task_xml[task_number]
etree_xml = etree.fromstring(task_xml)
min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))
max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score))
return {'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt}
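
A standalone illustration of the attribute parsing above (the sample XML and the fallback maximum of 3 are assumed for the example):

    from lxml import etree

    task_xml = '<task min_score_to_attempt="1"><openended/></task>'
    etree_xml = etree.fromstring(task_xml)
    # Missing attributes fall back to 0 and the module's _max_score.
    min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))
    max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', 3))
    assert (min_score_to_attempt, max_score_to_attempt) == (1, 3)
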
def get_last_response(self, task_number):
"""
@@ -316,49 +324,50 @@ class CombinedOpenEndedModule(XModule):
Input: The number of the task.
Output: A dictionary that contains information about the specified task.
"""
last_response=""
last_response = ""
task_state = self.task_states[task_number]
task_xml=self.task_xml[task_number]
task_type=self.get_tag_name(task_xml)
task_xml = self.task_xml[task_number]
task_type = self.get_tag_name(task_xml)
children=self.child_modules()
children = self.child_modules()
task_descriptor=children['descriptors'][task_type](self.system)
etree_xml=etree.fromstring(task_xml)
task_descriptor = children['descriptors'][task_type](self.system)
etree_xml = etree.fromstring(task_xml)
min_score_to_attempt=int(etree_xml.attrib.get('min_score_to_attempt',0))
max_score_to_attempt=int(etree_xml.attrib.get('max_score_to_attempt',self._max_score))
min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))
max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score))
task_parsed_xml=task_descriptor.definition_from_xml(etree_xml,self.system)
task=children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor, self.static_data, instance_state=task_state)
last_response=task.latest_answer()
task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system)
task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor,
self.static_data, instance_state=task_state)
last_response = task.latest_answer()
last_score = task.latest_score()
last_post_assessment = task.latest_post_assessment()
last_post_feedback=""
if task_type=="openended":
last_post_feedback = ""
if task_type == "openended":
last_post_assessment = task.latest_post_assessment(short_feedback=False, join_feedback=False)
if isinstance(last_post_assessment,list):
eval_list=[]
for i in xrange(0,len(last_post_assessment)):
if isinstance(last_post_assessment, list):
eval_list = []
for i in xrange(0, len(last_post_assessment)):
eval_list.append(task.format_feedback_with_evaluation(last_post_assessment[i]))
last_post_evaluation="".join(eval_list)
last_post_evaluation = "".join(eval_list)
else:
last_post_evaluation = task.format_feedback_with_evaluation(last_post_assessment)
last_post_assessment = last_post_evaluation
last_correctness = task.is_last_response_correct()
max_score = task.max_score()
state = task.state
last_response_dict={
'response' : last_response,
'score' : last_score,
'post_assessment' : last_post_assessment,
'type' : task_type,
'max_score' : max_score,
'state' : state,
'human_state' : task.HUMAN_NAMES[state],
'correct' : last_correctness,
'min_score_to_attempt' : min_score_to_attempt,
'max_score_to_attempt' : max_score_to_attempt,
last_response_dict = {
'response': last_response,
'score': last_score,
'post_assessment': last_post_assessment,
'type': task_type,
'max_score': max_score,
'state': state,
'human_state': task.HUMAN_NAMES[state],
'correct': last_correctness,
'min_score_to_attempt': min_score_to_attempt,
'max_score_to_attempt': max_score_to_attempt,
}
return last_response_dict
@@ -369,28 +378,28 @@ class CombinedOpenEndedModule(XModule):
Input: None
Output: boolean indicating whether or not the task state changed.
"""
changed=False
changed = False
if not self.allow_reset:
self.task_states[self.current_task_number] = self.current_task.get_instance_state()
current_task_state=json.loads(self.task_states[self.current_task_number])
if current_task_state['state']==self.DONE:
self.current_task_number+=1
if self.current_task_number>=(len(self.task_xml)):
self.state=self.DONE
self.current_task_number=len(self.task_xml)-1
current_task_state = json.loads(self.task_states[self.current_task_number])
if current_task_state['state'] == self.DONE:
self.current_task_number += 1
if self.current_task_number >= (len(self.task_xml)):
self.state = self.DONE
self.current_task_number = len(self.task_xml) - 1
else:
self.state=self.INITIAL
changed=True
self.state = self.INITIAL
changed = True
self.setup_next_task()
return changed
def update_task_states_ajax(self,return_html):
def update_task_states_ajax(self, return_html):
"""
Runs the update task states function for ajax calls. Currently the same as update_task_states
Input: The html returned by the handle_ajax function of the child
Output: New html that should be rendered
"""
changed=self.update_task_states()
changed = self.update_task_states()
if changed:
#return_html=self.get_html()
pass
@@ -402,12 +411,12 @@ class CombinedOpenEndedModule(XModule):
Input: AJAX get dictionary
Output: Dictionary to be rendered via ajax that contains the result html.
"""
task_number=int(get['task_number'])
task_number = int(get['task_number'])
self.update_task_states()
response_dict=self.get_last_response(task_number)
context = {'results' : response_dict['post_assessment'], 'task_number' : task_number+1}
response_dict = self.get_last_response(task_number)
context = {'results': response_dict['post_assessment'], 'task_number': task_number + 1}
html = render_to_string('combined_open_ended_results.html', context)
return {'html' : html, 'success' : True}
return {'html': html, 'success': True}
def handle_ajax(self, dispatch, get):
"""
@@ -423,15 +432,15 @@ class CombinedOpenEndedModule(XModule):
handlers = {
'next_problem': self.next_problem,
'reset': self.reset,
'get_results' : self.get_results
}
'get_results': self.get_results
}
if dispatch not in handlers:
return_html = self.current_task.handle_ajax(dispatch,get, self.system)
return_html = self.current_task.handle_ajax(dispatch, get, self.system)
return self.update_task_states_ajax(return_html)
d = handlers[dispatch](get)
return json.dumps(d,cls=ComplexEncoder)
return json.dumps(d, cls=ComplexEncoder)
def next_problem(self, get):
"""
@@ -440,7 +449,7 @@ class CombinedOpenEndedModule(XModule):
Output: Dictionary to be rendered
"""
self.update_task_states()
return {'success' : True, 'html' : self.get_html_nonsystem(), 'allow_reset' : self.allow_reset}
return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.allow_reset}
def reset(self, get):
"""
......@@ -457,17 +466,17 @@ class CombinedOpenEndedModule(XModule):
'success': False,
'error': 'Too many attempts.'
}
self.state=self.INITIAL
self.allow_reset=False
for i in xrange(0,len(self.task_xml)):
self.current_task_number=i
self.state = self.INITIAL
self.allow_reset = False
for i in xrange(0, len(self.task_xml)):
self.current_task_number = i
self.setup_next_task(reset=True)
self.current_task.reset(self.system)
self.task_states[self.current_task_number]=self.current_task.get_instance_state()
self.current_task_number=0
self.allow_reset=False
self.task_states[self.current_task_number] = self.current_task.get_instance_state()
self.current_task_number = 0
self.allow_reset = False
self.setup_next_task()
return {'success': True, 'html' : self.get_html_nonsystem()}
return {'success': True, 'html': self.get_html_nonsystem()}
def get_instance_state(self):
"""
@@ -482,8 +491,8 @@ class CombinedOpenEndedModule(XModule):
'state': self.state,
'task_states': self.task_states,
'attempts': self.attempts,
'ready_to_reset' : self.allow_reset,
}
'ready_to_reset': self.allow_reset,
}
return json.dumps(state)
@@ -493,16 +502,17 @@ class CombinedOpenEndedModule(XModule):
Input: None
Output: The status html to be rendered
"""
status=[]
for i in xrange(0,self.current_task_number+1):
status = []
for i in xrange(0, self.current_task_number + 1):
task_data = self.get_last_response(i)
task_data.update({'task_number' : i+1})
task_data.update({'task_number': i + 1})
status.append(task_data)
context = {'status_list' : status}
context = {'status_list': status}
status_html = self.system.render_template("combined_open_ended_status.html", context)
return status_html
class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
"""
Module for adding combined open ended questions
@@ -532,18 +542,18 @@ class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
"""
expected_children = ['task', 'rubric', 'prompt']
for child in expected_children:
if len(xml_object.xpath(child)) == 0 :
if len(xml_object.xpath(child)) == 0:
raise ValueError("Combined Open Ended definition must include at least one '{0}' tag".format(child))
def parse_task(k):
"""Assumes that xml_object has child k"""
return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0,len(xml_object.xpath(k)))]
return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))]
def parse(k):
"""Assumes that xml_object has child k"""
return xml_object.xpath(k)[0]
return {'task_xml': parse_task('task'), 'prompt' : parse('prompt'), 'rubric' : parse('rubric')}
return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')}
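
The parse_task comprehension above evaluates xml_object.xpath(k) twice and indexes by position; iterating the node list directly is equivalent and simpler (hypothetical rewrite using the same names as the surrounding code):

    def parse_task(k):
        """Assumes that xml_object has child k"""
        return [stringify_children(node) for node in xml_object.xpath(k)]
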
def definition_to_xml(self, resource_fs):
......
@@ -49,6 +49,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
</openendedparam>
</openended>
"""
def setup_response(self, system, location, definition, descriptor):
"""
Sets up the response type.
@@ -65,8 +66,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE)
#This is needed to attach feedback to specific responses later
self.submission_id=None
self.grader_id=None
self.submission_id = None
self.grader_id = None
if oeparam is None:
raise ValueError("No oeparam found in problem xml.")
@@ -77,10 +78,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self._parse(oeparam, self.prompt, self.rubric, system)
if self.created=="True" and self.state == self.ASSESSING:
self.created="False"
if self.created == "True" and self.state == self.ASSESSING:
self.created = "False"
self.send_to_grader(self.latest_answer(), system)
self.created="False"
self.created = "False"
def _parse(self, oeparam, prompt, rubric, system):
'''
@@ -94,8 +95,8 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
prompt_string = stringify_children(prompt)
rubric_string = stringify_children(rubric)
self.prompt=prompt_string
self.rubric=rubric_string
self.prompt = prompt_string
self.rubric = rubric_string
grader_payload = oeparam.find('grader_payload')
grader_payload = grader_payload.text if grader_payload is not None else ''
@@ -113,13 +114,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
parsed_grader_payload.update({
'location' : system.location.url(),
'course_id' : system.course_id,
'prompt' : prompt_string,
'rubric' : rubric_string,
'initial_display' : self.initial_display,
'answer' : self.answer,
})
'location': system.location.url(),
'course_id': system.course_id,
'prompt': prompt_string,
'rubric': rubric_string,
'initial_display': self.initial_display,
'answer': self.answer,
})
updated_grader_payload = json.dumps(parsed_grader_payload)
self.payload = {'grader_payload': updated_grader_payload}
@@ -131,10 +132,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
@param system: ModuleSystem
@return: Success indicator
"""
self.state=self.DONE
return {'success' : True}
self.state = self.DONE
return {'success': True}
def message_post(self,get, system):
def message_post(self, get, system):
"""
Handles a student message post (a reaction to the grade they received from an open ended grader type)
Returns a boolean success/fail and an error message
@@ -143,22 +144,23 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
event_info = dict()
event_info['problem_id'] = system.location.url()
event_info['student_id'] = system.anonymous_student_id
event_info['survey_responses']= get
event_info['survey_responses'] = get
survey_responses=event_info['survey_responses']
survey_responses = event_info['survey_responses']
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
return {'success' : False, 'msg' : "Could not find needed tag {0}".format(tag)}
return {'success': False, 'msg': "Could not find needed tag {0}".format(tag)}
try:
submission_id=int(survey_responses['submission_id'])
submission_id = int(survey_responses['submission_id'])
grader_id = int(survey_responses['grader_id'])
feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
score = int(survey_responses['score'])
except:
error_message=("Could not parse submission id, grader id, "
"or feedback from message_post ajax call. Here is the message data: {0}".format(survey_responses))
error_message = ("Could not parse submission id, grader id, "
"or feedback from message_post ajax call. Here is the message data: {0}".format(
survey_responses))
log.exception(error_message)
return {'success' : False, 'msg' : "There was an error saving your feedback. Please contact course staff."}
return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."}
qinterface = system.xqueue['interface']
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
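
The bare except in the hunk above also catches KeyboardInterrupt and SystemExit and can mask unrelated bugs; the int conversions and key lookups only raise a few predictable errors. A narrower sketch (hypothetical, not part of this commit):

    try:
        submission_id = int(survey_responses['submission_id'])
        grader_id = int(survey_responses['grader_id'])
        score = int(survey_responses['score'])
    except (KeyError, ValueError, TypeError):
        pass  # log and return the same error response as above
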
@@ -175,26 +177,26 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
student_info = {'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
contents= {
'feedback' : feedback,
'submission_id' : submission_id,
'grader_id' : grader_id,
}
contents = {
'feedback': feedback,
'submission_id': submission_id,
'grader_id': grader_id,
'score': score,
'student_info' : json.dumps(student_info),
}
'student_info': json.dumps(student_info),
}
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
#Convert error to a success value
success=True
success = True
if error:
success=False
success = False
self.state=self.DONE
self.state = self.DONE
return {'success' : success, 'msg' : "Successfully submitted your feedback."}
return {'success': success, 'msg': "Successfully submitted your feedback."}
def send_to_grader(self, submission, system):
"""
@@ -226,14 +228,14 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# Metadata related to the student submission revealed to the external grader
student_info = {'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
}
#Update contents with student response and student info
contents.update({
'student_info': json.dumps(student_info),
'student_response': submission,
'max_score' : self.max_score(),
})
'max_score': self.max_score(),
})
# Submit request. When successful, 'msg' is the prior length of the queue
(error, msg) = qinterface.send_to_queue(header=xheader,
@@ -241,7 +243,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
# State associated with the queueing request
queuestate = {'key': queuekey,
'time': qtime,}
'time': qtime, }
return True
def _update_score(self, score_msg, queuekey, system):
@@ -258,7 +260,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.record_latest_score(new_score_msg['score'])
self.record_latest_post_assessment(score_msg)
self.state=self.POST_ASSESSMENT
self.state = self.POST_ASSESSMENT
return True
@@ -313,24 +315,24 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
"""
return priorities.get(elt[0], default_priority)
def encode_values(feedback_type,value):
feedback_type=str(feedback_type).encode('ascii', 'ignore')
if not isinstance(value,basestring):
value=str(value)
value=value.encode('ascii', 'ignore')
return feedback_type,value
def encode_values(feedback_type, value):
feedback_type = str(feedback_type).encode('ascii', 'ignore')
if not isinstance(value, basestring):
value = str(value)
value = value.encode('ascii', 'ignore')
return feedback_type, value
def format_feedback(feedback_type, value):
feedback_type,value=encode_values(feedback_type,value)
feedback= """
feedback_type, value = encode_values(feedback_type, value)
feedback = """
<div class="{feedback_type}">
{value}
</div>
""".format(feedback_type=feedback_type, value=value)
return feedback
def format_feedback_hidden(feedback_type , value):
feedback_type,value=encode_values(feedback_type,value)
def format_feedback_hidden(feedback_type, value):
feedback_type, value = encode_values(feedback_type, value)
feedback = """
<input class="{feedback_type}" type="hidden" value="{value}" />
""".format(feedback_type=feedback_type, value=value)
@@ -360,11 +362,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
else:
feedback_list_part1 = format_feedback('errors', response_items['feedback'])
feedback_list_part2=(u"\n".join([format_feedback_hidden(feedback_type,value)
for feedback_type,value in response_items.items()
if feedback_type in ['submission_id', 'grader_id']]))
feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value)
for feedback_type, value in response_items.items()
if feedback_type in ['submission_id', 'grader_id']]))
return u"\n".join([feedback_list_part1,feedback_list_part2])
return u"\n".join([feedback_list_part1, feedback_list_part2])
def _format_feedback(self, response_items):
"""
@@ -378,13 +380,13 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
if not response_items['success']:
return system.render_template("open_ended_error.html",
{'errors' : feedback})
{'errors': feedback})
feedback_template = render_to_string("open_ended_feedback.html", {
'grader_type': response_items['grader_type'],
'score': "{0} / {1}".format(response_items['score'], self.max_score()),
'feedback': feedback,
})
})
return feedback_template
@@ -403,57 +405,57 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
correct: Correctness of submission (Boolean)
score: Points to be assigned (numeric, can be float)
"""
fail = {'valid' : False, 'score' : 0, 'feedback' : ''}
fail = {'valid': False, 'score': 0, 'feedback': ''}
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
error_message=("External grader message should be a JSON-serialized dict."
" Received score_msg = {0}".format(score_msg))
error_message = ("External grader message should be a JSON-serialized dict."
" Received score_msg = {0}".format(score_msg))
log.error(error_message)
fail['feedback']=error_message
fail['feedback'] = error_message
return fail
if not isinstance(score_result, dict):
error_message=("External grader message should be a JSON-serialized dict."
" Received score_result = {0}".format(score_result))
error_message = ("External grader message should be a JSON-serialized dict."
" Received score_result = {0}".format(score_result))
log.error(error_message)
fail['feedback']=error_message
fail['feedback'] = error_message
return fail
for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
if tag not in score_result:
error_message=("External grader message is missing required tag: {0}"
.format(tag))
error_message = ("External grader message is missing required tag: {0}"
.format(tag))
log.error(error_message)
fail['feedback']=error_message
fail['feedback'] = error_message
return fail
#This is to support peer grading
#This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items=[]
for i in xrange(0,len(score_result['score'])):
new_score_result={
'score' : score_result['score'][i],
'feedback' : score_result['feedback'][i],
'grader_type' : score_result['grader_type'],
'success' : score_result['success'],
'grader_id' : score_result['grader_id'][i],
'submission_id' : score_result['submission_id']
}
feedback_items = []
for i in xrange(0, len(score_result['score'])):
new_score_result = {
'score': score_result['score'][i],
'feedback': score_result['feedback'][i],
'grader_type': score_result['grader_type'],
'success': score_result['success'],
'grader_id': score_result['grader_id'][i],
'submission_id': score_result['submission_id']
}
feedback_items.append(self._format_feedback(new_score_result))
if join_feedback:
feedback="".join(feedback_items)
feedback = "".join(feedback_items)
else:
feedback=feedback_items
feedback = feedback_items
score = int(median(score_result['score']))
else:
#This is for instructor and ML grading
feedback = self._format_feedback(score_result)
score=score_result['score']
score = score_result['score']
self.submission_id=score_result['submission_id']
self.grader_id=score_result['grader_id']
self.submission_id = score_result['submission_id']
self.grader_id = score_result['grader_id']
return {'valid' : True, 'score' : score, 'feedback' : feedback}
return {'valid': True, 'score': score, 'feedback': feedback}
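
For the peer-grading branch above, the recorded score is the integer median of the individual peer scores. A small stand-in for the median helper the module imports (median_score is hypothetical):

    def median_score(scores):
        # median of [2, 3, 3] is 3; median of [1, 2, 3, 4] is 2.5
        ordered = sorted(scores)
        mid = len(ordered) // 2
        if len(ordered) % 2:
            return ordered[mid]
        return (ordered[mid - 1] + ordered[mid]) / 2.0

    assert int(median_score([2, 3, 3])) == 3
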
def latest_post_assessment(self, short_feedback=False, join_feedback=True):
"""
@@ -468,17 +470,18 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
if not short_feedback:
return feedback_dict['feedback'] if feedback_dict['valid'] else ''
if feedback_dict['valid']:
short_feedback = self._convert_longform_feedback_to_html(json.loads(self.history[-1].get('post_assessment', "")))
short_feedback = self._convert_longform_feedback_to_html(
json.loads(self.history[-1].get('post_assessment', "")))
return short_feedback if feedback_dict['valid'] else ''
def format_feedback_with_evaluation(self,feedback):
def format_feedback_with_evaluation(self, feedback):
"""
Renders a given html feedback into an evaluation template
@param feedback: HTML feedback
@return: Rendered html
"""
context={'msg' : feedback, 'id' : "1", 'rows' : 50, 'cols' : 50}
html= render_to_string('open_ended_evaluation.html', context)
context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50}
html = render_to_string('open_ended_evaluation.html', context)
return html
def handle_ajax(self, dispatch, get, system):
@@ -494,10 +497,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
handlers = {
'save_answer': self.save_answer,
'score_update': self.update_score,
'save_post_assessment' : self.message_post,
'skip_post_assessment' : self.skip_post_assessment,
'check_for_score' : self.check_for_score,
}
'save_post_assessment': self.message_post,
'skip_post_assessment': self.skip_post_assessment,
'check_for_score': self.check_for_score,
}
if dispatch not in handlers:
return 'Error'
@@ -508,7 +511,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
d.update({
'progress_changed': after != before,
'progress_status': Progress.to_js_status_str(after),
})
})
return json.dumps(d, cls=ComplexEncoder)
def check_for_score(self, get, system):
@@ -519,7 +522,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
@return: Returns the current state
"""
state = self.state
return {'state' : state}
return {'state': state}
def save_answer(self, get, system):
"""
@@ -545,7 +548,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
self.send_to_grader(get['student_answer'], system)
self.change_state(self.ASSESSING)
return {'success': True,}
return {'success': True, }
def update_score(self, get, system):
"""
@@ -571,11 +574,11 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
latest = self.latest_answer()
previous_answer = latest if latest is not None else self.initial_display
post_assessment = self.latest_post_assessment()
score= self.latest_score()
score = self.latest_score()
correct = 'correct' if self.is_submission_correct(score) else 'incorrect'
else:
post_assessment=""
correct=""
post_assessment = ""
correct = ""
previous_answer = self.initial_display
context = {
@@ -583,17 +586,18 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
'previous_answer': previous_answer,
'state': self.state,
'allow_reset': self._allow_reset(),
'rows' : 30,
'cols' : 80,
'id' : 'open_ended',
'msg' : post_assessment,
'child_type' : 'openended',
'correct' : correct,
}
'rows': 30,
'cols': 80,
'id': 'open_ended',
'msg': post_assessment,
'child_type': 'openended',
'correct': correct,
}
log.debug(context)
html = system.render_template('open_ended.html', context)
return html
class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
"""
Module for adding open ended response questions to courses
@@ -627,7 +631,7 @@ class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
"""Assumes that xml_object has child k"""
return xml_object.xpath(k)[0]
return {'oeparam': parse('openendedparam'),}
return {'oeparam': parse('openendedparam'), }
def definition_to_xml(self, resource_fs):
......
@@ -63,11 +63,11 @@ class OpenEndedChild():
DONE = 'done'
#This is used to tell students where they are at in the module
HUMAN_NAMES={
'initial' : 'Started',
'assessing' : 'Being scored',
'post_assessment' : 'Scoring finished',
'done' : 'Problem complete',
HUMAN_NAMES = {
'initial': 'Started',
'assessing': 'Being scored',
'post_assessment': 'Scoring finished',
'done': 'Problem complete',
}
def __init__(self, system, location, definition, descriptor, static_data,
@@ -84,7 +84,7 @@ class OpenEndedChild():
# Scores are on scale from 0 to max_score
self.history = instance_state.get('history', [])
self.state = instance_state.get('state', 'initial')
self.state = instance_state.get('state', self.INITIAL)
self.created = instance_state.get('created', "False")
@@ -171,8 +171,8 @@ class OpenEndedChild():
'state': self.state,
'max_score': self._max_score,
'attempts': self.attempts,
'created' : "False",
}
'created': "False",
}
return json.dumps(state)
def _allow_reset(self):
@@ -244,8 +244,8 @@ class OpenEndedChild():
@param score: Numeric score.
@return: Boolean correct.
"""
correct=False
if(isinstance(score,(int, long, float, complex))):
correct = False
if(isinstance(score, (int, long, float, complex))):
score_ratio = int(score) / float(self.max_score())
correct = (score_ratio >= 0.66)
return correct
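
Worked example for the threshold above, assuming max_score() returns 3: a score of 2 gives 2 / 3.0, about 0.667, which meets the 0.66 cutoff and counts as correct, while a score of 1 gives about 0.333 and does not.
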
@@ -255,7 +255,7 @@ class OpenEndedChild():
Checks to see if the last response in the module is correct.
@return: 'correct' if correct, otherwise 'incorrect'
"""
score=self.get_score()['score']
score = self.get_score()['score']
correctness = 'correct' if self.is_submission_correct(score) else 'incorrect'
return correctness
......
@@ -40,6 +40,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
</submitmessage>
</selfassessment>
"""
def setup_response(self, system, location, definition, descriptor):
"""
Sets up the module
@@ -76,7 +77,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'initial_message': self.get_message_html(),
'state': self.state,
'allow_reset': self._allow_reset(),
'child_type' : 'selfassessment',
'child_type': 'selfassessment',
}
html = system.render_template('self_assessment_prompt.html', context)
@@ -112,7 +113,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
})
return json.dumps(d, cls=ComplexEncoder)
def get_rubric_html(self,system):
def get_rubric_html(self, system):
"""
Return the appropriate version of the rubric, based on the state.
"""
@@ -121,8 +122,8 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
# we'll render it
context = {'rubric': self.rubric,
'max_score' : self._max_score,
}
'max_score': self._max_score,
}
if self.state == self.ASSESSING:
context['read_only'] = False
@@ -133,7 +134,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
return system.render_template('self_assessment_rubric.html', context)
def get_hint_html(self,system):
def get_hint_html(self, system):
"""
Return the appropriate version of the hint view, based on state.
"""
@@ -201,7 +202,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
return {
'success': True,
'rubric_html': self.get_rubric_html(system)
}
}
def save_assessment(self, get, system):
"""
@@ -228,7 +229,7 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
self.record_latest_score(score)
d = {'success': True,}
d = {'success': True, }
if score == self.max_score():
self.change_state(self.DONE)
@@ -264,7 +265,6 @@ class SelfAssessmentModule(openendedchild.OpenEndedChild):
'allow_reset': self._allow_reset()}
class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
"""
Module for adding self assessment questions to courses
@@ -302,7 +302,7 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
return {'submitmessage': parse('submitmessage'),
'hintprompt': parse('hintprompt'),
}
}
def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.'''
......