Commit 682ac345 by Vik Paruchuri

Address some comments from code review, fix rubric display

parent 9a752f51
@@ -52,6 +52,11 @@ class CombinedOpenEndedModule(XModule):
         'reset' -- resets the whole combined open ended module and returns to the first child module
         'next_problem' -- moves to the next child module
         'get_results' -- gets results from a given child module
+
+    Types of children. Task is synonymous with child module, so each combined open ended module
+    incorporates multiple children (tasks):
+        openendedmodule
+        selfassessmentmodule
     """
     STATE_VERSION = 1
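For orientation (not part of this commit's changes): `child_modules()` evidently returns parallel 'modules' and 'descriptors' dicts keyed by a task's XML tag name, which is how the dispatch in the hunks below works. A minimal sketch of that assumed shape, with stand-in classes and assumed tag names:

    # Sketch only: stand-in classes and assumed tag names ('openended',
    # 'selfassessment'); the real classes live in the xmodule package and
    # take richer constructor arguments.
    class OpenEndedModule:
        pass

    class SelfAssessmentModule:
        pass

    def child_modules():
        return {
            'modules': {
                'openended': OpenEndedModule,
                'selfassessment': SelfAssessmentModule,
            },
        }

    children = child_modules()
    child_task_module = children['modules']['selfassessment']
    print(child_task_module().__class__.__name__)  # SelfAssessmentModule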
@@ -60,7 +65,6 @@ class CombinedOpenEndedModule(XModule):
     ASSESSING = 'assessing'
     INTERMEDIATE_DONE = 'intermediate_done'
     DONE = 'done'
-    TASK_TYPES = ["self", "ml", "instructor", "peer"]

     js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
                      resource_string(__name__, 'js/src/collapsible.coffee'),
@@ -216,23 +220,33 @@ class CombinedOpenEndedModule(XModule):
         current_task_type = self.get_tag_name(self.current_task_xml)
         children = self.child_modules()
+        child_task_module = children['modules'][current_task_type]
         self.current_task_descriptor = children['descriptors'][current_task_type](self.system)
+        #This is the xml object created from the xml definition of the current task
         etree_xml = etree.fromstring(self.current_task_xml)
+        #This sends the etree_xml object through the descriptor module of the current task, and
+        #returns the xml parsed by the descriptor
         self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
         if current_task_state is None and self.current_task_number == 0:
-            self.current_task = children['modules'][current_task_type](self.system, self.location,
+            self.current_task = child_task_module(self.system, self.location,
                 self.current_task_parsed_xml, self.current_task_descriptor, self.static_data)
             self.task_states.append(self.current_task.get_instance_state())
             self.state = self.ASSESSING
         elif current_task_state is None and self.current_task_number > 0:
             last_response_data = self.get_last_response(self.current_task_number - 1)
             last_response = last_response_data['response']
-            current_task_state = (
-                '{"state": "' + str(self.ASSESSING) + '", "version": 1, "max_score": ' + str(self._max_score) + ', ' +
-                '"attempts": 0, "created": "True", "history": [{"answer": "' + str(last_response) + '"}]}')
-            self.current_task = children['modules'][current_task_type](self.system, self.location,
+            current_task_state = json.dumps({
+                'state': self.ASSESSING,
+                'version': self.STATE_VERSION,
+                'max_score': self._max_score,
+                'attempts': 0,
+                'created': True,
+                'history': [{'answer': str(last_response)}],
+            })
+            self.current_task = child_task_module(self.system, self.location,
                 self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
                 instance_state=current_task_state)
             self.task_states.append(self.current_task.get_instance_state())
@@ -240,7 +254,7 @@ class CombinedOpenEndedModule(XModule):
         else:
             if self.current_task_number > 0 and not reset:
                 current_task_state = self.overwrite_state(current_task_state)
-            self.current_task = children['modules'][current_task_type](self.system, self.location,
+            self.current_task = child_task_module(self.system, self.location,
                 self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
                 instance_state=current_task_state)
...
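The switch above from a hand-concatenated JSON string to json.dumps is more than stylistic: the old code embedded last_response without escaping, so any answer containing a double quote produced unparseable state, and "created": "True" stored a string where a boolean belongs. A standalone demonstration of both problems (values invented for illustration):

    import json

    ASSESSING = 'assessing'
    max_score = 3
    last_response = 'She said "hello"'  # an answer containing quotes

    # Old style: naive concatenation breaks on embedded quotes.
    old_state = (
        '{"state": "' + ASSESSING + '", "version": 1, "max_score": ' + str(max_score) + ', '
        '"attempts": 0, "created": "True", "history": [{"answer": "' + last_response + '"}]}')
    try:
        json.loads(old_state)
    except ValueError as err:
        print('concatenated state is invalid JSON:', err)

    # New style: json.dumps escapes the quotes and emits a real boolean.
    new_state = json.dumps({
        'state': ASSESSING,
        'version': 1,
        'max_score': max_score,
        'attempts': 0,
        'created': True,
        'history': [{'answer': last_response}],
    })
    print(json.loads(new_state)['created'] is True)  # True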
@@ -78,10 +78,10 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         self._parse(oeparam, self.prompt, self.rubric, system)

-        if self.created == "True" and self.state == self.ASSESSING:
-            self.created = "False"
+        if self.created == True and self.state == self.ASSESSING:
+            self.created = False
             self.send_to_grader(self.latest_answer(), system)
-            self.created = "False"
+            self.created = False

     def _parse(self, oeparam, prompt, rubric, system):
         '''
@@ -379,7 +379,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
         log.debug(response_items)
         rubric_feedback = ""
         feedback = self._convert_longform_feedback_to_html(response_items)
-        if response_items['rubric_scores_complete']:
+        if response_items['rubric_scores_complete'] == True:
             rubric_feedback = self.render_rubric(response_items['rubric_xml'])
         if not response_items['success']:
...
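Context for the created changes above: the flag round-trips through json.dumps/json.loads in the instance state, so once it is stored as a real boolean, any leftover comparison against the string "True" silently never matches. A small illustration:

    import json

    # After this commit the serialized state carries a real boolean.
    state = json.loads(json.dumps({'state': 'assessing', 'created': True}))
    created = state['created']

    print(created == "True")  # False -- the old string comparison never fires
    print(created == True)    # True  -- the comparison this commit adopts
    print(bool(created))      # True  -- plain `if created:` would be the idiomatic form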
@@ -86,7 +86,7 @@ class OpenEndedChild():
         self.state = instance_state.get('state', self.INITIAL)
-        self.created = instance_state.get('created', "False")
+        self.created = instance_state.get('created', False)
         self.attempts = instance_state.get('attempts', 0)
         self.max_attempts = static_data['max_attempts']
@@ -171,7 +171,7 @@ class OpenEndedChild():
             'state': self.state,
             'max_score': self._max_score,
             'attempts': self.attempts,
-            'created': "False",
+            'created': False,
         }
         return json.dumps(state)
...
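The two OpenEndedChild hunks above are two halves of one contract: get_instance_state now serializes 'created': False, and the reader takes a boolean default of the same type, so a missing key and a freshly serialized state agree. A simplified round-trip sketch (free functions instead of methods, invented values):

    import json

    def dump_instance_state(state, max_score, attempts):
        # Mirrors the serializer hunk above, reduced to a free function.
        return json.dumps({
            'state': state,
            'max_score': max_score,
            'attempts': attempts,
            'created': False,
        })

    def read_created(raw_state):
        instance_state = json.loads(raw_state) if raw_state else {}
        # Boolean default matches the serialized type.
        return instance_state.get('created', False)

    print(read_created(dump_instance_state('done', 3, 1)))  # False
    print(read_created(None))                               # False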
@@ -3,7 +3,7 @@
     <% category = rubric_categories[i] %>
     <tr>
         <th>${category['description']}</th>
-        <td>${category['score']}</td>
+        <td>Your Score: ${category['score']} </td>
         % for j in range(len(category['options'])):
             <% option = category['options'][j] %>
             <td>
...