Commit a64111a6 by VikParuchuri

Merge pull request #1242 from MITx/feature/vik/integrate-sa-and-oe

Feature/vik/integrate sa and oe
parents 19f79fa3 a6480665
......@@ -735,51 +735,3 @@ class ChemicalEquationInput(InputTypeBase):
registry.register(ChemicalEquationInput)
#-----------------------------------------------------------------------------
class OpenEndedInput(InputTypeBase):
"""
A text area input for open ended responses: the student enters a long-form
answer, and this input tracks the queueing state while it is graded externally.
"""
template = "openendedinput.html"
tags = ['openendedinput']
# pulled out for testing
submitted_msg = ("Feedback not yet available. Reload to check again. "
"Once the problem is graded, this message will be "
"replaced with the grader's feedback.")
@classmethod
def get_attributes(cls):
"""
Convert options to a convenient format.
"""
return [Attribute('rows', '30'),
Attribute('cols', '80'),
Attribute('hidden', ''),
]
def setup(self):
"""
Implement special logic: handle queueing state, and default input.
"""
# if no student input yet, then use the default input given by the problem
if not self.value:
self.value = self.xml.text
# Check if problem has been queued
self.queue_len = 0
# Flag indicating that the problem has been queued, 'msg' is length of queue
if self.status == 'incomplete':
self.status = 'queued'
self.queue_len = self.msg
self.msg = self.submitted_msg
def _extra_context(self):
"""Defined queue_len, add it """
return {'queue_len': self.queue_len,}
registry.register(OpenEndedInput)
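# Illustrative usage sketch (not part of this commit): problem XML that would
# instantiate the input type above. The attribute names come from
# get_attributes(), and the element text becomes the default value in setup().
example_input_xml = '<openendedinput rows="30" cols="80">Enter your essay here.</openendedinput>'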
#-----------------------------------------------------------------------------
......@@ -1815,436 +1815,6 @@ class ImageResponse(LoncapaResponse):
return (dict([(ie.get('id'), ie.get('rectangle')) for ie in self.ielements]),
dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements]))
#-----------------------------------------------------------------------------
class OpenEndedResponse(LoncapaResponse):
"""
Grade student open ended responses using an external grading system,
accessed through the xqueue system.
Expects 'xqueue' dict in ModuleSystem with the following keys that are
needed by OpenEndedResponse:
system.xqueue = { 'interface': XqueueInterface object,
'callback_url': Per-StudentModule callback URL
where results are posted (string),
}
External requests are only submitted for student submission grading
(i.e., not for getting reference answers)
By default, uses the OpenEndedResponse.DEFAULT_QUEUE queue.
"""
DEFAULT_QUEUE = 'open-ended'
DEFAULT_MESSAGE_QUEUE = 'open-ended-message'
response_tag = 'openendedresponse'
allowed_inputfields = ['openendedinput']
max_inputfields = 1
def setup_response(self):
'''
Configure OpenEndedResponse from XML.
'''
xml = self.xml
self.url = xml.get('url', None)
self.queue_name = xml.get('queuename', self.DEFAULT_QUEUE)
self.message_queue_name = xml.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE)
# The openendedparam tag encapsulates all grader settings
oeparam = self.xml.find('openendedparam')
prompt = self.xml.find('prompt')
rubric = self.xml.find('openendedrubric')
# This is needed to attach feedback to specific responses later
self.submission_id = None
self.grader_id = None
if oeparam is None:
raise ValueError("No oeparam found in problem xml.")
if prompt is None:
raise ValueError("No prompt found in problem xml.")
if rubric is None:
raise ValueError("No rubric found in problem xml.")
self._parse(oeparam, prompt, rubric)
@staticmethod
def stringify_children(node):
"""
Adapted from stringify_children in xmodule. Not imported directly, to avoid
making capa depend on xmodule (a dependency the codebase otherwise avoids).
"""
parts = [node.text if node.text is not None else '']
for p in node.getchildren():
parts.append(etree.tostring(p, with_tail=True, encoding='unicode'))
return ' '.join(parts)
def _parse(self, oeparam, prompt, rubric):
'''
Parse OpenEndedResponse XML:
self.initial_display
self.payload - dict containing keys --
'grader' : path to grader settings file, 'problem_id' : id of the problem
self.answer - What to display when show answer is clicked
'''
# Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
prompt_string = self.stringify_children(prompt)
rubric_string = self.stringify_children(rubric)
grader_payload = oeparam.find('grader_payload')
grader_payload = grader_payload.text if grader_payload is not None else ''
# Update the grader payload with problem data (location, prompt, rubric, etc.). If the payload is not valid JSON, log an error.
try:
parsed_grader_payload = json.loads(grader_payload)
# NOTE: self.system.location is valid because the capa_module
# __init__ adds it (easiest way to get problem location into
# response types)
except (TypeError, ValueError):
log.exception("Grader payload %r is not a json object!", grader_payload)
# Fall back to an empty payload so the update below does not fail
parsed_grader_payload = {}
self.initial_display = find_with_default(oeparam, 'initial_display', '')
self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
parsed_grader_payload.update({
'location' : self.system.location,
'course_id' : self.system.course_id,
'prompt' : prompt_string,
'rubric' : rubric_string,
'initial_display' : self.initial_display,
'answer' : self.answer,
})
updated_grader_payload = json.dumps(parsed_grader_payload)
self.payload = {'grader_payload': updated_grader_payload}
try:
self.max_score = int(find_with_default(oeparam, 'max_score', 1))
except ValueError:
self.max_score = 1
def handle_message_post(self, event_info):
"""
Handles a student message post (a reaction to the grade they received from an open ended grader type)
Returns a boolean success/fail and an error message
"""
survey_responses = event_info['survey_responses']
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
return False, "Could not find needed tag {0}".format(tag)
try:
submission_id = int(survey_responses['submission_id'])
grader_id = int(survey_responses['grader_id'])
feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
score = int(survey_responses['score'])
except (ValueError, TypeError, AttributeError):
error_message = ("Could not parse submission id, grader id, "
"or feedback from message_post ajax call. Here is the message data: {0}".format(survey_responses))
log.exception(error_message)
return False, "There was an error saving your feedback. Please contact course staff."
qinterface = self.system.xqueue['interface']
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
anonymous_student_id = self.system.anonymous_student_id
queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
anonymous_student_id +
self.answer_id)
xheader = xqueue_interface.make_xheader(
lms_callback_url=self.system.xqueue['callback_url'],
lms_key=queuekey,
queue_name=self.message_queue_name
)
student_info = {'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
contents= {
'feedback' : feedback,
'submission_id' : submission_id,
'grader_id' : grader_id,
'score': score,
'student_info' : json.dumps(student_info),
}
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
# Convert the error flag into a success value and a matching message
if error:
return False, "There was an error submitting your feedback. Please contact course staff."
return True, "Successfully submitted your feedback."
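# Illustrative sketch (not part of this commit): the event_info dict that
# handle_message_post above expects from the browser; values are hypothetical.
# event_info = {'survey_responses': {'feedback': 'This grade made sense to me.',
# 'submission_id': '7', 'grader_id': '42', 'score': '3'}}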
def get_score(self, student_answers):
try:
submission = student_answers[self.answer_id]
except KeyError:
msg = ('Cannot get student answer for answer_id: {0}. student_answers {1}'
.format(self.answer_id, student_answers))
log.exception(msg)
raise LoncapaProblemError(msg)
# Prepare xqueue request
#------------------------------------------------------------
qinterface = self.system.xqueue['interface']
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
anonymous_student_id = self.system.anonymous_student_id
# Generate header
queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
anonymous_student_id +
self.answer_id)
xheader = xqueue_interface.make_xheader(lms_callback_url=self.system.xqueue['callback_url'],
lms_key=queuekey,
queue_name=self.queue_name)
self.context.update({'submission': submission})
contents = self.payload.copy()
# Metadata related to the student submission revealed to the external grader
student_info = {'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
#Update contents with student response and student info
contents.update({
'student_info': json.dumps(student_info),
'student_response': submission,
'max_score' : self.max_score,
})
# Submit request. When successful, 'msg' is the prior length of the queue
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
# State associated with the queueing request
queuestate = {'key': queuekey,
'time': qtime,}
cmap = CorrectMap()
if error:
cmap.set(self.answer_id, queuestate=None,
msg='Unable to deliver your submission to grader. (Reason: {0}.)'
' Please try again later.'.format(msg))
else:
# Queueing mechanism flags:
# 1) Backend: Non-null CorrectMap['queuestate'] indicates that
# the problem has been queued
# 2) Frontend: correctness='incomplete' eventually trickles down
# through inputtypes.textbox and .filesubmission to inform the
# browser that the submission is queued (and it could e.g. poll)
cmap.set(self.answer_id, queuestate=queuestate,
correctness='incomplete', msg=msg)
return cmap
def update_score(self, score_msg, oldcmap, queuekey):
log.debug(score_msg)
score_msg = self._parse_score_msg(score_msg)
if not score_msg.valid:
oldcmap.set(self.answer_id,
msg = 'Invalid grader reply. Please contact the course staff.')
return oldcmap
correctness = 'correct' if score_msg.correct else 'incorrect'
# TODO: Find out how this is used elsewhere, if any
self.context['correct'] = correctness
# Replace 'oldcmap' with new grading results if queuekey matches. If queuekey
# does not match, we keep waiting for the score_msg whose key actually matches
if oldcmap.is_right_queuekey(self.answer_id, queuekey):
# Sanity check on returned points
points = score_msg.points
if points < 0:
points = 0
# Queuestate is consumed, so reset it to None
oldcmap.set(self.answer_id, npoints=points, correctness=correctness,
msg = score_msg.msg.replace('&nbsp;', '&#160;'), queuestate=None)
else:
log.debug('OpenEndedResponse: queuekey {0} does not match for answer_id={1}.'.format(
queuekey, self.answer_id))
return oldcmap
def get_answers(self):
anshtml = '<span class="openended-answer"><pre><code>{0}</code></pre></span>'.format(self.answer)
return {self.answer_id: anshtml}
def get_initial_display(self):
return {self.answer_id: self.initial_display}
def _convert_longform_feedback_to_html(self, response_items):
"""
Take in a dictionary, and return html strings for display to student.
Input:
response_items: Dictionary with keys success, feedback.
if success is True, feedback should be a dictionary, with keys for
types of feedback, and the corresponding feedback values.
if success is False, feedback is actually an error string.
NOTE: this will need to change when we integrate peer grading, because
that will have more complex feedback.
Output:
String -- html that can be displayed to the student.
"""
# We want to display available feedback in a particular order.
# This dictionary specifies which goes first--lower first.
priorities = {# These go at the start of the feedback
'spelling': 0,
'grammar': 1,
# needs to be after all the other feedback
'markup_text': 3}
default_priority = 2
def get_priority(elt):
"""
Args:
elt: a tuple of feedback-type, feedback
Returns:
the priority for this feedback type
"""
return priorities.get(elt[0], default_priority)
def encode_values(feedback_type, value):
feedback_type = str(feedback_type).encode('ascii', 'ignore')
if not isinstance(value, basestring):
value = str(value)
value = value.encode('ascii', 'ignore')
return feedback_type, value
def format_feedback(feedback_type, value):
feedback_type, value = encode_values(feedback_type, value)
feedback = """
<div class="{feedback_type}">
{value}
</div>
""".format(feedback_type=feedback_type, value=value)
return feedback
def format_feedback_hidden(feedback_type, value):
feedback_type, value = encode_values(feedback_type, value)
feedback = """
<div class="{feedback_type}" style="display: none;">
{value}
</div>
""".format(feedback_type=feedback_type, value=value)
return feedback
# TODO (vshnayder): design and document the details of this format so
# that we can do proper escaping here (e.g. are the graders allowed to
# include HTML?)
for tag in ['success', 'feedback', 'submission_id', 'grader_id']:
if tag not in response_items:
return format_feedback('errors', 'Error getting feedback')
feedback_items = response_items['feedback']
try:
feedback = json.loads(feedback_items)
except (TypeError, ValueError):
log.exception("feedback_items have invalid json %r", feedback_items)
return format_feedback('errors', 'Could not parse feedback')
if response_items['success']:
if len(feedback) == 0:
return format_feedback('errors', 'No feedback available')
feedback_lst = sorted(feedback.items(), key=get_priority)
feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
else:
feedback_list_part1 = format_feedback('errors', response_items['feedback'])
feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value)
for feedback_type, value in response_items.items()
if feedback_type in ['submission_id', 'grader_id']]))
return u"\n".join([feedback_list_part1, feedback_list_part2])
def _format_feedback(self, response_items):
"""
Input:
Dictionary called feedback. Must contain keys seen below.
Output:
Return error message or feedback template
"""
feedback = self._convert_longform_feedback_to_html(response_items)
if not response_items['success']:
return self.system.render_template("open_ended_error.html",
{'errors' : feedback})
feedback_template = self.system.render_template("open_ended_feedback.html", {
'grader_type': response_items['grader_type'],
'score': "{0} / {1}".format(response_items['score'], self.max_score),
'feedback': feedback,
})
return feedback_template
def _parse_score_msg(self, score_msg):
"""
Grader reply is a JSON-dump of the following dict
{ 'correct': True/False,
'score': Numeric value (floating point is okay) to assign to answer
'msg': grader_msg
'feedback' : feedback from grader
}
Returns (valid_score_msg, correct, score, msg):
valid_score_msg: Flag indicating valid score_msg format (Boolean)
correct: Correctness of submission (Boolean)
score: Points to be assigned (numeric, can be float)
"""
fail = ScoreMessage(valid=False, correct=False, points=0, msg='')
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
log.error("External grader message should be a JSON-serialized dict."
" Received score_msg = {0}".format(score_msg))
return fail
if not isinstance(score_result, dict):
log.error("External grader message should be a JSON-serialized dict."
" Received score_result = {0}".format(score_result))
return fail
for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
if tag not in score_result:
log.error("External grader message is missing required tag: {0}"
.format(tag))
return fail
feedback = self._format_feedback(score_result)
self.submission_id = score_result['submission_id']
self.grader_id = score_result['grader_id']
# HACK: for now, just assume it's correct if you got more than 2/3.
# Also assumes that score_result['score'] is an integer.
score_ratio = int(score_result['score']) / float(self.max_score)
correct = (score_ratio >= 0.66)
# Currently ignore msg and only return feedback (which takes the place of msg)
return ScoreMessage(valid=True, correct=correct,
points=score_result['score'], msg=feedback)
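# Illustrative sketch (not part of this commit): a minimal grader reply that
# _parse_score_msg above would accept. The required keys mirror its validation
# loop; all values here are made up. With max_score = 1, score_ratio is 1.0,
# which clears the 2/3 threshold, so the submission would be marked correct.
example_score_msg = json.dumps({
'success': True,
'score': 1,
'grader_type': 'ML',
'grader_id': 42,
'submission_id': 7,
'feedback': json.dumps({'grammar': 'Good use of grammar.'}),
})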
#-----------------------------------------------------------------------------
# TEMPORARY: List of all response subclasses
# FIXME: To be replaced by auto-registration
......@@ -2261,5 +1831,4 @@ __all__ = [CodeResponse,
ChoiceResponse,
MultipleChoiceResponse,
TrueFalseResponse,
JavascriptResponse,
OpenEndedResponse]
JavascriptResponse]
......@@ -19,6 +19,7 @@ setup(
"abtest = xmodule.abtest_module:ABTestDescriptor",
"book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"chapter = xmodule.seq_module:SequenceDescriptor",
"combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor",
"course = xmodule.course_module:CourseDescriptor",
"customtag = xmodule.template_module:CustomTagDescriptor",
"discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor",
......@@ -28,7 +29,6 @@ setup(
"problem = xmodule.capa_module:CapaDescriptor",
"problemset = xmodule.seq_module:SequenceDescriptor",
"section = xmodule.backcompat_module:SemanticSectionDescriptor",
"selfassessment = xmodule.self_assessment_module:SelfAssessmentDescriptor",
"sequential = xmodule.seq_module:SequenceDescriptor",
"slides = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"vertical = xmodule.vertical_module:VerticalDescriptor",
......
......@@ -445,6 +445,7 @@ class CapaModule(XModule):
return False
def update_score(self, get):
"""
Delivers grading response (e.g. from asynchronous code checking) to
......
import copy
from fs.errors import ResourceNotFoundError
import itertools
import json
import logging
from lxml import etree
from lxml.html import rewrite_links
from path import path
import os
import sys
from pkg_resources import resource_string
from .capa_module import only_one, ComplexEncoder
from .editing_module import EditingDescriptor
from .html_checker import check_html
from progress import Progress
from .stringify import stringify_children
from .x_module import XModule
from .xml_module import XmlDescriptor
from xmodule.modulestore import Location
import self_assessment_module
import open_ended_module
from mitxmako.shortcuts import render_to_string
log = logging.getLogger("mitx.courseware")
# Set the default number of max attempts. Should be 1 for production.
# Set higher for debugging/testing.
# An 'attempts' value in the xml definition overrides this.
MAX_ATTEMPTS = 10000
# Set the maximum available number of points.
# Overridden by a max_score specified in the xml.
MAX_SCORE = 1
class CombinedOpenEndedModule(XModule):
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
It transitions between problems, and supports arbitrary ordering.
Each combined open ended module contains one or multiple "child" modules.
Child modules track their own state, and can transition between states. They also implement get_html and
handle_ajax.
The combined open ended module transitions between child modules as appropriate, tracks its own state, and passes
ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem)
ajax actions implemented by all children are:
'save_answer' -- Saves the student answer
'save_assessment' -- Saves the student assessment (or external grader assessment)
'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc)
ajax actions implemented by combined open ended module are:
'reset' -- resets the whole combined open ended module and returns to the first child module
'next_problem' -- moves to the next child module
'get_results' -- gets results from a given child module
Types of children. Task is synonymous with child module, so each combined open ended module
incorporates multiple children (tasks):
openendedmodule
selfassessmentmodule
"""
STATE_VERSION = 1
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
INTERMEDIATE_DONE = 'intermediate_done'
DONE = 'done'
js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
]}
js_module_name = "CombinedOpenEnded"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
def __init__(self, system, location, definition, descriptor,
instance_state=None, shared_state=None, **kwargs):
XModule.__init__(self, system, location, definition, descriptor,
instance_state, shared_state, **kwargs)
"""
Definition file should have one or more task blocks, a rubric block, and a prompt block:
Sample file:
<combinedopenended attempts="10000" max_score="1">
<rubric>
Blah blah rubric.
</rubric>
<prompt>
Some prompt.
</prompt>
<task>
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
</task>
<task>
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf",
"problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
"""
# Load instance state
if instance_state is not None:
instance_state = json.loads(instance_state)
else:
instance_state = {}
# We need to set the location here so the child modules can use it
system.set('location', location)
# Tells the system which xml definition to load
self.current_task_number = instance_state.get('current_task_number', 0)
# This loads the states of the individual children
self.task_states = instance_state.get('task_states', [])
# Overall state of the combined open ended module
self.state = instance_state.get('state', self.INITIAL)
self.attempts = instance_state.get('attempts', 0)
# Allow reset is true if the student has failed the criteria to move to the next child task
self.allow_reset = instance_state.get('ready_to_reset', False)
self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS))
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
self._max_score = int(self.metadata.get('max_score', MAX_SCORE))
# Static data is passed to the child modules to render
self.static_data = {
'max_score': self._max_score,
'max_attempts': self.max_attempts,
'prompt': definition['prompt'],
'rubric': definition['rubric']
}
self.task_xml = definition['task_xml']
self.setup_next_task()
def get_tag_name(self, xml):
"""
Gets the tag name of a given xml block.
Input: XML string
Output: The name of the root tag
"""
tag = etree.fromstring(xml).tag
return tag
def overwrite_state(self, current_task_state):
"""
Overwrites an instance state and sets the latest response to the current response. This is used
to ensure that the student response is carried over from the first child to the rest.
Input: Task state json string
Output: Task state json string
"""
last_response_data = self.get_last_response(self.current_task_number - 1)
last_response = last_response_data['response']
loaded_task_state = json.loads(current_task_state)
if loaded_task_state['state'] == self.INITIAL:
loaded_task_state['state'] = self.ASSESSING
loaded_task_state['created'] = True
loaded_task_state['history'].append({'answer': last_response})
current_task_state = json.dumps(loaded_task_state)
return current_task_state
def child_modules(self):
"""
Returns the constructors associated with the child modules in a dictionary. This makes writing functions
simpler (saves code duplication)
Input: None
Output: A dictionary of dictionaries containing the descriptor functions and module functions
"""
child_modules = {
'openended': open_ended_module.OpenEndedModule,
'selfassessment': self_assessment_module.SelfAssessmentModule,
}
child_descriptors = {
'openended': open_ended_module.OpenEndedDescriptor,
'selfassessment': self_assessment_module.SelfAssessmentDescriptor,
}
children = {
'modules': child_modules,
'descriptors': child_descriptors,
}
return children
def setup_next_task(self, reset=False):
"""
Sets up the next task for the module. Creates an instance state if none exists, carries over the answer
from the last instance state to the next if needed.
Input: A boolean indicating whether or not this is being called by the reset function.
Output: Boolean True (not useful right now)
"""
current_task_state = None
if len(self.task_states) > self.current_task_number:
current_task_state = self.task_states[self.current_task_number]
self.current_task_xml = self.task_xml[self.current_task_number]
if self.current_task_number > 0:
self.allow_reset = self.check_allow_reset()
if self.allow_reset:
self.current_task_number = self.current_task_number - 1
current_task_type = self.get_tag_name(self.current_task_xml)
children = self.child_modules()
child_task_module = children['modules'][current_task_type]
self.current_task_descriptor = children['descriptors'][current_task_type](self.system)
# This is the xml object created from the xml definition of the current task
etree_xml = etree.fromstring(self.current_task_xml)
# This sends the etree_xml object through the descriptor module of the current task, and
# returns the xml parsed by the descriptor
self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)
if current_task_state is None and self.current_task_number == 0:
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
elif current_task_state is None and self.current_task_number > 0:
last_response_data = self.get_last_response(self.current_task_number - 1)
last_response = last_response_data['response']
current_task_state = json.dumps({
'state' : self.ASSESSING,
'version' : self.STATE_VERSION,
'max_score' : self._max_score,
'attempts' : 0,
'created' : True,
'history' : [{'answer' : str(last_response)}],
})
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
instance_state=current_task_state)
self.task_states.append(self.current_task.get_instance_state())
self.state = self.ASSESSING
else:
if self.current_task_number > 0 and not reset:
current_task_state = self.overwrite_state(current_task_state)
self.current_task = child_task_module(self.system, self.location,
self.current_task_parsed_xml, self.current_task_descriptor, self.static_data,
instance_state=current_task_state)
log.debug(current_task_state)
return True
def check_allow_reset(self):
"""
Checks to see if the student has passed the criteria to move to the next module. If not, sets
allow_reset to true and halts the student progress through the tasks.
Input: None
Output: the allow_reset attribute of the current module.
"""
if not self.allow_reset:
if self.current_task_number > 0:
last_response_data = self.get_last_response(self.current_task_number - 1)
current_response_data = self.get_current_attributes(self.current_task_number)
if (current_response_data['min_score_to_attempt'] > last_response_data['score']
or current_response_data['max_score_to_attempt'] < last_response_data['score']):
self.state = self.DONE
self.allow_reset = True
return self.allow_reset
def get_context(self):
"""
Generates a context dictionary that is used to render html.
Input: None
Output: A dictionary that can be rendered into the combined open ended template.
"""
task_html = self.get_html_base()
# Set context variables and render the template
context = {
'items': [{'content': task_html}],
'ajax_url': self.system.ajax_url,
'allow_reset': self.allow_reset,
'state': self.state,
'task_count': len(self.task_xml),
'task_number': self.current_task_number + 1,
'status': self.get_status(),
}
return context
def get_html(self):
"""
Gets HTML for rendering.
Input: None
Output: rendered html
"""
context = self.get_context()
html = self.system.render_template('combined_open_ended.html', context)
return html
def get_html_nonsystem(self):
"""
Gets HTML for rendering via AJAX. Does not use the system renderer, because it wraps
the html in additional markup that is not appropriate for returning via ajax calls.
Input: None
Output: HTML rendered directly via Mako
"""
context = self.get_context()
html = render_to_string('combined_open_ended.html', context)
return html
def get_html_base(self):
"""
Gets the HTML associated with the current child task
Input: None
Output: Child task HTML
"""
self.update_task_states()
html = self.current_task.get_html(self.system)
return_html = rewrite_links(html, self.rewrite_content_links)
return return_html
def get_current_attributes(self, task_number):
"""
Gets the min and max score to attempt attributes of the specified task.
Input: The number of the task.
Output: The minimum and maximum scores needed to move on to the specified task.
"""
task_xml = self.task_xml[task_number]
etree_xml = etree.fromstring(task_xml)
min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))
max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score))
return {'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt}
def get_last_response(self, task_number):
"""
Returns data associated with the specified task number, such as the last response, score, etc.
Input: The number of the task.
Output: A dictionary that contains information about the specified task.
"""
last_response = ""
task_state = self.task_states[task_number]
task_xml = self.task_xml[task_number]
task_type = self.get_tag_name(task_xml)
children = self.child_modules()
task_descriptor = children['descriptors'][task_type](self.system)
etree_xml = etree.fromstring(task_xml)
min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))
max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score))
task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system)
task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor,
self.static_data, instance_state=task_state)
last_response = task.latest_answer()
last_score = task.latest_score()
last_post_assessment = task.latest_post_assessment()
last_post_feedback = ""
if task_type == "openended":
last_post_assessment = task.latest_post_assessment(short_feedback=False, join_feedback=False)
if isinstance(last_post_assessment, list):
eval_list = [task.format_feedback_with_evaluation(assessment)
for assessment in last_post_assessment]
last_post_evaluation = "".join(eval_list)
else:
last_post_evaluation = task.format_feedback_with_evaluation(last_post_assessment)
last_post_assessment = last_post_evaluation
last_correctness = task.is_last_response_correct()
max_score = task.max_score()
state = task.state
last_response_dict = {
'response': last_response,
'score': last_score,
'post_assessment': last_post_assessment,
'type': task_type,
'max_score': max_score,
'state': state,
'human_state': task.HUMAN_NAMES[state],
'correct': last_correctness,
'min_score_to_attempt': min_score_to_attempt,
'max_score_to_attempt': max_score_to_attempt,
}
return last_response_dict
def update_task_states(self):
"""
Updates the task state of the combined open ended module with the task state of the current child module.
Input: None
Output: boolean indicating whether or not the task state changed.
"""
changed = False
if not self.allow_reset:
self.task_states[self.current_task_number] = self.current_task.get_instance_state()
current_task_state = json.loads(self.task_states[self.current_task_number])
if current_task_state['state'] == self.DONE:
self.current_task_number += 1
if self.current_task_number >= (len(self.task_xml)):
self.state = self.DONE
self.current_task_number = len(self.task_xml) - 1
else:
self.state = self.INITIAL
changed = True
self.setup_next_task()
return changed
def update_task_states_ajax(self, return_html):
"""
Runs the update task states function for ajax calls. Currently the same as update_task_states
Input: The html returned by the handle_ajax function of the child
Output: New html that should be rendered
"""
changed = self.update_task_states()
if changed:
# Could re-render here via self.get_html(); for now the child html is returned unchanged
pass
return return_html
def get_results(self, get):
"""
Gets the results of a given grader via ajax.
Input: AJAX get dictionary
Output: Dictionary to be rendered via ajax that contains the result html.
"""
task_number = int(get['task_number'])
self.update_task_states()
response_dict = self.get_last_response(task_number)
context = {'results': response_dict['post_assessment'], 'task_number': task_number + 1}
html = render_to_string('combined_open_ended_results.html', context)
return {'html': html, 'success': True}
def handle_ajax(self, dispatch, get):
"""
This is called by courseware.module_render, to handle an AJAX call.
"get" is request.POST.
Returns a json dictionary:
{ 'progress_changed' : True/False,
'progress': 'none'/'in_progress'/'done',
<other request-specific values here > }
"""
handlers = {
'next_problem': self.next_problem,
'reset': self.reset,
'get_results': self.get_results
}
if dispatch not in handlers:
return_html = self.current_task.handle_ajax(dispatch, get, self.system)
return self.update_task_states_ajax(return_html)
d = handlers[dispatch](get)
return json.dumps(d, cls=ComplexEncoder)
def next_problem(self, get):
"""
Called via ajax to advance to the next problem.
Input: AJAX get request.
Output: Dictionary to be rendered
"""
self.update_task_states()
return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.allow_reset}
def reset(self, get):
"""
If resetting is allowed, reset the state of the combined open ended module.
Input: AJAX get dictionary
Output: AJAX dictionary to be rendered
"""
if self.state != self.DONE:
if not self.allow_reset:
return self.out_of_sync_error(get)
if self.attempts > self.max_attempts:
return {
'success': False,
'error': 'Too many attempts.'
}
self.state = self.INITIAL
self.allow_reset = False
for i in xrange(0, len(self.task_xml)):
self.current_task_number = i
self.setup_next_task(reset=True)
self.current_task.reset(self.system)
self.task_states[self.current_task_number] = self.current_task.get_instance_state()
self.current_task_number = 0
self.allow_reset = False
self.setup_next_task()
return {'success': True, 'html': self.get_html_nonsystem()}
def get_instance_state(self):
"""
Returns the current instance state. The module can be recreated from the instance state.
Input: None
Output: A dictionary containing the instance state.
"""
state = {
'version': self.STATE_VERSION,
'current_task_number': self.current_task_number,
'state': self.state,
'task_states': self.task_states,
'attempts': self.attempts,
'ready_to_reset': self.allow_reset,
}
return json.dumps(state)
def get_status(self):
"""
Gets the status panel to be displayed at the top right.
Input: None
Output: The status html to be rendered
"""
status = []
for i in xrange(0, self.current_task_number + 1):
task_data = self.get_last_response(i)
task_data.update({'task_number': i + 1})
status.append(task_data)
context = {'status_list': status}
status_html = self.system.render_template("combined_open_ended_status.html", context)
return status_html
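# Illustrative sketch (not part of this commit): the shape of the serialized
# state returned by get_instance_state() above, from which the module can be
# re-created. All values here are hypothetical.
example_instance_state = json.dumps({
'version': 1,  # STATE_VERSION
'current_task_number': 0,
'state': 'assessing',
'task_states': [],  # one serialized state string per child task
'attempts': 0,
'ready_to_reset': False,
})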
class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
"""
Module for adding combined open ended questions
"""
mako_template = "widgets/html-edit.html"
module_class = CombinedOpenEndedModule
filename_extension = "xml"
stores_state = True
has_score = True
template_dir_name = "combinedopenended"
js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]}
js_module_name = "HTMLEditingDescriptor"
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Pull out the individual tasks, the rubric, and the prompt, and parse
Returns:
{
'rubric': the lxml <rubric> element,
'prompt': the lxml <prompt> element,
'task_xml': list of xml strings,
}
"""
expected_children = ['task', 'rubric', 'prompt']
for child in expected_children:
if len(xml_object.xpath(child)) == 0:
raise ValueError("Combined Open Ended definition must include at least one '{0}' tag".format(child))
def parse_task(k):
"""Assumes that xml_object has child k"""
return [stringify_children(task_element) for task_element in xml_object.xpath(k)]
def parse(k):
"""Assumes that xml_object has child k"""
return xml_object.xpath(k)[0]
return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')}
def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.'''
elt = etree.Element('combinedopenended')
def add_child(k):
child_str = '<{tag}>{body}</{tag}>'.format(tag=k, body=self.definition[k])
child_node = etree.fromstring(child_str)
elt.append(child_node)
for child in ['task']:
add_child(child)
return elt
\ No newline at end of file
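# Illustrative sketch (not part of this commit): calling the parser above on
# the sample problem XML from CombinedOpenEndedModule's __init__ docstring.
# 'system' stands in for whatever ModuleSystem the caller already has.
#
# definition = CombinedOpenEndedDescriptor.definition_from_xml(
#     etree.fromstring(sample_xml), system)
# definition['task_xml']  # list of strings, one per <task> body
# definition['prompt']    # the lxml <prompt> element
# definition['rubric']    # the lxml <rubric> element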
from mitxmako.shortcuts import render_to_string
import logging
from lxml import etree
log = logging.getLogger(__name__)
class CombinedOpenEndedRubric:
@staticmethod
def render_rubric(rubric_xml):
try:
rubric_categories = CombinedOpenEndedRubric.extract_rubric_categories(rubric_xml)
html = render_to_string('open_ended_rubric.html', {'rubric_categories' : rubric_categories})
except Exception:
log.exception("Could not parse the rubric.")
html = rubric_xml
return html
@staticmethod
def extract_rubric_categories(element):
'''
Construct a list of categories such that the structure looks like:
[ { category: "Category 1 Name",
options: [{text: "Option 1 Name", points: 0}, {text:"Option 2 Name", points: 5}]
},
{ category: "Category 2 Name",
options: [{text: "Option 1 Name", points: 0},
{text: "Option 2 Name", points: 1},
{text: "Option 3 Name", points: 2]}]
'''
element = etree.fromstring(element)
categories = []
for category in element:
if category.tag != 'category':
raise Exception("[capa.inputtypes.extract_categories] Expected a <category> tag: got {0} instead".format(category.tag))
else:
categories.append(CombinedOpenEndedRubric.extract_category(category))
return categories
@staticmethod
def extract_category(category):
'''
construct an individual category
{category: "Category 1 Name",
options: [{text: "Option 1 text", points: 1},
{text: "Option 2 text", points: 2}]}
all sorting and auto-point generation occurs in this function
'''
has_score = False
descriptionxml = category[0]
scorexml = category[1]
if scorexml.tag == "option":
optionsxml = category[1:]
else:
optionsxml = category[2:]
has_score = True
# parse description
if descriptionxml.tag != 'description':
raise Exception("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag))
if has_score:
if scorexml.tag != 'score':
raise Exception("[extract_category]: expected score tag, got {0} instead".format(scorexml.tag))
for option in optionsxml:
if option.tag != "option":
raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag))
description = descriptionxml.text
if has_score:
score = int(scorexml.text)
else:
score = 0
cur_points = 0
options = []
autonumbering = True
# parse options
for option in optionsxml:
if option.tag != 'option':
raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag))
else:
pointstr = option.get("points")
if pointstr:
autonumbering = False
# try to parse this into an int
try:
points = int(pointstr)
except ValueError:
raise Exception("[extract_category]: expected points to have int, got {0} instead".format(pointstr))
elif autonumbering:
# use the generated one if we're in the right mode
points = cur_points
cur_points = cur_points + 1
else:
raise Exception("[extract_category]: missing points attribute. Cannot continue to auto-create points values after a points value is explicitly dfined.")
optiontext = option.text
selected = False
if has_score:
if points == score:
selected = True
options.append({'text': option.text, 'points': points, 'selected' : selected})
# sort and check for duplicates
options = sorted(options, key=lambda option: option['points'])
CombinedOpenEndedRubric.validate_options(options)
return {'description': description, 'options': options, 'score' : score, 'has_score' : has_score}
@staticmethod
def validate_options(options):
'''
Validates a set of options. This can and should be extended to filter out other bad edge cases
'''
if len(options) == 0:
raise Exception("[extract_category]: no options associated with this category")
if len(options) == 1:
return
prev = options[0]['points']
for option in options[1:]:
if prev == option['points']:
raise Exception("[extract_category]: found duplicate point values between two different options")
else:
prev = option['points']
\ No newline at end of file
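# Illustrative sketch (not part of this commit): a rubric that
# CombinedOpenEndedRubric.extract_rubric_categories above would parse. With no
# points attributes, option points are auto-numbered from zero.
example_rubric = """
<rubric>
<category>
<description>Grammar</description>
<option>Poor</option>
<option>Good</option>
</category>
</rubric>
"""
# extract_rubric_categories(example_rubric) would return:
# [{'description': 'Grammar',
#   'options': [{'text': 'Poor', 'points': 0, 'selected': False},
#               {'text': 'Good', 'points': 1, 'selected': False}],
#   'score': 0, 'has_score': False}]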
h2 {
margin-top: 0;
margin-bottom: 15px;
&.problem-header {
section.staff {
margin-top: 30px;
font-size: 80%;
}
}
@media print {
display: block;
width: auto;
border-right: 0;
}
}
.inline-error {
color: darken($error-red, 10%);
}
section.combined-open-ended {
@include clearfix;
.status-container {
float: right;
width: 40%;
}
.item-container {
float: left;
width: 53%;
padding-bottom: 50px;
}
.result-container {
float: left;
width: 93%;
position: relative;
}
}
section.combined-open-ended-status {
.statusitem {
background-color: #FAFAFA;
color: #2C2C2C;
font-family: monospace;
font-size: 1em;
padding-top: 10px;
}
.statusitem-current {
background-color: #BEBEBE;
color: #2C2C2C;
font-family: monospace;
font-size: 1em;
padding-top: 10px;
}
span {
&.unanswered {
@include inline-block();
background: url('../images/unanswered-icon.png') center center no-repeat;
height: 14px;
position: relative;
width: 14px;
float: right;
}
&.correct {
@include inline-block();
background: url('../images/correct-icon.png') center center no-repeat;
height: 20px;
position: relative;
width: 25px;
float: right;
}
&.incorrect {
@include inline-block();
background: url('../images/incorrect-icon.png') center center no-repeat;
height: 20px;
width: 20px;
position: relative;
float: right;
}
}
}
div.result-container {
.evaluation {
p {
margin-bottom: 1px;
}
}
.feedback-on-feedback {
height: 100px;
margin-right: 0px;
}
.evaluation-response {
header {
text-align: right;
a {
font-size: .85em;
}
}
}
.evaluation-scoring {
.scoring-list {
list-style-type: none;
margin-left: 3px;
li {
&:first-child {
margin-left: 0px;
}
display: inline;
margin-left: 0px;
label {
font-size: .9em;
}
}
}
}
.submit-message-container {
margin: 10px 0;
}
.external-grader-message {
section {
padding-left: 20px;
background-color: #FAFAFA;
color: #2C2C2C;
font-family: monospace;
font-size: 1em;
padding-top: 10px;
header {
font-size: 1.4em;
}
.shortform {
font-weight: bold;
}
.longform {
padding: 0px;
margin: 0px;
.result-errors {
margin: 5px;
padding: 10px 10px 10px 40px;
background: url('../images/incorrect-icon.png') center left no-repeat;
li {
color: #B00;
}
}
.result-output {
margin: 5px;
padding: 20px 0px 15px 50px;
border-top: 1px solid #DDD;
border-left: 20px solid #FAFAFA;
h4 {
font-family: monospace;
font-size: 1em;
}
dl {
margin: 0px;
}
dt {
margin-top: 20px;
}
dd {
margin-left: 24pt;
}
}
.result-correct {
background: url('../images/correct-icon.png') left 20px no-repeat;
.result-actual-output {
color: #090;
}
}
.result-incorrect {
background: url('../images/incorrect-icon.png') left 20px no-repeat;
.result-actual-output {
color: #B00;
}
}
.markup-text {
margin: 5px;
padding: 20px 0px 15px 50px;
border-top: 1px solid #DDD;
border-left: 20px solid #FAFAFA;
bs {
color: #BB0000;
}
bg {
color: #BDA046;
}
}
}
}
}
}
div.result-container, section.open-ended-child {
.rubric {
tr {
margin: 10px 0;
height: 100%;
}
td {
padding: 20px 0;
margin: 10px 0;
height: 100%;
}
th {
padding: 5px;
margin: 5px;
}
label,
.view-only {
margin: 10px;
position: relative;
padding: 15px;
width: 200px;
height: 100%;
display: inline-block;
min-height: 50px;
min-width: 50px;
background-color: #CCC;
font-size: 1em;
}
.grade {
position: absolute;
bottom: 0;
right: 0;
margin: 10px;
}
.selected-grade {
background: #666;
color: white;
}
input[type=radio]:checked + label {
background: #666;
color: white;
}
input[class='score-selection'] {
display: none;
}
}
}
section.open-ended-child {
@media print {
display: block;
width: auto;
padding: 0;
canvas, img {
page-break-inside: avoid;
}
}
.inline {
display: inline;
}
ol.enumerate {
li {
&:before {
content: " ";
display: block;
height: 0;
visibility: hidden;
}
}
}
.solution-span {
> span {
margin: 20px 0;
display: block;
border: 1px solid #ddd;
padding: 9px 15px 20px;
background: #FFF;
position: relative;
@include box-shadow(inset 0 0 0 1px #eee);
@include border-radius(3px);
&:empty {
display: none;
}
}
}
p {
&.answer {
margin-top: -2px;
}
&.status {
text-indent: -9999px;
margin: 8px 0 0 10px;
}
}
div.unanswered {
p.status {
@include inline-block();
background: url('../images/unanswered-icon.png') center center no-repeat;
height: 14px;
width: 14px;
}
}
div.correct, div.ui-icon-check {
p.status {
@include inline-block();
background: url('../images/correct-icon.png') center center no-repeat;
height: 20px;
width: 25px;
}
input {
border-color: green;
}
}
div.processing {
p.status {
@include inline-block();
background: url('../images/spinner.gif') center center no-repeat;
height: 20px;
width: 20px;
}
input {
border-color: #aaa;
}
}
div.incorrect, div.ui-icon-close {
p.status {
@include inline-block();
background: url('../images/incorrect-icon.png') center center no-repeat;
height: 20px;
width: 20px;
text-indent: -9999px;
}
input {
border-color: red;
}
}
> span {
display: block;
margin-bottom: lh(.5);
}
p.answer {
@include inline-block();
margin-bottom: 0;
margin-left: 10px;
&:before {
content: "Answer: ";
font-weight: bold;
display: inline;
}
&:empty {
&:before {
display: none;
}
}
}
span {
&.unanswered, &.ui-icon-bullet {
@include inline-block();
background: url('../images/unanswered-icon.png') center center no-repeat;
height: 14px;
position: relative;
top: 4px;
width: 14px;
}
&.processing, &.ui-icon-processing {
@include inline-block();
background: url('../images/spinner.gif') center center no-repeat;
height: 20px;
position: relative;
top: 6px;
width: 25px;
}
&.correct, &.ui-icon-check {
@include inline-block();
background: url('../images/correct-icon.png') center center no-repeat;
height: 20px;
position: relative;
top: 6px;
width: 25px;
}
&.incorrect, &.ui-icon-close {
@include inline-block();
background: url('../images/incorrect-icon.png') center center no-repeat;
height: 20px;
width: 20px;
position: relative;
top: 6px;
}
}
.reload {
float: right;
margin: 10px;
}
.grader-status {
padding: 9px;
background: #F6F6F6;
border: 1px solid #ddd;
border-top: 0;
margin-bottom: 20px;
@include clearfix;
span {
text-indent: -9999px;
overflow: hidden;
display: block;
float: left;
margin: -7px 7px 0 0;
}
.grading {
background: url('../images/info-icon.png') left center no-repeat;
padding-left: 25px;
text-indent: 0px;
margin: 0px 7px 0 0;
}
p {
line-height: 20px;
text-transform: capitalize;
margin-bottom: 0;
float: left;
}
&.file {
background: #FFF;
margin-top: 20px;
padding: 20px 0 0 0;
border: {
top: 1px solid #eee;
right: 0;
bottom: 0;
left: 0;
}
p.debug {
display: none;
}
input {
float: left;
}
}
}
form.option-input {
margin: -10px 0 20px;
padding-bottom: 20px;
select {
margin-right: flex-gutter();
}
}
ul {
list-style: disc outside none;
margin-bottom: lh();
margin-left: .75em;
margin-left: .75rem;
}
ol {
list-style: decimal outside none;
margin-bottom: lh();
margin-left: .75em;
margin-left: .75rem;
}
dl {
line-height: 1.4em;
}
dl dt {
font-weight: bold;
}
dl dd {
margin-bottom: 0;
}
dd {
margin-left: .5em;
margin-left: .5rem;
}
li {
line-height: 1.4em;
margin-bottom: lh(.5);
&:last-child {
margin-bottom: 0;
}
}
p {
margin-bottom: lh();
}
hr {
background: #ddd;
border: none;
clear: both;
color: #ddd;
float: none;
height: 1px;
margin: 0 0 .75rem;
width: 100%;
}
.hidden {
display: none;
visibility: hidden;
}
#{$all-text-inputs} {
display: inline;
width: auto;
}
section.action {
margin-top: 20px;
input.save {
@extend .blue-button;
}
.submission_feedback {
// background: #F3F3F3;
// border: 1px solid #ddd;
// @include border-radius(3px);
// padding: 8px 12px;
// margin-top: 10px;
@include inline-block;
font-style: italic;
margin: 8px 0 0 10px;
color: #777;
-webkit-font-smoothing: antialiased;
}
}
.detailed-solution {
> p:first-child {
font-size: 0.9em;
font-weight: bold;
font-style: normal;
text-transform: uppercase;
color: #AAA;
}
p:last-child {
margin-bottom: 0;
}
}
div.open-ended-alert {
padding: 8px 12px;
border: 1px solid #EBE8BF;
border-radius: 3px;
background: #FFFCDD;
font-size: 0.9em;
margin-top: 10px;
}
div.capa_reset {
padding: 25px;
border: 1px solid $error-red;
background-color: lighten($error-red, 25%);
border-radius: 3px;
font-size: 1em;
margin-top: 10px;
margin-bottom: 10px;
}
.capa_reset>h2 {
color: #AA0000;
}
.capa_reset li {
font-size: 0.9em;
}
}
......@@ -25,7 +25,6 @@ class @Problem
@$('section.action input.reset').click @reset
@$('section.action input.show').click @show
@$('section.action input.save').click @save
@$('section.evaluation input.submit-message').click @message_post
# Collapsibles
Collapsible.setCollapsibles(@el)
......@@ -198,35 +197,6 @@ class @Problem
else
@gentle_alert response.success
message_post: =>
Logger.log 'message_post', @answers
fd = new FormData()
feedback = @$('section.evaluation textarea.feedback-on-feedback')[0].value
submission_id = $('div.external-grader-message div.submission_id')[0].innerHTML
grader_id = $('div.external-grader-message div.grader_id')[0].innerHTML
score = $(".evaluation-scoring input:radio[name='evaluation-score']:checked").val()
fd.append('feedback', feedback)
fd.append('submission_id', submission_id)
fd.append('grader_id', grader_id)
if(!score)
@gentle_alert "You need to pick a rating before you can submit."
return
else
fd.append('score', score)
settings =
type: "POST"
data: fd
processData: false
contentType: false
success: (response) =>
@gentle_alert response.message
@$('section.evaluation').slideToggle()
$.ajaxWithPrefix("#{@url}/message_post", settings)
reset: =>
Logger.log 'problem_reset', @answers
$.postWithPrefix "#{@url}/problem_reset", id: @id, (response) =>
......
......@@ -22,7 +22,7 @@ class @Collapsible
if $(event.target).text() == 'See full output'
new_text = 'Hide output'
else
new_text = 'See full ouput'
new_text = 'See full output'
$(event.target).text(new_text)
@toggleHint: (event) =>
......
class @CombinedOpenEnded
constructor: (element) ->
@element = element
@reinitialize(element)
reinitialize: (element) ->
@wrapper = $(element).find('section.xmodule_CombinedOpenEndedModule')
@el = $(element).find('section.combined-open-ended')
@combined_open_ended = $(element).find('section.combined-open-ended')
@id = @el.data('id')
@ajax_url = @el.data('ajax-url')
@state = @el.data('state')
@task_count = @el.data('task-count')
@task_number = @el.data('task-number')
@allow_reset = @el.data('allow_reset')
@reset_button = @$('.reset-button')
@reset_button.click @reset
@next_problem_button = @$('.next-step-button')
@next_problem_button.click @next_problem
@show_results_button = @$('.show-results-button')
@show_results_button.click @show_results
# valid states: 'initial', 'assessing', 'post_assessment', 'done'
Collapsible.setCollapsibles(@el)
@submit_evaluation_button = $('.submit-evaluation-button')
@submit_evaluation_button.click @message_post
@results_container = $('.result-container')
# Where to put the rubric once we load it
@el = $(element).find('section.open-ended-child')
@errors_area = @$('.error')
@answer_area = @$('textarea.answer')
@rubric_wrapper = @$('.rubric-wrapper')
@hint_wrapper = @$('.hint-wrapper')
@message_wrapper = @$('.message-wrapper')
@submit_button = @$('.submit-button')
@child_state = @el.data('state')
@child_type = @el.data('child-type')
if @child_type=="openended"
@skip_button = @$('.skip-button')
@skip_button.click @skip_post_assessment
@open_ended_child = @$('.open-ended-child')
@find_assessment_elements()
@find_hint_elements()
@rebind()
# locally scoped jquery.
$: (selector) ->
$(selector, @el)
show_results: (event) =>
status_item = $(event.target).parent().parent()
status_number = status_item.data('status-number')
data = {'task_number' : status_number}
$.postWithPrefix "#{@ajax_url}/get_results", data, (response) =>
if response.success
@results_container.after(response.html).remove()
@results_container = $('div.result-container')
@submit_evaluation_button = $('.submit-evaluation-button')
@submit_evaluation_button.click @message_post
Collapsible.setCollapsibles(@results_container)
else
@errors_area.html(response.error)
message_post: (event) =>
Logger.log 'message_post', @answers
external_grader_message = $(event.target).parent().parent().parent()
evaluation_scoring = $(event.target).parent()
fd = new FormData()
feedback = evaluation_scoring.find('textarea.feedback-on-feedback')[0].value
submission_id = external_grader_message.find('input.submission_id')[0].value
grader_id = external_grader_message.find('input.grader_id')[0].value
score = evaluation_scoring.find("input:radio[name='evaluation-score']:checked").val()
fd.append('feedback', feedback)
fd.append('submission_id', submission_id)
fd.append('grader_id', grader_id)
if !score
@gentle_alert "You need to pick a rating before you can submit."
return
else
fd.append('score', score)
settings =
type: "POST"
data: fd
processData: false
contentType: false
success: (response) =>
@gentle_alert response.msg
$('section.evaluation').slideToggle()
@message_wrapper.html(response.message_html)
$.ajaxWithPrefix("#{@ajax_url}/save_post_assessment", settings)
rebind: () =>
# rebind to the appropriate function for the current state
@submit_button.unbind('click')
@submit_button.show()
@reset_button.hide()
@next_problem_button.hide()
@hint_area.attr('disabled', false)
if @child_type == "openended"
@skip_button.hide()
if @allow_reset == "True"
@reset_button.show()
@submit_button.hide()
@answer_area.attr("disabled", true)
@hint_area.attr('disabled', true)
else if @child_state == 'initial'
@answer_area.attr("disabled", false)
@submit_button.prop('value', 'Submit')
@submit_button.click @save_answer
else if @child_state == 'assessing'
@answer_area.attr("disabled", true)
@submit_button.prop('value', 'Submit assessment')
@submit_button.click @save_assessment
if @child_type == "openended"
@submit_button.hide()
@queueing()
else if @child_state == 'post_assessment'
if @child_type == "openended"
@skip_button.show()
@skip_post_assessment()
@answer_area.attr("disabled", true)
@submit_button.prop('value', 'Submit post-assessment')
if @child_type == "selfassessment"
@submit_button.click @save_hint
else
@submit_button.click @message_post
else if @child_state == 'done'
@answer_area.attr("disabled", true)
@hint_area.attr('disabled', true)
@submit_button.hide()
if @child_type == "openended"
@skip_button.hide()
if @task_number < @task_count
@next_problem()
else
@reset_button.show()
find_assessment_elements: ->
@assessment = @$('select.assessment')
find_hint_elements: ->
@hint_area = @$('textarea.post_assessment')
save_answer: (event) =>
event.preventDefault()
if @child_state == 'initial'
data = {'student_answer' : @answer_area.val()}
$.postWithPrefix "#{@ajax_url}/save_answer", data, (response) =>
if response.success
@rubric_wrapper.html(response.rubric_html)
@child_state = 'assessing'
@find_assessment_elements()
@rebind()
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
save_assessment: (event) =>
event.preventDefault()
if @child_state == 'assessing'
data = {'assessment' : @assessment.find(':selected').text()}
$.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) =>
if response.success
@child_state = response.state
if @child_state == 'post_assessment'
@hint_wrapper.html(response.hint_html)
@find_hint_elements()
else if @child_state == 'done'
@message_wrapper.html(response.message_html)
@rebind()
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
save_hint: (event) =>
event.preventDefault()
if @child_state == 'post_assessment'
data = {'hint' : @hint_area.val()}
$.postWithPrefix "#{@ajax_url}/save_post_assessment", data, (response) =>
if response.success
@message_wrapper.html(response.message_html)
@child_state = 'done'
@rebind()
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
skip_post_assessment: =>
if @child_state == 'post_assessment'
$.postWithPrefix "#{@ajax_url}/skip_post_assessment", {}, (response) =>
if response.success
@child_state = 'done'
@rebind()
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
reset: (event) =>
event.preventDefault()
if @child_state == 'done' or @allow_reset == "True"
$.postWithPrefix "#{@ajax_url}/reset", {}, (response) =>
if response.success
@answer_area.val('')
@rubric_wrapper.html('')
@hint_wrapper.html('')
@message_wrapper.html('')
@child_state = 'initial'
@combined_open_ended.after(response.html).remove()
@allow_reset = "False"
@reinitialize(@element)
@rebind()
@reset_button.hide()
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
next_problem: =>
if @child_state == 'done'
$.postWithPrefix "#{@ajax_url}/next_problem", {}, (response) =>
if response.success
@answer_area.val('')
@rubric_wrapper.html('')
@hint_wrapper.html('')
@message_wrapper.html('')
@child_state = 'initial'
@combined_open_ended.after(response.html).remove()
@reinitialize(@element)
@rebind()
@next_problem_button.hide()
if !response.allow_reset
@gentle_alert "Moved to next step."
else
@gentle_alert "Your score did not meet the criteria to move to the next step."
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
gentle_alert: (msg) =>
if @el.find('.open-ended-alert').length
@el.find('.open-ended-alert').remove()
alert_elem = "<div class='open-ended-alert'>" + msg + "</div>"
@el.find('.open-ended-action').after(alert_elem)
@el.find('.open-ended-alert').css(opacity: 0).animate(opacity: 1, 700)
queueing: =>
if @child_state == "assessing" and @child_type == "openended"
if window.queuePollerID # Only one poller 'thread' per Problem
window.clearTimeout(window.queuePollerID)
window.queuePollerID = window.setTimeout(@poll, 10000)
poll: =>
$.postWithPrefix "#{@ajax_url}/check_for_score", (response) =>
if response.state == "done" or response.state == "post_assessment"
delete window.queuePollerID
location.reload()
else
window.queuePollerID = window.setTimeout(@poll, 10000)
\ No newline at end of file
class @SelfAssessment
constructor: (element) ->
@el = $(element).find('section.self-assessment')
@id = @el.data('id')
@ajax_url = @el.data('ajax-url')
@state = @el.data('state')
@allow_reset = @el.data('allow_reset')
# valid states: 'initial', 'assessing', 'request_hint', 'done'
# Where to put the rubric once we load it
@errors_area = @$('.error')
@answer_area = @$('textarea.answer')
@rubric_wrapper = @$('.rubric-wrapper')
@hint_wrapper = @$('.hint-wrapper')
@message_wrapper = @$('.message-wrapper')
@submit_button = @$('.submit-button')
@reset_button = @$('.reset-button')
@reset_button.click @reset
@find_assessment_elements()
@find_hint_elements()
@rebind()
# locally scoped jquery.
$: (selector) ->
$(selector, @el)
rebind: () =>
# rebind to the appropriate function for the current state
@submit_button.unbind('click')
@submit_button.show()
@reset_button.hide()
@hint_area.attr('disabled', false)
if @state == 'initial'
@answer_area.attr("disabled", false)
@submit_button.prop('value', 'Submit')
@submit_button.click @save_answer
else if @state == 'assessing'
@answer_area.attr("disabled", true)
@submit_button.prop('value', 'Submit assessment')
@submit_button.click @save_assessment
else if @state == 'request_hint'
@answer_area.attr("disabled", true)
@submit_button.prop('value', 'Submit hint')
@submit_button.click @save_hint
else if @state == 'done'
@answer_area.attr("disabled", true)
@hint_area.attr('disabled', true)
@submit_button.hide()
if @allow_reset
@reset_button.show()
else
@reset_button.hide()
find_assessment_elements: ->
@assessment = @$('select.assessment')
find_hint_elements: ->
@hint_area = @$('textarea.hint')
save_answer: (event) =>
event.preventDefault()
if @state == 'initial'
data = {'student_answer' : @answer_area.val()}
$.postWithPrefix "#{@ajax_url}/save_answer", data, (response) =>
if response.success
@rubric_wrapper.html(response.rubric_html)
@state = 'assessing'
@find_assessment_elements()
@rebind()
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
save_assessment: (event) =>
event.preventDefault()
if @state == 'assessing'
data = {'assessment' : @assessment.find(':selected').text()}
$.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) =>
if response.success
@state = response.state
if @state == 'request_hint'
@hint_wrapper.html(response.hint_html)
@find_hint_elements()
else if @state == 'done'
@message_wrapper.html(response.message_html)
@allow_reset = response.allow_reset
@rebind()
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
save_hint: (event) =>
event.preventDefault()
if @state == 'request_hint'
data = {'hint' : @hint_area.val()}
$.postWithPrefix "#{@ajax_url}/save_hint", data, (response) =>
if response.success
@message_wrapper.html(response.message_html)
@state = 'done'
@allow_reset = response.allow_reset
@rebind()
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
reset: (event) =>
event.preventDefault()
if @state == 'done'
$.postWithPrefix "#{@ajax_url}/reset", {}, (response) =>
if response.success
@answer_area.val('')
@rubric_wrapper.html('')
@hint_wrapper.html('')
@message_wrapper.html('')
@state = 'initial'
@rebind()
@reset_button.hide()
else
@errors_area.html(response.error)
else
@errors_area.html('Problem state got out of sync. Try reloading the page.')
"""
A Self Assessment module that allows students to write open-ended responses,
submit, then see a rubric and rate themselves. Persists student supplied
hints, answers, and assessment judgment (currently only correct/incorrect).
Parses xml definition file--see below for exact format.
"""
import copy
from fs.errors import ResourceNotFoundError
import itertools
import json
import logging
from lxml import etree
from lxml.html import rewrite_links
from path import path
import os
import sys
import hashlib
import capa.xqueue_interface as xqueue_interface
from pkg_resources import resource_string
from .capa_module import only_one, ComplexEncoder
from .editing_module import EditingDescriptor
from .html_checker import check_html
from progress import Progress
from .stringify import stringify_children
from .xml_module import XmlDescriptor
from xmodule.modulestore import Location
from capa.util import *
import openendedchild
from mitxmako.shortcuts import render_to_string
from numpy import median
from datetime import datetime
from combined_open_ended_rubric import CombinedOpenEndedRubric
log = logging.getLogger("mitx.courseware")
class OpenEndedModule(openendedchild.OpenEndedChild):
"""
The open ended module supports all external open ended grader problems.
Sample XML file:
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
"""
def setup_response(self, system, location, definition, descriptor):
"""
Sets up the response type.
@param system: Modulesystem object
@param location: The location of the problem
@param definition: The xml definition of the problem
@param descriptor: The OpenEndedDescriptor associated with this module
@return: None
"""
oeparam = definition['oeparam']
self.url = definition.get('url', None)
self.queue_name = definition.get('queuename', self.DEFAULT_QUEUE)
self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE)
#This is needed to attach feedback to specific responses later
self.submission_id = None
self.grader_id = None
if oeparam is None:
raise ValueError("No oeparam found in problem xml.")
if self.prompt is None:
raise ValueError("No prompt found in problem xml.")
if self.rubric is None:
raise ValueError("No rubric found in problem xml.")
self._parse(oeparam, self.prompt, self.rubric, system)
if self.created and self.state == self.ASSESSING:
self.send_to_grader(self.latest_answer(), system)
self.created = False
def _parse(self, oeparam, prompt, rubric, system):
'''
Parse OpenEndedResponse XML:
self.initial_display
self.payload - dict containing keys --
'grader' : path to grader settings file, 'problem_id' : id of the problem
self.answer - What to display when show answer is clicked
'''
# Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
prompt_string = stringify_children(prompt)
rubric_string = stringify_children(rubric)
self.prompt = prompt_string
self.rubric = rubric_string
grader_payload = oeparam.find('grader_payload')
grader_payload = grader_payload.text if grader_payload is not None else ''
# Update grader payload with problem context. If the grader payload is not valid json, log an error.
try:
parsed_grader_payload = json.loads(grader_payload)
# NOTE: self.system.location is valid because the capa_module
# __init__ adds it (easiest way to get problem location into
# response types)
except (TypeError, ValueError):
log.exception("Grader payload %r is not a json object!", grader_payload)
# Fall back to an empty payload so the update below still succeeds
parsed_grader_payload = {}
self.initial_display = find_with_default(oeparam, 'initial_display', '')
self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
parsed_grader_payload.update({
'location': system.location.url(),
'course_id': system.course_id,
'prompt': prompt_string,
'rubric': rubric_string,
'initial_display': self.initial_display,
'answer': self.answer,
})
updated_grader_payload = json.dumps(parsed_grader_payload)
self.payload = {'grader_payload': updated_grader_payload}
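# Illustrative sketch (standalone, not part of the module): how a grader_payload
# string from the problem XML ends up augmented with problem context by _parse().
# All values below are hypothetical.
import json
raw = '{"grader_settings": "ml_grading.conf", "problem_id": "6.002x/Welcome/OETest"}'
parsed = json.loads(raw)
parsed.update({
'location': 'i4x://MITx/6.002x/openended/OETest',  # hypothetical location url
'course_id': 'MITx/6.002x/2012_Fall',  # hypothetical course id
'prompt': '<p>Write an essay about circuits.</p>',
'rubric': '<rubric>Grading rubric html</rubric>',
'initial_display': 'Enter essay here.',
'answer': 'This is the answer.',
})
payload = {'grader_payload': json.dumps(parsed)}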
def skip_post_assessment(self, get, system):
"""
Ajax function that allows one to skip the post assessment phase
@param get: AJAX dictionary
@param system: ModuleSystem
@return: Success indicator
"""
self.state = self.DONE
return {'success': True}
def message_post(self, get, system):
"""
Handles a student message post (a reaction to the grade they received from an open ended grader type)
Returns a boolean success/fail and an error message
"""
event_info = dict()
event_info['problem_id'] = system.location.url()
event_info['student_id'] = system.anonymous_student_id
event_info['survey_responses'] = get
survey_responses = event_info['survey_responses']
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
return {'success': False, 'msg': "Could not find needed tag {0}".format(tag)}
try:
submission_id = int(survey_responses['submission_id'])
grader_id = int(survey_responses['grader_id'])
feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
score = int(survey_responses['score'])
except (ValueError, TypeError):
error_message = ("Could not parse submission id, grader id, "
"or feedback from message_post ajax call. Here is the message data: {0}".format(
survey_responses))
log.exception(error_message)
return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."}
qinterface = system.xqueue['interface']
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
anonymous_student_id = system.anonymous_student_id
queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime +
anonymous_student_id +
str(len(self.history)))
xheader = xqueue_interface.make_xheader(
lms_callback_url=system.xqueue['callback_url'],
lms_key=queuekey,
queue_name=self.message_queue_name
)
student_info = {'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
contents = {
'feedback': feedback,
'submission_id': submission_id,
'grader_id': grader_id,
'score': score,
'student_info': json.dumps(student_info),
}
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
# Convert the error flag into a success value and a matching message
success = not error
self.state = self.DONE
msg = "Successfully submitted your feedback."
if not success:
msg = "There was an error submitting your feedback. Please contact course staff."
return {'success': success, 'msg': msg}
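# Illustrative sketch (standalone): the body message_post() queues on the
# message queue for a student's reaction to grader feedback. Values hypothetical.
import json
contents = json.dumps({
'feedback': 'The grader seems to have missed my second paragraph.',
'submission_id': 7,
'grader_id': 42,
'score': 0,
'student_info': json.dumps({'anonymous_student_id': 'deadbeef1234',
'submission_time': '20120918123456'}),
})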
def send_to_grader(self, submission, system):
"""
Send a given submission to the grader, via the xqueue
@param submission: The student submission to send to the grader
@param system: Modulesystem
@return: Boolean true (not useful right now)
"""
# Prepare xqueue request
#------------------------------------------------------------
qinterface = system.xqueue['interface']
qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
anonymous_student_id = system.anonymous_student_id
# Generate header
queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime +
anonymous_student_id +
str(len(self.history)))
xheader = xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'],
lms_key=queuekey,
queue_name=self.queue_name)
contents = self.payload.copy()
# Metadata related to the student submission revealed to the external grader
student_info = {'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
#Update contents with student response and student info
contents.update({
'student_info': json.dumps(student_info),
'student_response': submission,
'max_score': self.max_score(),
})
# Submit request. When successful, 'msg' is the prior length of the queue
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
# State associated with the queueing request
queuestate = {'key': queuekey,
'time': qtime, }
return True
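# Illustrative sketch (standalone): the header/body pair send_to_grader()
# submits to the xqueue. Field values are hypothetical.
import json
xheader = json.dumps({
'lms_callback_url': 'https://lms.example.com/xqueue/update_score',  # hypothetical
'lms_key': 'abc123hashkey',  # hypothetical
'queue_name': 'open-ended',
})
body = json.dumps({
'grader_payload': '{"grader_settings": "ml_grading.conf"}',
'student_info': json.dumps({'anonymous_student_id': 'deadbeef1234',
'submission_time': '20120918123456'}),
'student_response': 'Student essay text goes here.',
'max_score': 1,
})
# qinterface.send_to_queue(header=xheader, body=body) then returns (error, msg)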
def _update_score(self, score_msg, queuekey, system):
"""
Called by xqueue to update the score
@param score_msg: The message from xqueue
@param queuekey: The key sent by xqueue
@param system: Modulesystem
@return: Boolean True (not useful currently)
"""
new_score_msg = self._parse_score_msg(score_msg)
if not new_score_msg['valid']:
new_score_msg['feedback'] = 'Invalid grader reply. Please contact the course staff.'
self.record_latest_score(new_score_msg['score'])
self.record_latest_post_assessment(score_msg)
self.state = self.POST_ASSESSMENT
return True
def get_answers(self):
"""
Gets and shows the answer for this problem.
@return: Answer html
"""
anshtml = '<span class="openended-answer"><pre><code>{0}</code></pre></span>'.format(self.answer)
return {self.answer_id: anshtml}
def get_initial_display(self):
"""
Gets and shows the initial display for the input box.
@return: Initial display html
"""
return {self.answer_id: self.initial_display}
def _convert_longform_feedback_to_html(self, response_items):
"""
Take in a dictionary, and return html strings for display to student.
Input:
response_items: Dictionary with keys success, feedback.
if success is True, feedback should be a dictionary, with keys for
types of feedback, and the corresponding feedback values.
if success is False, feedback is actually an error string.
NOTE: this will need to change when we integrate peer grading, because
that will have more complex feedback.
Output:
String -- html that can be displayed to the student.
"""
# We want to display available feedback in a particular order.
# This dictionary specifies which goes first--lower first.
priorities = {
# These go at the start of the feedback
'spelling': 0,
'grammar': 1,
# 'markup_text' needs to come after all the other feedback
'markup_text': 3,
}
default_priority = 2
def get_priority(elt):
"""
Args:
elt: a tuple of feedback-type, feedback
Returns:
the priority for this feedback type
"""
return priorities.get(elt[0], default_priority)
def encode_values(feedback_type, value):
feedback_type = str(feedback_type).encode('ascii', 'ignore')
if not isinstance(value, basestring):
value = str(value)
value = value.encode('ascii', 'ignore')
return feedback_type, value
def format_feedback(feedback_type, value):
feedback_type, value = encode_values(feedback_type, value)
feedback = """
<div class="{feedback_type}">
{value}
</div>
""".format(feedback_type=feedback_type, value=value)
return feedback
def format_feedback_hidden(feedback_type, value):
feedback_type, value = encode_values(feedback_type, value)
feedback = """
<input class="{feedback_type}" type="hidden" value="{value}" />
""".format(feedback_type=feedback_type, value=value)
return feedback
# TODO (vshnayder): design and document the details of this format so
# that we can do proper escaping here (e.g. are the graders allowed to
# include HTML?)
for tag in ['success', 'feedback', 'submission_id', 'grader_id']:
if tag not in response_items:
return format_feedback('errors', 'Error getting feedback')
feedback_items = response_items['feedback']
try:
feedback = json.loads(feedback_items)
except (TypeError, ValueError):
log.exception("feedback_items have invalid json %r", feedback_items)
return format_feedback('errors', 'Could not parse feedback')
if response_items['success']:
if len(feedback) == 0:
return format_feedback('errors', 'No feedback available')
feedback_lst = sorted(feedback.items(), key=get_priority)
feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
else:
feedback_list_part1 = format_feedback('errors', response_items['feedback'])
feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value)
for feedback_type, value in response_items.items()
if feedback_type in ['submission_id', 'grader_id']]))
return u"\n".join([feedback_list_part1, feedback_list_part2])
def _format_feedback(self, response_items):
"""
Input:
response_items: dictionary of grader response fields; must contain the keys used below.
Output:
Return error message or feedback template
"""
log.debug(response_items)
rubric_feedback = ""
feedback = self._convert_longform_feedback_to_html(response_items)
if response_items.get('rubric_scores_complete'):
rubric_feedback = CombinedOpenEndedRubric.render_rubric(response_items['rubric_xml'])
if not response_items['success']:
# 'system' is not in scope here; render via the module-level template helper
return render_to_string("open_ended_error.html", {'errors': feedback})
feedback_template = render_to_string("open_ended_feedback.html", {
'grader_type': response_items['grader_type'],
'score': "{0} / {1}".format(response_items['score'], self.max_score()),
'feedback': feedback,
'rubric_feedback' : rubric_feedback
})
return feedback_template
def _parse_score_msg(self, score_msg, join_feedback=True):
"""
Grader reply is a JSON-dump of the following dict
{ 'correct': True/False,
'score': Numeric value (floating point is okay) to assign to answer
'msg': grader_msg
'feedback' : feedback from grader
}
Returns a dict:
'valid': Flag indicating valid score_msg format (Boolean)
'score': Points to be assigned (numeric, can be float)
'feedback': Grader feedback, rendered as html (string)
"""
fail = {'valid': False, 'score': 0, 'feedback': ''}
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
error_message = ("External grader message should be a JSON-serialized dict."
" Received score_msg = {0}".format(score_msg))
log.error(error_message)
fail['feedback'] = error_message
return fail
if not isinstance(score_result, dict):
error_message = ("External grader message should be a JSON-serialized dict."
" Received score_result = {0}".format(score_result))
log.error(error_message)
fail['feedback'] = error_message
return fail
for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
if tag not in score_result:
error_message = ("External grader message is missing required tag: {0}"
.format(tag))
log.error(error_message)
fail['feedback'] = error_message
return fail
#This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items = []
for i in xrange(0, len(score_result['score'])):
new_score_result = {
'score': score_result['score'][i],
'feedback': score_result['feedback'][i],
'grader_type': score_result['grader_type'],
'success': score_result['success'],
'grader_id': score_result['grader_id'][i],
'submission_id': score_result['submission_id'],
'rubric_scores_complete' : score_result['rubric_scores_complete'],
'rubric_xml' : score_result['rubric_xml'],
}
feedback_items.append(self._format_feedback(new_score_result))
if join_feedback:
feedback = "".join(feedback_items)
else:
feedback = feedback_items
score = int(median(score_result['score']))
else:
#This is for instructor and ML grading
feedback = self._format_feedback(score_result)
score = score_result['score']
self.submission_id = score_result['submission_id']
self.grader_id = score_result['grader_id']
return {'valid': True, 'score': score, 'feedback': feedback}
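# Illustrative sketch (standalone): a minimal well-formed grader reply for the
# instructor/ML path above, and the shape _parse_score_msg returns for it.
# All values are hypothetical.
import json
score_msg = json.dumps({
'score': 1,
'feedback': json.dumps({'spelling': 'Spelling is fine.'}),
'grader_type': 'ML',
'success': True,
'grader_id': 42,
'submission_id': 7,
'rubric_scores_complete': False,
'rubric_xml': '',
})
# module._parse_score_msg(score_msg) would return something like:
# {'valid': True, 'score': 1, 'feedback': '<div class="spelling">...</div>'}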
def latest_post_assessment(self, short_feedback=False, join_feedback=True):
"""
Gets the latest feedback, parses, and returns
@param short_feedback: if True, return condensed feedback instead of the full rendered feedback
@param join_feedback: if True, join list-valued feedback into a single string
@return: Returns formatted feedback
"""
if not self.history:
return ""
feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), join_feedback=join_feedback)
if not short_feedback:
return feedback_dict['feedback'] if feedback_dict['valid'] else ''
if feedback_dict['valid']:
short_feedback = self._convert_longform_feedback_to_html(
json.loads(self.history[-1].get('post_assessment', "")))
return short_feedback if feedback_dict['valid'] else ''
def format_feedback_with_evaluation(self, feedback):
"""
Renders a given html feedback into an evaluation template
@param feedback: HTML feedback
@return: Rendered html
"""
context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50}
html = render_to_string('open_ended_evaluation.html', context)
return html
def handle_ajax(self, dispatch, get, system):
'''
This is called by courseware.module_render, to handle an AJAX call.
"get" is request.POST.
Returns a json dictionary:
{ 'progress_changed' : True/False,
'progress' : 'none'/'in_progress'/'done',
<other request-specific values here > }
'''
handlers = {
'save_answer': self.save_answer,
'score_update': self.update_score,
'save_post_assessment': self.message_post,
'skip_post_assessment': self.skip_post_assessment,
'check_for_score': self.check_for_score,
}
if dispatch not in handlers:
return 'Error'
before = self.get_progress()
d = handlers[dispatch](get, system)
after = self.get_progress()
d.update({
'progress_changed': after != before,
'progress_status': Progress.to_js_status_str(after),
})
return json.dumps(d, cls=ComplexEncoder)
def check_for_score(self, get, system):
"""
Checks to see if a score has been received yet.
@param get: AJAX get dictionary
@param system: Modulesystem (needed to align with other ajax functions)
@return: Returns the current state
"""
state = self.state
return {'state': state}
def save_answer(self, get, system):
"""
Saves a student answer
@param get: AJAX get dictionary
@param system: modulesystem
@return: Success indicator
"""
if self.attempts > self.max_attempts:
# If too many attempts, prevent student from saving answer and
# seeing rubric. In normal use, students shouldn't see this because
# they won't see the reset button once they're out of attempts.
return {
'success': False,
'error': 'Too many attempts.'
}
if self.state != self.INITIAL:
return self.out_of_sync_error(get)
# add new history element with answer and empty score and hint.
self.new_history_entry(get['student_answer'])
self.send_to_grader(get['student_answer'], system)
self.change_state(self.ASSESSING)
return {'success': True, }
def update_score(self, get, system):
"""
Updates the current score via ajax. Called by xqueue.
Input: AJAX get dictionary, modulesystem
Output: None
"""
queuekey = get['queuekey']
score_msg = get['xqueue_body']
#TODO: Remove need for cmap
self._update_score(score_msg, queuekey, system)
return dict() # No AJAX return is needed
def get_html(self, system):
"""
Gets the HTML for this problem and renders it
Input: Modulesystem object
Output: Rendered HTML
"""
#set context variables and render template
if self.state != self.INITIAL:
latest = self.latest_answer()
previous_answer = latest if latest is not None else self.initial_display
post_assessment = self.latest_post_assessment()
score = self.latest_score()
correct = 'correct' if self.is_submission_correct(score) else 'incorrect'
else:
post_assessment = ""
correct = ""
previous_answer = self.initial_display
context = {
'prompt': self.prompt,
'previous_answer': previous_answer,
'state': self.state,
'allow_reset': self._allow_reset(),
'rows': 30,
'cols': 80,
'id': 'open_ended',
'msg': post_assessment,
'child_type': 'openended',
'correct': correct,
}
log.debug(context)
html = system.render_template('open_ended.html', context)
return html
class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor):
"""
Module for adding open ended response questions to courses
"""
mako_template = "widgets/html-edit.html"
module_class = OpenEndedModule
filename_extension = "xml"
stores_state = True
has_score = True
template_dir_name = "openended"
js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]}
js_module_name = "HTMLEditingDescriptor"
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Pull out the open ended parameters into a dictionary.
Returns:
{
'oeparam': 'some-html'
}
"""
for child in ['openendedparam']:
if len(xml_object.xpath(child)) != 1:
raise ValueError("Open Ended definition must include exactly one '{0}' tag".format(child))
def parse(k):
"""Assumes that xml_object has child k"""
return xml_object.xpath(k)[0]
return {'oeparam': parse('openendedparam'), }
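# Illustrative sketch (standalone): pulling the openendedparam element out of a
# minimal problem definition with lxml, as definition_from_xml does.
from lxml import etree
xml_object = etree.fromstring(
"<openended><openendedparam><grader_payload>{}</grader_payload>"
"</openendedparam></openended>")
definition = {'oeparam': xml_object.xpath('openendedparam')[0]}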
def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.'''
elt = etree.Element('openended')
def add_child(k):
child_str = '<{tag}>{body}</{tag}>'.format(tag=k, body=self.definition[k])
child_node = etree.fromstring(child_str)
elt.append(child_node)
for child in ['openendedparam']:
add_child(child)
return elt
import copy
from fs.errors import ResourceNotFoundError
import itertools
import json
import logging
from lxml import etree
from lxml.html import rewrite_links
from path import path
import os
import sys
import hashlib
import capa.xqueue_interface as xqueue_interface
from pkg_resources import resource_string
from .capa_module import only_one, ComplexEncoder
from .editing_module import EditingDescriptor
from .html_checker import check_html
from progress import Progress
from .stringify import stringify_children
from .xml_module import XmlDescriptor
from xmodule.modulestore import Location
from capa.util import *
from datetime import datetime
log = logging.getLogger("mitx.courseware")
# Set the default number of max attempts. Should be 1 for production
# Set higher for debugging/testing
# attempts specified in xml definition overrides this.
MAX_ATTEMPTS = 1
# Set maximum available number of points.
# Overridden by max_score specified in xml.
MAX_SCORE = 1
class OpenEndedChild(object):
"""
States:
initial (prompt, textbox shown)
|
assessing (read-only textbox, rubric + assessment input shown for self assessment, response queued for open ended)
|
post_assessment (read-only textbox, read-only rubric and assessment, hint input box shown)
|
done (submitted msg, green checkmark, everything else read-only. If attempts < max, shows
a reset button that goes back to initial state. Saves previous
submissions too.)
"""
DEFAULT_QUEUE = 'open-ended'
DEFAULT_MESSAGE_QUEUE = 'open-ended-message'
max_inputfields = 1
STATE_VERSION = 1
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
POST_ASSESSMENT = 'post_assessment'
DONE = 'done'
# This is used to tell students where they are in the module
HUMAN_NAMES = {
'initial': 'Started',
'assessing': 'Being scored',
'post_assessment': 'Scoring finished',
'done': 'Problem complete',
}
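# Illustrative sketch (not used by the module): the state flow above expressed
# as a transition map. Event names here are informal, not real dispatch names.
TRANSITIONS = {
'initial': {'save_answer': 'assessing'},
'assessing': {'score_received': 'post_assessment'},  # a max-score self-assessment goes straight to 'done'
'post_assessment': {'save_hint_or_skip': 'done'},
'done': {'reset': 'initial'},  # reset only allowed while attempts < max_attempts
}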
def __init__(self, system, location, definition, descriptor, static_data,
instance_state=None, shared_state=None, **kwargs):
# Load instance state
if instance_state is not None:
instance_state = json.loads(instance_state)
else:
instance_state = {}
# History is a list of dicts with keys 'answer', 'score', and 'post_assessment',
# where post_assessment may be missing for any element, and score and
# post_assessment can be missing for the last (current) element.
# Scores are on scale from 0 to max_score
self.history = instance_state.get('history', [])
self.state = instance_state.get('state', self.INITIAL)
self.created = instance_state.get('created', False)
self.attempts = instance_state.get('attempts', 0)
self.max_attempts = static_data['max_attempts']
self.prompt = static_data['prompt']
self.rubric = static_data['rubric']
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
self._max_score = static_data['max_score']
self.setup_response(system, location, definition, descriptor)
def setup_response(self, system, location, definition, descriptor):
"""
Needs to be implemented by the inheritors of this module. Sets up additional fields used by the child modules.
@param system: Modulesystem
@param location: Module location
@param definition: XML definition
@param descriptor: Descriptor of the module
@return: None
"""
pass
def latest_answer(self):
"""None if not available"""
if not self.history:
return ""
return self.history[-1].get('answer', "")
def latest_score(self):
"""None if not available"""
if not self.history:
return None
return self.history[-1].get('score')
def latest_post_assessment(self):
"""None if not available"""
if not self.history:
return ""
return self.history[-1].get('post_assessment', "")
def new_history_entry(self, answer):
"""
Adds a new entry to the history list
@param answer: The student supplied answer
@return: None
"""
self.history.append({'answer': answer})
def record_latest_score(self, score):
"""Assumes that state is right, so we're adding a score to the latest
history element"""
self.history[-1]['score'] = score
def record_latest_post_assessment(self, post_assessment):
"""Assumes that state is right, so we're adding a score to the latest
history element"""
self.history[-1]['post_assessment'] = post_assessment
def change_state(self, new_state):
"""
A centralized place for state changes--allows for hooks. If the
current state matches the old state, don't run any hooks.
"""
if self.state == new_state:
return
self.state = new_state
if self.state == self.DONE:
self.attempts += 1
def get_instance_state(self):
"""
Get the current score and state
"""
state = {
'version': self.STATE_VERSION,
'history': self.history,
'state': self.state,
'max_score': self._max_score,
'attempts': self.attempts,
'created': False,
}
return json.dumps(state)
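# Illustrative sketch (standalone): what get_instance_state() serializes for a
# student who has submitted once and been scored. Values are hypothetical.
import json
sample_state = json.dumps({
'version': 1,
'history': [{'answer': 'My essay text', 'score': 1,
'post_assessment': 'A hint the student wrote'}],
'state': 'done',
'max_score': 1,
'attempts': 1,
'created': False,
})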
def _allow_reset(self):
"""Can the module be reset?"""
return (self.state == self.DONE and self.attempts < self.max_attempts)
def max_score(self):
"""
Return max_score
"""
return self._max_score
def get_score(self):
"""
Returns the last score in the list
"""
score = self.latest_score()
return {'score': score if score is not None else 0,
'total': self._max_score}
def reset(self, system):
"""
If resetting is allowed, reset the state.
Returns {'success': bool, 'error': msg}
(error only present if not success)
"""
self.change_state(self.INITIAL)
return {'success': True}
def get_progress(self):
'''
For now, just return last score / max_score
'''
if self._max_score > 0:
try:
return Progress(self.get_score()['score'], self._max_score)
except Exception as err:
log.exception("Got bad progress")
return None
return None
def out_of_sync_error(self, get, msg=''):
"""
return dict out-of-sync error message, and also log.
"""
log.warning("Assessment module state out sync. state: %r, get: %r. %s",
self.state, get, msg)
return {'success': False,
'error': 'The problem state got out-of-sync'}
def get_html(self):
"""
Needs to be implemented by inheritors. Renders the HTML that students see.
@return: Rendered HTML string
"""
pass
def handle_ajax(self):
"""
Needs to be implemented by child modules. Handles AJAX events.
@return: AJAX response dict
"""
pass
def is_submission_correct(self, score):
"""
Checks to see if a given score makes the answer correct. Very naive right now (a score of at least 66% of max score is correct).
@param score: Numeric score.
@return: Boolean correct.
"""
correct = False
if isinstance(score, (int, long, float)):  # a complex score would break int() below
score_ratio = int(score) / float(self.max_score())
correct = (score_ratio >= 0.66)
return correct
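# Illustrative worked example of the two-thirds threshold above (standalone):
max_score = 3
for score in (1, 2, 3):
ratio = int(score) / float(max_score)
print score, ratio >= 0.66  # 1 -> False, 2 -> True, 3 -> True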
def is_last_response_correct(self):
"""
Checks to see if the last response in the module is correct.
@return: 'correct' if correct, otherwise 'incorrect'
"""
score = self.get_score()['score']
correctness = 'correct' if self.is_submission_correct(score) else 'incorrect'
return correctness
"""
A Self Assessment module that allows students to write open-ended responses,
submit, then see a rubric and rate themselves. Persists student supplied
hints, answers, and assessment judgment (currently only correct/incorrect).
Parses xml definition file--see below for exact format.
"""
import copy
from fs.errors import ResourceNotFoundError
import itertools
......@@ -26,205 +19,50 @@ from .stringify import stringify_children
from .x_module import XModule
from .xml_module import XmlDescriptor
from xmodule.modulestore import Location
import openendedchild
log = logging.getLogger("mitx.courseware")
# Set the default number of max attempts. Should be 1 for production
# Set higher for debugging/testing
# attempts specified in xml definition overrides this.
MAX_ATTEMPTS = 1
from combined_open_ended_rubric import CombinedOpenEndedRubric
# Set maximum available number of points.
# Overridden by max_score specified in xml.
MAX_SCORE = 1
log = logging.getLogger("mitx.courseware")
class SelfAssessmentModule(XModule):
"""
States:
initial (prompt, textbox shown)
|
assessing (read-only textbox, rubric + assessment input shown)
|
request_hint (read-only textbox, read-only rubric and assessment, hint input box shown)
|
done (submitted msg, green checkmark, everything else read-only. If attempts < max, shows
a reset button that goes back to initial state. Saves previous
submissions too.)
class SelfAssessmentModule(openendedchild.OpenEndedChild):
"""
A Self Assessment module that allows students to write open-ended responses,
submit, then see a rubric and rate themselves. Persists student supplied
hints, answers, and assessment judgment (currently only correct/incorrect).
Parses xml definition file--see below for exact format.
STATE_VERSION = 1
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
REQUEST_HINT = 'request_hint'
DONE = 'done'
js = {'coffee': [resource_string(__name__, 'js/src/selfassessment/display.coffee')]}
js_module_name = "SelfAssessment"
def __init__(self, system, location, definition, descriptor,
instance_state=None, shared_state=None, **kwargs):
XModule.__init__(self, system, location, definition, descriptor,
instance_state, shared_state, **kwargs)
"""
Definition file should have 4 blocks -- prompt, rubric, submitmessage, hintprompt,
and two optional attributes:
attempts, which should be an integer that defaults to 1.
If it's > 1, the student will be able to re-submit after they see
the rubric.
max_score, which should be an integer that defaults to 1.
It defines the maximum number of points a student can get. Assumed to be integer scale
from 0 to max_score, with an interval of 1.
Note: all the submissions are stored.
Sample file:
<selfassessment attempts="1" max_score="1">
<prompt>
Insert prompt text here. (arbitrary html)
</prompt>
<rubric>
Insert grading rubric here. (arbitrary html)
</rubric>
Sample XML format:
<selfassessment>
<hintprompt>
Please enter a hint below: (arbitrary html)
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
Thanks for submitting! (arbitrary html)
Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
"""
# Load instance state
if instance_state is not None:
instance_state = json.loads(instance_state)
else:
instance_state = {}
instance_state = self.convert_state_to_current_format(instance_state)
# History is a list of dicts with keys 'answer', 'score', and 'hint', where
# hint may be missing for any element, and score and hint can be missing for
# the last (current) element.
# Scores are on scale from 0 to max_score
self.history = instance_state.get('history', [])
self.state = instance_state.get('state', 'initial')
self.attempts = instance_state.get('attempts', 0)
self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS))
# Used for progress / grading. Currently get credit just for
# completion (doesn't matter if you self-assessed correct/incorrect).
self._max_score = int(self.metadata.get('max_score', MAX_SCORE))
self.rubric = definition['rubric']
self.prompt = definition['prompt']
self.submit_message = definition['submitmessage']
self.hint_prompt = definition['hintprompt']
def latest_answer(self):
"""None if not available"""
if not self.history:
return None
return self.history[-1].get('answer')
def latest_score(self):
"""None if not available"""
if not self.history:
return None
return self.history[-1].get('score')
def latest_hint(self):
"""None if not available"""
if not self.history:
return None
return self.history[-1].get('hint')
def new_history_entry(self, answer):
self.history.append({'answer': answer})
def record_latest_score(self, score):
"""Assumes that state is right, so we're adding a score to the latest
history element"""
self.history[-1]['score'] = score
def record_latest_hint(self, hint):
"""Assumes that state is right, so we're adding a score to the latest
history element"""
self.history[-1]['hint'] = hint
def change_state(self, new_state):
def setup_response(self, system, location, definition, descriptor):
"""
A centralized place for state changes--allows for hooks. If the
current state matches the old state, don't run any hooks.
Sets up the module
@param system: Modulesystem
@param location: location, to let the module know where it is.
@param definition: XML definition of the module.
@param descriptor: SelfAssessmentDescriptor
@return: None
"""
if self.state == new_state:
return
self.state = new_state
if self.state == self.DONE:
self.attempts += 1
self.submit_message = definition['submitmessage']
self.hint_prompt = definition['hintprompt']
self.prompt = stringify_children(self.prompt)
self.rubric = stringify_children(self.rubric)
@staticmethod
def convert_state_to_current_format(old_state):
def get_html(self, system):
"""
This module used to use a problematic state representation. This method
converts that into the new format.
Args:
old_state: dict of state, as passed in. May be old.
Returns:
new_state: dict of new state
Gets context and renders HTML that represents the module
@param system: Modulesystem
@return: Rendered HTML
"""
if old_state.get('version', 0) == SelfAssessmentModule.STATE_VERSION:
# already current
return old_state
# for now, there's only one older format.
new_state = {'version': SelfAssessmentModule.STATE_VERSION}
def copy_if_present(key):
if key in old_state:
new_state[key] = old_state[key]
for to_copy in ['attempts', 'state']:
copy_if_present(to_copy)
# The answers, scores, and hints need to be kept together to avoid them
# getting out of sync.
# NOTE: Since there's only one problem with a few hundred submissions
# in production so far, not trying to be smart about matching up hints
# and submissions in cases where they got out of sync.
student_answers = old_state.get('student_answers', [])
scores = old_state.get('scores', [])
hints = old_state.get('hints', [])
new_state['history'] = [
{'answer': answer,
'score': score,
'hint': hint}
for answer, score, hint in itertools.izip_longest(
student_answers, scores, hints)]
return new_state
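# Illustrative sketch (standalone): converting the legacy parallel-list state
# into the history-of-dicts format. Values are hypothetical.
old = {'student_answers': ['essay one'], 'scores': [1], 'hints': []}
# convert_state_to_current_format(old) yields:
# {'version': 1,
#  'history': [{'answer': 'essay one', 'score': 1, 'hint': None}]}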
def _allow_reset(self):
"""Can the module be reset?"""
return self.state == self.DONE and self.attempts < self.max_attempts
def get_html(self):
#set context variables and render template
if self.state != self.INITIAL:
latest = self.latest_answer()
......@@ -235,46 +73,20 @@ class SelfAssessmentModule(XModule):
context = {
'prompt': self.prompt,
'previous_answer': previous_answer,
'ajax_url': self.system.ajax_url,
'initial_rubric': self.get_rubric_html(),
'initial_hint': self.get_hint_html(),
'ajax_url': system.ajax_url,
'initial_rubric': self.get_rubric_html(system),
'initial_hint': self.get_hint_html(system),
'initial_message': self.get_message_html(),
'state': self.state,
'allow_reset': self._allow_reset(),
'child_type': 'selfassessment',
}
html = self.system.render_template('self_assessment_prompt.html', context)
# cdodge: perform link substitutions for any references to course static content (e.g. images)
return rewrite_links(html, self.rewrite_content_links)
html = system.render_template('self_assessment_prompt.html', context)
return html
def max_score(self):
"""
Return max_score
"""
return self._max_score
def get_score(self):
"""
Returns the last score in the list
"""
score = self.latest_score()
return {'score': score if score is not None else 0,
'total': self._max_score}
def get_progress(self):
'''
For now, just return last score / max_score
'''
if self._max_score > 0:
try:
return Progress(self.get_score()['score'], self._max_score)
except Exception as err:
log.exception("Got bad progress")
return None
return None
def handle_ajax(self, dispatch, get):
def handle_ajax(self, dispatch, get, system):
"""
This is called by courseware.module_render, to handle an AJAX call.
"get" is request.POST.
......@@ -288,15 +100,14 @@ class SelfAssessmentModule(XModule):
handlers = {
'save_answer': self.save_answer,
'save_assessment': self.save_assessment,
'save_hint': self.save_hint,
'reset': self.reset,
'save_post_assessment': self.save_hint,
}
if dispatch not in handlers:
return 'Error'
before = self.get_progress()
d = handlers[dispatch](get)
d = handlers[dispatch](get, system)
after = self.get_progress()
d.update({
'progress_changed': after != before,
......@@ -304,37 +115,30 @@ class SelfAssessmentModule(XModule):
})
return json.dumps(d, cls=ComplexEncoder)
def out_of_sync_error(self, get, msg=''):
"""
return dict out-of-sync error message, and also log.
"""
log.warning("Assessment module state out sync. state: %r, get: %r. %s",
self.state, get, msg)
return {'success': False,
'error': 'The problem state got out-of-sync'}
def get_rubric_html(self):
def get_rubric_html(self, system):
"""
Return the appropriate version of the rubric, based on the state.
"""
if self.state == self.INITIAL:
return ''
rubric_html = CombinedOpenEndedRubric.render_rubric(self.rubric)
# we'll render it
context = {'rubric': self.rubric,
'max_score' : self._max_score,
context = {'rubric': rubric_html,
'max_score': self._max_score,
}
if self.state == self.ASSESSING:
context['read_only'] = False
elif self.state in (self.REQUEST_HINT, self.DONE):
elif self.state in (self.POST_ASSESSMENT, self.DONE):
context['read_only'] = True
else:
raise ValueError("Illegal state '%r'" % self.state)
return self.system.render_template('self_assessment_rubric.html', context)
return system.render_template('self_assessment_rubric.html', context)
def get_hint_html(self):
def get_hint_html(self, system):
"""
Return the appropriate version of the hint view, based on state.
"""
......@@ -343,7 +147,7 @@ class SelfAssessmentModule(XModule):
if self.state == self.DONE:
# display the previous hint
latest = self.latest_hint()
latest = self.latest_post_assessment()
hint = latest if latest is not None else ''
else:
hint = ''
......@@ -351,14 +155,14 @@ class SelfAssessmentModule(XModule):
context = {'hint_prompt': self.hint_prompt,
'hint': hint}
if self.state == self.REQUEST_HINT:
if self.state == self.POST_ASSESSMENT:
context['read_only'] = False
elif self.state == self.DONE:
context['read_only'] = True
else:
raise ValueError("Illegal state '%r'" % self.state)
return self.system.render_template('self_assessment_hint.html', context)
return system.render_template('self_assessment_hint.html', context)
def get_message_html(self):
"""
......@@ -370,7 +174,7 @@ class SelfAssessmentModule(XModule):
return """<div class="save_message">{0}</div>""".format(self.submit_message)
def save_answer(self, get):
def save_answer(self, get, system):
"""
After the answer is submitted, show the rubric.
......@@ -401,10 +205,10 @@ class SelfAssessmentModule(XModule):
return {
'success': True,
'rubric_html': self.get_rubric_html()
'rubric_html': self.get_rubric_html(system)
}
def save_assessment(self, get):
def save_assessment(self, get, system):
"""
Save the assessment. If the student said they're right, don't ask for a
hint, and go straight to the done state. Otherwise, do ask for a hint.
......@@ -429,21 +233,20 @@ class SelfAssessmentModule(XModule):
self.record_latest_score(score)
d = {'success': True,}
d = {'success': True, }
if score == self.max_score():
self.change_state(self.DONE)
d['message_html'] = self.get_message_html()
d['allow_reset'] = self._allow_reset()
else:
self.change_state(self.REQUEST_HINT)
d['hint_html'] = self.get_hint_html()
self.change_state(self.POST_ASSESSMENT)
d['hint_html'] = self.get_hint_html(system)
d['state'] = self.state
return d
def save_hint(self, get):
def save_hint(self, get, system):
'''
Save the hint.
Returns a dict { 'success': bool,
......@@ -453,63 +256,19 @@ class SelfAssessmentModule(XModule):
with the error key only present if success is False and message_html
only if True.
'''
if self.state != self.REQUEST_HINT:
if self.state != self.POST_ASSESSMENT:
# Note: because we only ask for hints on wrong answers, we may not have
# the same number of hints as answers.
return self.out_of_sync_error(get)
self.record_latest_hint(get['hint'])
self.record_latest_post_assessment(get['hint'])
self.change_state(self.DONE)
# To the tracking logs!
event_info = {
'selfassessment_id': self.location.url(),
'state': {
'version': self.STATE_VERSION,
'history': self.history,
}
}
self.system.track_function('save_hint', event_info)
return {'success': True,
'message_html': self.get_message_html(),
'allow_reset': self._allow_reset()}
def reset(self, get):
"""
If resetting is allowed, reset the state.
Returns {'success': bool, 'error': msg}
(error only present if not success)
"""
if self.state != self.DONE:
return self.out_of_sync_error(get)
if self.attempts > self.max_attempts:
return {
'success': False,
'error': 'Too many attempts.'
}
self.change_state(self.INITIAL)
return {'success': True}
def get_instance_state(self):
"""
Get the current score and state
"""
state = {
'version': self.STATE_VERSION,
'history': self.history,
'state': self.state,
'max_score': self._max_score,
'attempts': self.attempts,
}
return json.dumps(state)
class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
"""
Module for adding self assessment questions to courses
......@@ -532,13 +291,11 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
Returns:
{
'rubric': 'some-html',
'prompt': 'some-html',
'submitmessage': 'some-html'
'hintprompt': 'some-html'
}
"""
expected_children = ['rubric', 'prompt', 'submitmessage', 'hintprompt']
expected_children = ['submitmessage', 'hintprompt']
for child in expected_children:
if len(xml_object.xpath(child)) != 1:
raise ValueError("Self assessment definition must include exactly one '{0}' tag".format(child))
......@@ -547,13 +304,10 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
"""Assumes that xml_object has child k"""
return stringify_children(xml_object.xpath(k)[0])
return {'rubric': parse('rubric'),
'prompt': parse('prompt'),
'submitmessage': parse('submitmessage'),
return {'submitmessage': parse('submitmessage'),
'hintprompt': parse('hintprompt'),
}
def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.'''
elt = etree.Element('selfassessment')
......@@ -563,7 +317,7 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor):
child_node = etree.fromstring(child_str)
elt.append(child_node)
for child in ['rubric', 'prompt', 'submitmessage', 'hintprompt']:
for child in ['submitmessage', 'hintprompt']:
add_child(child)
return elt
......@@ -62,6 +62,7 @@ class GradingService(object):
"""
Make a get request to the grading controller
"""
log.debug(params)
op = lambda: self.session.get(url,
allow_redirects=allow_redirects,
params=params)
......
......@@ -81,7 +81,7 @@ class PeerGradingService(GradingService):
self.get_problem_list_url = self.url + '/get_problem_list/'
def get_next_submission(self, problem_location, grader_id):
response = self.get(self.get_next_submission_url, False,
response = self.get(self.get_next_submission_url,
{'location': problem_location, 'grader_id': grader_id})
return response
......
<section id="combined-open-ended" class="combined-open-ended" data-ajax-url="${ajax_url}" data-allow_reset="${allow_reset}" data-state="${state}" data-task-count="${task_count}" data-task-number="${task_number}">
<div class="status-container">
<h4>Status</h4><br/>
${status | n}
</div>
<div class="item-container">
<h4>Problem</h4><br/>
% for item in items:
<div class="item">${item['content'] | n}</div>
% endfor
<input type="button" value="Reset" class="reset-button" name="reset"/>
<input type="button" value="Next Step" class="next-step-button" name="reset"/>
</div>
<div class="result-container">
</div>
</section>
<div class="result-container">
<h4>Results from Step ${task_number}</h4><br/>
${results | n}
</div>
\ No newline at end of file
<section id="combined-open-ended-status" class="combined-open-ended-status">
% for i in xrange(len(status_list)):
<% status = status_list[i] %>
% if i == len(status_list) - 1:
<div class="statusitem-current" data-status-number="${i}">
% else:
<div class="statusitem" data-status-number="${i}">
% endif
Step ${status['task_number']} (${status['human_state']}) : ${status['score']} / ${status['max_score']}
% if status['state'] == 'initial':
<span class="unanswered" id="status_${i}"></span>
% elif status['state'] in ['done', 'post_assessment'] and status['correct'] == 'correct':
<span class="correct" id="status_${i}"></span>
% elif status['state'] in ['done', 'post_assessment'] and status['correct'] == 'incorrect':
<span class="incorrect" id="status_${i}"></span>
% elif status['state'] == 'assessing':
<span class="grading" id="status_${i}"></span>
% endif
% if status['type'] == "openended" and status['state'] in ['done', 'post_assessment']:
<div class="show-results">
<a href="#" class="show-results-button">Show results from step ${status['task_number']}</a>
</div>
% endif
</div>
% endfor
</section>
\ No newline at end of file
<section id="openended_${id}" class="open-ended-child" data-state="${state}" data-child-type="${child_type}">
<div class="error"></div>
<div class="prompt">
${prompt|n}
</div>
<textarea rows="${rows}" cols="${cols}" name="answer" class="answer short-form-response" id="input_${id}">${previous_answer|h}</textarea>
<div class="message-wrapper"></div>
<div class="grader-status">
% if state == 'initial':
<span class="unanswered" style="display:inline-block;" id="status_${id}">Unanswered</span>
% elif state in ['done', 'post_assessment'] and correct == 'correct':
<span class="correct" id="status_${id}">Correct</span>
% elif state in ['done', 'post_assessment'] and correct == 'incorrect':
<span class="incorrect" id="status_${id}">Incorrect</span>
% elif state == 'assessing':
<span class="grading" id="status_${id}">Submitted for grading</span>
% endif
% if hidden:
<div style="display:none;" name="${hidden}" inputid="input_${id}" />
% endif
</div>
<input type="button" value="Submit" class="submit-button" name="show"/>
<input name="skip" class="skip-button" type="button" value="Skip Post-Assessment"/>
<div class="open-ended-action"></div>
<span id="answer_${id}"></span>
</section>
<section id="openended_${id}" class="openended">
<textarea rows="${rows}" cols="${cols}" name="input_${id}" class="short-form-response" id="input_${id}"
% if hidden:
style="display:none;"
% endif
>${value|h}</textarea>
<div class="grader-status">
% if status == 'unsubmitted':
<span class="unanswered" style="display:inline-block;" id="status_${id}">Unanswered</span>
% elif status == 'correct':
<span class="correct" id="status_${id}">Correct</span>
% elif status == 'incorrect':
<span class="incorrect" id="status_${id}">Incorrect</span>
% elif status == 'queued':
<span class="grading" id="status_${id}">Submitted for grading</span>
% endif
% if hidden:
<div style="display:none;" name="${hidden}" inputid="input_${id}" />
% endif
</div>
<span id="answer_${id}"></span>
% if status == 'queued':
<input name="reload" class="reload" type="button" value="Recheck for Feedback" onclick="document.location.reload(true);" />
% endif
<div class="external-grader-message">
<div class="external-grader-message">
${msg|n}
% if status in ['correct','incorrect']:
<div class="collapsible evaluation-response">
<header>
<a href="#">Respond to Feedback</a>
......@@ -46,11 +17,7 @@
</div>
<p>Additional comments:</p>
<textarea rows="${rows}" cols="${cols}" name="feedback_${id}" class="feedback-on-feedback" id="feedback_${id}"></textarea>
<div class="submit-message-container">
<input name="submit-message" class="submit-message" type="button" value="Submit your message"/>
</div>
<input type="button" value="Submit Feedback" class="submit-evaluation-button" name="reset"/>
</section>
</div>
% endif
</div>
</section>
</div>
\ No newline at end of file
......@@ -12,5 +12,6 @@
<div class="result-output">
${ feedback | n}
</div>
${rubric_feedback | n}
</div>
</section>
\ No newline at end of file
<table class="rubric">
% for i in range(len(rubric_categories)):
<% category = rubric_categories[i] %>
<tr>
<th>
${category['description']}
% if category['has_score']:
(Your score: ${category['score']})
% endif
</th>
% for j in range(len(category['options'])):
<% option = category['options'][j] %>
<td>
<div class="view-only">
${option['text']}
% if option.get('selected'):
<div class="selected-grade">[${option['points']} points]</div>
% else:
<div class="grade">[${option['points']} points]</div>
% endif
</div>
</td>
% endfor
</tr>
% endfor
</table>
\ No newline at end of file
......@@ -2,6 +2,6 @@
<div class="hint-prompt">
${hint_prompt}
</div>
<textarea name="hint" class="hint" cols="70" rows="5"
<textarea name="post_assessment" class="post_assessment" cols="70" rows="5"
${'readonly="true"' if read_only else ''}>${hint}</textarea>
</div>
<section id="self_assessment_${id}" class="self-assessment" data-ajax-url="${ajax_url}"
data-id="${id}" data-state="${state}" data-allow_reset="${allow_reset}">
<section id="self_assessment_${id}" class="open-ended-child" data-ajax-url="${ajax_url}"
data-id="${id}" data-state="${state}" data-allow_reset="${allow_reset}" data-child-type="${child_type}">
<div class="error"></div>
<div class="prompt">
${prompt}
......@@ -9,6 +9,8 @@
<textarea name="answer" class="answer short-form-response" cols="70" rows="20">${previous_answer|h}</textarea>
</div>
<div class="open-ended-action"></div>
<div class="rubric-wrapper">${initial_rubric}</div>
<div class="hint-wrapper">${initial_hint}</div>
......@@ -16,5 +18,4 @@
<div class="message-wrapper">${initial_message}</div>
<input type="button" value="Submit" class="submit-button" name="show"/>
<input type="button" value="Reset" class="reset-button" name="reset"/>
</section>
<div class="assessment">
<div class="rubric">
<h3>Self-assess your answer with this rubric:</h3>
${rubric}
${rubric | n }
</div>
% if not read_only:
......